Example #1
/* Estimate the error of the SPME Ewald sum. This estimate is based upon
 * a) a homogeneous distribution of the charges
 * b) a total charge of zero.
 */
static void estimate_PME_error(t_inputinfo *info, t_state *state,
                               gmx_mtop_t *mtop, FILE *fp_out, gmx_bool bVerbose, unsigned int seed,
                               t_commrec *cr)
{
    rvec *x     = NULL; /* The coordinates */
    real *q     = NULL; /* The charges     */
    real  edir  = 0.0;  /* real space error */
    real  erec  = 0.0;  /* reciprocal space error */
    real  derr  = 0.0;  /* difference of real and reciprocal space error */
    real  derr0 = 0.0;  /* previous difference of real and reciprocal space error */
    real  beta  = 0.0;  /* splitting parameter beta */
    real  beta0 = 0.0;  /* previous splitting parameter beta */
    int   ncharges;     /* The number of atoms with charges */
    int   nsamples;     /* The number of samples used for the calculation of the
                         * self-energy error term */
    int   i = 0;

    if (MASTER(cr))
    {
        fprintf(fp_out, "\n--- PME ERROR ESTIMATE ---\n");
    }

    /* Prepare an x and q array with only the charged atoms */
    ncharges = prepare_x_q(&q, &x, mtop, state->x, cr);
    if (MASTER(cr))
    {
        calc_q2all(mtop, &(info->q2all), &(info->q2allnr));
        info->ewald_rtol[0] = gmx_erfc(info->rcoulomb[0]*info->ewald_beta[0]);
        /* Write some info to log file */
        fprintf(fp_out, "Box volume              : %g nm^3\n", info->volume);
        fprintf(fp_out, "Number of charged atoms : %d (total atoms %d)\n", ncharges, info->natoms);
        fprintf(fp_out, "Coulomb radius          : %g nm\n", info->rcoulomb[0]);
        fprintf(fp_out, "Ewald_rtol              : %g\n", info->ewald_rtol[0]);
        fprintf(fp_out, "Ewald parameter beta    : %g\n", info->ewald_beta[0]);
        fprintf(fp_out, "Interpolation order     : %d\n", info->pme_order[0]);
        fprintf(fp_out, "Fourier grid (nx,ny,nz) : %d x %d x %d\n",
                info->nkx[0], info->nky[0], info->nkz[0]);
        fflush(fp_out);

    }

    if (PAR(cr))
    {
        bcast_info(info, cr);
    }


    /* Calculate direct space error */
    info->e_dir[0] = estimate_direct(info);

    /* Calculate reciprocal space error */
    info->e_rec[0] = estimate_reciprocal(info, x, q, ncharges, fp_out, bVerbose,
                                         seed, &nsamples, cr);

    if (PAR(cr))
    {
        bcast_info(info, cr);
    }

    if (MASTER(cr))
    {
        fprintf(fp_out, "Direct space error est. : %10.3e kJ/(mol*nm)\n", info->e_dir[0]);
        fprintf(fp_out, "Reciprocal sp. err. est.: %10.3e kJ/(mol*nm)\n", info->e_rec[0]);
        fprintf(fp_out, "Self-energy error term was estimated using %d samples\n", nsamples);
        fflush(fp_out);
        fprintf(stderr, "Direct space error est. : %10.3e kJ/(mol*nm)\n", info->e_dir[0]);
        fprintf(stderr, "Reciprocal sp. err. est.: %10.3e kJ/(mol*nm)\n", info->e_rec[0]);
    }

    i = 0;

    if (info->bTUNE)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "Starting tuning ...\n");
        }
        edir  = info->e_dir[0];
        erec  = info->e_rec[0];
        derr0 = edir-erec;
        beta0 = info->ewald_beta[0];
        if (derr0 > 0.0)
        {
            info->ewald_beta[0] += 0.1;
        }
        else
        {
            info->ewald_beta[0] -= 0.1;
        }
        info->e_dir[0] = estimate_direct(info);
        info->e_rec[0] = estimate_reciprocal(info, x, q, ncharges, fp_out, bVerbose,
                                             seed, &nsamples, cr);

        if (PAR(cr))
        {
            bcast_info(info, cr);
        }


        edir = info->e_dir[0];
        erec = info->e_rec[0];
        derr = edir-erec;
        while (fabs(derr/std::min(erec, edir)) > 1e-4)
        {

            beta                = info->ewald_beta[0];
            beta               -= derr*(info->ewald_beta[0]-beta0)/(derr-derr0);
            beta0               = info->ewald_beta[0];
            info->ewald_beta[0] = beta;
            derr0               = derr;

            info->e_dir[0] = estimate_direct(info);
            info->e_rec[0] = estimate_reciprocal(info, x, q, ncharges, fp_out, bVerbose,
                                                 seed, &nsamples, cr);

            if (PAR(cr))
            {
                bcast_info(info, cr);
            }

            edir = info->e_dir[0];
            erec = info->e_rec[0];
            derr = edir-erec;

            if (MASTER(cr))
            {
                i++;
                fprintf(stderr, "difference between real and rec. space error (step %d): %g\n", i, fabs(derr));
                fprintf(stderr, "old beta: %f\n", beta0);
                fprintf(stderr, "new beta: %f\n", beta);
            }
        }

        info->ewald_rtol[0] = gmx_erfc(info->rcoulomb[0]*info->ewald_beta[0]);

        if (MASTER(cr))
        {
            /* Write some info to log file */
            fflush(fp_out);
            fprintf(fp_out, "=========  After tuning ========\n");
            fprintf(fp_out, "Direct space error est. : %10.3e kJ/(mol*nm)\n", info->e_dir[0]);
            fprintf(fp_out, "Reciprocal sp. err. est.: %10.3e kJ/(mol*nm)\n", info->e_rec[0]);
            fprintf(stderr, "Direct space error est. : %10.3e kJ/(mol*nm)\n", info->e_dir[0]);
            fprintf(stderr, "Reciprocal sp. err. est.: %10.3e kJ/(mol*nm)\n", info->e_rec[0]);
            fprintf(fp_out, "Ewald_rtol              : %g\n", info->ewald_rtol[0]);
            fprintf(fp_out, "Ewald parameter beta    : %g\n", info->ewald_beta[0]);
            fflush(fp_out);

        }

    }

}
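
The tuning loop above is a secant iteration on derr(beta) = edir(beta) - erec(beta): beta is perturbed once to obtain a second sample point, and the update beta_new = beta - derr*(beta - beta0)/(derr - derr0) is repeated until the real and reciprocal space error estimates agree. The following is a minimal, self-contained sketch of the same update; derr_model() is a hypothetical stand-in for the estimate_direct()/estimate_reciprocal() pair, and the stopping criterion is absolute rather than the relative one used above.

/* Minimal sketch of the secant iteration used in estimate_PME_error(), with a
 * hypothetical error model standing in for estimate_direct()/estimate_reciprocal().
 * Compile with: cc secant_sketch.c -lm */
#include <math.h>
#include <stdio.h>

/* Hypothetical stand-in: the direct-space error falls and the reciprocal-space
 * error grows with beta, so derr(beta) = edir - erec has a single root. */
static double derr_model(double beta)
{
    double edir = exp(-beta);    /* decreasing in beta */
    double erec = 0.1*beta*beta; /* increasing in beta */
    return edir - erec;
}

int main(void)
{
    double beta0 = 2.0, beta1 = 2.1; /* two starting points for the secant */
    double d0    = derr_model(beta0);
    double d1    = derr_model(beta1);
    int    i     = 0;

    while (fabs(d1) > 1e-8 && i < 100)
    {
        /* Same update as in estimate_PME_error():
         * beta_new = beta - derr*(beta - beta0)/(derr - derr0) */
        double beta2 = beta1 - d1*(beta1 - beta0)/(d1 - d0);
        beta0 = beta1; d0 = d1;
        beta1 = beta2; d1 = derr_model(beta1);
        i++;
        printf("step %d: beta = %g, derr = %g\n", i, beta1, d1);
    }
    return 0;
}
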
Example #2
int gmx_pme_error(int argc, char *argv[])
{
    const char     *desc[] = {
        "[THISMODULE] estimates the error of the electrostatic forces",
        "if using the sPME algorithm. The flag [TT]-tune[tt] will determine",
        "the splitting parameter such that the error is equally",
        "distributed over the real and reciprocal space part.",
        "The part of the error that stems from self interaction of the particles "
        "is computationally demanding. However, a good a approximation is to",
        "just use a fraction of the particles for this term which can be",
        "indicated by the flag [TT]-self[tt].[PAR]",
    };

    real            fs        = 0.0; /* 0 indicates: not set by the user */
    real            user_beta = -1.0;
    real            fracself  = 1.0;
    t_inputinfo     info;
    t_state         state;     /* The state from the tpr input file */
    gmx_mtop_t      mtop;      /* The topology from the tpr input file */
    t_inputrec     *ir = NULL; /* The inputrec from the tpr file */
    FILE           *fp = NULL;
    t_commrec      *cr;
    unsigned long   PCA_Flags;
    gmx_bool        bTUNE    = FALSE;
    gmx_bool        bVerbose = FALSE;
    int             seed     = 0;


    static t_filenm fnm[] = {
        { efTPR, "-s",     NULL,    ffREAD },
        { efOUT, "-o",    "error",  ffWRITE },
        { efTPR, "-so",   "tuned",  ffOPTWR }
    };

    output_env_t    oenv = NULL;

    t_pargs         pa[] = {
        { "-beta",     FALSE, etREAL, {&user_beta},
          "If positive, overwrite ewald_beta from [TT].tpr[tt] file with this value" },
        { "-tune",     FALSE, etBOOL, {&bTUNE},
          "Tune the splitting parameter such that the error is equally distributed between real and reciprocal space" },
        { "-self",     FALSE, etREAL, {&fracself},
          "If between 0.0 and 1.0, determine self interaction error from just this fraction of the charged particles" },
        { "-seed",     FALSE, etINT,  {&seed},
          "Random number seed used for Monte Carlo algorithm when [TT]-self[tt] is set to a value between 0.0 and 1.0" },
        { "-v",        FALSE, etBOOL, {&bVerbose},
          "Be loud and noisy" }
    };


#define NFILE asize(fnm)

    cr = init_commrec();

    PCA_Flags  = PCA_NOEXIT_ON_ARGS;

    if (!parse_common_args(&argc, argv, PCA_Flags,
                           NFILE, fnm, asize(pa), pa, asize(desc), desc,
                           0, NULL, &oenv))
    {
        return 0;
    }

    if (!bTUNE)
    {
        bTUNE = opt2bSet("-so", NFILE, fnm);
    }

    info.n_entries = 1;

    /* Allocate memory for the inputinfo struct: */
    create_info(&info);
    info.fourier_sp[0] = fs;

    /* Read in the tpr file and open the logfile for writing */
    if (MASTER(cr))
    {
        snew(ir, 1);
        read_tpr_file(opt2fn("-s", NFILE, fnm), &info, &state, &mtop, ir, user_beta, fracself);

        fp = fopen(opt2fn("-o", NFILE, fnm), "w");
    }

    /* Check consistency if the user provided fourierspacing */
    if (fs > 0 && MASTER(cr))
    {
        /* Recalculate the grid dimensions using fourierspacing from user input */
        info.nkx[0] = 0;
        info.nky[0] = 0;
        info.nkz[0] = 0;
        calc_grid(stdout, state.box, info.fourier_sp[0], &(info.nkx[0]), &(info.nky[0]), &(info.nkz[0]));
        if ( (ir->nkx != info.nkx[0]) || (ir->nky != info.nky[0]) || (ir->nkz != info.nkz[0]) )
        {
            gmx_fatal(FARGS, "Wrong fourierspacing %f nm, input file grid = %d x %d x %d, computed grid = %d x %d x %d",
                      fs, ir->nkx, ir->nky, ir->nkz, info.nkx[0], info.nky[0], info.nkz[0]);
        }
    }

    /* Estimate (S)PME force error */

    /* Determine the volume of the simulation box */
    if (MASTER(cr))
    {
        info.volume = det(state.box);
        calc_recipbox(state.box, info.recipbox);
        info.natoms = mtop.natoms;
        info.bTUNE  = bTUNE;
    }

    if (PAR(cr))
    {
        bcast_info(&info, cr);
    }

    /* Get an error estimate of the input tpr file and do some tuning if requested */
    estimate_PME_error(&info, &state, &mtop, fp, bVerbose, seed, cr);

    if (MASTER(cr))
    {
        /* Write out optimized tpr file if requested */
        if (opt2bSet("-so", NFILE, fnm) || bTUNE)
        {
            ir->ewald_rtol = info.ewald_rtol[0];
            write_tpx_state(opt2fn("-so", NFILE, fnm), ir, &state, &mtop);
        }
        please_cite(fp, "Wang2010");
        fclose(fp);
    }

    return 0;
}
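
The tool relies throughout on the relation ewald_rtol = erfc(rcoulomb*beta) (see the gmx_erfc() calls in Example #1). Going the other way, from a requested tolerance to a splitting parameter, is a simple root-finding problem because erfc(rc*beta) decreases monotonically in beta. The standalone sketch below inverts the relation by bisection; it mirrors conceptually what a routine like calc_ewaldcoeff() in Example #5 provides, but it is not the GROMACS routine itself, and the rc/rtol values in main() are only illustrative.

/* Standalone sketch: recover the splitting parameter beta from rcoulomb and
 * ewald_rtol by bisection on erfc(rc*beta) = rtol.
 * Compile with: cc beta_from_rtol.c -lm */
#include <math.h>
#include <stdio.h>

static double beta_from_rtol(double rc, double rtol)
{
    double lo = 0.0, hi = 1.0;

    /* erfc(rc*beta) decreases monotonically in beta: first find an upper
     * bracket, then bisect. */
    while (erfc(rc*hi) > rtol)
    {
        hi *= 2.0;
    }
    for (int i = 0; i < 60; i++)
    {
        double mid = 0.5*(lo + hi);
        if (erfc(rc*mid) > rtol)
        {
            lo = mid;
        }
        else
        {
            hi = mid;
        }
    }
    return 0.5*(lo + hi);
}

int main(void)
{
    double rc   = 0.9;  /* nm; illustrative value */
    double rtol = 1e-5; /* illustrative tolerance */
    printf("beta = %g nm^-1\n", beta_from_rtol(rc, rtol));
    return 0;
}
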
Example #3
/* Estimate the reciprocal space part error of the SPME Ewald sum. */
static real estimate_reciprocal(
        t_inputinfo       *info,
        rvec               x[], /* array of particles */
        real               q[], /* array of charges */
        int                nr,  /* number of charges = size of the charge array */
        FILE  gmx_unused  *fp_out,
        gmx_bool           bVerbose,
        unsigned int       seed,     /* The seed for the random number generator */
        int               *nsamples, /* Return the number of samples used if Monte Carlo
                                      * algorithm is used for self energy error estimate */
        t_commrec         *cr)
{
    real     e_rec   = 0; /* reciprocal error estimate */
    real     e_rec1  = 0; /* Error estimate term 1*/
    real     e_rec2  = 0; /* Error estimate term 2*/
    real     e_rec3  = 0; /* Error estimate term 3 */
    real     e_rec3x = 0; /* part of Error estimate term 3 in x */
    real     e_rec3y = 0; /* part of Error estimate term 3 in y */
    real     e_rec3z = 0; /* part of Error estimate term 3 in z */
    int      i, ci;
    int      nx, ny, nz;  /* grid coordinates */
    real     q2_all = 0;  /* sum of squared charges */
    rvec     gridpx;      /* reciprocal grid point in x direction*/
    rvec     gridpxy;     /* reciprocal grid point in x and y direction*/
    rvec     gridp;       /* complete reciprocal grid point in 3 directions*/
    rvec     tmpvec;      /* template to create points from basis vectors */
    rvec     tmpvec2;     /* template to create points from basis vectors */
    real     coeff  = 0;  /* variable to compute coefficients of the error estimate */
    real     coeff2 = 0;  /* variable to compute coefficients of the error estimate */
    real     tmp    = 0;  /* variables to compute different factors from vectors */
    real     tmp1   = 0;
    real     tmp2   = 0;
    gmx_bool bFraction;

    /* Random number generator */
    gmx_rng_t rng     = NULL;
    int      *numbers = NULL;

    /* Index variables for parallel work distribution */
    int startglobal, stopglobal;
    int startlocal, stoplocal;
    int x_per_core;
    int xtot;

#ifdef TAKETIME
    double t0 = 0.0;
    double t1 = 0.0;
#endif

    rng = gmx_rng_init(seed);

    clear_rvec(gridpx);
    clear_rvec(gridpxy);
    clear_rvec(gridp);
    clear_rvec(tmpvec);
    clear_rvec(tmpvec2);

    for (i = 0; i < nr; i++)
    {
        q2_all += q[i]*q[i];
    }

    /* Calculate indices for work distribution */
    startglobal = -info->nkx[0]/2;
    stopglobal  = info->nkx[0]/2;
    xtot        = stopglobal*2+1;
    if (PAR(cr))
    {
        x_per_core = static_cast<int>(ceil(static_cast<real>(xtot) / cr->nnodes));
        startlocal = startglobal + x_per_core*cr->nodeid;
        stoplocal  = startlocal + x_per_core -1;
        if (stoplocal > stopglobal)
        {
            stoplocal = stopglobal;
        }
    }
    else
    {
        startlocal = startglobal;
        stoplocal  = stopglobal;
        x_per_core = xtot;
    }
/*
   #ifdef GMX_LIB_MPI
    MPI_Barrier(MPI_COMM_WORLD);
   #endif
 */

#ifdef GMX_LIB_MPI
#ifdef TAKETIME
    if (MASTER(cr))
    {
        t0 = MPI_Wtime();
    }
#endif
#endif

    if (MASTER(cr))
    {

        fprintf(stderr, "Calculating reciprocal error part 1 ...");

    }

    for (nx = startlocal; nx <= stoplocal; nx++)
    {
        svmul(nx, info->recipbox[XX], gridpx);
        for (ny = -info->nky[0]/2; ny < info->nky[0]/2+1; ny++)
        {
            svmul(ny, info->recipbox[YY], tmpvec);
            rvec_add(gridpx, tmpvec, gridpxy);
            for (nz = -info->nkz[0]/2; nz < info->nkz[0]/2+1; nz++)
            {
                if (0 == nx &&  0 == ny &&  0 == nz)
                {
                    continue;
                }
                svmul(nz, info->recipbox[ZZ], tmpvec);
                rvec_add(gridpxy, tmpvec, gridp);
                tmp    = norm2(gridp);
                coeff  = exp(-1.0 * M_PI * M_PI * tmp / info->ewald_beta[0] / info->ewald_beta[0] );
                coeff /= 2.0 * M_PI * info->volume * tmp;
                coeff2 = tmp;


                tmp  = eps_poly2(nx, info->nkx[0], info->pme_order[0]);
                tmp += eps_poly2(ny, info->nky[0], info->pme_order[0]);
                tmp += eps_poly2(nz, info->nkz[0], info->pme_order[0]);

                tmp1 = eps_poly1(nx, info->nkx[0], info->pme_order[0]);
                tmp2 = eps_poly1(ny, info->nky[0], info->pme_order[0]);

                tmp += 2.0 * tmp1 * tmp2;

                tmp1 = eps_poly1(nz, info->nkz[0], info->pme_order[0]);
                tmp2 = eps_poly1(ny, info->nky[0], info->pme_order[0]);

                tmp += 2.0 * tmp1 * tmp2;

                tmp1 = eps_poly1(nz, info->nkz[0], info->pme_order[0]);
                tmp2 = eps_poly1(nx, info->nkx[0], info->pme_order[0]);

                tmp += 2.0 * tmp1 * tmp2;

                tmp1  = eps_poly1(nx, info->nkx[0], info->pme_order[0]);
                tmp1 += eps_poly1(ny, info->nky[0], info->pme_order[0]);
                tmp1 += eps_poly1(nz, info->nkz[0], info->pme_order[0]);

                tmp += tmp1 * tmp1;

                e_rec1 += 32.0 * M_PI * M_PI * coeff * coeff * coeff2 * tmp  * q2_all * q2_all / nr;

                tmp1  = eps_poly3(nx, info->nkx[0], info->pme_order[0]);
                tmp1 *= info->nkx[0];
                tmp2  = iprod(gridp, info->recipbox[XX]);

                tmp = tmp1*tmp2;

                tmp1  = eps_poly3(ny, info->nky[0], info->pme_order[0]);
                tmp1 *= info->nky[0];
                tmp2  = iprod(gridp, info->recipbox[YY]);

                tmp += tmp1*tmp2;

                tmp1  = eps_poly3(nz, info->nkz[0], info->pme_order[0]);
                tmp1 *= info->nkz[0];
                tmp2  = iprod(gridp, info->recipbox[ZZ]);

                tmp += tmp1*tmp2;

                tmp *= 4.0 * M_PI;

                tmp1  = eps_poly4(nx, info->nkx[0], info->pme_order[0]);
                tmp1 *= norm2(info->recipbox[XX]);
                tmp1 *= info->nkx[0] * info->nkx[0];

                tmp += tmp1;

                tmp1  = eps_poly4(ny, info->nky[0], info->pme_order[0]);
                tmp1 *= norm2(info->recipbox[YY]);
                tmp1 *= info->nky[0] * info->nky[0];

                tmp += tmp1;

                tmp1  = eps_poly4(nz, info->nkz[0], info->pme_order[0]);
                tmp1 *= norm2(info->recipbox[ZZ]);
                tmp1 *= info->nkz[0] * info->nkz[0];

                tmp += tmp1;

                e_rec2 += 4.0 * coeff * coeff * tmp * q2_all * q2_all / nr;

            }
        }
        if (MASTER(cr))
        {
            fprintf(stderr, "\rCalculating reciprocal error part 1 ... %3.0f%%", 100.0*(nx-startlocal+1)/(x_per_core));
        }

    }

    if (MASTER(cr))
    {
        fprintf(stderr, "\n");
    }

    /* Use just a fraction of all charges to estimate the self energy error term? */
    bFraction =  (info->fracself > 0.0) && (info->fracself < 1.0);

    if (bFraction)
    {
        /* Here xtot is the number of samples taken for the Monte Carlo calculation
         * of the average of term IV of equation 35 in Wang2010. Round up to a
         * number of samples that is divisible by the number of nodes */
        x_per_core  = static_cast<int>(ceil(info->fracself * nr / cr->nnodes));
        xtot        = x_per_core * cr->nnodes;
    }
    else
    {
        /* In this case we use all nr particle positions */
        xtot       = nr;
        x_per_core = static_cast<int>(ceil(static_cast<real>(xtot) / cr->nnodes));
    }

    startlocal = x_per_core *  cr->nodeid;
    stoplocal  = std::min(startlocal + x_per_core, xtot);  /* min needed if xtot == nr */

    if (bFraction)
    {
        /* Make sure we get identical results in serial and parallel. Therefore,
         * take the sample indices from a single, global random number array that
         * is constructed on the master node and that only depends on the seed */
        snew(numbers, xtot);
        if (MASTER(cr))
        {
            for (i = 0; i < xtot; i++)
            {
                numbers[i] = static_cast<int>(floor(gmx_rng_uniform_real(rng) * nr));
            }
        }
        /* Broadcast the random number array to the other nodes */
        if (PAR(cr))
        {
            nblock_bc(cr, xtot, numbers);
        }

        if (bVerbose && MASTER(cr))
        {
            fprintf(stdout, "Using %d sample%s to approximate the self interaction error term",
                    xtot, xtot == 1 ? "" : "s");
            if (PAR(cr))
            {
                fprintf(stdout, " (%d sample%s per rank)", x_per_core, x_per_core == 1 ? "" : "s");
            }
            fprintf(stdout, ".\n");
        }
    }

    /* Return the number of positions used for the Monte Carlo algorithm */
    *nsamples = xtot;

    for (i = startlocal; i < stoplocal; i++)
    {
        e_rec3x = 0;
        e_rec3y = 0;
        e_rec3z = 0;

        if (bFraction)
        {
            /* Randomly pick a charge */
            ci = numbers[i];
        }
        else
        {
            /* Use all charges */
            ci = i;
        }

        /* for(nx=startlocal; nx<=stoplocal; nx++)*/
        for (nx = -info->nkx[0]/2; nx < info->nkx[0]/2+1; nx++)
        {
            svmul(nx, info->recipbox[XX], gridpx);
            for (ny = -info->nky[0]/2; ny < info->nky[0]/2+1; ny++)
            {
                svmul(ny, info->recipbox[YY], tmpvec);
                rvec_add(gridpx, tmpvec, gridpxy);
                for (nz = -info->nkz[0]/2; nz < info->nkz[0]/2+1; nz++)
                {

                    if (0 == nx && 0 == ny && 0 == nz)
                    {
                        continue;
                    }

                    svmul(nz, info->recipbox[ZZ], tmpvec);
                    rvec_add(gridpxy, tmpvec, gridp);
                    tmp      = norm2(gridp);
                    coeff    = exp(-1.0 * M_PI * M_PI * tmp / info->ewald_beta[0] / info->ewald_beta[0] );
                    coeff   /= tmp;
                    e_rec3x += coeff*eps_self(nx, info->nkx[0], info->recipbox[XX], info->pme_order[0], x[ci]);
                    e_rec3y += coeff*eps_self(ny, info->nky[0], info->recipbox[YY], info->pme_order[0], x[ci]);
                    e_rec3z += coeff*eps_self(nz, info->nkz[0], info->recipbox[ZZ], info->pme_order[0], x[ci]);

                }
            }
        }

        clear_rvec(tmpvec2);

        svmul(e_rec3x, info->recipbox[XX], tmpvec);
        rvec_inc(tmpvec2, tmpvec);
        svmul(e_rec3y, info->recipbox[YY], tmpvec);
        rvec_inc(tmpvec2, tmpvec);
        svmul(e_rec3z, info->recipbox[ZZ], tmpvec);
        rvec_inc(tmpvec2, tmpvec);

        e_rec3 += q[ci]*q[ci]*q[ci]*q[ci]*norm2(tmpvec2) / ( xtot * M_PI * info->volume * M_PI * info->volume);
        if (MASTER(cr))
        {
            fprintf(stderr, "\rCalculating reciprocal error part 2 ... %3.0f%%",
                    100.0*(i+1)/stoplocal);

        }
    }

    if (MASTER(cr))
    {
        fprintf(stderr, "\n");
    }

#ifdef GMX_LIB_MPI
#ifdef TAKETIME
    if (MASTER(cr))
    {
        t1 = MPI_Wtime() - t0;
        fprintf(fp_out, "Recip. err. est. took   : %lf s\n", t1);
    }
#endif
#endif

#ifdef DEBUG
    if (PAR(cr))
    {
        fprintf(stderr, "Rank %3d: nx=[%3d...%3d]  e_rec3=%e\n",
                cr->nodeid, startlocal, stoplocal, e_rec3);
    }
#endif

    if (PAR(cr))
    {
        gmx_sum(1, &e_rec1, cr);
        gmx_sum(1, &e_rec2, cr);
        gmx_sum(1, &e_rec3, cr);
    }

    /* e_rec1*=8.0 * q2_all / info->volume / info->volume / nr ;
       e_rec2*=  q2_all / M_PI / M_PI / info->volume / info->volume / nr ;
       e_rec3/= M_PI * M_PI * info->volume * info->volume * nr ;
     */
    e_rec = sqrt(e_rec1+e_rec2+e_rec3);


    return ONE_4PI_EPS0 * e_rec;
}
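
estimate_reciprocal() distributes both the k-space loop and the Monte Carlo samples over the ranks in contiguous blocks of ceil(xtot/nnodes) indices and clamps the last block so it does not run past the end. The self-contained sketch below reproduces that block decomposition for a half-open index range [0, xtot); the k-space loop above uses the symmetric range [-nkx/2, nkx/2], but the chunking logic is the same.

/* Self-contained sketch of the block work distribution used in
 * estimate_reciprocal(): each of nnodes ranks gets ceil(xtot/nnodes)
 * consecutive indices, and the last range is clamped to xtot. */
#include <stdio.h>

static void my_range(int xtot, int nnodes, int nodeid, int *start, int *stop)
{
    int per_core = (xtot + nnodes - 1)/nnodes; /* integer ceil(xtot/nnodes) */

    *start = per_core*nodeid;
    *stop  = *start + per_core;
    if (*stop > xtot)
    {
        *stop = xtot;
    }
}

int main(void)
{
    int xtot = 101, nnodes = 4;

    for (int rank = 0; rank < nnodes; rank++)
    {
        int start, stop;
        my_range(xtot, nnodes, rank, &start, &stop);
        printf("rank %d: [%d, %d)\n", rank, start, stop);
    }
    return 0;
}
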
Example #4
int main(int argc,char *argv[])
{
  int       mmm[] = { 8, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40,
		      45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90, 100 };
  int       nnn[] = { 24, 32, 48, 60, 72, 84, 96 };
#define NNN asize(nnn)
  FILE      *fp,*fplog;
  int       *niter;
  int       i,j,n,nit,ntot,n3,rsize;
  double    t,nflop,start;
  double    *rt,*ct;
  t_fftgrid *g;
  t_commrec *cr;
  static gmx_bool bReproducible = FALSE;
  static int  nnode    = 1;
  static int  nitfac  = 1;
  t_pargs pa[] = {
    { "-reproducible",   FALSE, etBOOL, {&bReproducible}, 
      "Request binary reproducible results" },
    { "-np",    FALSE, etINT, {&nnode},
      "Number of NODEs" },
    { "-itfac", FALSE, etINT, {&nitfac},
      "Multiply number of iterations by this" }
  };
  static t_filenm fnm[] = {
    { efLOG, "-g", "fft",      ffWRITE },
    { efXVG, "-o", "fft",      ffWRITE }
  };
#define NFILE asize(fnm)
  
  cr = init_par(&argc,&argv);
  if (MASTER(cr))
    CopyRight(stdout,argv[0]);
  parse_common_args(&argc,argv,
		    PCA_CAN_SET_DEFFNM | (MASTER(cr) ? 0 : PCA_QUIET),
		    NFILE,fnm,asize(pa),pa,0,NULL,0,NULL);
  gmx_log_open(ftp2fn(efLOG,NFILE,fnm),cr,1,0,&fplog);

  snew(niter,NNN);
  snew(ct,NNN);
  snew(rt,NNN);
  rsize = sizeof(real);
  for(i=0; (i<NNN); i++) {
    n  = nnn[i];
    if (n < 16)
      niter[i] = 50;
    else if (n < 26)
      niter[i] = 20;
    else if (n < 51)
      niter[i] = 10;
    else
      niter[i] = 5;
    niter[i] *= nitfac;
    nit = niter[i];
    
    if (MASTER(cr))
      fprintf(stderr,"\r3D FFT (%s precision) %3d^3, niter %3d     ",
	      (rsize == 8) ? "Double" : "Single",n,nit);
    
    g  = mk_fftgrid(n,n,n,NULL,NULL,cr,bReproducible);

    if (PAR(cr))
      start = time(NULL);
    else
      start_time();
    for(j=0; (j<nit); j++) {
      gmxfft3D(g,GMX_FFT_REAL_TO_COMPLEX,cr);
      gmxfft3D(g,GMX_FFT_COMPLEX_TO_REAL,cr);
    }
    if (PAR(cr)) 
      rt[i] = time(NULL)-start;
    else {
      update_time();
      rt[i] = node_time();
    }
    done_fftgrid(g);
    sfree(g);
  }
  if (MASTER(cr)) {
    fprintf(stderr,"\n");
    fp=xvgropen(ftp2fn(efXVG,NFILE,fnm),
		"FFT timings","n^3","t (s)");
    for(i=0; (i<NNN); i++) {
      n3 = 2*niter[i]*nnn[i]*nnn[i]*nnn[i];
      fprintf(fp,"%10d  %10g\n",nnn[i],rt[i]/(2*niter[i]));
    }
    gmx_fio_fclose(fp);
  }
  return 0;
}
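
The benchmark above times nit forward+backward pairs of a 3D real-to-complex FFT through the GROMACS grid wrappers (mk_fftgrid()/gmxfft3D()). A standalone sketch of the same measurement with plain FFTW3 is given below; FFTW3 is an assumption here (the original goes through the GROMACS wrappers), and the grid size and iteration count are fixed instead of being taken from the nnn[]/niter[] tables.

/* Standalone timing sketch for forward+backward 3D real FFT pairs using FFTW3
 * (assumed to be installed).  Compile with: cc fft_time.c -lfftw3 -lm */
#include <stdio.h>
#include <time.h>
#include <fftw3.h>

int main(void)
{
    const int     n   = 64; /* grid size n^3 */
    const int     nit = 10; /* number of forward+backward pairs to time */
    double       *in  = (double *) fftw_malloc(sizeof(double)*n*n*n);
    fftw_complex *out = (fftw_complex *) fftw_malloc(sizeof(fftw_complex)*n*n*(n/2+1));

    for (int i = 0; i < n*n*n; i++)
    {
        in[i] = (double) (i % 7); /* arbitrary test data */
    }

    /* FFTW_ESTIMATE keeps planning cheap and leaves the input intact */
    fftw_plan pf = fftw_plan_dft_r2c_3d(n, n, n, in, out, FFTW_ESTIMATE);
    fftw_plan pb = fftw_plan_dft_c2r_3d(n, n, n, out, in, FFTW_ESTIMATE);

    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    for (int j = 0; j < nit; j++)
    {
        fftw_execute(pf);
        fftw_execute(pb);
    }
    clock_gettime(CLOCK_MONOTONIC, &t1);

    double secs = (t1.tv_sec - t0.tv_sec) + 1e-9*(t1.tv_nsec - t0.tv_nsec);
    printf("3D FFT %3d^3: %g s per forward+backward pair\n", n, secs/nit);

    fftw_destroy_plan(pf);
    fftw_destroy_plan(pb);
    fftw_free(in);
    fftw_free(out);
    return 0;
}
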
Example #5
int main(int argc,char *argv[])
{
  static char *desc[] = {
    "The [TT]pmetest[tt] program tests the scaling of the PME code. When only given",
    "a [TT].tpr[tt] file it will compute PME for one frame. When given a trajectory",
    "it will do so for all the frames in the trajectory. Before the PME",
    "routine is called the coordinates are sorted along the X-axis.[PAR]",
    "As an extra service to the public the program can also compute",
    "long-range Coulomb energies for components of the system. When the",
    "[TT]-groups[tt] flag is given to the program the energy groups",
    "from the [TT].tpr[tt] file will be read, and half an energy matrix computed."
  };
  t_commrec    *cr,*mcr;
  static t_filenm fnm[] = {
    { efTPX, NULL,      NULL,       ffREAD  },
    { efTRN, "-o",      NULL,       ffWRITE },
    { efLOG, "-g",      "pme",      ffWRITE },
    { efTRX, "-f",      NULL,       ffOPTRD },
    { efXVG, "-x",      "ener-pme", ffWRITE }
  };
#define NFILE asize(fnm)

  /* Command line options ! */
  static gmx_bool bVerbose=FALSE;
  static gmx_bool bOptFFT=FALSE;
  static gmx_bool bSort=FALSE;
  static int  ewald_geometry=eewg3D;
  static int  nnodes=1;
  static int  pme_order=0;
  static rvec grid = { -1, -1, -1 };
  static real rc   = 0.0;
  static real dtol = 0.0;
  static gmx_bool bGroups = FALSE;
  static t_pargs pa[] = {
    { "-np",      FALSE, etINT, {&nnodes},
      "Number of nodes, must be the same as used for [TT]grompp[tt]" },
    { "-v",       FALSE, etBOOL,{&bVerbose},  
      "Be loud and noisy" },
    { "-sort",    FALSE, etBOOL,{&bSort},  
      "Sort coordinates. Crucial for domain decomposition." },
    { "-grid",    FALSE, etRVEC,{&grid},
      "Number of grid cells in X, Y, Z dimension (if -1 use from [TT].tpr[tt])" },
    { "-order",   FALSE, etINT, {&pme_order},
      "Order of the PME spreading algorithm" },
    { "-groups",  FALSE, etBOOL, {&bGroups},
      "Compute half an energy matrix based on the energy groups in your [TT].tpr[tt] file" },
    { "-rc",      FALSE, etREAL, {&rc},
      "Rcoulomb for Ewald summation" },
    { "-tol",     FALSE, etREAL, {&dtol},
      "Tolerance for Ewald summation" }
  };
  FILE        *fp;
  t_inputrec  *ir;
  t_topology  top;
  t_tpxheader tpx;
  t_nrnb      nrnb;
  t_nsborder  *nsb;
  t_forcerec  *fr;
  t_mdatoms   *mdatoms;
  char        title[STRLEN];
  int         natoms,step,status,i,ncg,root;
  real        t,lambda,ewaldcoeff,qtot;
  rvec        *x,*f,*xbuf;
  int         *index;
  gmx_bool        bCont;
  real        *charge,*qbuf,*qqbuf;
  matrix      box;
  
  /* Start the actual parallel code if necessary */
  cr   = init_par(&argc,&argv);
  root = 0;
  
  if (MASTER(cr)) 
    CopyRight(stderr,argv[0]);
  
  /* Parse command line on all processors, arguments are passed on in 
   * init_par (see above)
   */
  parse_common_args(&argc,argv,
		    PCA_KEEP_ARGS | PCA_NOEXIT_ON_ARGS | PCA_BE_NICE |
		    PCA_CAN_SET_DEFFNM | (MASTER(cr) ? 0 : PCA_QUIET),
		    NFILE,fnm,asize(pa),pa,asize(desc),desc,0,NULL);
  
#ifndef GMX_MPI
  if (nnodes > 1) 
    gmx_fatal(FARGS,"GROMACS compiled without MPI support - can't do parallel runs");
#endif

  /* Open log files on all processors */
  open_log(ftp2fn(efLOG,NFILE,fnm),cr);
  snew(ir,1);
  
  if (MASTER(cr)) {
    /* Read tpr file etc. */
    read_tpxheader(ftp2fn(efTPX,NFILE,fnm),&tpx,FALSE,NULL,NULL);
    snew(x,tpx.natoms);
    read_tpx(ftp2fn(efTPX,NFILE,fnm),&step,&t,&lambda,ir,
	     box,&natoms,x,NULL,NULL,&top);
    /* Charges */
    qtot = 0;
    snew(charge,natoms);
    for(i=0; (i<natoms); i++) {
      charge[i] = top.atoms.atom[i].q;
      qtot += charge[i];
    }
  
    /* Grid stuff */
    if (opt2parg_bSet("-grid",asize(pa),pa)) {
      ir->nkx = grid[XX];
      ir->nky = grid[YY];
      ir->nkz = grid[ZZ];
    }
    /* Check command line parameters for consistency */
    if ((ir->nkx <= 0) || (ir->nky <= 0) || (ir->nkz <= 0))
      gmx_fatal(FARGS,"PME grid = %d %d %d",ir->nkx,ir->nky,ir->nkz);
    if (opt2parg_bSet("-rc",asize(pa),pa)) 
      ir->rcoulomb = rc;
    if (ir->rcoulomb <= 0)
      gmx_fatal(FARGS,"rcoulomb should be > 0 (not %f)",ir->rcoulomb);
    if (opt2parg_bSet("-order",asize(pa),pa)) 
      ir->pme_order = pme_order;
    if (ir->pme_order <= 0)
      gmx_fatal(FARGS,"pme_order should be > 0 (not %d)",ir->pme_order);
    if (opt2parg_bSet("-tol",asize(pa),pa))
      ir->ewald_rtol = dtol;
    if (ir->ewald_rtol <= 0)
      gmx_fatal(FARGS,"ewald_tol should be > 0 (not %f)",ir->ewald_rtol);
  }
  else {
    init_top(&top);
  }

  /* Add parallelization code here */
  snew(nsb,1);
  if (MASTER(cr)) {
    ncg = top.blocks[ebCGS].multinr[0];
    for(i=0; (i<cr->nnodes-1); i++)
      top.blocks[ebCGS].multinr[i] = min(ncg,(ncg*(i+1))/cr->nnodes);
    for( ; (i<MAXNODES); i++)
      top.blocks[ebCGS].multinr[i] = ncg;
  }
  if (PAR(cr)) {
    /* Set some variables to zero to avoid core dumps */
    ir->opts.ngtc = ir->opts.ngacc = ir->opts.ngfrz = ir->opts.ngener = 0;
#ifdef GMX_MPI
    /* Distribute the data over processors */
    MPI_Bcast(&natoms,1,MPI_INT,root,MPI_COMM_WORLD);
    MPI_Bcast(ir,sizeof(*ir),MPI_BYTE,root,MPI_COMM_WORLD);
    MPI_Bcast(&qtot,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
#endif

    /* Call some dedicated communication routines, master sends n-1 times */
    if (MASTER(cr)) {
      for(i=1; (i<cr->nnodes); i++) {
	mv_block(i,&(top.blocks[ebCGS]));
	mv_block(i,&(top.atoms.excl));
      }
    }
    else {
      ld_block(root,&(top.blocks[ebCGS]));
      ld_block(root,&(top.atoms.excl));
    }
    if (!MASTER(cr)) {
      snew(charge,natoms);
      snew(x,natoms);
    }
#ifdef GMX_MPI
    MPI_Bcast(charge,natoms,GMX_MPI_REAL,root,MPI_COMM_WORLD);
#endif
  }
  ewaldcoeff = calc_ewaldcoeff(ir->rcoulomb,ir->ewald_rtol);
  
  
  if (bVerbose)
    pr_inputrec(stdlog,0,"Inputrec",ir);

  /* Allocate memory for temp arrays etc. */
  snew(xbuf,natoms);
  snew(f,natoms);
  snew(qbuf,natoms);
  snew(qqbuf,natoms);
  snew(index,natoms);

  /* Initialize the PME code */  
  init_pme(stdlog,cr,ir->nkx,ir->nky,ir->nkz,ir->pme_order,
	   natoms,FALSE,bOptFFT,ewald_geometry);
	   
  /* MFlops accounting */
  init_nrnb(&nrnb);
  
  /* Initialize the work division */
  calc_nsb(stdlog,&(top.blocks[ebCGS]),cr->nnodes,nsb,0);
  nsb->nodeid = cr->nodeid;
  print_nsb(stdlog,"pmetest",nsb);  

  /* Initiate forcerec */
  mdatoms = atoms2md(stdlog,&top.atoms,ir->opts.nFreeze,ir->eI,
		     ir->delta_t,0,ir->opts.tau_t,FALSE,FALSE);
  snew(fr,1);
  init_forcerec(stdlog,fr,ir,&top,cr,mdatoms,nsb,box,FALSE,NULL,NULL,FALSE);
  
  /* First do PME based on coordinates in tpr file, send them to
   * other processors if needed.
   */
  if (MASTER(cr))
    fprintf(stdlog,"-----\n"
	    "Results based on tpr file %s\n",ftp2fn(efTPX,NFILE,fnm));
#ifdef GMX_MPI
  if (PAR(cr)) {
    MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  }
#endif
  do_my_pme(stdlog,0,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,
	    cr,nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,NULL,
	    bGroups ? ir->opts.ngener : 1,mdatoms->cENER);

  /* If we have a trajectory file, we will read the frames in it and compute
   * the PME energy.
   */
  if (ftp2bSet(efTRX,NFILE,fnm)) {
    fprintf(stdlog,"-----\n"
	    "Results based on trx file %s\n",ftp2fn(efTRX,NFILE,fnm));
    if (MASTER(cr)) {
      sfree(x);
      natoms = read_first_x(&status,ftp2fn(efTRX,NFILE,fnm),&t,&x,box); 
      if (natoms != top.atoms.nr)
	gmx_fatal(FARGS,"natoms in trx = %d, in tpr = %d",natoms,top.atoms.nr);
      fp = xvgropen(ftp2fn(efXVG,NFILE,fnm),"PME Energy","Time (ps)","E (kJ/mol)");
    }
    else
      fp = NULL;
    do {
      /* Send coordinates, box and time to the other nodes */
#ifdef GMX_MPI
      if (PAR(cr)) {
	MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
	MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
	MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
      }
#endif
      rm_pbc(&top.idef,nsb->natoms,box,x,x);
      /* Call the PME wrapper function */
      do_my_pme(stdlog,t,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,cr,
		nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,fp,
		bGroups ? ir->opts.ngener : 1,mdatoms->cENER);
      /* Only the master processor reads more data */
      if (MASTER(cr))
          bCont = read_next_x(status,&t,natoms,x,box);
      /* Check whether we need to continue */
#ifdef GMX_MPI
      if (PAR(cr))
          MPI_Bcast(&bCont,1,MPI_INT,root,MPI_COMM_WORLD);
#endif
      
    } while (bCont);
    
    /* Finish I/O, close files */
    if (MASTER(cr)) {
      close_trx(status);
      ffclose(fp);
    }
  }
  
  if (bVerbose) {
    /* Do some final I/O about performance, might be useful in debugging */
    fprintf(stdlog,"-----\n");
    print_nrnb(stdlog,&nrnb);
  }
  
  /* Finish the parallel stuff */  
  if (gmx_parallel_env_initialized())
    gmx_finalize(cr);

  /* Thank the audience, as usual */
  if (MASTER(cr)) 
    gmx_thanx(stderr);

  return 0;
}
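
In the pmetest driver the master rank reads the tpr file and each trajectory frame, then broadcasts coordinates, box and time to the other ranks with MPI_Bcast before every PME evaluation. The minimal sketch below shows only that broadcast pattern with plain MPI, using MPI_DOUBLE in place of GMX_MPI_REAL and dummy data standing in for a frame read from disk; the PME call itself is omitted.

/* Minimal sketch of the master-broadcasts-frame pattern used in pmetest.
 * Compile with: mpicc bcast_frame.c   Run with: mpirun -np 4 ./a.out */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    int     rank, natoms = 1000;
    double  box[3][3] = {{0}};
    double *x;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    x = malloc(3*natoms*sizeof(double));
    if (rank == 0)
    {
        /* The master fills in the data (dummy values standing in for a frame
         * read from a tpr or trajectory file). */
        for (int i = 0; i < 3*natoms; i++)
        {
            x[i] = 0.1*i;
        }
        box[0][0] = box[1][1] = box[2][2] = 3.0;
    }

    /* Same broadcasts as in the pmetest main(), with MPI_DOUBLE in place of
     * GMX_MPI_REAL. */
    MPI_Bcast(x,          3*natoms, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&box[0][0], 9,        MPI_DOUBLE, 0, MPI_COMM_WORLD);

    printf("rank %d: x[0]=%g box=%g\n", rank, x[0], box[0][0]);

    free(x);
    MPI_Finalize();
    return 0;
}
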
Example #6
double do_tpi(FILE *fplog, t_commrec *cr,
              int nfile, const t_filenm fnm[],
              const output_env_t oenv, gmx_bool bVerbose, gmx_bool gmx_unused bCompact,
              int gmx_unused nstglobalcomm,
              gmx_vsite_t gmx_unused *vsite, gmx_constr_t gmx_unused constr,
              int gmx_unused stepout,
              t_inputrec *inputrec,
              gmx_mtop_t *top_global, t_fcdata *fcd,
              t_state *state,
              t_mdatoms *mdatoms,
              t_nrnb *nrnb, gmx_wallcycle_t wcycle,
              gmx_edsam_t gmx_unused ed,
              t_forcerec *fr,
              int gmx_unused repl_ex_nst, int gmx_unused repl_ex_nex, int gmx_unused repl_ex_seed,
              gmx_membed_t gmx_unused membed,
              real gmx_unused cpt_period, real gmx_unused max_hours,
              const char gmx_unused *deviceOptions,
              unsigned long gmx_unused Flags,
              gmx_runtime_t *runtime)
{
    const char     *TPI = "Test Particle Insertion";
    gmx_localtop_t *top;
    gmx_groups_t   *groups;
    gmx_enerdata_t *enerd;
    rvec           *f;
    real            lambda, t, temp, beta, drmax, epot;
    double          embU, sum_embU, *sum_UgembU, V, V_all, VembU_all;
    t_trxstatus    *status;
    t_trxframe      rerun_fr;
    gmx_bool        bDispCorr, bCharge, bRFExcl, bNotLastFrame, bStateChanged, bNS, bOurStep;
    tensor          force_vir, shake_vir, vir, pres;
    int             cg_tp, a_tp0, a_tp1, ngid, gid_tp, nener, e;
    rvec           *x_mol;
    rvec            mu_tot, x_init, dx, x_tp;
    int             nnodes, frame, nsteps, step;
    int             i, start, end;
    gmx_rng_t       tpi_rand;
    FILE           *fp_tpi = NULL;
    char           *ptr, *dump_pdb, **leg, str[STRLEN], str2[STRLEN];
    double          dbl, dump_ener;
    gmx_bool        bCavity;
    int             nat_cavity  = 0, d;
    real           *mass_cavity = NULL, mass_tot;
    int             nbin;
    double          invbinw, *bin, refvolshift, logV, bUlogV;
    real            dvdl, prescorr, enercorr, dvdlcorr;
    gmx_bool        bEnergyOutOfBounds;
    const char     *tpid_leg[2] = {"direct", "reweighted"};

    /* Since there is no upper limit to the insertion energies,
     * we need to set an upper limit for the distribution output.
     */
    real bU_bin_limit      = 50;
    real bU_logV_bin_limit = bU_bin_limit + 10;

    nnodes = cr->nnodes;

    top = gmx_mtop_generate_local_top(top_global, inputrec);

    groups = &top_global->groups;

    bCavity = (inputrec->eI == eiTPIC);
    if (bCavity)
    {
        ptr = getenv("GMX_TPIC_MASSES");
        if (ptr == NULL)
        {
            nat_cavity = 1;
        }
        else
        {
            /* Read (multiple) masses from the environment variable GMX_TPIC_MASSES;
             * the center of mass of the last atoms is then used for TPIC.
             */
            nat_cavity = 0;
            while (sscanf(ptr, "%lf%n", &dbl, &i) > 0)
            {
                srenew(mass_cavity, nat_cavity+1);
                mass_cavity[nat_cavity] = dbl;
                fprintf(fplog, "mass[%d] = %f\n",
                        nat_cavity+1, mass_cavity[nat_cavity]);
                nat_cavity++;
                ptr += i;
            }
            if (nat_cavity == 0)
            {
                gmx_fatal(FARGS, "Found %d masses in GMX_TPIC_MASSES", nat_cavity);
            }
        }
    }

    /*
       init_em(fplog,TPI,inputrec,&lambda,nrnb,mu_tot,
       state->box,fr,mdatoms,top,cr,nfile,fnm,NULL,NULL);*/
    /* We never need full pbc for TPI */
    fr->ePBC = epbcXYZ;
    /* Determine the temperature for the Boltzmann weighting */
    temp = inputrec->opts.ref_t[0];
    if (fplog)
    {
        for (i = 1; (i < inputrec->opts.ngtc); i++)
        {
            if (inputrec->opts.ref_t[i] != temp)
            {
                fprintf(fplog, "\nWARNING: The temperatures of the different temperature coupling groups are not identical\n\n");
                fprintf(stderr, "\nWARNING: The temperatures of the different temperature coupling groups are not identical\n\n");
            }
        }
        fprintf(fplog,
                "\n  The temperature for test particle insertion is %.3f K\n\n",
                temp);
    }
    beta = 1.0/(BOLTZ*temp);

    /* Number of insertions per frame */
    nsteps = inputrec->nsteps;

    /* Use the same neighborlist with more insertion points
     * in a sphere of radius drmax around the initial point
     */
    /* This should be a proper mdp parameter */
    drmax = inputrec->rtpi;

    /* An environment variable can be set to dump all configurations
     * to pdb with an insertion energy <= this value.
     */
    dump_pdb  = getenv("GMX_TPI_DUMP");
    dump_ener = 0;
    if (dump_pdb)
    {
        sscanf(dump_pdb, "%lf", &dump_ener);
    }

    atoms2md(top_global, inputrec, 0, NULL, 0, top_global->natoms, mdatoms);
    update_mdatoms(mdatoms, inputrec->fepvals->init_lambda);

    snew(enerd, 1);
    init_enerdata(groups->grps[egcENER].nr, inputrec->fepvals->n_lambda, enerd);
    snew(f, top_global->natoms);

    /* Print to log file  */
    runtime_start(runtime);
    print_date_and_time(fplog, cr->nodeid,
                        "Started Test Particle Insertion", runtime);
    wallcycle_start(wcycle, ewcRUN);

    /* The last charge group is the group to be inserted */
    cg_tp = top->cgs.nr - 1;
    a_tp0 = top->cgs.index[cg_tp];
    a_tp1 = top->cgs.index[cg_tp+1];
    if (debug)
    {
        fprintf(debug, "TPI cg %d, atoms %d-%d\n", cg_tp, a_tp0, a_tp1);
    }
    if (a_tp1 - a_tp0 > 1 &&
            (inputrec->rlist < inputrec->rcoulomb ||
             inputrec->rlist < inputrec->rvdw))
    {
        gmx_fatal(FARGS, "Can not do TPI for multi-atom molecule with a twin-range cut-off");
    }
    snew(x_mol, a_tp1-a_tp0);

    bDispCorr = (inputrec->eDispCorr != edispcNO);
    bCharge   = FALSE;
    for (i = a_tp0; i < a_tp1; i++)
    {
        /* Copy the coordinates of the molecule to be inserted */
        copy_rvec(state->x[i], x_mol[i-a_tp0]);
        /* Check if we need to print electrostatic energies */
        bCharge |= (mdatoms->chargeA[i] != 0 ||
                    (mdatoms->chargeB && mdatoms->chargeB[i] != 0));
    }
    bRFExcl = (bCharge && EEL_RF(fr->eeltype) && fr->eeltype != eelRF_NEC);

    calc_cgcm(fplog, cg_tp, cg_tp+1, &(top->cgs), state->x, fr->cg_cm);
    if (bCavity)
    {
        if (norm(fr->cg_cm[cg_tp]) > 0.5*inputrec->rlist && fplog)
        {
            fprintf(fplog, "WARNING: Your TPI molecule is not centered at 0,0,0\n");
            fprintf(stderr, "WARNING: Your TPI molecule is not centered at 0,0,0\n");
        }
    }
    else
    {
        /* Center the molecule to be inserted at zero */
        for (i = 0; i < a_tp1-a_tp0; i++)
        {
            rvec_dec(x_mol[i], fr->cg_cm[cg_tp]);
        }
    }

    if (fplog)
    {
        fprintf(fplog, "\nWill insert %d atoms %s partial charges\n",
                a_tp1-a_tp0, bCharge ? "with" : "without");

        fprintf(fplog, "\nWill insert %d times in each frame of %s\n",
                nsteps, opt2fn("-rerun", nfile, fnm));
    }

    if (!bCavity)
    {
        if (inputrec->nstlist > 1)
        {
            if (drmax == 0 && a_tp1-a_tp0 == 1)
            {
                gmx_fatal(FARGS, "Re-using the neighborlist %d times for insertions of a single atom in a sphere of radius %f does not make sense", inputrec->nstlist, drmax);
            }
            if (fplog)
            {
                fprintf(fplog, "Will use the same neighborlist for %d insertions in a sphere of radius %f\n", inputrec->nstlist, drmax);
            }
        }
    }
    else
    {
        if (fplog)
        {
            fprintf(fplog, "Will insert randomly in a sphere of radius %f around the center of the cavity\n", drmax);
        }
    }

    ngid   = groups->grps[egcENER].nr;
    gid_tp = GET_CGINFO_GID(fr->cginfo[cg_tp]);
    nener  = 1 + ngid;
    if (bDispCorr)
    {
        nener += 1;
    }
    if (bCharge)
    {
        nener += ngid;
        if (bRFExcl)
        {
            nener += 1;
        }
        if (EEL_FULL(fr->eeltype))
        {
            nener += 1;
        }
    }
    snew(sum_UgembU, nener);

    /* Initialize random generator */
    tpi_rand = gmx_rng_init(inputrec->ld_seed);

    if (MASTER(cr))
    {
        fp_tpi = xvgropen(opt2fn("-tpi", nfile, fnm),
                          "TPI energies", "Time (ps)",
                          "(kJ mol\\S-1\\N) / (nm\\S3\\N)", oenv);
        xvgr_subtitle(fp_tpi, "f. are averages over one frame", oenv);
        snew(leg, 4+nener);
        e = 0;
        sprintf(str, "-kT log(<Ve\\S-\\betaU\\N>/<V>)");
        leg[e++] = strdup(str);
        sprintf(str, "f. -kT log<e\\S-\\betaU\\N>");
        leg[e++] = strdup(str);
        sprintf(str, "f. <e\\S-\\betaU\\N>");
        leg[e++] = strdup(str);
        sprintf(str, "f. V");
        leg[e++] = strdup(str);
        sprintf(str, "f. <Ue\\S-\\betaU\\N>");
        leg[e++] = strdup(str);
        for (i = 0; i < ngid; i++)
        {
            sprintf(str, "f. <U\\sVdW %s\\Ne\\S-\\betaU\\N>",
                    *(groups->grpname[groups->grps[egcENER].nm_ind[i]]));
            leg[e++] = strdup(str);
        }
        if (bDispCorr)
        {
            sprintf(str, "f. <U\\sdisp c\\Ne\\S-\\betaU\\N>");
            leg[e++] = strdup(str);
        }
        if (bCharge)
        {
            for (i = 0; i < ngid; i++)
            {
                sprintf(str, "f. <U\\sCoul %s\\Ne\\S-\\betaU\\N>",
                        *(groups->grpname[groups->grps[egcENER].nm_ind[i]]));
                leg[e++] = strdup(str);
            }
            if (bRFExcl)
            {
                sprintf(str, "f. <U\\sRF excl\\Ne\\S-\\betaU\\N>");
                leg[e++] = strdup(str);
            }
            if (EEL_FULL(fr->eeltype))
            {
                sprintf(str, "f. <U\\sCoul recip\\Ne\\S-\\betaU\\N>");
                leg[e++] = strdup(str);
            }
        }
        xvgr_legend(fp_tpi, 4+nener, (const char**)leg, oenv);
        for (i = 0; i < 4+nener; i++)
        {
            sfree(leg[i]);
        }
        sfree(leg);
    }
    clear_rvec(x_init);
    V_all     = 0;
    VembU_all = 0;

    invbinw = 10;
    nbin    = 10;
    snew(bin, nbin);

    bNotLastFrame = read_first_frame(oenv, &status, opt2fn("-rerun", nfile, fnm),
                                     &rerun_fr, TRX_NEED_X);
    frame = 0;

    if (rerun_fr.natoms - (bCavity ? nat_cavity : 0) !=
            mdatoms->nr - (a_tp1 - a_tp0))
    {
        gmx_fatal(FARGS, "Number of atoms in trajectory (%d)%s "
                  "is not equal the number in the run input file (%d) "
                  "minus the number of atoms to insert (%d)\n",
                  rerun_fr.natoms, bCavity ? " minus one" : "",
                  mdatoms->nr, a_tp1-a_tp0);
    }

    refvolshift = log(det(rerun_fr.box));

#ifdef GMX_X86_SSE2
    /* Make sure we don't detect SSE overflow generated before this point */
    gmx_mm_check_and_reset_overflow();
#endif

    while (bNotLastFrame)
    {
        lambda = rerun_fr.lambda;
        t      = rerun_fr.time;

        sum_embU = 0;
        for (e = 0; e < nener; e++)
        {
            sum_UgembU[e] = 0;
        }

        /* Copy the coordinates from the input trajectory */
        for (i = 0; i < rerun_fr.natoms; i++)
        {
            copy_rvec(rerun_fr.x[i], state->x[i]);
        }
        copy_mat(rerun_fr.box, state->box);

        V    = det(state->box);
        logV = log(V);

        bStateChanged = TRUE;
        bNS           = TRUE;
        for (step = 0; step < nsteps; step++)
        {
            /* In parallel all nodes generate all random configurations,
             * so that the result is identical to a single-CPU TPI run.
             */
            if (!bCavity)
            {
                /* Random insertion in the whole volume */
                bNS = (step % inputrec->nstlist == 0);
                if (bNS)
                {
                    /* Generate a random position in the box */
                    x_init[XX] = gmx_rng_uniform_real(tpi_rand)*state->box[XX][XX];
                    x_init[YY] = gmx_rng_uniform_real(tpi_rand)*state->box[YY][YY];
                    x_init[ZZ] = gmx_rng_uniform_real(tpi_rand)*state->box[ZZ][ZZ];
                }
                if (inputrec->nstlist == 1)
                {
                    copy_rvec(x_init, x_tp);
                }
                else
                {
                    /* Generate coordinates within |dx|=drmax of x_init */
                    do
                    {
                        dx[XX] = (2*gmx_rng_uniform_real(tpi_rand) - 1)*drmax;
                        dx[YY] = (2*gmx_rng_uniform_real(tpi_rand) - 1)*drmax;
                        dx[ZZ] = (2*gmx_rng_uniform_real(tpi_rand) - 1)*drmax;
                    }
                    while (norm2(dx) > drmax*drmax);
                    rvec_add(x_init, dx, x_tp);
                }
            }
            else
            {
                /* Random insertion around a cavity location
                 * given by the last coordinate of the trajectory.
                 */
                if (step == 0)
                {
                    if (nat_cavity == 1)
                    {
                        /* Copy the location of the cavity */
                        copy_rvec(rerun_fr.x[rerun_fr.natoms-1], x_init);
                    }
                    else
                    {
                        /* Determine the center of mass of the last molecule */
                        clear_rvec(x_init);
                        mass_tot = 0;
                        for (i = 0; i < nat_cavity; i++)
                        {
                            for (d = 0; d < DIM; d++)
                            {
                                x_init[d] +=
                                    mass_cavity[i]*rerun_fr.x[rerun_fr.natoms-nat_cavity+i][d];
                            }
                            mass_tot += mass_cavity[i];
                        }
                        for (d = 0; d < DIM; d++)
                        {
                            x_init[d] /= mass_tot;
                        }
                    }
                }
                /* Generate coordinates within |dx|=drmax of x_init */
                do
                {
                    dx[XX] = (2*gmx_rng_uniform_real(tpi_rand) - 1)*drmax;
                    dx[YY] = (2*gmx_rng_uniform_real(tpi_rand) - 1)*drmax;
                    dx[ZZ] = (2*gmx_rng_uniform_real(tpi_rand) - 1)*drmax;
                }
                while (norm2(dx) > drmax*drmax);
                rvec_add(x_init, dx, x_tp);
            }

            if (a_tp1 - a_tp0 == 1)
            {
                /* Insert a single atom, just copy the insertion location */
                copy_rvec(x_tp, state->x[a_tp0]);
            }
            else
            {
                /* Copy the coordinates from the top file */
                for (i = a_tp0; i < a_tp1; i++)
                {
                    copy_rvec(x_mol[i-a_tp0], state->x[i]);
                }
                /* Rotate the molecule randomly */
                rotate_conf(a_tp1-a_tp0, state->x+a_tp0, NULL,
                            2*M_PI*gmx_rng_uniform_real(tpi_rand),
                            2*M_PI*gmx_rng_uniform_real(tpi_rand),
                            2*M_PI*gmx_rng_uniform_real(tpi_rand));
                /* Shift to the insertion location */
                for (i = a_tp0; i < a_tp1; i++)
                {
                    rvec_inc(state->x[i], x_tp);
                }
            }

            /* Check if this insertion belongs to this node */
            bOurStep = TRUE;
            if (PAR(cr))
            {
                switch (inputrec->eI)
                {
                case eiTPI:
                    bOurStep = ((step / inputrec->nstlist) % nnodes == cr->nodeid);
                    break;
                case eiTPIC:
                    bOurStep = (step % nnodes == cr->nodeid);
                    break;
                default:
                    gmx_fatal(FARGS, "Unknown integrator %s", ei_names[inputrec->eI]);
                }
            }
            if (bOurStep)
            {
                /* Clear some matrix variables  */
                clear_mat(force_vir);
                clear_mat(shake_vir);
                clear_mat(vir);
                clear_mat(pres);

                /* Set the charge group center of mass of the test particle */
                copy_rvec(x_init, fr->cg_cm[top->cgs.nr-1]);

                /* Calc energy (no forces) on new positions.
                 * Since we only need the intermolecular energy
                 * and the RF exclusion terms of the inserted molecule occur
                 * within a single charge group we can pass NULL for the graph.
                 * This also avoids shifts that would move charge groups
                 * out of the box.
                 *
                 * Some checks above ensure that we cannot have
                 * twin-range interactions together with nstlist > 1,
                 * therefore we do not need to remember the LR energies.
                 */
                /* Make do_force do a single node force calculation */
                cr->nnodes = 1;
                do_force(fplog, cr, inputrec,
                         step, nrnb, wcycle, top, top_global, &top_global->groups,
                         state->box, state->x, &state->hist,
                         f, force_vir, mdatoms, enerd, fcd,
                         state->lambda,
                         NULL, fr, NULL, mu_tot, t, NULL, NULL, FALSE,
                         GMX_FORCE_NONBONDED | GMX_FORCE_ENERGY |
                         (bNS ? GMX_FORCE_DYNAMICBOX | GMX_FORCE_NS | GMX_FORCE_DO_LR : 0) |
                         (bStateChanged ? GMX_FORCE_STATECHANGED : 0));
                cr->nnodes    = nnodes;
                bStateChanged = FALSE;
                bNS           = FALSE;

                /* Calculate long range corrections to pressure and energy */
                calc_dispcorr(fplog, inputrec, fr, step, top_global->natoms, state->box,
                              lambda, pres, vir, &prescorr, &enercorr, &dvdlcorr);
                /* figure out how to rearrange the next 4 lines MRS 8/4/2009 */
                enerd->term[F_DISPCORR]  = enercorr;
                enerd->term[F_EPOT]     += enercorr;
                enerd->term[F_PRES]     += prescorr;
                enerd->term[F_DVDL_VDW] += dvdlcorr;

                epot               = enerd->term[F_EPOT];
                bEnergyOutOfBounds = FALSE;
#ifdef GMX_X86_SSE2
                /* With SSE the energy can overflow, check for this */
                if (gmx_mm_check_and_reset_overflow())
                {
                    if (debug)
                    {
                        fprintf(debug, "Found an SSE overflow, assuming the energy is out of bounds\n");
                    }
                    bEnergyOutOfBounds = TRUE;
                }
#endif
                /* If the compiler doesn't optimize this check away
                 * we catch the NAN energies.
                 * The epot>GMX_REAL_MAX check catches inf values,
                 * which should nicely result in embU=0 through the exp below,
                 * but it does not hurt to check anyhow.
                 */
                /* Non-bonded interactions usually diverge at r=0.
                 * With tabulated interaction functions the first few entries
                 * should be capped in a consistent fashion between
                 * repulsion, dispersion and Coulomb to avoid accidental
                 * negative values in the total energy.
                 * The table generation code in tables.c does this.
                 * With user tables the user should take care of this.
                 */
                if (epot != epot || epot > GMX_REAL_MAX)
                {
                    bEnergyOutOfBounds = TRUE;
                }
                if (bEnergyOutOfBounds)
                {
                    if (debug)
                    {
                        fprintf(debug, "\n  time %.3f, step %d: non-finite energy %f, using exp(-bU)=0\n", t, step, epot);
                    }
                    embU = 0;
                }
                else
                {
                    embU      = exp(-beta*epot);
                    sum_embU += embU;
                    /* Determine the weighted energy contributions of each energy group */
                    e                = 0;
                    sum_UgembU[e++] += epot*embU;
                    if (fr->bBHAM)
                    {
                        for (i = 0; i < ngid; i++)
                        {
                            sum_UgembU[e++] +=
                                (enerd->grpp.ener[egBHAMSR][GID(i, gid_tp, ngid)] +
                                 enerd->grpp.ener[egBHAMLR][GID(i, gid_tp, ngid)])*embU;
                        }
                    }
                    else
                    {
                        for (i = 0; i < ngid; i++)
                        {
                            sum_UgembU[e++] +=
                                (enerd->grpp.ener[egLJSR][GID(i, gid_tp, ngid)] +
                                 enerd->grpp.ener[egLJLR][GID(i, gid_tp, ngid)])*embU;
                        }
                    }
                    if (bDispCorr)
                    {
                        sum_UgembU[e++] += enerd->term[F_DISPCORR]*embU;
                    }
                    if (bCharge)
                    {
                        for (i = 0; i < ngid; i++)
                        {
                            sum_UgembU[e++] +=
                                (enerd->grpp.ener[egCOULSR][GID(i, gid_tp, ngid)] +
                                 enerd->grpp.ener[egCOULLR][GID(i, gid_tp, ngid)])*embU;
                        }
                        if (bRFExcl)
                        {
                            sum_UgembU[e++] += enerd->term[F_RF_EXCL]*embU;
                        }
                        if (EEL_FULL(fr->eeltype))
                        {
                            sum_UgembU[e++] += enerd->term[F_COUL_RECIP]*embU;
                        }
                    }
                }

                if (embU == 0 || beta*epot > bU_bin_limit)
                {
                    bin[0]++;
                }
                else
                {
                    i = (int)((bU_logV_bin_limit
                               - (beta*epot - logV + refvolshift))*invbinw
                              + 0.5);
                    if (i < 0)
                    {
                        i = 0;
                    }
                    if (i >= nbin)
                    {
                        realloc_bins(&bin, &nbin, i+10);
                    }
                    bin[i]++;
                }

                if (debug)
                {
                    fprintf(debug, "TPI %7d %12.5e %12.5f %12.5f %12.5f\n",
                            step, epot, x_tp[XX], x_tp[YY], x_tp[ZZ]);
                }

                if (dump_pdb && epot <= dump_ener)
                {
                    sprintf(str, "t%g_step%d.pdb", t, step);
                    sprintf(str2, "t: %f step %d ener: %f", t, step, epot);
                    write_sto_conf_mtop(str, str2, top_global, state->x, state->v,
                                        inputrec->ePBC, state->box);
                }
            }
        }

        if (PAR(cr))
        {
            /* When running in parallel sum the energies over the processes */
            gmx_sumd(1,    &sum_embU, cr);
            gmx_sumd(nener, sum_UgembU, cr);
        }

        frame++;
        V_all     += V;
        VembU_all += V*sum_embU/nsteps;

        if (fp_tpi)
        {
            if (bVerbose || frame%10 == 0 || frame < 10)
            {
                fprintf(stderr, "mu %10.3e <mu> %10.3e\n",
                        -log(sum_embU/nsteps)/beta, -log(VembU_all/V_all)/beta);
            }

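            /* If no insertion in this frame (or so far) had a nonzero
             * Boltzmann weight, -log(0) is undefined, so the sentinel
             * value 20/beta is written instead. */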
            fprintf(fp_tpi, "%10.3f %12.5e %12.5e %12.5e %12.5e",
                    t,
                    VembU_all == 0 ? 20/beta : -log(VembU_all/V_all)/beta,
                    sum_embU == 0  ? 20/beta : -log(sum_embU/nsteps)/beta,
                    sum_embU/nsteps, V);
            for (e = 0; e < nener; e++)
            {
                fprintf(fp_tpi, " %12.5e", sum_UgembU[e]/nsteps);
            }
            fprintf(fp_tpi, "\n");
            fflush(fp_tpi);
        }

        bNotLastFrame = read_next_frame(oenv, status, &rerun_fr);
    } /* End of the loop  */
    runtime_end(runtime);

    close_trj(status);

    if (fp_tpi != NULL)
    {
        gmx_fio_fclose(fp_tpi);
    }

    if (fplog != NULL)
    {
        fprintf(fplog, "\n");
        fprintf(fplog, "  <V>  = %12.5e nm^3\n", V_all/frame);
        fprintf(fplog, "  <mu> = %12.5e kJ/mol\n", -log(VembU_all/V_all)/beta);
    }

    /* Write the Boltzmann factor histogram */
    if (PAR(cr))
    {
        /* When running in parallel sum the bins over the processes */
        i = nbin;
        global_max(cr, &i);
        realloc_bins(&bin, &nbin, i);
        gmx_sumd(nbin, bin, cr);
    }
    if (MASTER(cr))
    {
        fp_tpi = xvgropen(opt2fn("-tpid", nfile, fnm),
                          "TPI energy distribution",
                          "\\betaU - log(V/<V>)", "count", oenv);
        sprintf(str, "number \\betaU > %g: %9.3e", bU_bin_limit, bin[0]);
        xvgr_subtitle(fp_tpi, str, oenv);
        xvgr_legend(fp_tpi, 2, (const char **)tpid_leg, oenv);
        for (i = nbin-1; i > 0; i--)
        {
            bUlogV = -i/invbinw + bU_logV_bin_limit - refvolshift + log(V_all/frame);
            fprintf(fp_tpi, "%6.2f %10d %12.5e\n",
                    bUlogV,
                    (int)(bin[i]+0.5),
                    bin[i]*exp(-bUlogV)*V_all/VembU_all);
        }
        gmx_fio_fclose(fp_tpi);
    }
    sfree(bin);

    sfree(sum_UgembU);

    runtime->nsteps_done = frame*inputrec->nsteps;

    return 0;
}
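
The loop above estimates the excess chemical potential by Widom test-particle insertion, mu_ex = -kT ln( <V exp(-beta U)> / <V> ), accumulating V and V times the average of exp(-beta U) per frame. A minimal standalone sketch of that final estimate (not GROMACS code; the function and array names are hypothetical):

#include <math.h>

/* volume[f]   : box volume of frame f
 * sum_embU[f] : sum of exp(-beta*U) over all insertions in frame f
 * Returns the Widom estimate -ln(<V exp(-beta U)>/<V>)/beta. */
static double widom_mu_excess(const double *volume, const double *sum_embU,
                              int n_frames, int n_insertions, double beta)
{
    double V_all = 0.0, VembU_all = 0.0;
    int    f;

    for (f = 0; f < n_frames; f++)
    {
        V_all     += volume[f];
        VembU_all += volume[f]*sum_embU[f]/n_insertions;
    }
    if (VembU_all <= 0.0)
    {
        return 20.0/beta; /* same sentinel value the code above prints */
    }
    return -log(VembU_all/V_all)/beta;
}
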
Exemple #7
0
void init_multisystem(t_commrec *cr, int nsim, char **multidirs,
                      int nfile, const t_filenm fnm[], gmx_bool bParFn)
{
    gmx_multisim_t *ms;
    int             nnodes, nnodpersim, sim, i, ftp;
    char            buf[256];
#ifdef GMX_MPI
    MPI_Group       mpi_group_world;
#endif
    int            *rank;

#ifndef GMX_MPI
    if (nsim > 1)
    {
        gmx_fatal(FARGS, "This binary is compiled without MPI support, can not do multiple simulations.");
    }
#endif

    nnodes  = cr->nnodes;
    if (nnodes % nsim != 0)
    {
        gmx_fatal(FARGS, "The number of nodes (%d) is not a multiple of the number of simulations (%d)", nnodes, nsim);
    }

    nnodpersim = nnodes/nsim;
    sim        = cr->nodeid/nnodpersim;

    if (debug)
    {
        fprintf(debug, "We have %d simulations, %d nodes per simulation, local simulation is %d\n", nsim, nnodpersim, sim);
    }

    snew(ms, 1);
    cr->ms   = ms;
    ms->nsim = nsim;
    ms->sim  = sim;
#ifdef GMX_MPI
    /* Create a communicator for the master nodes */
    snew(rank, ms->nsim);
    for (i = 0; i < ms->nsim; i++)
    {
        rank[i] = i*nnodpersim;
    }
    MPI_Comm_group(MPI_COMM_WORLD, &mpi_group_world);
    MPI_Group_incl(mpi_group_world, nsim, rank, &ms->mpi_group_masters);
    sfree(rank);
    MPI_Comm_create(MPI_COMM_WORLD, ms->mpi_group_masters,
                    &ms->mpi_comm_masters);

#if !defined(GMX_THREAD_MPI) && !defined(MPI_IN_PLACE_EXISTS)
    /* initialize the MPI_IN_PLACE replacement buffers */
    snew(ms->mpb, 1);
    ms->mpb->ibuf        = NULL;
    ms->mpb->libuf       = NULL;
    ms->mpb->fbuf        = NULL;
    ms->mpb->dbuf        = NULL;
    ms->mpb->ibuf_alloc  = 0;
    ms->mpb->libuf_alloc = 0;
    ms->mpb->fbuf_alloc  = 0;
    ms->mpb->dbuf_alloc  = 0;
#endif

#endif

    /* Reduce the intra-simulation communication */
    cr->sim_nodeid = cr->nodeid % nnodpersim;
    cr->nnodes     = nnodpersim;
#ifdef GMX_MPI
    MPI_Comm_split(MPI_COMM_WORLD, sim, cr->sim_nodeid, &cr->mpi_comm_mysim);
    cr->mpi_comm_mygroup = cr->mpi_comm_mysim;
    cr->nodeid           = cr->sim_nodeid;
#endif

    if (debug)
    {
        fprintf(debug, "This is simulation %d", cr->ms->sim);
        if (PAR(cr))
        {
            fprintf(debug, ", local number of nodes %d, local nodeid %d",
                    cr->nnodes, cr->sim_nodeid);
        }
        fprintf(debug, "\n\n");
    }

    if (multidirs)
    {
        int ret;
        if (debug)
        {
            fprintf(debug, "Changing to directory %s\n", multidirs[cr->ms->sim]);
        }
        if (chdir(multidirs[cr->ms->sim]) != 0)
        {
            gmx_fatal(FARGS, "Couldn't change directory to %s: %s",
                      multidirs[cr->ms->sim],
                      strerror(errno));
        }
    }
    else if (bParFn)
    {
        /* Patch output and tpx, cpt and rerun input file names */
        for (i = 0; (i < nfile); i++)
        {
            /* Because of possible multiple extensions per type we must look
             * at the actual file name
             */
            if (is_output(&fnm[i]) ||
                    fnm[i].ftp == efTPX || fnm[i].ftp == efCPT ||
                    strcmp(fnm[i].opt, "-rerun") == 0)
            {
                ftp = fn2ftp(fnm[i].fns[0]);
                par_fn(fnm[i].fns[0], ftp, cr, TRUE, FALSE, buf, 255);
                sfree(fnm[i].fns[0]);
                fnm[i].fns[0] = gmx_strdup(buf);
            }
        }
    }
}
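
init_multisystem() above lays the nnodes world ranks out as nsim contiguous blocks: color = rank/nnodpersim picks the simulation, and ranks 0, nnodpersim, 2*nnodpersim, ... form the masters group. A minimal standalone MPI sketch of that split (plain MPI, no GROMACS types; nsim = 2 is just an illustrative value):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int      rank, nnodes, nsim = 2;
    MPI_Comm sim_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nnodes);

    if (nnodes % nsim == 0)
    {
        int nnodpersim = nnodes/nsim;
        int sim        = rank/nnodpersim;   /* which simulation this rank joins */
        int sim_rank   = rank % nnodpersim; /* rank within that simulation */

        MPI_Comm_split(MPI_COMM_WORLD, sim, sim_rank, &sim_comm);
        printf("world rank %d -> simulation %d, local rank %d\n",
               rank, sim, sim_rank);
        MPI_Comm_free(&sim_comm);
    }
    MPI_Finalize();
    return 0;
}
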
Exemple #8
0
void init_disres(FILE *fplog, const gmx_mtop_t *mtop,
                 t_inputrec *ir, const t_commrec *cr,
                 t_fcdata *fcd, t_state *state, gmx_bool bIsREMD)
{
    int                  fa, nmol, npair, np;
    t_disresdata        *dd;
    history_t           *hist;
    gmx_mtop_ilistloop_t iloop;
    t_ilist             *il;
    char                *ptr;

    dd = &(fcd->disres);

    if (gmx_mtop_ftype_count(mtop, F_DISRES) == 0)
    {
        dd->nres = 0;

        return;
    }

    if (fplog)
    {
        fprintf(fplog, "Initializing the distance restraints\n");
    }


    if (ir->eDisre == edrEnsemble)
    {
        gmx_fatal(FARGS, "Sorry, distance restraints with ensemble averaging over multiple molecules in one system are not functional in this version of GROMACS");
    }

    dd->dr_weighting = ir->eDisreWeighting;
    dd->dr_fc        = ir->dr_fc;
    if (EI_DYNAMICS(ir->eI))
    {
        dd->dr_tau   = ir->dr_tau;
    }
    else
    {
        dd->dr_tau   = 0.0;
    }
    if (dd->dr_tau == 0.0)
    {
        dd->dr_bMixed = FALSE;
        dd->ETerm     = 0.0;
    }
    else
    {
        dd->dr_bMixed = ir->bDisreMixed;
        dd->ETerm     = std::exp(-(ir->delta_t/ir->dr_tau));
    }
    dd->ETerm1        = 1.0 - dd->ETerm;

    dd->nres  = 0;
    dd->npair = 0;
    iloop     = gmx_mtop_ilistloop_init(mtop);
    while (gmx_mtop_ilistloop_next(iloop, &il, &nmol))
    {
        np = 0;
        for (fa = 0; fa < il[F_DISRES].nr; fa += 3)
        {
            np++;
            npair = mtop->ffparams.iparams[il[F_DISRES].iatoms[fa]].disres.npair;
            if (np == npair)
            {
                dd->nres  += (ir->eDisre == edrEnsemble ? 1 : nmol)*npair;
                dd->npair += nmol*npair;
                np         = 0;
            }
        }
    }

    if (cr && PAR(cr))
    {
        /* Temporary check, will be removed when disre is implemented with DD */
        const char *notestr = "NOTE: atoms involved in distance restraints should be within the same domain. If this is not the case mdrun generates a fatal error. If you encounter this, use a single MPI rank (Verlet+OpenMP+GPUs work fine).";

        if (MASTER(cr))
        {
            fprintf(stderr, "\n%s\n\n", notestr);
        }
        if (fplog)
        {
            fprintf(fplog, "%s\n", notestr);
        }

        if (dd->dr_tau != 0 || ir->eDisre == edrEnsemble || cr->ms != NULL ||
            dd->nres != dd->npair)
        {
            gmx_fatal(FARGS, "Time or ensemble averaged or multiple pair distance restraints do not work (yet) with domain decomposition, use a single MPI rank%s", cr->ms ? " per simulation" : "");
        }
        if (ir->nstdisreout != 0)
        {
            if (fplog)
            {
                fprintf(fplog, "\nWARNING: Can not write distance restraint data to energy file with domain decomposition\n\n");
            }
            if (MASTER(cr))
            {
                fprintf(stderr, "\nWARNING: Can not write distance restraint data to energy file with domain decomposition\n");
            }
            ir->nstdisreout = 0;
        }
    }

    snew(dd->rt, dd->npair);

    if (dd->dr_tau != 0.0)
    {
        hist = &state->hist;
        /* Set the "history lack" factor to 1 */
        state->flags     |= (1<<estDISRE_INITF);
        hist->disre_initf = 1.0;
        /* Allocate space for the r^-3 time averages */
        state->flags     |= (1<<estDISRE_RM3TAV);
        hist->ndisrepairs = dd->npair;
        snew(hist->disre_rm3tav, hist->ndisrepairs);
    }
    /* Allocate space for a copy of rm3tav,
     * so we can call do_force without modifying the state.
     */
    snew(dd->rm3tav, dd->npair);

    /* Allocate Rt_6 and Rtav_6 consecutively in memory so they can be
     * averaged over the processors in one call (in calc_disre_R_6)
     */
    snew(dd->Rt_6, 2*dd->nres);
    dd->Rtav_6 = &(dd->Rt_6[dd->nres]);

    ptr = getenv("GMX_DISRE_ENSEMBLE_SIZE");
    if (cr && cr->ms != NULL && ptr != NULL && !bIsREMD)
    {
#if GMX_MPI
        dd->nsystems = 0;
        sscanf(ptr, "%d", &dd->nsystems);
        if (fplog)
        {
            fprintf(fplog, "Found GMX_DISRE_ENSEMBLE_SIZE set to %d systems per ensemble\n", dd->nsystems);
        }
        /* This check is only valid on MASTER(cr), so probably
         * ensemble-averaged distance restraints are broken on more
         * than one processor per simulation system. */
        if (MASTER(cr))
        {
            check_multi_int(fplog, cr->ms, dd->nsystems,
                            "the number of systems per ensemble",
                            FALSE);
        }
        gmx_bcast_sim(sizeof(int), &dd->nsystems, cr);

        /* We used to allow any value of nsystems which was a divisor
         * of ms->nsim. But this required an extra communicator which
         * was stored in t_fcdata. This pulled in mpi.h in nearly all C files.
         */
        if (!(cr->ms->nsim == 1 || cr->ms->nsim == dd->nsystems))
        {
            gmx_fatal(FARGS, "GMX_DISRE_ENSEMBLE_SIZE (%d) is not equal to 1 or the number of systems (option -multi) %d", dd->nsystems, cr->ms->nsim);
        }
        if (fplog)
        {
            fprintf(fplog, "Our ensemble consists of systems:");
            for (int i = 0; i < dd->nsystems; i++)
            {
                fprintf(fplog, " %d",
                        (cr->ms->sim/dd->nsystems)*dd->nsystems+i);
            }
            fprintf(fplog, "\n");
        }
        snew(dd->Rtl_6, dd->nres);
#endif
    }
    else
    {
        dd->nsystems = 1;
        dd->Rtl_6    = dd->Rt_6;
    }

    if (dd->npair > 0)
    {
        if (fplog)
        {
            fprintf(fplog, "There are %d distance restraints involving %d atom pairs\n", dd->nres, dd->npair);
        }
        /* Have to avoid g_disre de-referencing cr blindly, mdrun not
         * doing consistency checks for ensemble-averaged distance
         * restraints when that's not happening, and only doing those
         * checks from appropriate processes (since check_multi_int is
         * too broken to check whether the communication will
         * succeed...) */
        if (cr && cr->ms && dd->nsystems > 1 && MASTER(cr))
        {
            check_multi_int(fplog, cr->ms, fcd->disres.nres,
                            "the number of distance restraints",
                            FALSE);
        }
        please_cite(fplog, "Tropp80a");
        please_cite(fplog, "Torda89a");
    }
}
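
The ETerm = exp(-delta_t/dr_tau) factor set up above drives an exponential running average of r^-3 for time-averaged restraints. A minimal sketch of that recursion (standalone, not the GROMACS implementation; the function name is hypothetical):

#include <math.h>

/* One update of the exponentially weighted average of r^-3:
 * new_avg = ETerm*old_avg + (1 - ETerm)*r^-3, with ETerm = exp(-dt/tau). */
static double update_rm3_average(double rm3_avg_old, double r_instantaneous,
                                 double delta_t, double tau)
{
    double ETerm  = exp(-delta_t/tau);
    double ETerm1 = 1.0 - ETerm;
    double rm3    = pow(r_instantaneous, -3.0);

    return ETerm*rm3_avg_old + ETerm1*rm3;
}
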
Exemple #9
0
/* TODO Specialize this routine into init-time and loop-time versions?
   e.g. bReadEkin is only true when restoring from checkpoint */
void compute_globals(FILE *fplog, gmx_global_stat *gstat, t_commrec *cr, t_inputrec *ir,
                     t_forcerec *fr, gmx_ekindata_t *ekind,
                     t_state *state, t_mdatoms *mdatoms,
                     t_nrnb *nrnb, t_vcm *vcm, gmx_wallcycle_t wcycle,
                     gmx_enerdata_t *enerd, tensor force_vir, tensor shake_vir, tensor total_vir,
                     tensor pres, rvec mu_tot, gmx_constr_t constr,
                     gmx::SimulationSignaller *signalCoordinator,
                     matrix box, int *totalNumberOfBondedInteractions,
                     gmx_bool *bSumEkinhOld, int flags)
{
    tensor   corr_vir, corr_pres;
    gmx_bool bEner, bPres, bTemp;
    gmx_bool bStopCM, bGStat,
             bReadEkin, bEkinAveVel, bScaleEkin, bConstrain;
    real     prescorr, enercorr, dvdlcorr, dvdl_ekin;

    /* translate CGLO flags to gmx_booleans */
    bStopCM       = flags & CGLO_STOPCM;
    bGStat        = flags & CGLO_GSTAT;
    bReadEkin     = (flags & CGLO_READEKIN);
    bScaleEkin    = (flags & CGLO_SCALEEKIN);
    bEner         = flags & CGLO_ENERGY;
    bTemp         = flags & CGLO_TEMPERATURE;
    bPres         = (flags & CGLO_PRESSURE);
    bConstrain    = (flags & CGLO_CONSTRAINT);

    /* We calculate a full-state kinetic energy either with full-step velocity
       Verlet, or at the half step when we need the pressure */

    bEkinAveVel = (ir->eI == eiVV || (ir->eI == eiVVAK && bPres) || bReadEkin);

    /* In initialization, this sums the SHAKE virial with vv, and sums
       ekinh_old with leapfrog (or whenever ekinh_old is being calculated for other reasons) */

    /* ########## Kinetic energy  ############## */

    if (bTemp)
    {
        /* Non-equilibrium MD: this is parallelized, but only does communication
         * when there really is NEMD.
         */

        if (PAR(cr) && (ekind->bNEMD))
        {
            accumulate_u(cr, &(ir->opts), ekind);
        }
        if (!bReadEkin)
        {
            calc_ke_part(state, &(ir->opts), mdatoms, ekind, nrnb, bEkinAveVel);
        }
    }

    /* Calculate the center of mass velocity if necessary, also parallelized */
    if (bStopCM)
    {
        calc_vcm_grp(0, mdatoms->homenr, mdatoms,
                     as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), vcm);
    }

    if (bTemp || bStopCM || bPres || bEner || bConstrain)
    {
        if (!bGStat)
        {
            /* We will not sum ekinh_old,
             * so signal that we still have to do it.
             */
            *bSumEkinhOld = TRUE;

        }
        else
        {
            gmx::ArrayRef<real> signalBuffer = signalCoordinator->getCommunicationBuffer();
            if (PAR(cr))
            {
                wallcycle_start(wcycle, ewcMoveE);
                global_stat(gstat, cr, enerd, force_vir, shake_vir, mu_tot,
                            ir, ekind, constr, bStopCM ? vcm : NULL,
                            signalBuffer.size(), signalBuffer.data(),
                            totalNumberOfBondedInteractions,
                            *bSumEkinhOld, flags);
                wallcycle_stop(wcycle, ewcMoveE);
            }
            signalCoordinator->finalizeSignals();
            *bSumEkinhOld = FALSE;
        }
    }

    if (!ekind->bNEMD && debug && bTemp && (vcm->nr > 0))
    {
        correct_ekin(debug,
                     0, mdatoms->homenr,
                     as_rvec_array(state->v.data()), vcm->group_p[0],
                     mdatoms->massT, mdatoms->tmass, ekind->ekin);
    }

    /* Do center of mass motion removal */
    if (bStopCM)
    {
        check_cm_grp(fplog, vcm, ir, 1);
        do_stopcm_grp(0, mdatoms->homenr, mdatoms->cVCM,
                      as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), vcm);
        inc_nrnb(nrnb, eNR_STOPCM, mdatoms->homenr);
    }

    if (bEner)
    {
        /* Calculate the amplitude of the cosine velocity profile */
        ekind->cosacc.vcos = ekind->cosacc.mvcos/mdatoms->tmass;
    }

    if (bTemp)
    {
        /* Sum the kinetic energies of the groups & calc temp */
        /* compute full step kinetic energies if vv, or if vv-avek and we are computing the pressure with inputrecNptTrotter */
        /* Three main cases: VV with AveVel (md-vv), VV with AveEkin (md-vv-avek), leap with AveEkin (md).
           Leap with AveVel is not supported; it's not clear that it will actually work.
           bEkinAveVel: If TRUE, we simply multiply ekin by ekinscale to get a full step kinetic energy.
           If FALSE, we average ekinh_old and ekinh*ekinscale_nhc to get an averaged half step kinetic energy.
         */
        enerd->term[F_TEMP] = sum_ekin(&(ir->opts), ekind, &dvdl_ekin,
                                       bEkinAveVel, bScaleEkin);
        enerd->dvdl_lin[efptMASS] = (double) dvdl_ekin;

        enerd->term[F_EKIN] = trace(ekind->ekin);
    }

    /* ##########  Long range energy information ###### */

    if (bEner || bPres || bConstrain)
    {
        calc_dispcorr(ir, fr, box, state->lambda[efptVDW],
                      corr_pres, corr_vir, &prescorr, &enercorr, &dvdlcorr);
    }

    if (bEner)
    {
        enerd->term[F_DISPCORR]  = enercorr;
        enerd->term[F_EPOT]     += enercorr;
        enerd->term[F_DVDL_VDW] += dvdlcorr;
    }

    /* ########## Now pressure ############## */
    if (bPres || bConstrain)
    {

        m_add(force_vir, shake_vir, total_vir);

        /* Calculate pressure and apply LR correction if PPPM is used.
         * Use the box from last timestep since we already called update().
         */

        enerd->term[F_PRES] = calc_pres(fr->ePBC, ir->nwall, box, ekind->ekin, total_vir, pres);

        /* Calculate long range corrections to pressure and energy */
        /* This adds to enerd->term[F_PRES] and enerd->term[F_ETOT],
           and computes enerd->term[F_DISPCORR]. It also modifies the
           total_vir and pres tensors. */

        m_add(total_vir, corr_vir, total_vir);
        m_add(pres, corr_pres, pres);
        enerd->term[F_PDISPCORR] = prescorr;
        enerd->term[F_PRES]     += prescorr;
    }
}
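
compute_globals() above is driven by a bitmask of CGLO_* flags that it immediately unpacks into booleans. A standalone sketch of that flag pattern (the enum values here are illustrative stand-ins, not the real CGLO_* definitions):

#include <stdbool.h>
#include <stdio.h>

enum {
    CGLO_GSTAT       = 1 << 0,
    CGLO_ENERGY      = 1 << 1,
    CGLO_TEMPERATURE = 1 << 2,
    CGLO_PRESSURE    = 1 << 3,
    CGLO_STOPCM      = 1 << 4
};

int main(void)
{
    /* A caller ORs together the quantities it needs this step */
    int  flags = CGLO_GSTAT | CGLO_ENERGY | CGLO_TEMPERATURE | CGLO_PRESSURE;

    /* The callee translates the bits back into booleans, as above */
    bool bEner = (flags & CGLO_ENERGY)      != 0;
    bool bTemp = (flags & CGLO_TEMPERATURE) != 0;
    bool bPres = (flags & CGLO_PRESSURE)    != 0;

    printf("energy %d, temperature %d, pressure %d\n", bEner, bTemp, bPres);
    return 0;
}
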
Exemple #10
0
/* Assemble the positions of the group such that every node has all of them.
 * The atom indices are retrieved from anrs_loc[0..nr_loc]
 * Note that coll_ind[i] = i is needed in the serial case */
extern void communicate_group_positions(
        t_commrec     *cr,           /* Pointer to MPI communication data */
        rvec          *xcoll,        /* Collective array of positions */
        ivec          *shifts,       /* Collective array of shifts for xcoll (can be NULL) */
        ivec          *extra_shifts, /* (optional) Extra shifts since last time step */
        const gmx_bool bNS,          /* (optional) NS step, the shifts have changed */
        rvec          *x_loc,        /* Local positions on this node */
        const int      nr,           /* Total number of atoms in the group */
        const int      nr_loc,       /* Local number of atoms in the group */
        int           *anrs_loc,     /* Local atom numbers */
        int           *coll_ind,     /* Collective index */
        rvec          *xcoll_old,    /* (optional) Positions from the last time step,
                                        used to make group whole */
        matrix         box)          /* (optional) The box */
{
    int i;


    /* Zero out the groups' global position array */
    clear_rvecs(nr, xcoll);

    /* Put the local positions that this node has into the right place of
     * the collective array. Note that in the serial case, coll_ind[i] = i */
    for (i = 0; i < nr_loc; i++)
    {
        copy_rvec(x_loc[anrs_loc[i]], xcoll[coll_ind[i]]);
    }

    if (PAR(cr))
    {
        /* Add the arrays from all nodes together */
        gmx_sum(nr*3, xcoll[0], cr);
    }
    /* Now we have all the positions of the group in the xcoll array present on all
     * nodes.
     *
     * The rest of the code is for making the group whole again in case atoms changed
     * their PBC representation / crossed a box boundary. We only do that if the
     * shifts array is allocated. */
    if (NULL != shifts)
    {
        /* To make the group whole, start with a whole group and at each
         * step move the assembled positions to the periodic images closest
         * to the positions from the last step. First shift the positions with
         * the saved shift vectors (these are 0 the first time this routine is called!) */
        shift_positions_group(box, xcoll, shifts, nr);

        /* Now check if some shifts changed since the last step.
         * This only needs to be done when the shifts are expected to have changed,
         * i.e. after neighbor searching */
        if (bNS)
        {
            get_shifts_group(3, box, xcoll, nr, xcoll_old, extra_shifts);

            /* Shift with the additional shifts such that we get a whole group now */
            shift_positions_group(box, xcoll, extra_shifts, nr);

            /* Add the shift vectors together for the next time step */
            for (i = 0; i < nr; i++)
            {
                shifts[i][XX] += extra_shifts[i][XX];
                shifts[i][YY] += extra_shifts[i][YY];
                shifts[i][ZZ] += extra_shifts[i][ZZ];
            }

            /* Store current correctly-shifted positions for comparison in the next NS time step */
            for (i = 0; i < nr; i++)
            {
                copy_rvec(xcoll[i], xcoll_old[i]);
            }
        }
    }
}
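
The gather pattern used above, stripped to its essentials: every node writes its local positions into the slots given by the collective index, and the arrays are then summed over all nodes, so the slots a node does not own contribute zero. A minimal serial sketch (no MPI, no GROMACS types; the names are illustrative):

#include <string.h>

typedef double rvec3[3];

static void gather_group_positions(rvec3 *xcoll, int nr,
                                   const rvec3 *x_loc, const int *anrs_loc,
                                   const int *coll_ind, int nr_loc)
{
    int i;

    /* Zero the collective array; slots owned by other nodes stay zero
     * and would be filled by the (omitted) global sum. */
    memset(xcoll, 0, nr*sizeof(rvec3));

    /* Scatter the local positions into their collective slots;
     * in the serial case coll_ind[i] == i and this is a plain copy. */
    for (i = 0; i < nr_loc; i++)
    {
        memcpy(xcoll[coll_ind[i]], x_loc[anrs_loc[i]], sizeof(rvec3));
    }
}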