Code example #1
File: pme-pp.cpp Project: alfredog/gromacs
void gmx_pme_send_parameters(t_commrec *cr,
                             const interaction_const_t *ic,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y)
{
    int flags;

    flags = 0;
    if (EEL_PME(ic->eeltype))
    {
        flags |= PP_PME_CHARGE;
    }
    if (EVDW_PME(ic->vdwtype))
    {
        flags |= (PP_PME_SQRTC6 | PP_PME_SIGMA);
    }
    if (bFreeEnergy_q || bFreeEnergy_lj)
    {
        /* Assumes that the B state flags are in the bits just above
         * the ones for the A state. */
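        /* Example, assuming that layout: if the A-state charge flag is
         * bit 0 and its B-state counterpart is bit 1, flags = 0x1 becomes
         * 0x1 | (0x1 << 1) = 0x3, requesting both A- and B-state charges. */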
        flags |= (flags << 1);
    }

    gmx_pme_send_coeffs_coords(cr, flags,
                               chargeA, chargeB,
                               sqrt_c6A, sqrt_c6B, sigmaA, sigmaB,
                               NULL, NULL, 0, 0, maxshift_x, maxshift_y, -1);
}
Code example #2
void calc_verlet_buffer_size(const gmx_mtop_t *mtop, real boxvol,
                             const t_inputrec *ir,
                             real reference_temperature,
                             const verletbuf_list_setup_t *list_setup,
                             int *n_nonlin_vsite,
                             real *rlist)
{
    double                resolution;
    char                 *env;

    real                  particle_distance;
    real                  nb_clust_frac_pairs_not_in_list_at_cutoff;

    verletbuf_atomtype_t *att  = NULL;
    int                   natt = -1, i;
    double                reppow;
    real                  md1_ljd, d2_ljd, md3_ljd;
    real                  md1_ljr, d2_ljr, md3_ljr;
    real                  md1_el,  d2_el;
    real                  elfac;
    real                  kT_fac, mass_min;
    int                   ib0, ib1, ib;
    real                  rb, rl;
    real                  drift;

    if (reference_temperature < 0)
    {
        if (EI_MD(ir->eI) && ir->etc == etcNO)
        {
            /* This case should be handled outside calc_verlet_buffer_size */
            gmx_incons("calc_verlet_buffer_size called with an NVE ensemble and reference_temperature < 0");
        }

        /* We use the maximum temperature with multiple T-coupl groups.
         * We could use a per particle temperature, but since particles
         * interact, this might underestimate the buffer size.
         */
        reference_temperature = 0;
        for (i = 0; i < ir->opts.ngtc; i++)
        {
            if (ir->opts.tau_t[i] >= 0)
            {
                reference_temperature = max(reference_temperature,
                                            ir->opts.ref_t[i]);
            }
        }
    }

    /* Resolution of the buffer size */
    resolution = 0.001;

    env = getenv("GMX_VERLET_BUFFER_RES");
    if (env != NULL)
    {
        sscanf(env, "%lf", &resolution);
    }

    /* In an atom wise pair-list there would be no pairs in the list
     * beyond the pair-list cut-off.
     * However, we use a pair-list of groups vs groups of atoms.
     * For groups of 4 atoms, matching the parallelism of SSE instructions,
     * only 10% of the atom pairs are not in the list just beyond the cut-off.
     * As this percentage increases slowly compared to the decrease of the
     * Gaussian displacement distribution over this range, we can simply
     * reduce the drift by this fraction.
     * For larger groups, e.g. of 8 atoms, this fraction will be lower,
     * so then the buffer size will be on the conservative (large) side.
     *
     * Note that the formulas used here do not take into account
     * cancellation of errors which could occur by missing both
     * attractive and repulsive interactions.
     *
     * The only major assumption is homogeneous particle distribution.
     * For an inhomogeneous system, such as a liquid-vapor system,
     * the buffer will be underestimated. The actual energy drift
     * will be higher by the factor: local/homogeneous particle density.
     *
     * The results of this estimate have been checked against simulations.
     * In most cases the real drift differs by less than a factor of 2.
     */

    /* Worst case assumption: HCP packing of particles gives largest distance */
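    /* For HCP the volume per particle is d^3/sqrt(2), with d the
     * nearest-neighbor distance, which gives d = (sqrt(2)*V/N)^(1/3) below. */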
    particle_distance = pow(boxvol*sqrt(2)/mtop->natoms, 1.0/3.0);

    get_verlet_buffer_atomtypes(mtop, &att, &natt, n_nonlin_vsite);
    assert(att != NULL && natt >= 0);

    if (debug)
    {
        fprintf(debug, "particle distance assuming HCP packing: %f nm\n",
                particle_distance);
        fprintf(debug, "energy drift atom types: %d\n", natt);
    }

    reppow   = mtop->ffparams.reppow;
    md1_ljd  = 0;
    d2_ljd   = 0;
    md3_ljd  = 0;
    md1_ljr  = 0;
    d2_ljr   = 0;
    md3_ljr  = 0;
    if (ir->vdwtype == evdwCUT)
    {
        real sw_range, md3_pswf;

        switch (ir->vdw_modifier)
        {
            case eintmodNONE:
            case eintmodPOTSHIFT:
                /* -dV/dr of -r^-6 and r^-reppow */
                md1_ljd =     -6*pow(ir->rvdw, -7.0);
                md1_ljr = reppow*pow(ir->rvdw, -(reppow+1));
                /* The contribution of the higher derivatives is negligible */
                break;
            case eintmodFORCESWITCH:
                /* At the cut-off: V=V'=V''=0, so we use only V''' */
                md3_ljd  = -md3_force_switch(6.0,    ir->rvdw_switch, ir->rvdw);
                md3_ljr  =  md3_force_switch(reppow, ir->rvdw_switch, ir->rvdw);
                break;
            case eintmodPOTSWITCH:
                /* At the cut-off: V=V'=V''=0.
                 * V''' is given by the original potential times
                 * the third derivative of the switch function.
                 */
                sw_range  = ir->rvdw - ir->rvdw_switch;
                md3_pswf  = 60.0*pow(sw_range, -3.0);

                md3_ljd   = -pow(ir->rvdw, -6.0   )*md3_pswf;
                md3_ljr   =  pow(ir->rvdw, -reppow)*md3_pswf;
                break;
            default:
                gmx_incons("Unimplemented VdW modifier");
        }
    }
    else if (EVDW_PME(ir->vdwtype))
    {
        real b, r, br, br2, br4, br6;
        b        = calc_ewaldcoeff_lj(ir->rvdw, ir->ewald_rtol_lj);
        r        = ir->rvdw;
        br       = b*r;
        br2      = br*br;
        br4      = br2*br2;
        br6      = br4*br2;
        /* -dV/dr of g(br)*r^-6 [where g(x) = exp(-x^2)(1+x^2+x^4/2), see LJ-PME equations in manual] and r^-reppow */
        md1_ljd  = -exp(-br2)*(br6 + 3.0*br4 + 6.0*br2 + 6.0)*pow(r, -7.0);
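        /* The expression above follows from g'(x) = -x^5*exp(-x^2): with
         * x = b*r, -d/dr[-g(x)*r^-6] = -exp(-x^2)*(x^6 + 3x^4 + 6x^2 + 6)*r^-7. */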
        md1_ljr  = reppow*pow(r, -(reppow+1));
        /* The contribution of the higher derivatives is negligible */
    }
    else
    {
        gmx_fatal(FARGS, "Energy drift calculation is only implemented for plain cut-off Lennard-Jones interactions");
    }

    elfac = ONE_4PI_EPS0/ir->epsilon_r;

    /* Determine md=-dV/dr and dd=d^2V/dr^2 */
    md1_el = 0;
    d2_el  = 0;
    if (ir->coulombtype == eelCUT || EEL_RF(ir->coulombtype))
    {
        real eps_rf, k_rf;

        if (ir->coulombtype == eelCUT)
        {
            eps_rf = 1;
            k_rf   = 0;
        }
        else
        {
            eps_rf = ir->epsilon_rf/ir->epsilon_r;
            if (eps_rf != 0)
            {
                k_rf = pow(ir->rcoulomb, -3.0)*(eps_rf - ir->epsilon_r)/(2*eps_rf + ir->epsilon_r);
            }
            else
            {
                /* epsilon_rf = infinity */
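                /* (eps_rf - epsilon_r)/(2*eps_rf + epsilon_r) -> 1/2
                 * in that limit, leaving the factor 0.5 below. */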
                k_rf = 0.5*pow(ir->rcoulomb, -3.0);
            }
        }

        if (eps_rf > 0)
        {
            md1_el = elfac*(pow(ir->rcoulomb, -2.0) - 2*k_rf*ir->rcoulomb);
        }
        d2_el      = elfac*(2*pow(ir->rcoulomb, -3.0) + 2*k_rf);
    }
    else if (EEL_PME(ir->coulombtype) || ir->coulombtype == eelEWALD)
    {
        real b, rc, br;

        b      = calc_ewaldcoeff_q(ir->rcoulomb, ir->ewald_rtol);
        rc     = ir->rcoulomb;
        br     = b*rc;
        md1_el = elfac*(b*exp(-br*br)*M_2_SQRTPI/rc + gmx_erfc(br)/(rc*rc));
        d2_el  = elfac/(rc*rc)*(2*b*(1 + br*br)*exp(-br*br)*M_2_SQRTPI + 2*gmx_erfc(br)/rc);
    }
    else
    {
        gmx_fatal(FARGS, "Energy drift calculation is only implemented for Reaction-Field and Ewald electrostatics");
    }

    /* Determine the variance of the atomic displacement
     * over nstlist-1 steps: kT_fac
     * For inertial dynamics (not Brownian dynamics) the mass factor
     * is not included in kT_fac, it is added later.
     */
    if (ir->eI == eiBD)
    {
        /* Get the displacement distribution from the random component only.
         * With accurate integration the systematic (force) displacement
         * should be negligible (unless nstlist is extremely large, which
         * you wouldn't do anyhow).
         */
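        /* Einstein relation: per dimension sigma^2 = 2*D*t with
         * t = (nstlist-1)*delta_t; the diffusion constant D = kT/friction
         * enters via the friction factor applied below. */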
        kT_fac = 2*BOLTZ*reference_temperature*(ir->nstlist-1)*ir->delta_t;
        if (ir->bd_fric > 0)
        {
            /* This is directly sigma^2 of the displacement */
            kT_fac /= ir->bd_fric;

            /* Set the masses to 1 as kT_fac is the full sigma^2,
             * but we divide by m in ener_drift().
             */
            for (i = 0; i < natt; i++)
            {
                att[i].prop.mass = 1;
            }
        }
        else
        {
            real tau_t;

            /* Per group tau_t is not implemented yet, use the maximum */
            tau_t = ir->opts.tau_t[0];
            for (i = 1; i < ir->opts.ngtc; i++)
            {
                tau_t = max(tau_t, ir->opts.tau_t[i]);
            }

            kT_fac *= tau_t;
            /* This kT_fac needs to be divided by the mass to get sigma^2 */
        }
    }
    else
    {
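        /* Inertial dynamics: over t = (nstlist-1)*delta_t a particle with
         * thermal velocity variance kT/m covers sigma^2 = (kT/m)*t^2;
         * the 1/m factor is applied later in ener_drift(). */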
        kT_fac = BOLTZ*reference_temperature*sqr((ir->nstlist-1)*ir->delta_t);
    }

    mass_min = att[0].prop.mass;
    for (i = 1; i < natt; i++)
    {
        mass_min = min(mass_min, att[i].prop.mass);
    }

    if (debug)
    {
        fprintf(debug, "md1_ljd %9.2e d2_ljd %9.2e md3_ljd %9.2e\n", md1_ljd, d2_ljd, md3_ljd);
        fprintf(debug, "md1_ljr %9.2e d2_ljr %9.2e md3_ljr %9.2e\n", md1_ljr, d2_ljr, md3_ljr);
        fprintf(debug, "md1_el  %9.2e d2_el  %9.2e\n", md1_el, d2_el);
        fprintf(debug, "sqrt(kT_fac) %f\n", sqrt(kT_fac));
        fprintf(debug, "mass_min %f\n", mass_min);
    }

    /* Search using bisection */
    ib0 = -1;
    /* The drift will be negligible at 5 times the max sigma */
    ib1 = (int)(5*2*sqrt(kT_fac/mass_min)/resolution) + 1;
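    /* Bisection invariant: a buffer of ib0*resolution gives a drift above
     * the tolerance (ib0 = -1 is the untested lower bound), one of
     * ib1*resolution does not, so on exit ib1 indexes the smallest
     * tested buffer that meets ir->verletbuf_tol. */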
    while (ib1 - ib0 > 1)
    {
        ib = (ib0 + ib1)/2;
        rb = ib*resolution;
        rl = max(ir->rvdw, ir->rcoulomb) + rb;

        /* Calculate the average energy drift at the last step
         * of the nstlist steps at which the pair-list is used.
         */
        drift = ener_drift(att, natt, &mtop->ffparams,
                           kT_fac,
                           md1_ljd, d2_ljd, md3_ljd,
                           md1_ljr, d2_ljr, md3_ljr,
                           md1_el,  d2_el,
                           rb,
                           rl, boxvol);

        /* Correct for the fact that we are using a Ni x Nj particle pair list
         * and not a 1 x 1 particle pair list. This reduces the drift.
         */
        /* We don't have a formula for 8 (yet), use 4 which is conservative */
        nb_clust_frac_pairs_not_in_list_at_cutoff =
            surface_frac(min(list_setup->cluster_size_i, 4),
                         particle_distance, rl)*
            surface_frac(min(list_setup->cluster_size_j, 4),
                         particle_distance, rl);
        drift *= nb_clust_frac_pairs_not_in_list_at_cutoff;

        /* Convert the drift to drift per unit time per atom */
        drift /= ir->nstlist*ir->delta_t*mtop->natoms;

        if (debug)
        {
            fprintf(debug, "ib %3d %3d %3d rb %.3f %dx%d fac %.3f drift %.1e\n",
                    ib0, ib, ib1, rb,
                    list_setup->cluster_size_i, list_setup->cluster_size_j,
                    nb_clust_frac_pairs_not_in_list_at_cutoff,
                    drift);
        }

        if (fabs(drift) > ir->verletbuf_tol)
        {
            ib0 = ib;
        }
        else
        {
            ib1 = ib;
        }
    }

    sfree(att);

    *rlist = max(ir->rvdw, ir->rcoulomb) + ib1*resolution;
}
Code example #3
float pme_load_estimate(gmx_mtop_t *mtop, t_inputrec *ir, matrix box)
{
    t_atom        *atom;
    int            mb, nmol, atnr, cg, a, a0, nq_tot, nlj_tot, f;
    gmx_bool       bBHAM, bLJcut, bChargePerturbed, bTypePerturbed;
    gmx_bool       bWater, bQ, bLJ;
    double         ndistance_c, ndistance_simd;
    double         cost_bond, cost_pp, cost_redist, cost_spread, cost_fft, cost_solve, cost_pme;
    float          ratio;
    t_iparams     *iparams;
    gmx_moltype_t *molt;

    /* Computational cost of bonded, non-bonded and PME calculations.
     * This will be machine dependent.
     * The numbers here are accurate for Intel Core2 and AMD Athlon 64
     * in single precision. In double precision PME mesh is slightly cheaper,
     * although not so much that the numbers need to be adjusted.
     */

    iparams = mtop->ffparams.iparams;
    atnr    = mtop->ffparams.atnr;

    count_bonded_distances(mtop, ir, &ndistance_c, &ndistance_simd);
    /* C_BOND is the cost for bonded interactions with SIMD implementations,
     * so we need to scale the number of bonded interactions for which there
     * are only C implementations to the number of SIMD equivalents.
     */
    cost_bond = c_bond*(ndistance_c   *simd_cycle_factor(FALSE) +
                        ndistance_simd*simd_cycle_factor(bHaveSIMD));

    if (ir->cutoff_scheme == ecutsGROUP)
    {
        pp_group_load(mtop, ir, box,
                      &nq_tot, &nlj_tot, &cost_pp,
                      &bChargePerturbed, &bTypePerturbed);
    }
    else
    {
        pp_verlet_load(mtop, ir, box,
                       &nq_tot, &nlj_tot, &cost_pp,
                       &bChargePerturbed, &bTypePerturbed);
    }

    cost_redist = 0;
    cost_spread = 0;
    cost_fft    = 0;
    cost_solve  = 0;

    if (EEL_PME(ir->coulombtype))
    {
        double grid = ir->nkx*ir->nky*((ir->nkz + 1)/2);
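        /* A real-to-complex 3D FFT stores only about half of the complex
         * coefficients along the last dimension, hence (nkz + 1)/2. */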

        f            = ((ir->efep != efepNO && bChargePerturbed) ? 2 : 1);
        cost_redist +=   c_pme_redist*nq_tot;
        cost_spread += f*c_pme_spread*nq_tot*pow(ir->pme_order, 3);
        cost_fft    += f*c_pme_fft*grid*log(grid)/log(2);
        cost_solve  += f*c_pme_solve*grid*simd_cycle_factor(bHaveSIMD);
    }

    if (EVDW_PME(ir->vdwtype))
    {
        double grid = ir->nkx*ir->nky*((ir->nkz + 1)/2);

        f            = ((ir->efep != efepNO && bTypePerturbed) ? 2 : 1);
        if (ir->ljpme_combination_rule == eljpmeLB)
        {
            /* LB combination rule: we have 7 mesh terms */
            f       *= 7;
        }
        cost_redist +=   c_pme_redist*nlj_tot;
        cost_spread += f*c_pme_spread*nlj_tot*pow(ir->pme_order, 3);
        cost_fft    += f*c_pme_fft*2*grid*log(grid)/log(2);
        cost_solve  += f*c_pme_solve*grid*simd_cycle_factor(bHaveSIMD);
    }

    cost_pme = cost_redist + cost_spread + cost_fft + cost_solve;

    ratio = cost_pme/(cost_bond + cost_pp + cost_pme);
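    /* The ratio is the fraction of the total (bonded + particle-particle +
     * PME) cost spent on the PME mesh part; callers can use it, e.g., to
     * choose the number of separate PME-only ranks. */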

    if (debug)
    {
        fprintf(debug,
                "cost_bond   %f\n"
                "cost_pp     %f\n"
                "cost_redist %f\n"
                "cost_spread %f\n"
                "cost_fft    %f\n"
                "cost_solve  %f\n",
                cost_bond, cost_pp, cost_redist, cost_spread, cost_fft, cost_solve);

        fprintf(debug, "Estimate for relative PME load: %.3f\n", ratio);
    }

    return ratio;
}
Code example #4
static void pp_verlet_load(gmx_mtop_t *mtop, t_inputrec *ir, matrix box,
                           int *nq_tot, int *nlj_tot,
                           double *cost_pp,
                           gmx_bool *bChargePerturbed, gmx_bool *bTypePerturbed)
{
    t_atom        *atom;
    int            mb, nmol, atnr, cg, a, a0, nqlj, nq, nlj;
    gmx_bool       bQRF;
    t_iparams     *iparams;
    gmx_moltype_t *molt;
    real           r_eff;
    double         c_qlj, c_q, c_lj;
    double         nppa;
    int            j_cluster_size;
    /* Conversion factor for reference vs SIMD kernel performance.
     * The factor is about right for SSE2/4, but should be 2 higher for AVX256.
     */
#ifdef GMX_DOUBLE
    const real     nbnxn_refkernel_fac = 4.0;
#else
    const real     nbnxn_refkernel_fac = 8.0;
#endif

    bQRF = (EEL_RF(ir->coulombtype) || ir->coulombtype == eelCUT);

    iparams           = mtop->ffparams.iparams;
    atnr              = mtop->ffparams.atnr;
    nqlj              = 0;
    nq                = 0;
    *bChargePerturbed = FALSE;
    *bTypePerturbed   = FALSE;
    for (mb = 0; mb < mtop->nmolblock; mb++)
    {
        molt = &mtop->moltype[mtop->molblock[mb].type];
        atom = molt->atoms.atom;
        nmol = mtop->molblock[mb].nmol;
        for (a = 0; a < molt->atoms.nr; a++)
        {
            if (atom[a].q != 0 || atom[a].qB != 0)
            {
                if (iparams[(atnr+1)*atom[a].type].lj.c6  != 0 ||
                    iparams[(atnr+1)*atom[a].type].lj.c12 != 0)
                {
                    nqlj += nmol;
                }
                else
                {
                    nq += nmol;
                }
            }
            if (atom[a].q != atom[a].qB)
            {
                *bChargePerturbed = TRUE;
            }
            if (atom[a].type != atom[a].typeB)
            {
                *bTypePerturbed = TRUE;
            }
        }
    }

    nlj = mtop->natoms - nqlj - nq;

    *nq_tot  = nqlj + nq;
    *nlj_tot = nqlj + nlj;

    /* Effective cut-off for cluster pair list of 4x4 or 4x8 atoms.
     * This choice should match the one of pick_nbnxn_kernel_cpu().
     * TODO: Make this function use pick_nbnxn_kernel_cpu().
     */
#if defined GMX_SIMD_HAVE_REAL && ((GMX_SIMD_REAL_WIDTH == 8 && defined GMX_SIMD_HAVE_FMA) || GMX_SIMD_REAL_WIDTH > 8)
    j_cluster_size = 8;
#else
    j_cluster_size = 4;
#endif
    r_eff = ir->rlist + nbnxn_get_rlist_effective_inc(j_cluster_size, mtop->natoms/det(box));

    /* The average number of pairs per atom */
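    /* (4/3)*pi*r_eff^3 * natoms/det(box) neighbors lie within r_eff;
     * the factor 0.5 counts each pair only once. */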
    nppa  = 0.5*4/3*M_PI*r_eff*r_eff*r_eff*mtop->natoms/det(box);

    if (debug)
    {
        fprintf(debug, "nqlj %d nq %d nlj %d rlist %.3f r_eff %.3f pairs per atom %.1f\n",
                nqlj, nq, nlj, ir->rlist, r_eff, nppa);
    }

    /* Determine the cost per pair interaction */
    c_qlj = (bQRF ? c_nbnxn_qrf_lj : c_nbnxn_qexp_lj);
    c_q   = (bQRF ? c_nbnxn_qrf    : c_nbnxn_qexp);
    c_lj  = c_nbnxn_lj;
    if (ir->vdw_modifier == eintmodPOTSWITCH || EVDW_PME(ir->vdwtype))
    {
        c_qlj += c_nbnxn_ljexp_add;
        c_lj  += c_nbnxn_ljexp_add;
    }
    if (EVDW_PME(ir->vdwtype) && ir->ljpme_combination_rule == eljpmeLB)
    {
        /* We don't have LJ-PME LB comb. rule kernels, we use slow kernels */
        c_qlj *= nbnxn_refkernel_fac;
        c_q   *= nbnxn_refkernel_fac;
        c_lj  *= nbnxn_refkernel_fac;
    }

    /* For the PP non-bonded cost it is (unrealistically) assumed
     * that all atoms are distributed homogeneously in space.
     */
    *cost_pp = (nqlj*c_qlj + nq*c_q + nlj*c_lj)*nppa;

    *cost_pp *= simd_cycle_factor(bHaveSIMD);
}
Code example #5
File: mdebin.cpp Project: MelroLeandro/gromacs
t_mdebin *init_mdebin(ener_file_t       fp_ene,
                      const gmx_mtop_t *mtop,
                      const t_inputrec *ir,
                      FILE             *fp_dhdl)
{
    const char         *ener_nm[F_NRE];
    static const char  *vir_nm[] = {
        "Vir-XX", "Vir-XY", "Vir-XZ",
        "Vir-YX", "Vir-YY", "Vir-YZ",
        "Vir-ZX", "Vir-ZY", "Vir-ZZ"
    };
    static const char  *sv_nm[] = {
        "ShakeVir-XX", "ShakeVir-XY", "ShakeVir-XZ",
        "ShakeVir-YX", "ShakeVir-YY", "ShakeVir-YZ",
        "ShakeVir-ZX", "ShakeVir-ZY", "ShakeVir-ZZ"
    };
    static const char  *fv_nm[] = {
        "ForceVir-XX", "ForceVir-XY", "ForceVir-XZ",
        "ForceVir-YX", "ForceVir-YY", "ForceVir-YZ",
        "ForceVir-ZX", "ForceVir-ZY", "ForceVir-ZZ"
    };
    static const char  *pres_nm[] = {
        "Pres-XX", "Pres-XY", "Pres-XZ",
        "Pres-YX", "Pres-YY", "Pres-YZ",
        "Pres-ZX", "Pres-ZY", "Pres-ZZ"
    };
    static const char  *surft_nm[] = {
        "#Surf*SurfTen"
    };
    static const char  *mu_nm[] = {
        "Mu-X", "Mu-Y", "Mu-Z"
    };
    static const char  *vcos_nm[] = {
        "2CosZ*Vel-X"
    };
    static const char  *visc_nm[] = {
        "1/Viscosity"
    };
    static const char  *baro_nm[] = {
        "Barostat"
    };

    char              **grpnms;
    const gmx_groups_t *groups;
    char              **gnm;
    char                buf[256];
    const char         *bufi;
    t_mdebin           *md;
    int                 i, j, ni, nj, n, k, kk, ncon, nset;
    gmx_bool            bBHAM, b14;

    snew(md, 1);

    if (EI_DYNAMICS(ir->eI))
    {
        md->delta_t = ir->delta_t;
    }
    else
    {
        md->delta_t = 0;
    }

    groups = &mtop->groups;

    bBHAM = (mtop->ffparams.functype[0] == F_BHAM);
    b14   = (gmx_mtop_ftype_count(mtop, F_LJ14) > 0 ||
             gmx_mtop_ftype_count(mtop, F_LJC14_Q) > 0);

    ncon           = gmx_mtop_ftype_count(mtop, F_CONSTR);
    nset           = gmx_mtop_ftype_count(mtop, F_SETTLE);
    md->bConstr    = (ncon > 0 || nset > 0);
    md->bConstrVir = FALSE;
    if (md->bConstr)
    {
        if (ncon > 0 && ir->eConstrAlg == econtLINCS)
        {
            md->nCrmsd = 1;
        }
        md->bConstrVir = (getenv("GMX_CONSTRAINTVIR") != NULL);
    }
    else
    {
        md->nCrmsd = 0;
    }

    /* Energy monitoring */
    for (i = 0; i < egNR; i++)
    {
        md->bEInd[i] = FALSE;
    }

    for (i = 0; i < F_NRE; i++)
    {
        md->bEner[i] = FALSE;
        if (i == F_LJ)
        {
            md->bEner[i] = !bBHAM;
        }
        else if (i == F_BHAM)
        {
            md->bEner[i] = bBHAM;
        }
        else if (i == F_EQM)
        {
            md->bEner[i] = ir->bQMMM;
        }
        else if (i == F_RF_EXCL)
        {
            md->bEner[i] = (EEL_RF(ir->coulombtype) && ir->cutoff_scheme == ecutsGROUP);
        }
        else if (i == F_COUL_RECIP)
        {
            md->bEner[i] = EEL_FULL(ir->coulombtype);
        }
        else if (i == F_LJ_RECIP)
        {
            md->bEner[i] = EVDW_PME(ir->vdwtype);
        }
        else if (i == F_LJ14)
        {
            md->bEner[i] = b14;
        }
        else if (i == F_COUL14)
        {
            md->bEner[i] = b14;
        }
        else if (i == F_LJC14_Q || i == F_LJC_PAIRS_NB)
        {
            md->bEner[i] = FALSE;
        }
        else if ((i == F_DVDL_COUL && ir->fepvals->separate_dvdl[efptCOUL]) ||
                 (i == F_DVDL_VDW  && ir->fepvals->separate_dvdl[efptVDW]) ||
                 (i == F_DVDL_BONDED && ir->fepvals->separate_dvdl[efptBONDED]) ||
                 (i == F_DVDL_RESTRAINT && ir->fepvals->separate_dvdl[efptRESTRAINT]) ||
                 (i == F_DKDL && ir->fepvals->separate_dvdl[efptMASS]) ||
                 (i == F_DVDL && ir->fepvals->separate_dvdl[efptFEP]))
        {
            md->bEner[i] = (ir->efep != efepNO);
        }
        else if ((interaction_function[i].flags & IF_VSITE) ||
                 (i == F_CONSTR) || (i == F_CONSTRNC) || (i == F_SETTLE))
        {
            md->bEner[i] = FALSE;
        }
        else if ((i == F_COUL_SR) || (i == F_EPOT) || (i == F_PRES)  || (i == F_EQM))
        {
            md->bEner[i] = TRUE;
        }
        else if ((i == F_GBPOL) && ir->implicit_solvent == eisGBSA)
        {
            md->bEner[i] = TRUE;
        }
        else if ((i == F_NPSOLVATION) && ir->implicit_solvent == eisGBSA && (ir->sa_algorithm != esaNO))
        {
            md->bEner[i] = TRUE;
        }
        else if ((i == F_GB12) || (i == F_GB13) || (i == F_GB14))
        {
            md->bEner[i] = FALSE;
        }
        else if ((i == F_ETOT) || (i == F_EKIN) || (i == F_TEMP))
        {
            md->bEner[i] = EI_DYNAMICS(ir->eI);
        }
        else if (i == F_DISPCORR || i == F_PDISPCORR)
        {
            md->bEner[i] = (ir->eDispCorr != edispcNO);
        }
        else if (i == F_DISRESVIOL)
        {
            md->bEner[i] = (gmx_mtop_ftype_count(mtop, F_DISRES) > 0);
        }
        else if (i == F_ORIRESDEV)
        {
            md->bEner[i] = (gmx_mtop_ftype_count(mtop, F_ORIRES) > 0);
        }
        else if (i == F_CONNBONDS)
        {
            md->bEner[i] = FALSE;
        }
        else if (i == F_COM_PULL)
        {
            md->bEner[i] = (ir->bPull && pull_have_potential(ir->pull_work));
        }
        else if (i == F_ECONSERVED)
        {
            md->bEner[i] = ((ir->etc == etcNOSEHOOVER || ir->etc == etcVRESCALE) &&
                            (ir->epc == epcNO || ir->epc == epcMTTK));
        }
        else
        {
            md->bEner[i] = (gmx_mtop_ftype_count(mtop, i) > 0);
        }
    }

    md->f_nre = 0;
    for (i = 0; i < F_NRE; i++)
    {
        if (md->bEner[i])
        {
            ener_nm[md->f_nre] = interaction_function[i].longname;
            md->f_nre++;
        }
    }

    md->epc            = ir->epc;
    md->bDiagPres      = !TRICLINIC(ir->ref_p);
    md->ref_p          = (ir->ref_p[XX][XX]+ir->ref_p[YY][YY]+ir->ref_p[ZZ][ZZ])/DIM;
    md->bTricl         = TRICLINIC(ir->compress) || TRICLINIC(ir->deform);
    md->bDynBox        = inputrecDynamicBox(ir);
    md->etc            = ir->etc;
    md->bNHC_trotter   = inputrecNvtTrotter(ir);
    md->bPrintNHChains = ir->bPrintNHChains;
    md->bMTTK          = (inputrecNptTrotter(ir) || inputrecNphTrotter(ir));
    md->bMu            = inputrecNeedMutot(ir);

    md->ebin  = mk_ebin();
    /* Pass NULL for unit to let get_ebin_space determine the units
     * for interaction_function[i].longname
     */
    md->ie    = get_ebin_space(md->ebin, md->f_nre, ener_nm, NULL);
    if (md->nCrmsd)
    {
        /* This should be called directly after the call for md->ie,
         * such that md->iconrmsd follows directly in the list.
         */
        md->iconrmsd = get_ebin_space(md->ebin, md->nCrmsd, conrmsd_nm, "");
    }
    if (md->bDynBox)
    {
        md->ib    = get_ebin_space(md->ebin,
                                   md->bTricl ? NTRICLBOXS : NBOXS,
                                   md->bTricl ? tricl_boxs_nm : boxs_nm,
                                   unit_length);
        md->ivol  = get_ebin_space(md->ebin, 1, vol_nm,  unit_volume);
        md->idens = get_ebin_space(md->ebin, 1, dens_nm, unit_density_SI);
        if (md->bDiagPres)
        {
            md->ipv       = get_ebin_space(md->ebin, 1, pv_nm,   unit_energy);
            md->ienthalpy = get_ebin_space(md->ebin, 1, enthalpy_nm,   unit_energy);
        }
    }
    if (md->bConstrVir)
    {
        md->isvir = get_ebin_space(md->ebin, asize(sv_nm), sv_nm, unit_energy);
        md->ifvir = get_ebin_space(md->ebin, asize(fv_nm), fv_nm, unit_energy);
    }
    md->ivir   = get_ebin_space(md->ebin, asize(vir_nm), vir_nm, unit_energy);
    md->ipres  = get_ebin_space(md->ebin, asize(pres_nm), pres_nm, unit_pres_bar);
    md->isurft = get_ebin_space(md->ebin, asize(surft_nm), surft_nm,
                                unit_surft_bar);
    if (md->epc == epcPARRINELLORAHMAN || md->epc == epcMTTK)
    {
        md->ipc = get_ebin_space(md->ebin, md->bTricl ? 6 : 3,
                                 boxvel_nm, unit_vel);
    }
    if (md->bMu)
    {
        md->imu    = get_ebin_space(md->ebin, asize(mu_nm), mu_nm, unit_dipole_D);
    }
    if (ir->cos_accel != 0)
    {
        md->ivcos = get_ebin_space(md->ebin, asize(vcos_nm), vcos_nm, unit_vel);
        md->ivisc = get_ebin_space(md->ebin, asize(visc_nm), visc_nm,
                                   unit_invvisc_SI);
    }

    /* Energy monitoring */
    for (i = 0; i < egNR; i++)
    {
        md->bEInd[i] = FALSE;
    }
    md->bEInd[egCOULSR] = TRUE;
    md->bEInd[egLJSR  ] = TRUE;

    if (bBHAM)
    {
        md->bEInd[egLJSR]   = FALSE;
        md->bEInd[egBHAMSR] = TRUE;
    }
    if (b14)
    {
        md->bEInd[egLJ14]   = TRUE;
        md->bEInd[egCOUL14] = TRUE;
    }
    md->nEc = 0;
    for (i = 0; (i < egNR); i++)
    {
        if (md->bEInd[i])
        {
            md->nEc++;
        }
    }

    n       = groups->grps[egcENER].nr;
    md->nEg = n;
    md->nE  = (n*(n+1))/2;
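    /* nE is the number of unordered energy-group pairs, self-pairs included,
     * which matches the j >= i double loop further down. */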

    snew(md->igrp, md->nE);
    if (md->nE > 1)
    {
        n = 0;
        snew(gnm, md->nEc);
        for (k = 0; (k < md->nEc); k++)
        {
            snew(gnm[k], STRLEN);
        }
        for (i = 0; (i < groups->grps[egcENER].nr); i++)
        {
            ni = groups->grps[egcENER].nm_ind[i];
            for (j = i; (j < groups->grps[egcENER].nr); j++)
            {
                nj = groups->grps[egcENER].nm_ind[j];
                for (k = kk = 0; (k < egNR); k++)
                {
                    if (md->bEInd[k])
                    {
                        sprintf(gnm[kk], "%s:%s-%s", egrp_nm[k],
                                *(groups->grpname[ni]), *(groups->grpname[nj]));
                        kk++;
                    }
                }
                md->igrp[n] = get_ebin_space(md->ebin, md->nEc,
                                             (const char **)gnm, unit_energy);
                n++;
            }
        }
        for (k = 0; (k < md->nEc); k++)
        {
            sfree(gnm[k]);
        }
        sfree(gnm);

        if (n != md->nE)
        {
            gmx_incons("Number of energy terms wrong");
        }
    }

    md->nTC  = groups->grps[egcTC].nr;
    md->nNHC = ir->opts.nhchainlength; /* shorthand for number of NH chains */
    if (md->bMTTK)
    {
        md->nTCP = 1;  /* assume only one possible coupling system for barostat
                          for now */
    }
    else
    {
        md->nTCP = 0;
    }
    if (md->etc == etcNOSEHOOVER)
    {
        if (md->bNHC_trotter)
        {
            md->mde_n = 2*md->nNHC*md->nTC;
        }
        else
        {
            md->mde_n = 2*md->nTC;
        }
        if (md->epc == epcMTTK)
        {
            md->mdeb_n = 2*md->nNHC*md->nTCP;
        }
    }
    else
    {
        md->mde_n  = md->nTC;
        md->mdeb_n = 0;
    }

    snew(md->tmp_r, md->mde_n);
    snew(md->tmp_v, md->mde_n);
    snew(md->grpnms, md->mde_n);
    grpnms = md->grpnms;

    for (i = 0; (i < md->nTC); i++)
    {
        ni = groups->grps[egcTC].nm_ind[i];
        sprintf(buf, "T-%s", *(groups->grpname[ni]));
        grpnms[i] = gmx_strdup(buf);
    }
    md->itemp = get_ebin_space(md->ebin, md->nTC, (const char **)grpnms,
                               unit_temp_K);

    if (md->etc == etcNOSEHOOVER)
    {
        if (md->bPrintNHChains)
        {
            if (md->bNHC_trotter)
            {
                for (i = 0; (i < md->nTC); i++)
                {
                    ni   = groups->grps[egcTC].nm_ind[i];
                    bufi = *(groups->grpname[ni]);
                    for (j = 0; (j < md->nNHC); j++)
                    {
                        sprintf(buf, "Xi-%d-%s", j, bufi);
                        grpnms[2*(i*md->nNHC+j)] = gmx_strdup(buf);
                        sprintf(buf, "vXi-%d-%s", j, bufi);
                        grpnms[2*(i*md->nNHC+j)+1] = gmx_strdup(buf);
                    }
                }
                md->itc = get_ebin_space(md->ebin, md->mde_n,
                                         (const char **)grpnms, unit_invtime);
                if (md->bMTTK)
                {
                    for (i = 0; (i < md->nTCP); i++)
                    {
                        bufi = baro_nm[0];  /* All barostat DOF's together for now. */
                        for (j = 0; (j < md->nNHC); j++)
                        {
                            sprintf(buf, "Xi-%d-%s", j, bufi);
                            grpnms[2*(i*md->nNHC+j)] = gmx_strdup(buf);
                            sprintf(buf, "vXi-%d-%s", j, bufi);
                            grpnms[2*(i*md->nNHC+j)+1] = gmx_strdup(buf);
                        }
                    }
                    md->itcb = get_ebin_space(md->ebin, md->mdeb_n,
                                              (const char **)grpnms, unit_invtime);
                }
            }
            else
            {
                for (i = 0; (i < md->nTC); i++)
                {
                    ni   = groups->grps[egcTC].nm_ind[i];
                    bufi = *(groups->grpname[ni]);
                    sprintf(buf, "Xi-%s", bufi);
                    grpnms[2*i] = gmx_strdup(buf);
                    sprintf(buf, "vXi-%s", bufi);
                    grpnms[2*i+1] = gmx_strdup(buf);
                }
                md->itc = get_ebin_space(md->ebin, md->mde_n,
                                         (const char **)grpnms, unit_invtime);
            }
        }
    }
    else if (md->etc == etcBERENDSEN || md->etc == etcYES ||
             md->etc == etcVRESCALE)
    {
        for (i = 0; (i < md->nTC); i++)
        {
            ni = groups->grps[egcTC].nm_ind[i];
            sprintf(buf, "Lamb-%s", *(groups->grpname[ni]));
            grpnms[i] = gmx_strdup(buf);
        }
        md->itc = get_ebin_space(md->ebin, md->mde_n, (const char **)grpnms, "");
    }

    sfree(grpnms);


    md->nU = groups->grps[egcACC].nr;
    if (md->nU > 1)
    {
        snew(grpnms, 3*md->nU);
        for (i = 0; (i < md->nU); i++)
        {
            ni = groups->grps[egcACC].nm_ind[i];
            sprintf(buf, "Ux-%s", *(groups->grpname[ni]));
            grpnms[3*i+XX] = gmx_strdup(buf);
            sprintf(buf, "Uy-%s", *(groups->grpname[ni]));
            grpnms[3*i+YY] = gmx_strdup(buf);
            sprintf(buf, "Uz-%s", *(groups->grpname[ni]));
            grpnms[3*i+ZZ] = gmx_strdup(buf);
        }
        md->iu = get_ebin_space(md->ebin, 3*md->nU, (const char **)grpnms, unit_vel);
        sfree(grpnms);
    }

    if (fp_ene)
    {
        do_enxnms(fp_ene, &md->ebin->nener, &md->ebin->enm);
    }

    md->print_grpnms = NULL;

    /* check whether we're going to write dh histograms */
    md->dhc = NULL;
    if (ir->fepvals->separate_dhdl_file == esepdhdlfileNO)
    {
        /* Currently dh histograms are only written with dynamics */
        if (EI_DYNAMICS(ir->eI))
        {
            snew(md->dhc, 1);

            mde_delta_h_coll_init(md->dhc, ir);
        }
        md->fp_dhdl = NULL;
        snew(md->dE, ir->fepvals->n_lambda);
    }
    else
    {
        md->fp_dhdl = fp_dhdl;
        snew(md->dE, ir->fepvals->n_lambda);
    }
    if (ir->bSimTemp)
    {
        int i;
        snew(md->temperatures, ir->fepvals->n_lambda);
        for (i = 0; i < ir->fepvals->n_lambda; i++)
        {
            md->temperatures[i] = ir->simtempvals->temperatures[i];
        }
    }
    return md;
}
Code example #6
gmx_bool pme_load_balance(pme_load_balancing_t       pme_lb,
                          t_commrec                 *cr,
                          FILE                      *fp_err,
                          FILE                      *fp_log,
                          t_inputrec                *ir,
                          t_state                   *state,
                          double                     cycles,
                          interaction_const_t       *ic,
                          struct nonbonded_verlet_t *nbv,
                          struct gmx_pme_t **        pmedata,
                          gmx_int64_t                step)
{
    gmx_bool     OK;
    pme_setup_t *set;
    double       cycles_fast;
    char         buf[STRLEN], sbuf[22];
    real         rtab;
    gmx_bool     bUsesSimpleTables = TRUE;

    if (pme_lb->stage == pme_lb->nstage)
    {
        return FALSE;
    }

    if (PAR(cr))
    {
        gmx_sumd(1, &cycles, cr);
        cycles /= cr->nnodes;
    }

    set = &pme_lb->setup[pme_lb->cur];
    set->count++;

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
    {
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */
        return TRUE;
    }

    sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
    print_grid(fp_err, fp_log, buf, "timed with", set, cycles);

    if (set->count <= 2)
    {
        set->cycles = cycles;
    }
    else
    {
        if (cycles*PME_LB_ACCEL_TOL < set->cycles &&
            pme_lb->stage == pme_lb->nstage - 1)
        {
            /* The performance went up a lot (due to e.g. DD load balancing).
             * Add a stage, keep the minima, but rescan all setups.
             */
            pme_lb->nstage++;

            if (debug)
            {
                fprintf(debug, "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                        "Increased the number stages to %d"
                        " and ignoring the previous performance\n",
                        set->grid[XX], set->grid[YY], set->grid[ZZ],
                        cycles*1e-6, set->cycles*1e-6, PME_LB_ACCEL_TOL,
                        pme_lb->nstage);
            }
        }
        set->cycles = min(set->cycles, cycles);
    }

    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;

        if (DOMAINDECOMP(cr))
        {
            /* We found a new fastest setting, ensure that with subsequent
             * shorter cut-offs the dynamic load balancing does not make
             * the use of the current cut-off impossible. This solution is
             * a trade-off, as the PME load balancing and DD domain size
             * load balancing can interact in complex ways.
             * With the Verlet kernels, DD load imbalance will usually be
             * mainly due to bonded interaction imbalance, which will often
             * quickly push the domain boundaries beyond the limit for the
             * optimal, PME load balanced, cut-off. But it could be that
             * better overall performance can be obtained with a slightly
             * shorter cut-off and better DD load balancing.
             */
            change_dd_dlb_cutoff_limit(cr);
        }
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;

    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than SLOW_FAC longer than the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*PME_LB_SLOW_FAC)
    {
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }

    if (pme_lb->stage == 0)
    {
        int gridsize_start;

        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

        do
        {
            if (pme_lb->cur+1 < pme_lb->n)
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order, cr->dd);

                if (!OK)
                {
                    pme_lb->elimited = epmelblimPMEGRID;
                }
            }

            if (OK && ir->ePBC != epbcNONE)
            {
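                /* With PBC the pair-list cut-off may not exceed the largest
                 * cut-off the box allows under the minimum image convention. */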
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC, state->box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr, state, ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * the setup should not go further than cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err, fp_log, step, pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        }
        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*PME_LB_GRID_SCALE_FAC
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*PME_LB_GRID_EFFICIENCY_REL_FAC));
    }

    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur   = 0;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         * else:
         *   use the next setup
         */
        do
        {
            pme_lb->cur++;
            if (pme_lb->cur == pme_lb->end)
            {
                pme_lb->stage++;
                pme_lb->cur = pme_lb->start;
            }
        }
        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*PME_LB_SLOW_FAC);

        if (pme_lb->stage == pme_lb->nstage)
        {
            /* We are done optimizing, use the fastest setup we found */
            pme_lb->cur = pme_lb->fastest;
        }
    }

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistlong);
        if (!OK)
        {
            /* Failsafe solution */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                pme_lb->stage--;
            }
            pme_lb->fastest  = 0;
            pme_lb->start    = 0;
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err, fp_log, step, pme_lb);
        }
    }

    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb     = set->rcut_coulomb;
    ic->rlist        = set->rlist;
    ic->rlistlong    = set->rlistlong;
    ir->nstcalclr    = set->nstcalclr;
    ic->ewaldcoeff_q = set->ewaldcoeff_q;
    /* TODO: centralize the code that sets the potentials shifts */
    if (ic->coulomb_modifier == eintmodPOTSHIFT)
    {
        ic->sh_ewald = gmx_erfc(ic->ewaldcoeff_q*ic->rcoulomb);
    }
    if (EVDW_PME(ic->vdwtype))
    {
        /* We have PME for both Coulomb and VdW, set rvdw equal to rcoulomb */
        ic->rvdw            = set->rcut_coulomb;
        ic->ewaldcoeff_lj   = set->ewaldcoeff_lj;
        if (ic->vdw_modifier == eintmodPOTSHIFT)
        {
            real crc2;

            ic->dispersion_shift.cpot = -pow(ic->rvdw, -6.0);
            ic->repulsion_shift.cpot  = -pow(ic->rvdw, -12.0);
            ic->sh_invrc6             = -ic->dispersion_shift.cpot;
            crc2                      = sqr(ic->ewaldcoeff_lj*ic->rvdw);
            ic->sh_lj_ewald           = (exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)*pow(ic->rvdw, -6.0);
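            /* The shift above equals (g(beta*rc) - 1)*rc^-6 with
             * g(x) = exp(-x^2)*(1 + x^2 + x^4/2), the same function as in
             * the LJ-PME real-space potential (cf. code example #2). */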
        }
    }

    bUsesSimpleTables = uses_simple_tables(ir->cutoff_scheme, nbv, 0);
    nbnxn_gpu_pme_loadbal_update_param(nbv, ic);

    /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
     * also sharing texture references. To keep the code simple, we don't
     * treat texture references as shared resources, but this means that
     * the coulomb_tab texture ref will get updated by multiple threads.
     * Hence, to ensure that the non-bonded kernels don't start before all
     * texture binding operations are finished, we need to wait for all ranks
     * to arrive here before continuing.
     *
     * Note that we could omit this barrier if GPUs are not shared (or
     * texture objects are used), but as this is initialization code, there
     * is no point in complicating things.
     */
#ifdef GMX_THREAD_MPI
    if (PAR(cr) && use_GPU(nbv))
    {
        gmx_barrier(cr);
    }
#endif  /* GMX_THREAD_MPI */

    /* Usually we won't need the simple tables with GPUs.
     * But we do with hybrid acceleration and with free energy.
     * To avoid bugs, we always re-initialize the simple tables here.
     */
    init_interaction_const_tables(NULL, ic, bUsesSimpleTables, rtab);

    if (cr->duty & DUTY_PME)
    {
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr, pme_lb->setup[0].pmedata, ir,
                           set->grid);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only node to switch grid */
        gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
    }

    if (debug)
    {
        print_grid(NULL, debug, "", "switched to", set, -1);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err, fp_log, "", "optimal", set, -1);
    }

    return TRUE;
}
Code example #7
File: force.cpp Project: zlmturnout/gromacs
void do_force_lowlevel(t_forcerec *fr,      t_inputrec *ir,
                       t_idef     *idef,    t_commrec  *cr,
                       t_nrnb     *nrnb,    gmx_wallcycle_t wcycle,
                       t_mdatoms  *md,
                       rvec       x[],      history_t  *hist,
                       rvec       f[],
                       rvec       f_longrange[],
                       gmx_enerdata_t *enerd,
                       t_fcdata   *fcd,
                       gmx_localtop_t *top,
                       gmx_genborn_t *born,
                       gmx_bool       bBornRadii,
                       matrix     box,
                       t_lambda   *fepvals,
                       real       *lambda,
                       t_graph    *graph,
                       t_blocka   *excl,
                       rvec       mu_tot[],
                       int        flags,
                       float      *cycles_pme)
{
    int         i, j;
    int         donb_flags;
    gmx_bool    bSB;
    int         pme_flags;
    matrix      boxs;
    rvec        box_size;
    t_pbc       pbc;
    real        dvdl_dum[efptNR], dvdl_nb[efptNR];

#ifdef GMX_MPI
    double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
#endif

    set_pbc(&pbc, fr->ePBC, box);

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
        dvdl_dum[i] = 0;
    }

    /* Reset box */
    for (i = 0; (i < DIM); i++)
    {
        box_size[i] = box[i][i];
    }

    debug_gmx();

    /* do QMMM first if requested */
    if (fr->bQMMM)
    {
        enerd->term[F_EQM] = calculate_QMMM(cr, x, f, fr);
    }

    /* Call the short range functions all in one go. */

#ifdef GMX_MPI
    /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
    if (TAKETIME)
    {
        MPI_Barrier(cr->mpi_comm_mygroup);
        t0 = MPI_Wtime();
    }
#endif

    if (ir->nwall)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(ir, fr, box, md, x, f, lambda[efptVDW],
                                   enerd->grpp.ener[egLJSR], nrnb);
        enerd->dvdl_lin[efptVDW] += dvdl_walls;
    }

    /* If doing GB, reset dvda and calculate the Born radii */
    if (ir->implicit_solvent)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);

        for (i = 0; i < born->nr; i++)
        {
            fr->dvda[i] = 0;
        }

        if (bBornRadii)
        {
            calc_gb_rad(cr, fr, ir, top, x, &(fr->gblist), born, md, nrnb);
        }

        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    where();
    /* We only do non-bonded calculation with group scheme here, the verlet
     * calls are done from do_force_cutsVERLET(). */
    if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
    {
        donb_flags = 0;
        /* Add short-range interactions */
        donb_flags |= GMX_NONBONDED_DO_SR;

        /* Currently all group scheme kernels always calculate (shift-)forces */
        if (flags & GMX_FORCE_FORCES)
        {
            donb_flags |= GMX_NONBONDED_DO_FORCE;
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
        }
        if (flags & GMX_FORCE_ENERGY)
        {
            donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
        }
        if (flags & GMX_FORCE_DO_LR)
        {
            donb_flags |= GMX_NONBONDED_DO_LR;
        }

        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        do_nonbonded(fr, x, f, f_longrange, md, excl,
                     &enerd->grpp, nrnb,
                     lambda, dvdl_nb, -1, -1, donb_flags);

        /* If we do foreign lambda and we have soft-core interactions
         * we have to recalculate the (non-linear) energy contributions.
         */
        if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
        {
            for (i = 0; i < enerd->n_lambda; i++)
            {
                real lam_i[efptNR];

                for (j = 0; j < efptNR; j++)
                {
                    lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
                }
                reset_foreign_enerdata(enerd);
                do_nonbonded(fr, x, f, f_longrange, md, excl,
                             &(enerd->foreign_grpp), nrnb,
                             lam_i, dvdl_dum, -1, -1,
                             (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
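                /* The force flag is masked out above: the foreign-lambda
                 * pass only needs energies, which feed enerpart_lambda. */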
                sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
                enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
            }
        }
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
        where();
    }

    /* If we are doing GB, calculate bonded forces and apply corrections
     * to the solvation forces */
    /* MRS: Eventually, may need to include free energy contribution here! */
    if (ir->implicit_solvent)
    {
        wallcycle_sub_start(wcycle, ewcsLISTED);
        calc_gb_forces(cr, md, born, top, x, f, fr, idef,
                       ir->gb_algorithm, ir->sa_algorithm, nrnb, &pbc, graph, enerd);
        wallcycle_sub_stop(wcycle, ewcsLISTED);
    }

#ifdef GMX_MPI
    if (TAKETIME)
    {
        t1          = MPI_Wtime();
        fr->t_fnbf += t1-t0;
    }
#endif

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
    }
    else
    {
        enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
    }

    if (fepvals->sc_alpha != 0)

    /* even though the coulomb part is linear, we already added it, because we
       need to go through the vdw calculation anyway */
    {
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    debug_gmx();


    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
    }

    /* Shift the coordinates. Must be done before listed forces and PPPM,
     * but is also necessary for SHAKE and update, therefore it can NOT
     * go when no listed forces have to be evaluated.
     *
     * The shifting and PBC code is deliberately not timed, since with
     * the Verlet scheme it only takes non-zero time with triclinic
     * boxes, and even then the time is around a factor of 100 less
     * than the next smallest counter.
     */


    /* Sometimes we would not need to shift here (e.g. with NBFonly),
     * but we do so anyhow for consistency of the returned coordinates.
     */
    if (graph)
    {
        shift_self(graph, box, x);
        if (TRICLINIC(box))
        {
            inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        }
        else
        {
            inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
        }
    }
    /* Check whether we need to do listed interactions or correct for exclusions */
    if (fr->bMolPBC &&
        ((flags & GMX_FORCE_LISTED)
         || EEL_RF(fr->eeltype) || EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype)))
    {
        /* TODO There are no electrostatics methods that require this
           transformation, when using the Verlet scheme, so update the
           above conditional. */
        /* Since all atoms are in the rectangular or triclinic unit-cell,
         * only single box vector shifts (2 in x) are required.
         */
        set_pbc_dd(&pbc, fr->ePBC, cr->dd, TRUE, box);
    }
    debug_gmx();

    do_force_listed(wcycle, box, ir->fepvals, cr->ms,
                    idef, (const rvec *) x, hist, f, fr,
                    &pbc, graph, enerd, nrnb, lambda, md, fcd,
                    DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL,
                    flags);

    where();

    *cycles_pme = 0;
    clear_mat(fr->vir_el_recip);
    clear_mat(fr->vir_lj_recip);

    /* Do long-range electrostatics and/or LJ-PME, including related short-range
     * corrections.
     */
    if (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype))
    {
        int  status            = 0;
        real Vlr_q             = 0, Vlr_lj = 0, Vcorr_q = 0, Vcorr_lj = 0;
        real dvdl_long_range_q = 0, dvdl_long_range_lj = 0;

        bSB = (ir->nwall == 2);
        if (bSB)
        {
            copy_mat(box, boxs);
            svmul(ir->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
            box_size[ZZ] *= ir->wall_ewald_zfac;
        }

        if (EEL_PME_EWALD(fr->eeltype) || EVDW_PME(fr->vdwtype))
        {
            real dvdl_long_range_correction_q   = 0;
            real dvdl_long_range_correction_lj  = 0;
            /* With the Verlet scheme exclusion forces are calculated
             * in the non-bonded kernel.
             */
            /* The TPI molecule does not have exclusions with the rest
             * of the system and no intra-molecular PME grid
             * contributions will be calculated in
             * gmx_pme_calc_energy.
             */
            if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
                ir->ewald_geometry != eewg3D ||
                ir->epsilon_surface != 0)
            {
                int nthreads, t;

                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                if (fr->n_tpi > 0)
                {
                    gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
                }

                nthreads = fr->nthread_ewc;
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (t = 0; t < nthreads; t++)
                {
                    try
                    {
                        tensor *vir_q, *vir_lj;
                        real   *Vcorrt_q, *Vcorrt_lj, *dvdlt_q, *dvdlt_lj;
                        if (t == 0)
                        {
                            vir_q     = &fr->vir_el_recip;
                            vir_lj    = &fr->vir_lj_recip;
                            Vcorrt_q  = &Vcorr_q;
                            Vcorrt_lj = &Vcorr_lj;
                            dvdlt_q   = &dvdl_long_range_correction_q;
                            dvdlt_lj  = &dvdl_long_range_correction_lj;
                        }
                        else
                        {
                            vir_q     = &fr->ewc_t[t].vir_q;
                            vir_lj    = &fr->ewc_t[t].vir_lj;
                            Vcorrt_q  = &fr->ewc_t[t].Vcorr_q;
                            Vcorrt_lj = &fr->ewc_t[t].Vcorr_lj;
                            dvdlt_q   = &fr->ewc_t[t].dvdl[efptCOUL];
                            dvdlt_lj  = &fr->ewc_t[t].dvdl[efptVDW];
                            clear_mat(*vir_q);
                            clear_mat(*vir_lj);
                        }
                        *dvdlt_q  = 0;
                        *dvdlt_lj = 0;

                        /* Threading is only supported with the Verlet cut-off
                         * scheme and then only single particle forces (no
                         * exclusion forces) are calculated, so we can store
                         * the forces in the normal, single fr->f_novirsum array.
                         */
                        ewald_LRcorrection(fr->excl_load[t], fr->excl_load[t+1],
                                           cr, t, fr,
                                           md->chargeA, md->chargeB,
                                           md->sqrt_c6A, md->sqrt_c6B,
                                           md->sigmaA, md->sigmaB,
                                           md->sigma3A, md->sigma3B,
                                           md->nChargePerturbed || md->nTypePerturbed,
                                           ir->cutoff_scheme != ecutsVERLET,
                                           excl, x, bSB ? boxs : box, mu_tot,
                                           ir->ewald_geometry,
                                           ir->epsilon_surface,
                                           fr->f_novirsum, *vir_q, *vir_lj,
                                           Vcorrt_q, Vcorrt_lj,
                                           lambda[efptCOUL], lambda[efptVDW],
                                           dvdlt_q, dvdlt_lj);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                }
                if (nthreads > 1)
                {
                    reduce_thread_energies(fr->vir_el_recip, fr->vir_lj_recip,
                                           &Vcorr_q, &Vcorr_lj,
                                           &dvdl_long_range_correction_q,
                                           &dvdl_long_range_correction_lj,
                                           nthreads, fr->ewc_t);
                }
                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            }
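
The OpenMP region above gives each thread private accumulators for the correction energies, virials and dV/dlambda terms (thread 0 writes straight into the output variables) and merges them afterwards with reduce_thread_energies(), so the inner loop needs no atomics. Below is a minimal sketch of that reduction pattern; ThreadScratch and reduceThreadScratch are illustrative names, not GROMACS identifiers.

/* Per-thread scratch accumulators; thread 0 skips the scratch and writes
 * into the caller's variables directly, as in the listing above. */
struct ThreadScratch
{
    double vcorrQ;
    double vcorrLJ;
    double dvdlQ;
    double dvdlLJ;
};

/* Serial reduction after the parallel region; starts at t = 1 because
 * thread 0 already accumulated into the outputs. */
static void reduceThreadScratch(int nthreads, const ThreadScratch *scratch,
                                double *vcorrQ, double *vcorrLJ,
                                double *dvdlQ, double *dvdlLJ)
{
    for (int t = 1; t < nthreads; t++)
    {
        *vcorrQ  += scratch[t].vcorrQ;
        *vcorrLJ += scratch[t].vcorrLJ;
        *dvdlQ   += scratch[t].dvdlQ;
        *dvdlLJ  += scratch[t].dvdlLJ;
    }
}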
Code example #8
File: mdatoms.cpp Project: tanigawa/gromacs
void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
              int nindex, const int *index,
              int homenr,
              t_mdatoms *md)
{
    gmx_bool              bLJPME;
    gmx_mtop_atomlookup_t alook;
    int                   i;
    const t_grpopts      *opts;
    const gmx_groups_t   *groups;
    int                   nthreads gmx_unused;
    const real            oneOverSix = 1.0 / 6.0;

    bLJPME = EVDW_PME(ir->vdwtype);

    opts = &ir->opts;

    groups = &mtop->groups;

    /* Index==NULL indicates no DD (unless we have a DD node with no
     * atoms), so also check for homenr. This should be
     * signaled properly with an extra parameter or nindex==-1.
     */
    if (index == NULL && (homenr > 0))
    {
        md->nr = mtop->natoms;
    }
    else
    {
        md->nr = nindex;
    }

    if (md->nr > md->nalloc)
    {
        md->nalloc = over_alloc_dd(md->nr);

        if (md->nMassPerturbed)
        {
            srenew(md->massA, md->nalloc);
            srenew(md->massB, md->nalloc);
        }
        srenew(md->massT, md->nalloc);
        srenew(md->invmass, md->nalloc);
        srenew(md->chargeA, md->nalloc);
        srenew(md->typeA, md->nalloc);
        if (md->nPerturbed)
        {
            srenew(md->chargeB, md->nalloc);
            srenew(md->typeB, md->nalloc);
        }
        if (bLJPME)
        {
            srenew(md->sqrt_c6A, md->nalloc);
            srenew(md->sigmaA, md->nalloc);
            srenew(md->sigma3A, md->nalloc);
            if (md->nPerturbed)
            {
                srenew(md->sqrt_c6B, md->nalloc);
                srenew(md->sigmaB, md->nalloc);
                srenew(md->sigma3B, md->nalloc);
            }
        }
        srenew(md->ptype, md->nalloc);
        if (opts->ngtc > 1)
        {
            srenew(md->cTC, md->nalloc);
            /* We always copy cTC with domain decomposition */
        }
        srenew(md->cENER, md->nalloc);
        if (opts->ngacc > 1)
        {
            srenew(md->cACC, md->nalloc);
        }
        if (opts->nFreeze &&
            (opts->ngfrz > 1 ||
             opts->nFreeze[0][XX] || opts->nFreeze[0][YY] || opts->nFreeze[0][ZZ]))
        {
            srenew(md->cFREEZE, md->nalloc);
        }
        if (md->bVCMgrps)
        {
            srenew(md->cVCM, md->nalloc);
        }
        if (md->bOrires)
        {
            srenew(md->cORF, md->nalloc);
        }
        if (md->nPerturbed)
        {
            srenew(md->bPerturbed, md->nalloc);
        }

        /* Note that these user t_mdatoms array pointers are NULL
         * when there is only one group present.
         * Therefore, when adding code, the user should use something like:
         * gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
         */
        if (mtop->groups.grpnr[egcUser1] != NULL)
        {
            srenew(md->cU1, md->nalloc);
        }
        if (mtop->groups.grpnr[egcUser2] != NULL)
        {
            srenew(md->cU2, md->nalloc);
        }

        if (ir->bQMMM)
        {
            srenew(md->bQM, md->nalloc);
        }
        if (ir->bAdress)
        {
            srenew(md->wf, md->nalloc);
            srenew(md->tf_table_index, md->nalloc);
        }
    }

    alook = gmx_mtop_atomlookup_init(mtop);

    // cppcheck-suppress unreadVariable
    nthreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (i = 0; i < md->nr; i++)
    {
        try
        {
            int      g, ag;
            real     mA, mB, fac;
            real     c6, c12;
            t_atom  *atom;

            if (index == NULL)
            {
                ag = i;
            }
            else
            {
                ag   = index[i];
            }
            gmx_mtop_atomnr_to_atom(alook, ag, &atom);

            if (md->cFREEZE)
            {
                md->cFREEZE[i] = ggrpnr(groups, egcFREEZE, ag);
            }
            if (EI_ENERGY_MINIMIZATION(ir->eI))
            {
                /* Displacement is proportional to F, masses used for constraints */
                mA = 1.0;
                mB = 1.0;
            }
            else if (ir->eI == eiBD)
            {
                /* With BD the physical masses are irrelevant.
                 * To keep the code simple we use most of the normal MD code path
                 * for BD. Thus for constraining the masses should be proportional
                 * to the friction coefficient. We set the absolute value such that
                 * m/2<(dx/dt)^2> = m/2*2kT/(fric*dt) = kT/2 => m=fric*dt/2
                 * Then if we set the (meaningless) velocity to v=dx/dt, we get the
                 * correct kinetic energy and temperature using the usual code path.
                 * Thus with BD v*dt will give the displacement and the reported
                 * temperature can signal bad integration (too large time step).
                 */
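                /* In more detail: free BD diffusion gives <dx^2> = 2*D*dt
                 * with D = kT/fric, so <(dx/dt)^2> = 2*kT/(fric*dt).
                 * Requiring m/2*<(dx/dt)^2> = kT/2 per degree of freedom
                 * then yields m = fric*dt/2.
                 */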
                if (ir->bd_fric > 0)
                {
                    mA = 0.5*ir->bd_fric*ir->delta_t;
                    mB = 0.5*ir->bd_fric*ir->delta_t;
                }
                else
                {
                    /* The friction coefficient is mass/tau_t */
                    fac = ir->delta_t/opts->tau_t[md->cTC ? groups->grpnr[egcTC][ag] : 0];
                    mA  = 0.5*atom->m*fac;
                    mB  = 0.5*atom->mB*fac;
                }
            }
            else
            {
                mA = atom->m;
                mB = atom->mB;
            }
            if (md->nMassPerturbed)
            {
                md->massA[i]  = mA;
                md->massB[i]  = mB;
            }
            md->massT[i]    = mA;
            if (mA == 0.0)
            {
                md->invmass[i]    = 0;
            }
            else if (md->cFREEZE)
            {
                g = md->cFREEZE[i];
                if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ])
                {
                    /* Set the mass of completely frozen particles to ALMOST_ZERO instead of 0
                     * to avoid div by zero in lincs or shake.
                     * Note that constraints can still move a partially frozen particle.
                     */
                    md->invmass[i]  = ALMOST_ZERO;
                }
                else
                {
                    md->invmass[i]  = 1.0/mA;
                }
            }
            else
            {
                md->invmass[i]    = 1.0/mA;
            }
            md->chargeA[i]      = atom->q;
            md->typeA[i]        = atom->type;
            if (bLJPME)
            {
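                /* iparams holds the ntypes x ntypes pair-parameter matrix
                 * row-major, so index (atnr+1)*type is the diagonal element,
                 * i.e. the self pair of this atom type. */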
                c6                = mtop->ffparams.iparams[atom->type*(mtop->ffparams.atnr+1)].lj.c6;
                c12               = mtop->ffparams.iparams[atom->type*(mtop->ffparams.atnr+1)].lj.c12;
                md->sqrt_c6A[i]   = sqrt(c6);
                if (c6 == 0.0 || c12 == 0.0)
                {
                    md->sigmaA[i] = 1.0;
                }
                else
                {
                    md->sigmaA[i] = pow(c12/c6, oneOverSix);
                }
                md->sigma3A[i]    = 1/(md->sigmaA[i]*md->sigmaA[i]*md->sigmaA[i]);
            }
            if (md->nPerturbed)
            {
                md->bPerturbed[i] = PERTURBED(*atom);
                md->chargeB[i]    = atom->qB;
                md->typeB[i]      = atom->typeB;
                if (bLJPME)
                {
                    c6                = mtop->ffparams.iparams[atom->typeB*(mtop->ffparams.atnr+1)].lj.c6;
                    c12               = mtop->ffparams.iparams[atom->typeB*(mtop->ffparams.atnr+1)].lj.c12;
                    md->sqrt_c6B[i]   = sqrt(c6);
                    if (c6 == 0.0 || c12 == 0.0)
                    {
                        md->sigmaB[i] = 1.0;
                    }
                    else
                    {
                        md->sigmaB[i] = pow(c12/c6, oneOverSix);
                    }
                    md->sigma3B[i]    = 1/(md->sigmaB[i]*md->sigmaB[i]*md->sigmaB[i]);
                }
            }
            md->ptype[i]    = atom->ptype;
            if (md->cTC)
            {
                md->cTC[i]    = groups->grpnr[egcTC][ag];
            }
            md->cENER[i]    =
                (groups->grpnr[egcENER] ? groups->grpnr[egcENER][ag] : 0);
            if (md->cACC)
            {
                md->cACC[i]   = groups->grpnr[egcACC][ag];
            }
            if (md->cVCM)
            {
                md->cVCM[i]       = groups->grpnr[egcVCM][ag];
            }
            if (md->cORF)
            {
                md->cORF[i]       = groups->grpnr[egcORFIT][ag];
            }

            if (md->cU1)
            {
                md->cU1[i]        = groups->grpnr[egcUser1][ag];
            }
            if (md->cU2)
            {
                md->cU2[i]        = groups->grpnr[egcUser2][ag];
            }

            if (ir->bQMMM)
            {
                if (groups->grpnr[egcQMMM] == 0 ||
                    groups->grpnr[egcQMMM][ag] < groups->grps[egcQMMM].nr-1)
                {
                    md->bQM[i]      = TRUE;
                }
                else
                {
                    md->bQM[i]      = FALSE;
                }
            }
            /* Initialize AdResS weighting functions to adressw */
            if (ir->bAdress)
            {
                md->wf[i]           = 1.0;
                /* if no tf table groups specified, use default table */
                md->tf_table_index[i] = DEFAULT_TF_TABLE;
                if (ir->adress->n_tf_grps > 0)
                {
                    /* if tf table groups are specified, tf is only applied to those energy groups */
                    md->tf_table_index[i] = NO_TF_TABLE;
                    /* check whether the atom is in one of the relevant energy groups and assign a table index */
                    for (g = 0; g < ir->adress->n_tf_grps; g++)
                    {
                        if (md->cENER[i] == ir->adress->tf_table_index[g])
                        {
                            md->tf_table_index[i] = g;
                        }
                    }
                }
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    gmx_mtop_atomlookup_destroy(alook);

    md->homenr = homenr;
    md->lambda = 0;
}
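
For LJ-PME, atoms2md() above derives the per-atom parameters from the 12-6 coefficients of the type's self pair: with V(r) = c12/r^12 - c6/r^6, sigma = (c12/c6)^(1/6), and the stored quantities are sqrt(c6), sigma and 1/sigma^3, with a dummy sigma of 1 when either coefficient is zero. A self-contained sketch of that derivation; LjPmeParams and makeLjPmeParams are illustrative names, not GROMACS API.

#include <cmath>

struct LjPmeParams
{
    double sqrtC6;    /* sqrt(c6), spread on the LJ-PME grid */
    double sigma;     /* (c12/c6)^(1/6) */
    double invSigma3; /* 1/sigma^3, used with the LB combination rule */
};

static LjPmeParams makeLjPmeParams(double c6, double c12)
{
    LjPmeParams p;
    p.sqrtC6 = std::sqrt(c6);
    /* Atoms without LJ interactions get a dummy sigma of 1, as above */
    p.sigma  = (c6 == 0.0 || c12 == 0.0) ? 1.0 : std::pow(c12/c6, 1.0/6.0);
    p.invSigma3 = 1.0/(p.sigma*p.sigma*p.sigma);
    return p;
}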
Code example #9
/* There's nothing special to do here if just masses are perturbed,
 * but if either charge or type is perturbed then the implementation
 * requires that B states are defined for both charge and type, and
 * does not optimize for the cases where only one changes.
 *
 * The parameter vectors for B states are left undefined in atoms2md()
 * when either FEP is inactive, or when there are no mass/charge/type
 * perturbations. The parameter vectors for LJ-PME are likewise
 * undefined when LJ-PME is not active. This works because
 * bHaveChargeOrTypePerturbed handles the control flow. */
void ewald_LRcorrection(int numAtomsLocal,
                        t_commrec *cr,
                        int numThreads, int thread,
                        t_forcerec *fr,
                        real *chargeA, real *chargeB,
                        real *C6A, real *C6B,
                        real *sigmaA, real *sigmaB,
                        real *sigma3A, real *sigma3B,
                        gmx_bool bHaveChargeOrTypePerturbed,
                        gmx_bool calc_excl_corr,
                        t_blocka *excl, rvec x[],
                        matrix box, rvec mu_tot[],
                        int ewald_geometry, real epsilon_surface,
                        rvec *f, tensor vir_q, tensor vir_lj,
                        real *Vcorr_q, real *Vcorr_lj,
                        real lambda_q, real lambda_lj,
                        real *dvdlambda_q, real *dvdlambda_lj)
{
    int numAtomsToBeCorrected;
    if (calc_excl_corr)
    {
        /* We need to correct all exclusion pairs (cutoff-scheme = group) */
        numAtomsToBeCorrected = excl->nr;

        GMX_RELEASE_ASSERT(numAtomsToBeCorrected >= numAtomsLocal, "We might need to do self-corrections");
    }
    else
    {
        /* We need to correct only self interactions */
        numAtomsToBeCorrected = numAtomsLocal;
    }
    int         start = (numAtomsToBeCorrected*thread)/numThreads;
    int         end   = (numAtomsToBeCorrected*(thread + 1))/numThreads;

    int         i, i1, i2, j, k, m, iv, jv, q;
    int        *AA;
    double      Vexcl_q, dvdl_excl_q, dvdl_excl_lj; /* Necessary for precision */
    double      Vexcl_lj;
    real        one_4pi_eps;
    real        v, vc, qiA, qiB, dr2, rinv;
    real        Vself_q[2], Vself_lj[2], Vdipole[2], rinv2, ewc_q = fr->ewaldcoeff_q, ewcdr;
    real        ewc_lj = fr->ewaldcoeff_lj, ewc_lj2 = ewc_lj * ewc_lj;
    real        c6Ai   = 0, c6Bi = 0, c6A = 0, c6B = 0, ewcdr2, ewcdr4, c6L = 0, rinv6;
    rvec        df, dx, mutot[2], dipcorrA, dipcorrB;
    tensor      dxdf_q = {{0}}, dxdf_lj = {{0}};
    real        vol    = box[XX][XX]*box[YY][YY]*box[ZZ][ZZ];
    real        L1_q, L1_lj, dipole_coeff, qqA, qqB, qqL, vr0_q, vr0_lj = 0;
    gmx_bool    bMolPBC      = fr->bMolPBC;
    gmx_bool    bDoingLBRule = (fr->ljpme_combination_rule == eljpmeLB);
    gmx_bool    bNeedLongRangeCorrection;

    /* This routine can be made faster by using tables instead of analytical interactions
     * However, that requires a thorough verification that they are correct in all cases.
     */

    one_4pi_eps   = ONE_4PI_EPS0/fr->epsilon_r;
    vr0_q         = ewc_q*M_2_SQRTPI;
    if (EVDW_PME(fr->vdwtype))
    {
        vr0_lj    = -gmx::power6(ewc_lj)/6.0;
    }

    AA           = excl->a;
    Vexcl_q      = 0;
    Vexcl_lj     = 0;
    dvdl_excl_q  = 0;
    dvdl_excl_lj = 0;
    Vdipole[0]   = 0;
    Vdipole[1]   = 0;
    L1_q         = 1.0-lambda_q;
    L1_lj        = 1.0-lambda_lj;
    /* Note that we have to transform back to gromacs units, since
     * mu_tot contains the dipole in debye units (for output).
     */
    for (i = 0; (i < DIM); i++)
    {
        mutot[0][i] = mu_tot[0][i]*DEBYE2ENM;
        mutot[1][i] = mu_tot[1][i]*DEBYE2ENM;
        dipcorrA[i] = 0;
        dipcorrB[i] = 0;
    }
    dipole_coeff = 0;
    switch (ewald_geometry)
    {
        case eewg3D:
            if (epsilon_surface != 0)
            {
                dipole_coeff =
                    2*M_PI*ONE_4PI_EPS0/((2*epsilon_surface + fr->epsilon_r)*vol);
                for (i = 0; (i < DIM); i++)
                {
                    dipcorrA[i] = 2*dipole_coeff*mutot[0][i];
                    dipcorrB[i] = 2*dipole_coeff*mutot[1][i];
                }
            }
            break;
        case eewg3DC:
            dipole_coeff = 2*M_PI*one_4pi_eps/vol;
            dipcorrA[ZZ] = 2*dipole_coeff*mutot[0][ZZ];
            dipcorrB[ZZ] = 2*dipole_coeff*mutot[1][ZZ];
            break;
        default:
            gmx_incons("Unsupported Ewald geometry");
            break;
    }
    if (debug)
    {
        fprintf(debug, "dipcorr = %8.3f  %8.3f  %8.3f\n",
                dipcorrA[XX], dipcorrA[YY], dipcorrA[ZZ]);
        fprintf(debug, "mutot   = %8.3f  %8.3f  %8.3f\n",
                mutot[0][XX], mutot[0][YY], mutot[0][ZZ]);
    }
    bNeedLongRangeCorrection = (calc_excl_corr || dipole_coeff != 0);
    if (bNeedLongRangeCorrection && !bHaveChargeOrTypePerturbed)
    {
        for (i = start; (i < end); i++)
        {
            /* Initiate local variables (for this i-particle) to 0 */
            qiA = chargeA[i]*one_4pi_eps;
            if (EVDW_PME(fr->vdwtype))
            {
                c6Ai = C6A[i];
                if (bDoingLBRule)
                {
                    c6Ai *= sigma3A[i];
                }
            }
            if (calc_excl_corr)
            {
                i1  = excl->index[i];
                i2  = excl->index[i+1];

                /* Loop over excluded neighbours */
                for (j = i1; (j < i2); j++)
                {
                    k = AA[j];
                    /*
                     * First we must test whether k <> i, and then,
                     * because the exclusions are all listed twice i->k
                     * and k->i we must select just one of the two.  As
                     * a minor optimization we only compute forces when
                     * the charges are non-zero.
                     */
                    if (k > i)
                    {
                        qqA = qiA*chargeA[k];
                        if (EVDW_PME(fr->vdwtype))
                        {
                            c6A  = c6Ai * C6A[k];
                            if (bDoingLBRule)
                            {
                                c6A *= gmx::power6(0.5*(sigmaA[i]+sigmaA[k]))*sigma3A[k];
                            }
                        }
                        if (qqA != 0.0 || c6A != 0.0)
                        {
                            rvec_sub(x[i], x[k], dx);
                            if (bMolPBC)
                            {
                                /* Cheap pbc_dx, assume excluded pairs are at short distance. */
                                for (m = DIM-1; (m >= 0); m--)
                                {
                                    if (dx[m] > 0.5*box[m][m])
                                    {
                                        rvec_dec(dx, box[m]);
                                    }
                                    else if (dx[m] < -0.5*box[m][m])
                                    {
                                        rvec_inc(dx, box[m]);
                                    }
                                }
                            }
                            dr2 = norm2(dx);
                            /* Distance between two excluded particles
                             * may be zero in the case of shells
                             */
                            if (dr2 != 0)
                            {
                                rinv              = gmx::invsqrt(dr2);
                                rinv2             = rinv*rinv;
                                if (qqA != 0.0)
                                {
                                    real dr, fscal;

                                    dr       = 1.0/rinv;
                                    ewcdr    = ewc_q*dr;
                                    vc       = qqA*std::erf(ewcdr)*rinv;
                                    Vexcl_q += vc;
#if GMX_DOUBLE
                                    /* Relative accuracy at R_ERF_R_INACC of 3e-10 */
#define       R_ERF_R_INACC 0.006
#else
                                    /* Relative accuracy at R_ERF_R_INACC of 2e-5 */
#define       R_ERF_R_INACC 0.1
#endif
                                    /* fscal is the scalar force pre-multiplied by rinv,
                                     * to normalise the relative position vector dx */
                                    if (ewcdr > R_ERF_R_INACC)
                                    {
                                        fscal = rinv2*(vc - qqA*ewc_q*M_2_SQRTPI*exp(-ewcdr*ewcdr));
                                    }
                                    else
                                    {
                                        /* Use a fourth order series expansion for small ewcdr */
                                        fscal = ewc_q*ewc_q*qqA*vr0_q*(2.0/3.0 - 0.4*ewcdr*ewcdr);
                                    }

                                    /* The force vector is obtained by multiplication with
                                     * the relative position vector
                                     */
                                    svmul(fscal, dx, df);
                                    rvec_inc(f[k], df);
                                    rvec_dec(f[i], df);
                                    for (iv = 0; (iv < DIM); iv++)
                                    {
                                        for (jv = 0; (jv < DIM); jv++)
                                        {
                                            dxdf_q[iv][jv] += dx[iv]*df[jv];
                                        }
                                    }
                                }

                                if (c6A != 0.0)
                                {
                                    real fscal;

                                    rinv6     = rinv2*rinv2*rinv2;
                                    ewcdr2    = ewc_lj2*dr2;
                                    ewcdr4    = ewcdr2*ewcdr2;
                                    /* We get the excluded long-range contribution from -C6*(1-g(r))
                                     * g(r) is also defined in the manual under LJ-PME
                                     */
                                    vc        = -c6A*rinv6*(1.0 - exp(-ewcdr2)*(1 + ewcdr2 + 0.5*ewcdr4));
                                    Vexcl_lj += vc;
                                    /* The force is the derivative of the potential vc.
                                     * fscal is the scalar force pre-multiplied by rinv,
                                     * to normalise the relative position vector dx */
                                    fscal     = 6.0*vc*rinv2 + c6A*rinv6*exp(-ewcdr2)*ewc_lj2*ewcdr4;

                                    /* The force vector is obtained by multiplication with
                                     * the relative position vector
                                     */
                                    svmul(fscal, dx, df);
                                    rvec_inc(f[k], df);
                                    rvec_dec(f[i], df);
                                    for (iv = 0; (iv < DIM); iv++)
                                    {
                                        for (jv = 0; (jv < DIM); jv++)
                                        {
                                            dxdf_lj[iv][jv] += dx[iv]*df[jv];
                                        }
                                    }
                                }
                            }
                            else
                            {
                                Vexcl_q  += qqA*vr0_q;
                                Vexcl_lj += c6A*vr0_lj;
                            }
                        }
                    }
                }
            }
            /* Dipole correction on force */
            if (dipole_coeff != 0 && i < numAtomsLocal)
            {
                for (j = 0; (j < DIM); j++)
                {
                    f[i][j] -= dipcorrA[j]*chargeA[i];
                }
            }
        }
    }
    else if (bNeedLongRangeCorrection)
    {
        for (i = start; (i < end); i++)
        {
            /* Initiate local variables (for this i-particle) to 0 */
            qiA = chargeA[i]*one_4pi_eps;
            qiB = chargeB[i]*one_4pi_eps;
            if (EVDW_PME(fr->vdwtype))
            {
                c6Ai = C6A[i];
                c6Bi = C6B[i];
                if (bDoingLBRule)
                {
                    c6Ai *= sigma3A[i];
                    c6Bi *= sigma3B[i];
                }
            }
            if (calc_excl_corr)
            {
                i1  = excl->index[i];
                i2  = excl->index[i+1];

                /* Loop over excluded neighbours */
                for (j = i1; (j < i2); j++)
                {
                    k = AA[j];
                    if (k > i)
                    {
                        qqA = qiA*chargeA[k];
                        qqB = qiB*chargeB[k];
                        if (EVDW_PME(fr->vdwtype))
                        {
                            c6A = c6Ai*C6A[k];
                            c6B = c6Bi*C6B[k];
                            if (bDoingLBRule)
                            {
                                c6A *= gmx::power6(0.5*(sigmaA[i]+sigmaA[k]))*sigma3A[k];
                                c6B *= gmx::power6(0.5*(sigmaB[i]+sigmaB[k]))*sigma3B[k];
                            }
                        }
                        if (qqA != 0.0 || qqB != 0.0 || c6A != 0.0 || c6B != 0.0)
                        {
                            real fscal;

                            qqL   = L1_q*qqA + lambda_q*qqB;
                            if (EVDW_PME(fr->vdwtype))
                            {
                                c6L = L1_lj*c6A + lambda_lj*c6B;
                            }
                            rvec_sub(x[i], x[k], dx);
                            if (bMolPBC)
                            {
                                /* Cheap pbc_dx, assume excluded pairs are at short distance. */
                                for (m = DIM-1; (m >= 0); m--)
                                {
                                    if (dx[m] > 0.5*box[m][m])
                                    {
                                        rvec_dec(dx, box[m]);
                                    }
                                    else if (dx[m] < -0.5*box[m][m])
                                    {
                                        rvec_inc(dx, box[m]);
                                    }
                                }
                            }
                            dr2 = norm2(dx);
                            if (dr2 != 0)
                            {
                                rinv    = gmx::invsqrt(dr2);
                                rinv2   = rinv*rinv;
                                if (qqA != 0.0 || qqB != 0.0)
                                {
                                    real dr;

                                    dr           = 1.0/rinv;
                                    v            = std::erf(ewc_q*dr)*rinv;
                                    vc           = qqL*v;
                                    Vexcl_q     += vc;
                                    /* fscal is the scalar force pre-multiplied by rinv,
                                     * to normalise the relative position vector dx */
                                    fscal        = rinv2*(vc-qqL*ewc_q*M_2_SQRTPI*exp(-ewc_q*ewc_q*dr2));
                                    dvdl_excl_q += (qqB - qqA)*v;

                                    /* The force vector is obtained by multiplication with
                                     * the relative position vector
                                     */
                                    svmul(fscal, dx, df);
                                    rvec_inc(f[k], df);
                                    rvec_dec(f[i], df);
                                    for (iv = 0; (iv < DIM); iv++)
                                    {
                                        for (jv = 0; (jv < DIM); jv++)
                                        {
                                            dxdf_q[iv][jv] += dx[iv]*df[jv];
                                        }
                                    }
                                }

                                if ((c6A != 0.0 || c6B != 0.0) && EVDW_PME(fr->vdwtype))
                                {
                                    rinv6         = rinv2*rinv2*rinv2;
                                    ewcdr2        = ewc_lj2*dr2;
                                    ewcdr4        = ewcdr2*ewcdr2;
                                    v             = -rinv6*(1.0 - exp(-ewcdr2)*(1 + ewcdr2 + 0.5*ewcdr4));
                                    vc            = c6L*v;
                                    Vexcl_lj     += vc;
                                    /* fscal is the scalar force pre-multiplied by rinv,
                                     * to normalise the relative position vector dx */
                                    fscal         = 6.0*vc*rinv2 + c6L*rinv6*exp(-ewcdr2)*ewc_lj2*ewcdr4;
                                    dvdl_excl_lj += (c6B - c6A)*v;

                                    /* The force vector is obtained by multiplication with
                                     * the relative position vector
                                     */
                                    svmul(fscal, dx, df);
                                    rvec_inc(f[k], df);
                                    rvec_dec(f[i], df);
                                    for (iv = 0; (iv < DIM); iv++)
                                    {
                                        for (jv = 0; (jv < DIM); jv++)
                                        {
                                            dxdf_lj[iv][jv] += dx[iv]*df[jv];
                                        }
                                    }
                                }
                            }
                            else
                            {
                                Vexcl_q      += qqL*vr0_q;
                                dvdl_excl_q  += (qqB - qqA)*vr0_q;
                                Vexcl_lj     += c6L*vr0_lj;
                                dvdl_excl_lj += (c6B - c6A)*vr0_lj;
                            }
                        }
                    }
                }
            }
            /* Dipole correction on force */
            if (dipole_coeff != 0 && i < numAtomsLocal)
            {
                for (j = 0; (j < DIM); j++)
                {
                    f[i][j] -= L1_q*dipcorrA[j]*chargeA[i]
                        + lambda_q*dipcorrB[j]*chargeB[i];
                }
            }
        }
    }
    for (iv = 0; (iv < DIM); iv++)
    {
        for (jv = 0; (jv < DIM); jv++)
        {
            vir_q[iv][jv]  += 0.5*dxdf_q[iv][jv];
            vir_lj[iv][jv] += 0.5*dxdf_lj[iv][jv];
        }
    }

    Vself_q[0]  = 0;
    Vself_q[1]  = 0;
    Vself_lj[0] = 0;
    Vself_lj[1] = 0;

    /* Global corrections only on master process */
    if (MASTER(cr) && thread == 0)
    {
        for (q = 0; q < (bHaveChargeOrTypePerturbed ? 2 : 1); q++)
        {
            if (calc_excl_corr)
            {
                /* Self-energy correction */
                Vself_q[q] = ewc_q*one_4pi_eps*fr->q2sum[q]*M_1_SQRTPI;
                if (EVDW_PME(fr->vdwtype))
                {
                    Vself_lj[q] =  fr->c6sum[q]*0.5*vr0_lj;
                }
            }

            /* Apply surface dipole correction:
             * correction = dipole_coeff * (dipole)^2
             */
            if (dipole_coeff != 0)
            {
                if (ewald_geometry == eewg3D)
                {
                    Vdipole[q] = dipole_coeff*iprod(mutot[q], mutot[q]);
                }
                else if (ewald_geometry == eewg3DC)
                {
                    Vdipole[q] = dipole_coeff*mutot[q][ZZ]*mutot[q][ZZ];
                }
            }
        }
    }
    if (!bHaveChargeOrTypePerturbed)
    {
        *Vcorr_q = Vdipole[0] - Vself_q[0] - Vexcl_q;
        if (EVDW_PME(fr->vdwtype))
        {
            *Vcorr_lj = -Vself_lj[0] - Vexcl_lj;
        }
    }
    else
    {
        *Vcorr_q = L1_q*(Vdipole[0] - Vself_q[0])
            + lambda_q*(Vdipole[1] - Vself_q[1])
            - Vexcl_q;
        *dvdlambda_q += Vdipole[1] - Vself_q[1]
            - (Vdipole[0] - Vself_q[0]) - dvdl_excl_q;
        if (EVDW_PME(fr->vdwtype))
        {
            *Vcorr_lj      = -(L1_lj*Vself_lj[0] + lambda_lj*Vself_lj[1]) - Vexcl_lj;
            *dvdlambda_lj += -Vself_lj[1] + Vself_lj[0] - dvdl_excl_lj;
        }
    }

    if (debug)
    {
        fprintf(debug, "Long Range corrections for Ewald interactions:\n");
        fprintf(debug, "q2sum = %g, Vself_q=%g c6sum = %g, Vself_lj=%g\n",
                L1_q*fr->q2sum[0]+lambda_q*fr->q2sum[1], L1_q*Vself_q[0]+lambda_q*Vself_q[1], L1_lj*fr->c6sum[0]+lambda_lj*fr->c6sum[1], L1_lj*Vself_lj[0]+lambda_lj*Vself_lj[1]);
        fprintf(debug, "Electrostatic Long Range correction: Vexcl=%g\n", Vexcl_q);
        fprintf(debug, "Lennard-Jones Long Range correction: Vexcl=%g\n", Vexcl_lj);
        if (MASTER(cr) && thread == 0)
        {
            if (epsilon_surface > 0 || ewald_geometry == eewg3DC)
            {
                fprintf(debug, "Total dipole correction: Vdipole=%g\n",
                        L1_q*Vdipole[0]+lambda_q*Vdipole[1]);
            }
        }
    }
}
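
The heart of the excluded-pair loop above: the reciprocal-space PME sum contains all pairs, so for every excluded pair the erf(beta*r)/r part of the Coulomb interaction must be subtracted again, together with the matching force. A minimal sketch of that per-pair correction (exclCorrection is an illustrative helper, not GROMACS API):

#include <cmath>

/* Returns the correction energy for one excluded pair and stores in *fscal
 * the scalar force pre-divided by r, ready to be multiplied by the pair
 * vector. qq is the charge product including 1/(4*pi*eps0*eps_r), beta the
 * Ewald splitting parameter and r the pair distance. */
static double exclCorrection(double qq, double beta, double r, double *fscal)
{
    const double twoOverSqrtPi = 1.1283791670955126; /* 2/sqrt(pi) */
    const double v             = qq*std::erf(beta*r)/r;

    *fscal = (v - qq*beta*twoOverSqrtPi*std::exp(-beta*beta*r*r))/(r*r);
    return v;
}

As in the listing, a production version would switch to a series expansion of the force at small beta*r, where the two terms in *fscal nearly cancel.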
Code example #10
File: force.cpp Project: FoldingAtHome/gromacs
void do_force_lowlevel(t_forcerec *fr,      t_inputrec *ir,
                       t_idef     *idef,    t_commrec  *cr,
                       t_nrnb     *nrnb,    gmx_wallcycle_t wcycle,
                       t_mdatoms  *md,
                       rvec       x[],      history_t  *hist,
                       rvec       f[],
                       rvec       f_longrange[],
                       gmx_enerdata_t *enerd,
                       t_fcdata   *fcd,
                       gmx_localtop_t *top,
                       gmx_genborn_t *born,
                       gmx_bool       bBornRadii,
                       matrix     box,
                       t_lambda   *fepvals,
                       real       *lambda,
                       t_graph    *graph,
                       t_blocka   *excl,
                       rvec       mu_tot[],
                       int        flags,
                       float      *cycles_pme)
{
    int         i, j;
    int         donb_flags;
    gmx_bool    bSB;
    int         pme_flags;
    matrix      boxs;
    rvec        box_size;
    t_pbc       pbc;
    real        dvdl_dum[efptNR], dvdl_nb[efptNR];

#ifdef GMX_MPI
    double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
#endif

    set_pbc(&pbc, fr->ePBC, box);

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
        dvdl_dum[i] = 0;
    }

    /* Reset box */
    for (i = 0; (i < DIM); i++)
    {
        box_size[i] = box[i][i];
    }

    debug_gmx();

    /* do QMMM first if requested */
    if (fr->bQMMM)
    {
        enerd->term[F_EQM] = calculate_QMMM(cr, x, f, fr);
    }

    /* Call the short range functions all in one go. */

#ifdef GMX_MPI
    /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
    if (TAKETIME)
    {
        MPI_Barrier(cr->mpi_comm_mygroup);
        t0 = MPI_Wtime();
    }
#endif

    if (ir->nwall)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(ir, fr, box, md, x, f, lambda[efptVDW],
                                   enerd->grpp.ener[egLJSR], nrnb);
        enerd->dvdl_lin[efptVDW] += dvdl_walls;
    }

    /* If doing GB, reset dvda and calculate the Born radii */
    if (ir->implicit_solvent)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);

        for (i = 0; i < born->nr; i++)
        {
            fr->dvda[i] = 0;
        }

        if (bBornRadii)
        {
            calc_gb_rad(cr, fr, ir, top, x, &(fr->gblist), born, md, nrnb);
        }

        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    where();
    /* We only do non-bonded calculation with group scheme here, the verlet
     * calls are done from do_force_cutsVERLET(). */
    if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
    {
        donb_flags = 0;
        /* Add short-range interactions */
        donb_flags |= GMX_NONBONDED_DO_SR;

        /* Currently all group scheme kernels always calculate (shift-)forces */
        if (flags & GMX_FORCE_FORCES)
        {
            donb_flags |= GMX_NONBONDED_DO_FORCE;
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
        }
        if (flags & GMX_FORCE_ENERGY)
        {
            donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
        }
        if (flags & GMX_FORCE_DO_LR)
        {
            donb_flags |= GMX_NONBONDED_DO_LR;
        }

        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        do_nonbonded(fr, x, f, f_longrange, md, excl,
                     &enerd->grpp, nrnb,
                     lambda, dvdl_nb, -1, -1, donb_flags);

        /* If we do foreign lambda and we have soft-core interactions
         * we have to recalculate the (non-linear) energy contributions.
         * Without soft-core the energies are linear in lambda, so dV/dlambda
         * suffices; with soft-core each foreign lambda needs its own
         * evaluation.
         */
        if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
        {
            for (i = 0; i < enerd->n_lambda; i++)
            {
                real lam_i[efptNR];

                for (j = 0; j < efptNR; j++)
                {
                    lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
                }
                reset_foreign_enerdata(enerd);
                do_nonbonded(fr, x, f, f_longrange, md, excl,
                             &(enerd->foreign_grpp), nrnb,
                             lam_i, dvdl_dum, -1, -1,
                             (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
                sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
                enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
            }
        }
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
        where();
    }

    /* If we are doing GB, calculate bonded forces and apply corrections
     * to the solvation forces */
    /* MRS: Eventually, many need to include free energy contribution here! */
    if (ir->implicit_solvent)
    {
        wallcycle_sub_start(wcycle, ewcsLISTED);
        calc_gb_forces(cr, md, born, top, x, f, fr, idef,
                       ir->gb_algorithm, ir->sa_algorithm, nrnb, &pbc, graph, enerd);
        wallcycle_sub_stop(wcycle, ewcsLISTED);
    }

#ifdef GMX_MPI
    if (TAKETIME)
    {
        t1          = MPI_Wtime();
        fr->t_fnbf += t1-t0;
    }
#endif

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
    }
    else
    {
        enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
    }

    if (fepvals->sc_alpha != 0)
    {
        /* Even though the Coulomb part is linear, we already added it,
         * because we need to go through the vdw calculation anyway.
         */
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    debug_gmx();


    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
    }

    /* Shift the coordinates. Must be done before listed forces and PPPM,
     * but is also necessary for SHAKE and update, therefore it can NOT
     * go when no listed forces have to be evaluated.
     *
     * The shifting and PBC code is deliberately not timed, since with
     * the Verlet scheme it only takes non-zero time with triclinic
     * boxes, and even then the time is around a factor of 100 less
     * than the next smallest counter.
     */


    /* Here sometimes we would not need to shift with NBFonly,
     * but we do so anyhow for consistency of the returned coordinates.
     */
    if (graph)
    {
        shift_self(graph, box, x);
        if (TRICLINIC(box))
        {
            inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        }
        else
        {
            inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
        }
    }
    /* Check whether we need to do listed interactions or correct for exclusions */
    if (fr->bMolPBC &&
        ((flags & GMX_FORCE_LISTED)
         || EEL_RF(fr->eeltype) || EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype)))
    {
        /* TODO There are no electrostatics methods that require this
           transformation, when using the Verlet scheme, so update the
           above conditional. */
        /* Since all atoms are in the rectangular or triclinic unit-cell,
         * only single box vector shifts (2 in x) are required.
         */
        set_pbc_dd(&pbc, fr->ePBC, cr->dd, TRUE, box);
    }
    debug_gmx();

    do_force_listed(wcycle, box, ir->fepvals, cr->ms,
                    idef, (const rvec *) x, hist, f, fr,
                    &pbc, graph, enerd, nrnb, lambda, md, fcd,
                    DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL,
                    flags);

    where();

    *cycles_pme = 0;
    clear_mat(fr->vir_el_recip);
    clear_mat(fr->vir_lj_recip);

    /* Do long-range electrostatics and/or LJ-PME, including related short-range
     * corrections.
     */
    if (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype))
    {
        int  status            = 0;
        real Vlr_q             = 0, Vlr_lj = 0, Vcorr_q = 0, Vcorr_lj = 0;
        real dvdl_long_range_q = 0, dvdl_long_range_lj = 0;

        bSB = (ir->nwall == 2);
        if (bSB)
        {
            copy_mat(box, boxs);
            svmul(ir->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
            box_size[ZZ] *= ir->wall_ewald_zfac;
        }

        if (EEL_PME_EWALD(fr->eeltype) || EVDW_PME(fr->vdwtype))
        {
            real dvdl_long_range_correction_q   = 0;
            real dvdl_long_range_correction_lj  = 0;
            /* With the Verlet scheme exclusion forces are calculated
             * in the non-bonded kernel.
             */
            /* The TPI molecule does not have exclusions with the rest
             * of the system and no intra-molecular PME grid
             * contributions will be calculated in
             * gmx_pme_calc_energy.
             */
            if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
                ir->ewald_geometry != eewg3D ||
                ir->epsilon_surface != 0)
            {
                int nthreads, t;

                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                if (fr->n_tpi > 0)
                {
                    gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
                }

                nthreads = gmx_omp_nthreads_get(emntBonded);
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (t = 0; t < nthreads; t++)
                {
                    int     i;
                    rvec   *fnv;
                    tensor *vir_q, *vir_lj;
                    real   *Vcorrt_q, *Vcorrt_lj, *dvdlt_q, *dvdlt_lj;
                    if (t == 0)
                    {
                        fnv       = fr->f_novirsum;
                        vir_q     = &fr->vir_el_recip;
                        vir_lj    = &fr->vir_lj_recip;
                        Vcorrt_q  = &Vcorr_q;
                        Vcorrt_lj = &Vcorr_lj;
                        dvdlt_q   = &dvdl_long_range_correction_q;
                        dvdlt_lj  = &dvdl_long_range_correction_lj;
                    }
                    else
                    {
                        fnv       = fr->f_t[t].f;
                        vir_q     = &fr->f_t[t].vir_q;
                        vir_lj    = &fr->f_t[t].vir_lj;
                        Vcorrt_q  = &fr->f_t[t].Vcorr_q;
                        Vcorrt_lj = &fr->f_t[t].Vcorr_lj;
                        dvdlt_q   = &fr->f_t[t].dvdl[efptCOUL];
                        dvdlt_lj  = &fr->f_t[t].dvdl[efptVDW];
                        for (i = 0; i < fr->natoms_force; i++)
                        {
                            clear_rvec(fnv[i]);
                        }
                        clear_mat(*vir_q);
                        clear_mat(*vir_lj);
                    }
                    *dvdlt_q  = 0;
                    *dvdlt_lj = 0;

                    ewald_LRcorrection(fr->excl_load[t], fr->excl_load[t+1],
                                       cr, t, fr,
                                       md->chargeA, md->chargeB,
                                       md->sqrt_c6A, md->sqrt_c6B,
                                       md->sigmaA, md->sigmaB,
                                       md->sigma3A, md->sigma3B,
                                       md->nChargePerturbed || md->nTypePerturbed,
                                       ir->cutoff_scheme != ecutsVERLET,
                                       excl, x, bSB ? boxs : box, mu_tot,
                                       ir->ewald_geometry,
                                       ir->epsilon_surface,
                                       fnv, *vir_q, *vir_lj,
                                       Vcorrt_q, Vcorrt_lj,
                                       lambda[efptCOUL], lambda[efptVDW],
                                       dvdlt_q, dvdlt_lj);
                }
                if (nthreads > 1)
                {
                    reduce_thread_forces(fr->natoms_force, fr->f_novirsum,
                                         fr->vir_el_recip, fr->vir_lj_recip,
                                         &Vcorr_q, &Vcorr_lj,
                                         &dvdl_long_range_correction_q,
                                         &dvdl_long_range_correction_lj,
                                         nthreads, fr->f_t);
                }
                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            }

            if (EEL_PME_EWALD(fr->eeltype) && fr->n_tpi == 0)
            {
                /* This is not in a subcounter because it takes a
                   negligible and constant-sized amount of time */
                Vcorr_q += ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
                                                   &dvdl_long_range_correction_q,
                                                   fr->vir_el_recip);
            }

            enerd->dvdl_lin[efptCOUL] += dvdl_long_range_correction_q;
            enerd->dvdl_lin[efptVDW]  += dvdl_long_range_correction_lj;

            if ((EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype)) && (cr->duty & DUTY_PME))
            {
                /* Do reciprocal PME for Coulomb and/or LJ. */
                assert(fr->n_tpi >= 0);
                if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
                {
                    pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;
                    if (EEL_PME(fr->eeltype))
                    {
                        pme_flags     |= GMX_PME_DO_COULOMB;
                    }
                    if (EVDW_PME(fr->vdwtype))
                    {
                        pme_flags |= GMX_PME_DO_LJ;
                    }
                    if (flags & GMX_FORCE_FORCES)
                    {
                        pme_flags |= GMX_PME_CALC_F;
                    }
                    if (flags & GMX_FORCE_VIRIAL)
                    {
                        pme_flags |= GMX_PME_CALC_ENER_VIR;
                    }
                    if (fr->n_tpi > 0)
                    {
                        /* We don't calculate f, but we do want the potential */
                        pme_flags |= GMX_PME_CALC_POT;
                    }
                    wallcycle_start(wcycle, ewcPMEMESH);
                    status = gmx_pme_do(fr->pmedata,
                                        0, md->homenr - fr->n_tpi,
                                        x, fr->f_novirsum,
                                        md->chargeA, md->chargeB,
                                        md->sqrt_c6A, md->sqrt_c6B,
                                        md->sigmaA, md->sigmaB,
                                        bSB ? boxs : box, cr,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
                                        nrnb, wcycle,
                                        fr->vir_el_recip, fr->ewaldcoeff_q,
                                        fr->vir_lj_recip, fr->ewaldcoeff_lj,
                                        &Vlr_q, &Vlr_lj,
                                        lambda[efptCOUL], lambda[efptVDW],
                                        &dvdl_long_range_q, &dvdl_long_range_lj, pme_flags);
                    *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
                    if (status != 0)
                    {
                        gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
                    }
                    /* We should try to do as little computation after
                     * this as possible, because parallel PME synchronizes
                     * the nodes, so we want all load imbalance of the
                     * rest of the force calculation to be before the PME
                     * call.  DD load balancing is done on the whole time
                     * of the force call (without PME).
                     */
                }
                if (fr->n_tpi > 0)
                {
                    if (EVDW_PME(ir->vdwtype))
                    {
                        gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
                    }
                    /* Determine the PME grid energy of the test molecule
                     * with the PME grid potential of the other charges.
                     */
                    gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
                                        x + md->homenr - fr->n_tpi,
                                        md->chargeA + md->homenr - fr->n_tpi,
                                        &Vlr_q);
                }
            }
        }

        if (!EEL_PME(fr->eeltype) && EEL_PME_EWALD(fr->eeltype))
        {
            Vlr_q = do_ewald(ir, x, fr->f_novirsum,
                             md->chargeA, md->chargeB,
                             box_size, cr, md->homenr,
                             fr->vir_el_recip, fr->ewaldcoeff_q,
                             lambda[efptCOUL], &dvdl_long_range_q, fr->ewald_table);
        }

        /* Note that with separate PME nodes we get the real energies later */
        enerd->dvdl_lin[efptCOUL] += dvdl_long_range_q;
        enerd->dvdl_lin[efptVDW]  += dvdl_long_range_lj;
        enerd->term[F_COUL_RECIP]  = Vlr_q + Vcorr_q;
        enerd->term[F_LJ_RECIP]    = Vlr_lj + Vcorr_lj;
        if (debug)
        {
            fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
                    Vlr_q, Vcorr_q, enerd->term[F_COUL_RECIP]);
            pr_rvecs(debug, 0, "vir_el_recip after corr", fr->vir_el_recip, DIM);
            pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
            fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
                    Vlr_lj, Vcorr_lj, enerd->term[F_LJ_RECIP]);
            pr_rvecs(debug, 0, "vir_lj_recip after corr", fr->vir_lj_recip, DIM);
        }
    }
    else
    {
        /* Is there a reaction-field exclusion correction needed? */
        if (EEL_RF(fr->eeltype) && eelRF_NEC != fr->eeltype)
        {
            /* With the Verlet scheme, exclusion forces are calculated
             * in the non-bonded kernel.
             */
            if (ir->cutoff_scheme != ecutsVERLET)
            {
                real dvdl_rf_excl      = 0;
                enerd->term[F_RF_EXCL] =
                    RF_excl_correction(fr, graph, md, excl, x, f,
                                       fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);

                enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
            }
        }
    }
    where();
    debug_gmx();

    if (debug)
    {
        print_nrnb(debug, nrnb);
    }
    debug_gmx();

#ifdef GMX_MPI
    if (TAKETIME)
    {
        t2 = MPI_Wtime();
        MPI_Barrier(cr->mpi_comm_mygroup);
        t3          = MPI_Wtime();
        fr->t_wait += t3-t2;
        if (fr->timesteps == 11)
        {
            char buf[22];
            fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
                    cr->nodeid, gmx_step_str(fr->timesteps, buf),
                    100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
                    (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
        }
        fr->timesteps++;
    }
#endif

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
    }

}
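
gmx_pme_do() above is steered by a bitmask assembled from independent flags, so a caller can request spreading, solving, Coulomb and/or LJ grids, forces and the energy/virial separately. A sketch of the pattern with hypothetical enum values standing in for the GMX_PME_* constants used above:

enum PmeFlag
{
    PmeSpread      = 1 << 0, /* spread coefficients onto the grid */
    PmeSolve       = 1 << 1, /* solve in reciprocal space */
    PmeDoCoulomb   = 1 << 2,
    PmeDoLJ        = 1 << 3,
    PmeCalcForces  = 1 << 4,
    PmeCalcEnerVir = 1 << 5
};

static int makePmeFlags(bool doCoulomb, bool doLJ,
                        bool wantForces, bool wantEnerVir)
{
    int flags = PmeSpread | PmeSolve; /* always spread and solve */
    if (doCoulomb)   { flags |= PmeDoCoulomb; }
    if (doLJ)        { flags |= PmeDoLJ; }
    if (wantForces)  { flags |= PmeCalcForces; }
    if (wantEnerVir) { flags |= PmeCalcEnerVir; }
    return flags;
}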
Code example #11
File: perf_est.cpp Project: mpharrigan/gromacs
float pme_load_estimate(const gmx_mtop_t *mtop, const t_inputrec *ir,
                        matrix box)
{
    int            nq_tot, nlj_tot, f;
    gmx_bool       bChargePerturbed, bTypePerturbed;
    double         cost_bond, cost_pp, cost_redist, cost_spread, cost_fft, cost_solve, cost_pme;
    float          ratio;

    /* Computational cost of bonded, non-bonded and PME calculations.
     * This will be machine dependent.
     * The numbers here are accurate for Intel Core2 and AMD Athlon 64
     * in single precision. In double precision PME mesh is slightly cheaper,
     * although not so much that the numbers need to be adjusted.
     */

    cost_bond = C_BOND*n_bonded_dx(mtop, TRUE);

    if (ir->cutoff_scheme == ecutsGROUP)
    {
        pp_group_load(mtop, ir, box,
                      &nq_tot, &nlj_tot, &cost_pp,
                      &bChargePerturbed, &bTypePerturbed);
    }
    else
    {
        pp_verlet_load(mtop, ir, box,
                       &nq_tot, &nlj_tot, &cost_pp,
                       &bChargePerturbed, &bTypePerturbed);
    }

    cost_redist = 0;
    cost_spread = 0;
    cost_fft    = 0;
    cost_solve  = 0;

    if (EEL_PME(ir->coulombtype))
    {
        f            = ((ir->efep != efepNO && bChargePerturbed) ? 2 : 1);
        cost_redist +=   C_PME_REDIST*nq_tot;
        cost_spread += f*C_PME_SPREAD*nq_tot*std::pow(static_cast<real>(ir->pme_order), static_cast<real>(3.0));
        cost_fft    += f*C_PME_FFT*ir->nkx*ir->nky*ir->nkz*std::log(static_cast<real>(ir->nkx*ir->nky*ir->nkz));
        cost_solve  += f*C_PME_SOLVE*ir->nkx*ir->nky*ir->nkz;
    }

    if (EVDW_PME(ir->vdwtype))
    {
        f            = ((ir->efep != efepNO && bTypePerturbed) ? 2 : 1);
        if (ir->ljpme_combination_rule == eljpmeLB)
        {
            /* LB combination rule: we have 7 mesh terms */
            f       *= 7;
        }
        cost_redist +=   C_PME_REDIST*nlj_tot;
        cost_spread += f*C_PME_SPREAD*nlj_tot*std::pow(static_cast<real>(ir->pme_order), static_cast<real>(3.0));
        cost_fft    += f*C_PME_FFT*ir->nkx*ir->nky*ir->nkz*std::log(static_cast<real>(ir->nkx*ir->nky*ir->nkz));
        cost_solve  += f*C_PME_SOLVE*ir->nkx*ir->nky*ir->nkz;
    }

    cost_pme = cost_redist + cost_spread + cost_fft + cost_solve;

    ratio = cost_pme/(cost_bond + cost_pp + cost_pme);

    if (debug)
    {
        fprintf(debug,
                "cost_bond   %f\n"
                "cost_pp     %f\n"
                "cost_redist %f\n"
                "cost_spread %f\n"
                "cost_fft    %f\n"
                "cost_solve  %f\n",
                cost_bond, cost_pp, cost_redist, cost_spread, cost_fft, cost_solve);

        fprintf(debug, "Estimate for relative PME load: %.3f\n", ratio);
    }

    return ratio;
}
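The returned ratio is the estimated fraction of the total (bonded + short-range + mesh) cost spent in PME. GROMACS uses this estimate when choosing how many dedicated PME ranks to start; the sketch below only illustrates the core idea, with an invented helper name and a clamping policy that is an assumption rather than the actual heuristic:

/* Hypothetical helper: turn the estimated PME load fraction into a
 * PME-only rank count. The policy here is illustrative only.
 */
static int naive_npme_guess(int nrank_total, float pme_load)
{
    int npme = static_cast<int>(nrank_total*pme_load + 0.5f);

    /* Don't dedicate more than half of all ranks to the mesh part */
    if (npme > nrank_total/2)
    {
        npme = nrank_total/2;
    }
    return npme;
}

With 16 ranks and an estimated load of 0.25, this would suggest 4 PME-only ranks.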
Code example #12
File: perf_est.cpp  Project: mpharrigan/gromacs
static void pp_verlet_load(const gmx_mtop_t *mtop, const t_inputrec *ir,
                           matrix box,
                           int *nq_tot, int *nlj_tot,
                           double *cost_pp,
                           gmx_bool *bChargePerturbed, gmx_bool *bTypePerturbed)
{
    t_atom        *atom;
    int            mb, nmol, atnr, a, nqlj, nq, nlj;
    gmx_bool       bQRF;
    t_iparams     *iparams;
    gmx_moltype_t *molt;
    real           r_eff;
    double         c_qlj, c_q, c_lj;
    double         nat;
    /* Conversion factor for reference vs SIMD kernel performance.
     * The factor is about right for SSE2/4, but should be a factor of 2 higher for AVX-256.
     */
#ifdef GMX_DOUBLE
    const real     nbnxn_refkernel_fac = 4.0;
#else
    const real     nbnxn_refkernel_fac = 8.0;
#endif

    bQRF = (EEL_RF(ir->coulombtype) || ir->coulombtype == eelCUT);

    iparams           = mtop->ffparams.iparams;
    atnr              = mtop->ffparams.atnr;
    nqlj              = 0;
    nq                = 0;
    *bChargePerturbed = FALSE;
    *bTypePerturbed   = FALSE;
    for (mb = 0; mb < mtop->nmolblock; mb++)
    {
        molt = &mtop->moltype[mtop->molblock[mb].type];
        atom = molt->atoms.atom;
        nmol = mtop->molblock[mb].nmol;
        for (a = 0; a < molt->atoms.nr; a++)
        {
            if (atom[a].q != 0 || atom[a].qB != 0)
            {
                if (iparams[(atnr+1)*atom[a].type].lj.c6  != 0 ||
                    iparams[(atnr+1)*atom[a].type].lj.c12 != 0)
                {
                    nqlj += nmol;
                }
                else
                {
                    nq += nmol;
                }
            }
            if (atom[a].q != atom[a].qB)
            {
                *bChargePerturbed = TRUE;
            }
            if (atom[a].type != atom[a].typeB)
            {
                *bTypePerturbed = TRUE;
            }
        }
    }

    nlj = mtop->natoms - nqlj - nq;

    *nq_tot  = nqlj + nq;
    *nlj_tot = nqlj + nlj;

    /* Effective cut-off for cluster pair list of 4x4 atoms */
    r_eff = ir->rlist + nbnxn_get_rlist_effective_inc(NBNXN_CPU_CLUSTER_I_SIZE, mtop->natoms/det(box));

    if (debug)
    {
        fprintf(debug, "nqlj %d nq %d nlj %d rlist %.3f r_eff %.3f\n",
                nqlj, nq, nlj, ir->rlist, r_eff);
    }

    /* Determine the cost per pair interaction */
    c_qlj = (bQRF ? C_VT_QRF_LJ : C_VT_QEXP_LJ);
    c_q   = (bQRF ? C_VT_QRF    : C_VT_QEXP);
    c_lj  = C_VT_LJ;
    if (ir->vdw_modifier == eintmodPOTSWITCH || EVDW_PME(ir->vdwtype))
    {
        c_qlj += C_VT_LJEXP_ADD;
        c_lj  += C_VT_LJEXP_ADD;
    }
    if (EVDW_PME(ir->vdwtype) && ir->ljpme_combination_rule == eljpmeLB)
    {
        /* We don't have LJ-PME LB comb. rule kernels, we use slow kernels */
        c_qlj *= nbnxn_refkernel_fac;
        c_q   *= nbnxn_refkernel_fac;
        c_lj  *= nbnxn_refkernel_fac;
    }

    /* For the PP non-bonded cost it is (unrealistically) assumed
     * that all atoms are distributed homogeneously in space.
     */
    /* Convert mtop->natoms to double to avoid int overflow */
    nat      = mtop->natoms;
    *cost_pp = 0.5*nat*(nqlj*c_qlj + nq*c_q + nlj*c_lj)
               *4/3*M_PI*r_eff*r_eff*r_eff/det(box);
}
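The cost_pp expression above is simply an expected pair count times a per-pair cost: with a homogeneous density nat/det(box), each atom sees (4/3)*pi*r_eff^3*nat/V neighbours within the effective cut-off, and the factor 0.5 removes double counting. A self-contained numeric sketch (all input values are invented for illustration):

#include <cmath>
#include <cstdio>

int main()
{
    const double nat   = 24000;  /* number of atoms (assumed)         */
    const double vol   = 240.0;  /* box volume in nm^3 (assumed)      */
    const double r_eff = 1.03;   /* effective pair-list cut-off in nm */

    /* Expected neighbours per atom for a homogeneous density nat/vol */
    double pairs_per_atom = 4.0/3.0*M_PI*r_eff*r_eff*r_eff*nat/vol;

    /* Total unique pairs; the 0.5 avoids counting each pair twice */
    double npairs = 0.5*nat*pairs_per_atom;

    std::printf("~%.0f neighbours/atom, ~%.3g pairs in the list\n",
                pairs_per_atom, npairs);
    return 0;
}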
Code example #13
File: runner.cpp  Project: GaganDhanoa/plumed2
int mdrunner(gmx_hw_opt_t *hw_opt,
             FILE *fplog, t_commrec *cr, int nfile,
             const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
             gmx_bool bCompact, int nstglobalcomm,
             ivec ddxyz, int dd_node_order, real rdd, real rconstr,
             const char *dddlb_opt, real dlb_scale,
             const char *ddcsx, const char *ddcsy, const char *ddcsz,
             const char *nbpu_opt, int nstlist_cmdline,
             gmx_int64_t nsteps_cmdline, int nstepout, int resetstep,
             int gmx_unused nmultisim, int repl_ex_nst, int repl_ex_nex,
             int repl_ex_seed, real pforce, real cpt_period, real max_hours,
             int imdport, unsigned long Flags)
{
    gmx_bool                  bForceUseGPU, bTryUseGPU, bRerunMD;
    t_inputrec               *inputrec;
    t_state                  *state = NULL;
    matrix                    box;
    gmx_ddbox_t               ddbox = {0};
    int                       npme_major, npme_minor;
    t_nrnb                   *nrnb;
    gmx_mtop_t               *mtop          = NULL;
    t_mdatoms                *mdatoms       = NULL;
    t_forcerec               *fr            = NULL;
    t_fcdata                 *fcd           = NULL;
    real                      ewaldcoeff_q  = 0;
    real                      ewaldcoeff_lj = 0;
    struct gmx_pme_t        **pmedata       = NULL;
    gmx_vsite_t              *vsite         = NULL;
    gmx_constr_t              constr;
    int                       nChargePerturbed = -1, nTypePerturbed = 0, status;
    gmx_wallcycle_t           wcycle;
    gmx_bool                  bReadEkin;
    gmx_walltime_accounting_t walltime_accounting = NULL;
    int                       rc;
    gmx_int64_t               reset_counters;
    gmx_edsam_t               ed           = NULL;
    int                       nthreads_pme = 1;
    int                       nthreads_pp  = 1;
    gmx_membed_t              membed       = NULL;
    gmx_hw_info_t            *hwinfo       = NULL;
    /* The master rank decides early on bUseGPU and broadcasts this later */
    gmx_bool                  bUseGPU      = FALSE;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    snew(inputrec, 1);
    snew(mtop, 1);

    if (Flags & MD_APPENDFILES)
    {
        fplog = NULL;
    }

    bRerunMD     = (Flags & MD_RERUN);
    bForceUseGPU = (strncmp(nbpu_opt, "gpu", 3) == 0);
    bTryUseGPU   = (strncmp(nbpu_opt, "auto", 4) == 0) || bForceUseGPU;

    /* Detect hardware, gather information. This is an operation that is
     * global for this process (MPI rank). */
    hwinfo = gmx_detect_hardware(fplog, cr, bTryUseGPU);

    gmx_print_detected_hardware(fplog, cr, hwinfo);

    if (fplog != NULL)
    {
        /* Print references after all software/hardware printing */
        please_cite(fplog, "Abraham2015");
        please_cite(fplog, "Pall2015");
        please_cite(fplog, "Pronk2013");
        please_cite(fplog, "Hess2008b");
        please_cite(fplog, "Spoel2005a");
        please_cite(fplog, "Lindahl2001a");
        please_cite(fplog, "Berendsen95a");
    }

    snew(state, 1);
    if (SIMMASTER(cr))
    {
        /* Read (nearly) all data required for the simulation */
        read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, state, NULL, mtop);

        if (inputrec->cutoff_scheme == ecutsVERLET)
        {
            /* Here the master rank decides if all ranks will use GPUs */
            bUseGPU = (hwinfo->gpu_info.n_dev_compatible > 0 ||
                       getenv("GMX_EMULATE_GPU") != NULL);

            /* TODO add GPU kernels for this and replace this check by:
             * (bUseGPU && (ir->vdwtype == evdwPME &&
             *               ir->ljpme_combination_rule == eljpmeLB))
             * update the message text and the content of nbnxn_acceleration_supported.
             */
            if (bUseGPU &&
                !nbnxn_gpu_acceleration_supported(fplog, cr, inputrec, bRerunMD))
            {
                /* Fallback message printed by nbnxn_acceleration_supported */
                if (bForceUseGPU)
                {
                    gmx_fatal(FARGS, "GPU acceleration requested, but not supported with the given input settings");
                }
                bUseGPU = FALSE;
            }

            prepare_verlet_scheme(fplog, cr,
                                  inputrec, nstlist_cmdline, mtop, state->box,
                                  bUseGPU);
        }
        else
        {
            if (nstlist_cmdline > 0)
            {
                gmx_fatal(FARGS, "Can not set nstlist with the group cut-off scheme");
            }

            if (hwinfo->gpu_info.n_dev_compatible > 0)
            {
                md_print_warn(cr, fplog,
                              "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
                              "      To use a GPU, set the mdp option: cutoff-scheme = Verlet\n");
            }

            if (bForceUseGPU)
            {
                gmx_fatal(FARGS, "GPU requested, but can't be used without cutoff-scheme=Verlet");
            }

#ifdef GMX_TARGET_BGQ
            md_print_warn(cr, fplog,
                          "NOTE: There is no SIMD implementation of the group scheme kernels on\n"
                          "      BlueGene/Q. You will observe better performance from using the\n"
                          "      Verlet cut-off scheme.\n");
#endif
        }

        if (inputrec->eI == eiSD2)
        {
            md_print_warn(cr, fplog, "The stochastic dynamics integrator %s is deprecated, since\n"
                          "it is slower than integrator %s and is slightly less accurate\n"
                          "with constraints. Use the %s integrator.",
                          ei_names[inputrec->eI], ei_names[eiSD1], ei_names[eiSD1]);
        }
    }

    /* Check and update the hardware options for internal consistency */
    check_and_update_hw_opt_1(hw_opt, cr);

    /* Early check for externally set process affinity. */
    gmx_check_thread_affinity_set(fplog, cr,
                                  hw_opt, hwinfo->nthreads_hw_avail, FALSE);

#ifdef GMX_THREAD_MPI
    if (SIMMASTER(cr))
    {
        if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
        }

        /* Since the master knows the cut-off scheme, update hw_opt for this.
         * This is done later for normal MPI and also once more with tMPI
         * for all tMPI ranks.
         */
        check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme);

        /* NOW the threads will be started: */
        hw_opt->nthreads_tmpi = get_nthreads_mpi(hwinfo,
                                                 hw_opt,
                                                 inputrec, mtop,
                                                 cr, fplog, bUseGPU);

        if (hw_opt->nthreads_tmpi > 1)
        {
            t_commrec *cr_old       = cr;
            /* now start the threads. */
            cr = mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm,
                                        oenv, bVerbose, bCompact, nstglobalcomm,
                                        ddxyz, dd_node_order, rdd, rconstr,
                                        dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
                                        nbpu_opt, nstlist_cmdline,
                                        nsteps_cmdline, nstepout, resetstep, nmultisim,
                                        repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
                                        cpt_period, max_hours,
                                        Flags);
            /* the main thread continues here with a new cr. We don't deallocate
               the old cr because other threads may still be reading it. */
            if (cr == NULL)
            {
                gmx_comm("Failed to spawn threads");
            }
        }
    }
#endif
    /* END OF CAUTION: cr is now reliable */

    /* g_membed initialisation.
     * Because we change the mtop, init_membed is called before init_parallel
     * (in case we ever want to make it run in parallel).
     */
    if (opt2bSet("-membed", nfile, fnm))
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "Initializing membed");
        }
        membed = init_membed(fplog, nfile, fnm, mtop, inputrec, state, cr, &cpt_period);
    }

    if (PAR(cr))
    {
        /* now broadcast everything to the non-master nodes/threads: */
        init_parallel(cr, inputrec, mtop);

        /* The master rank decided on the use of GPUs,
         * broadcast this information to all ranks.
         */
        gmx_bcast_sim(sizeof(bUseGPU), &bUseGPU, cr);
    }

    if (fplog != NULL)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
        fprintf(fplog, "\n");
    }

    /* now make sure the state is initialized and propagated */
    set_state_entries(state, inputrec);

    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) &&
        (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
    {
        gmx_fatal(FARGS,
                  "The -dd or -npme option request a parallel simulation, "
#ifndef GMX_MPI
                  "but %s was compiled without threads or MPI enabled"
#else
#ifdef GMX_THREAD_MPI
                  "but the number of threads (option -nt) is 1"
#else
                  "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
#endif
#endif
                  , output_env_get_program_display_name(oenv)
                  );
    }

    if (bRerunMD &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy mininization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }

    if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
    {
        gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
    }

    if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
    {
        if (cr->npmenodes > 0)
        {
            gmx_fatal_collective(FARGS, cr, NULL,
                                 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
        }

        cr->npmenodes = 0;
    }

    if (bUseGPU && cr->npmenodes < 0)
    {
        /* With GPUs we don't automatically use PME-only ranks. PME ranks can
         * improve performance with many threads per GPU, since our OpenMP
         * scaling is bad, but it's difficult to automate the setup.
         */
        cr->npmenodes = 0;
    }

#ifdef GMX_FAHCORE
    if (MASTER(cr))
    {
        fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
    }
#endif

    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For a proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */
    snew(fcd, 1);

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, mtop, inputrec, cr, fcd, state, repl_ex_nst > 0);

    init_orires(fplog, mtop, state->x, inputrec, cr, &(fcd->orires),
                state);

    if (DEFORM(*inputrec))
    {
        /* Store the deform reference box before reading the checkpoint */
        if (SIMMASTER(cr))
        {
            copy_mat(state->box, box);
        }
        if (PAR(cr))
        {
            gmx_bcast(sizeof(box), box, cr);
        }
        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
    }

    if (opt2bSet("-cpi", nfile, fnm))
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        if (gmx_fexist_master(opt2fn_master("-cpi", nfile, fnm, cr), cr) )
        {
            load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                            cr, ddxyz,
                            inputrec, state, &bReadEkin,
                            (Flags & MD_APPENDFILES),
                            (Flags & MD_APPENDFILESSET));

            if (bReadEkin)
            {
                Flags |= MD_READ_EKIN;
            }
        }
    }

    if (MASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr,
                     Flags, &fplog);
    }

    /* override nsteps with value from cmdline */
    override_nsteps_cmdline(fplog, nsteps_cmdline, inputrec, cr);

    if (SIMMASTER(cr))
    {
        copy_mat(state->box, box);
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(box), box, cr);
    }

    /* Essential dynamics */
    if (opt2bSet("-ei", nfile, fnm))
    {
        /* Open input and output files, allocate space for ED data structure */
        ed = ed_open(mtop->natoms, &state->edsamstate, nfile, fnm, Flags, oenv, cr);
    }

    if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        cr->dd = init_domain_decomposition(fplog, cr, Flags, ddxyz, rdd, rconstr,
                                           dddlb_opt, dlb_scale,
                                           ddcsx, ddcsy, ddcsz,
                                           mtop, inputrec,
                                           box, state->x,
                                           &ddbox, &npme_major, &npme_minor);

        make_dd_communicators(fplog, cr, dd_node_order);

        /* Set overallocation to avoid frequent reallocation of arrays */
        set_over_alloc_dd(TRUE);
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->npmenodes = 0;
        cr->duty      = (DUTY_PP | DUTY_PME);
        npme_major    = 1;
        npme_minor    = 1;

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);
        }
    }

    if (PAR(cr))
    {
        /* After possible communicator splitting in make_dd_communicators,
         * we can set up the intra/inter node communication.
         */
        gmx_setup_nodecomm(fplog, cr);
    }

    /* Initialize per-physical-node MPI process/thread ID and counters. */
    gmx_init_intranode_counters(cr);
#ifdef GMX_MPI
    if (MULTISIM(cr))
    {
        md_print_info(cr, fplog,
                      "This is simulation %d out of %d running as a composite GROMACS\n"
                      "multi-simulation job. Setup for this simulation:\n\n",
                      cr->ms->sim, cr->ms->nsim);
    }
    md_print_info(cr, fplog, "Using %d MPI %s\n",
                  cr->nnodes,
#ifdef GMX_THREAD_MPI
                  cr->nnodes == 1 ? "thread" : "threads"
#else
                  cr->nnodes == 1 ? "process" : "processes"
#endif
                  );
    fflush(stderr);
#endif

    /* Check and update hw_opt for the cut-off scheme */
    check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme);

    /* Check and update hw_opt for the number of MPI ranks */
    check_and_update_hw_opt_3(hw_opt);

    gmx_omp_nthreads_init(fplog, cr,
                          hwinfo->nthreads_hw_avail,
                          hw_opt->nthreads_omp,
                          hw_opt->nthreads_omp_pme,
                          (cr->duty & DUTY_PP) == 0,
                          inputrec->cutoff_scheme == ecutsVERLET);

#ifndef NDEBUG
    if (integrator[inputrec->eI].func != do_tpi &&
        inputrec->cutoff_scheme == ecutsVERLET)
    {
        gmx_feenableexcept();
    }
#endif

    if (bUseGPU)
    {
        /* Select GPU id's to use */
        gmx_select_gpu_ids(fplog, cr, &hwinfo->gpu_info, bForceUseGPU,
                           &hw_opt->gpu_opt);
    }
    else
    {
        /* Ignore (potentially) manually selected GPUs */
        hw_opt->gpu_opt.n_dev_use = 0;
    }

    /* check consistency across ranks of things like SIMD
     * support and number of GPUs selected */
    gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt, bUseGPU);

    /* Now that we know the setup is consistent, check for efficiency */
    check_resource_division_efficiency(hwinfo, hw_opt, Flags & MD_NTOMPSET,
                                       cr, fplog);

    if (DOMAINDECOMP(cr))
    {
        /* When we share GPUs over ranks, we need to know this for the DLB */
        dd_setup_dlb_resource_sharing(cr, hwinfo, hw_opt);
    }

    /* Get the number of PP/PME threads.
     * The PME env. variable should be read on only one node,
     * to make sure it is identical everywhere.
     */
    /* TODO nthreads_pp is only used for pinning threads.
     * This is a temporary solution until we have a hw topology library.
     */
    nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
    nthreads_pme = gmx_omp_nthreads_get(emntPME);

    wcycle = wallcycle_init(fplog, resetstep, cr, nthreads_pp, nthreads_pme);

    if (PAR(cr))
    {
        /* Master synchronizes its value of reset_counters with all nodes
         * including PME only nodes */
        reset_counters = wcycle_get_reset_counters(wcycle);
        gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
        wcycle_set_reset_counters(wcycle, reset_counters);
    }

    snew(nrnb, 1);
    if (cr->duty & DUTY_PP)
    {
        bcast_state(cr, state);

        /* Initiate forcerecord */
        fr          = mk_forcerec();
        fr->hwinfo  = hwinfo;
        fr->gpu_opt = &hw_opt->gpu_opt;
        init_forcerec(fplog, oenv, fr, fcd, inputrec, mtop, cr, box,
                      opt2fn("-table", nfile, fnm),
                      opt2fn("-tabletf", nfile, fnm),
                      opt2fn("-tablep", nfile, fnm),
                      opt2fn("-tableb", nfile, fnm),
                      nbpu_opt,
                      FALSE,
                      pforce);

        /* version for PCA_NOT_READ_NODE (see md.c) */
        /*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE,
           "nofile","nofile","nofile","nofile",FALSE,pforce);
         */

        /* Initialize QM-MM */
        if (fr->bQMMM)
        {
            init_QMMMrec(cr, mtop, inputrec, fr);
        }

        /* Initialize the mdatoms structure.
         * mdatoms is not filled with atom data,
         * as this cannot yet be done with domain decomposition.
         */
        mdatoms = init_mdatoms(fplog, mtop, inputrec->efep != efepNO);

        /* Initialize the virtual site communication */
        vsite = init_vsite(mtop, cr, FALSE);

        calc_shifts(box, fr->shift_vec);

        /* With periodic molecules the charge groups should be whole at start up
         * and the virtual sites should not be far from their proper positions.
         */
        if (!inputrec->bContinuation && MASTER(cr) &&
            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
        {
            /* Make molecules whole at start of run */
            if (fr->ePBC != epbcNONE)
            {
                do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, state->x);
            }
            if (vsite)
            {
                /* Correct initial vsite positions are required
                 * for the initial distribution in the domain decomposition
                 * and for the initial shell prediction.
                 */
                construct_vsites_mtop(vsite, mtop, state->x);
            }
        }

        if (EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype))
        {
            ewaldcoeff_q  = fr->ewaldcoeff_q;
            ewaldcoeff_lj = fr->ewaldcoeff_lj;
            pmedata       = &fr->pmedata;
        }
        else
        {
            pmedata = NULL;
        }
    }
    else
    {
        /* This is a PME only node */

        /* We don't need the state */
        done_state(state);

        ewaldcoeff_q  = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
        ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
        snew(pmedata, 1);
    }

    if (hw_opt->thread_affinity != threadaffOFF)
    {
        /* Before setting affinity, check whether the affinity has changed
         * (which would indicate that the OpenMP library has probably
         * changed it since we first checked).
         */
        gmx_check_thread_affinity_set(fplog, cr,
                                      hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        /* Set the CPU affinity */
        gmx_set_thread_affinity(fplog, cr, hw_opt, hwinfo);
    }

    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
    {
        if (mdatoms)
        {
            nChargePerturbed = mdatoms->nChargePerturbed;
            if (EVDW_PME(inputrec->vdwtype))
            {
                nTypePerturbed   = mdatoms->nTypePerturbed;
            }
        }
        if (cr->npmenodes > 0)
        {
            /* The PME-only nodes need to know nChargePerturbed (FEP on Q) and nTypePerturbed (FEP on LJ) */
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
            gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
        }

        if (cr->duty & DUTY_PME)
        {
            status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
                                  mtop ? mtop->natoms : 0, nChargePerturbed, nTypePerturbed,
                                  (Flags & MD_REPRODUCIBLE), nthreads_pme);
            if (status != 0)
            {
                gmx_fatal(FARGS, "Error %d initializing PME", status);
            }
        }
    }


    if (integrator[inputrec->eI].func == do_md)
    {
        /* Turn on signal handling on all nodes.
         * A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.
         */
        signal_handler_install();
    }

    if (cr->duty & DUTY_PP)
    {
        /* Assumes uniform use of the number of OpenMP threads */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));

        if (inputrec->bPull)
        {
            /* Initialize pull code */
            inputrec->pull_work =
                init_pull(fplog, inputrec->pull, inputrec, nfile, fnm,
                          mtop, cr, oenv, inputrec->fepvals->init_lambda,
                          EI_DYNAMICS(inputrec->eI) && MASTER(cr), Flags);
        }

        if (inputrec->bRot)
        {
            /* Initialize enforced rotation code */
            init_rot(fplog, inputrec, nfile, fnm, cr, state->x, box, mtop, oenv,
                     bVerbose, Flags);
        }

        if (inputrec->eSwapCoords != eswapNO)
        {
            /* Initialize ion swapping code */
            init_swapcoords(fplog, bVerbose, inputrec, opt2fn_master("-swap", nfile, fnm, cr),
                            mtop, state->x, state->box, &state->swapstate, cr, oenv, Flags);
        }

        constr = init_constraints(fplog, mtop, inputrec, ed, state, cr);

        if (DOMAINDECOMP(cr))
        {
            GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
            dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
                            Flags & MD_DDBONDCHECK, fr->cginfo_mb);

            set_dd_parameters(fplog, cr->dd, dlb_scale, inputrec, &ddbox);

            setup_dd_grid(fplog, cr->dd);
        }

        /* Now do whatever the user wants us to do (how flexible...) */
        integrator[inputrec->eI].func(fplog, cr, nfile, fnm,
                                      oenv, bVerbose, bCompact,
                                      nstglobalcomm,
                                      vsite, constr,
                                      nstepout, inputrec, mtop,
                                      fcd, state,
                                      mdatoms, nrnb, wcycle, ed, fr,
                                      repl_ex_nst, repl_ex_nex, repl_ex_seed,
                                      membed,
                                      cpt_period, max_hours,
                                      imdport,
                                      Flags,
                                      walltime_accounting);

        if (inputrec->bPull)
        {
            finish_pull(inputrec->pull_work);
        }

        if (inputrec->bRot)
        {
            finish_rot(inputrec->rot);
        }

    }
    else
    {
        GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
        /* do PME only */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
        gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting, ewaldcoeff_q, ewaldcoeff_lj, inputrec);
    }

    wallcycle_stop(wcycle, ewcRUN);

    /* Finish up and write some output.
     * With rerun MD, don't write the last frame again.
     */
    finish_run(fplog, cr,
               inputrec, nrnb, wcycle, walltime_accounting,
               fr ? fr->nbv : NULL,
               EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));


    /* Free GPU memory and context */
    free_gpu_resources(fr, cr, &hwinfo->gpu_info, fr ? fr->gpu_opt : NULL);

    if (opt2bSet("-membed", nfile, fnm))
    {
        sfree(membed);
    }

    gmx_hardware_info_free(hwinfo);

    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
    walltime_accounting_destroy(walltime_accounting);

    /* PLUMED */
    if (plumedswitch)
    {
        plumed_finalize(plumedmain);
    }
    /* END PLUMED */

    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_close(fplog);
    }

    rc = (int)gmx_get_stop_condition();

    done_ed(&ed);

#ifdef GMX_THREAD_MPI
    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that. */
    if (PAR(cr) && MASTER(cr))
    {
        tMPI_Finalize();
    }
#endif

    return rc;
}
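Structurally, most of mdrunner branches on the rank's duty bitmask: a rank carries DUTY_PP, DUTY_PME, or both, and ranks without PP duty go straight to gmx_pmeonly. A minimal self-contained sketch of that dispatch pattern (the flag values and print statements are illustrative, not GROMACS' actual definitions):

#include <cstdio>

/* Illustrative duty flags; GROMACS defines its own constants elsewhere */
enum { DUTY_PP = 1 << 0, DUTY_PME = 1 << 1 };

static void run_rank(unsigned int duty)
{
    if (duty & DUTY_PP)
    {
        /* particle-particle duty: pair search, bondeds, integration */
        std::printf("rank runs the integrator (PP, possibly also PME)\n");
    }
    else
    {
        /* mesh-only duty: spread, 3D FFT, solve, gather */
        std::printf("rank runs the PME-only loop\n");
    }
}

int main()
{
    run_rank(DUTY_PP | DUTY_PME); /* no dedicated PME ranks    */
    run_rank(DUTY_PME);           /* a dedicated PME-only rank */
    return 0;
}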