/* Read in the tpr file and save the settings we need later in info.
 *
 * The values taken from the original tpr are stored in slot [0] of the
 * per-setting arrays in info; later tuning passes fill the other slots
 * with trial values.
 *
 * fn_sim_tpr - path of the simulation tpr file to read
 * info       - output: tuning bookkeeping structure (slot [0] filled here)
 * state/mtop/ir - outputs filled by read_tpx_state()
 * user_beta  - if > 0, overrides the Ewald splitting parameter
 * fracself   - fraction of self-interactions to consider (stored verbatim)
 *
 * Fatal errors: aborts unless the run uses PME and rcoulomb == rlist.
 */
static void read_tpr_file(const char *fn_sim_tpr, t_inputinfo *info, t_state *state,
                          gmx_mtop_t *mtop, t_inputrec *ir, real user_beta, real fracself)
{
    read_tpx_state(fn_sim_tpr, ir, state, NULL, mtop);

    /* The values of the original tpr input file are saved in the first
     * place [0] of the arrays */
    info->orig_sim_steps = ir->nsteps;
    info->pme_order[0]   = ir->pme_order;
    info->rcoulomb[0]    = ir->rcoulomb;
    info->rvdw[0]        = ir->rvdw;
    info->nkx[0]         = ir->nkx;
    info->nky[0]         = ir->nky;
    info->nkz[0]         = ir->nkz;
    info->ewald_rtol[0]  = ir->ewald_rtol;
    info->fracself       = fracself;

    if (user_beta > 0)
    {
        info->ewald_beta[0] = user_beta;
    }
    else
    {
        info->ewald_beta[0] = calc_ewaldcoeff(info->rcoulomb[0], info->ewald_rtol[0]);
    }

    /* Check if PME was chosen */
    if (!EEL_PME(ir->coulombtype))
    {
        gmx_fatal(FARGS, "Can only do optimizations for simulations with PME");
    }

    /* Check if rcoulomb == rlist, which is necessary for PME.
     * Exact floating-point equality is intended here: both values come
     * straight from the same tpr file and must match bit-for-bit. */
    if (!(ir->rcoulomb == ir->rlist))
    {
        gmx_fatal(FARGS, "PME requires rcoulomb (%f) to be equal to rlist (%f).",
                  ir->rcoulomb, ir->rlist);
    }
}
/* Estimate the Verlet pair-list cut-off (rlist) needed to keep the
 * energy drift per atom per unit time below drift_target.
 *
 * Strategy: compute analytic first/second derivatives of the LJ and
 * electrostatic potentials at the cut-off, estimate the atomic
 * displacement variance over nstlist-1 steps, then bisect on the
 * buffer size until the predicted drift matches the target.
 *
 * Outputs: *rlist (the recommended cut-off) and *n_nonlin_vsite
 * (filled by get_verlet_buffer_atomtypes()).
 */
void calc_verlet_buffer_size(const gmx_mtop_t *mtop, real boxvol, const t_inputrec *ir,
                             real drift_target, const verletbuf_list_setup_t *list_setup,
                             int *n_nonlin_vsite, real *rlist)
{
    double resolution;
    char *env;
    real particle_distance;
    real nb_clust_frac_pairs_not_in_list_at_cutoff;
    verletbuf_atomtype_t *att = NULL;
    int natt = -1, i;
    double reppow;
    real md_ljd, md_ljr, md_el, dd_el;
    real elfac;
    real kT_fac, mass_min;
    int ib0, ib1, ib;
    real rb, rl;
    real drift;

    /* Resolution of the buffer size (nm); overridable via environment */
    resolution = 0.001;
    env = getenv("GMX_VERLET_BUFFER_RES");
    if (env != NULL)
    {
        sscanf(env, "%lf", &resolution);
    }

    /* In an atom wise pair-list there would be no pairs in the list
     * beyond the pair-list cut-off.
     * However, we use a pair-list of groups vs groups of atoms.
     * For groups of 4 atoms, the parallelism of SSE instructions, only
     * 10% of the atoms pairs are not in the list just beyond the cut-off.
     * As this percentage increases slowly compared to the decrease of the
     * Gaussian displacement distribution over this range, we can simply
     * reduce the drift by this fraction.
     * For larger groups, e.g. of 8 atoms, this fraction will be lower,
     * so then buffer size will be on the conservative (large) side.
     *
     * Note that the formulas used here do not take into account
     * cancellation of errors which could occur by missing both
     * attractive and repulsive interactions.
     *
     * The only major assumption is homogeneous particle distribution.
     * For an inhomogeneous system, such as a liquid-vapor system,
     * the buffer will be underestimated. The actual energy drift
     * will be higher by the factor: local/homogeneous particle density.
     *
     * The results of this estimate have been checked againt simulations.
     * In most cases the real drift differs by less than a factor 2.
     */

    /* Worst case assumption: HCP packing of particles gives largest distance */
    particle_distance = pow(boxvol*sqrt(2)/mtop->natoms, 1.0/3.0);

    get_verlet_buffer_atomtypes(mtop, &att, &natt, n_nonlin_vsite);
    assert(att != NULL && natt >= 0);

    if (debug)
    {
        fprintf(debug, "particle distance assuming HCP packing: %f nm\n",
                particle_distance);
        fprintf(debug, "energy drift atom types: %d\n", natt);
    }

    reppow = mtop->ffparams.reppow;
    md_ljd = 0;
    md_ljr = 0;
    if (ir->vdwtype == evdwCUT)
    {
        /* -dV/dr of -r^-6 and r^-repporw */
        md_ljd = -6*pow(ir->rvdw, -7.0);
        md_ljr = reppow*pow(ir->rvdw, -(reppow+1));
        /* The contribution of the second derivative is negligible */
    }
    else
    {
        gmx_fatal(FARGS, "Energy drift calculation is only implemented for plain cut-off Lennard-Jones interactions");
    }

    elfac = ONE_4PI_EPS0/ir->epsilon_r;

    /* Determine md=-dV/dr and dd=d^2V/dr^2 at the cut-off */
    md_el = 0;
    dd_el = 0;
    if (ir->coulombtype == eelCUT || EEL_RF(ir->coulombtype))
    {
        real eps_rf, k_rf;

        if (ir->coulombtype == eelCUT)
        {
            /* Plain cut-off: no reaction-field correction term */
            eps_rf = 1;
            k_rf = 0;
        }
        else
        {
            eps_rf = ir->epsilon_rf/ir->epsilon_r;
            if (eps_rf != 0)
            {
                k_rf = pow(ir->rcoulomb, -3.0)*(eps_rf - ir->epsilon_r)/(2*eps_rf + ir->epsilon_r);
            }
            else
            {
                /* epsilon_rf = infinity */
                k_rf = 0.5*pow(ir->rcoulomb, -3.0);
            }
        }

        if (eps_rf > 0)
        {
            md_el = elfac*(pow(ir->rcoulomb, -2.0) - 2*k_rf*ir->rcoulomb);
        }
        dd_el = elfac*(2*pow(ir->rcoulomb, -3.0) + 2*k_rf);
    }
    else if (EEL_PME(ir->coulombtype) || ir->coulombtype == eelEWALD)
    {
        real b, rc, br;

        /* Derivatives of the Ewald real-space potential erfc(b*r)/r */
        b = calc_ewaldcoeff(ir->rcoulomb, ir->ewald_rtol);
        rc = ir->rcoulomb;
        br = b*rc;
        md_el = elfac*(b*exp(-br*br)*M_2_SQRTPI/rc + gmx_erfc(br)/(rc*rc));
        dd_el = elfac/(rc*rc)*(2*b*(1 + br*br)*exp(-br*br)*M_2_SQRTPI + 2*gmx_erfc(br)/rc);
    }
    else
    {
        gmx_fatal(FARGS, "Energy drift calculation is only implemented for Reaction-Field and Ewald electrostatics");
    }

    /* Determine the variance of the atomic displacement
     * over nstlist-1 steps: kT_fac
     * For inertial dynamics (not Brownian dynamics) the mass factor
     * is not included in kT_fac, it is added later.
     */
    if (ir->eI == eiBD)
    {
        /* Get the displacement distribution from the random component only.
         * With accurate integration the systematic (force) displacement
         * should be negligible (unless nstlist is extremely large, which
         * you wouldn't do anyhow).
         */
        kT_fac = 2*BOLTZ*ir->opts.ref_t[0]*(ir->nstlist-1)*ir->delta_t;
        if (ir->bd_fric > 0)
        {
            /* This is directly sigma^2 of the displacement */
            kT_fac /= ir->bd_fric;

            /* Set the masses to 1 as kT_fac is the full sigma^2,
             * but we divide by m in ener_drift().
             */
            for (i = 0; i < natt; i++)
            {
                att[i].mass = 1;
            }
        }
        else
        {
            real tau_t;

            /* Per group tau_t is not implemented yet, use the maximum */
            tau_t = ir->opts.tau_t[0];
            for (i = 1; i < ir->opts.ngtc; i++)
            {
                tau_t = max(tau_t, ir->opts.tau_t[i]);
            }

            kT_fac *= tau_t;
            /* This kT_fac needs to be divided by the mass to get sigma^2 */
        }
    }
    else
    {
        /* Inertial dynamics: variance grows quadratically with time */
        kT_fac = BOLTZ*ir->opts.ref_t[0]*sqr((ir->nstlist-1)*ir->delta_t);
    }

    /* Lightest atom type bounds the displacement from above */
    mass_min = att[0].mass;
    for (i = 1; i < natt; i++)
    {
        mass_min = min(mass_min, att[i].mass);
    }

    if (debug)
    {
        fprintf(debug, "md_ljd %e md_ljr %e\n", md_ljd, md_ljr);
        fprintf(debug, "md_el %e dd_el %e\n", md_el, dd_el);
        fprintf(debug, "sqrt(kT_fac) %f\n", sqrt(kT_fac));
        fprintf(debug, "mass_min %f\n", mass_min);
    }

    /* Search using bisection; ib indexes buffer size in units of resolution */
    ib0 = -1;
    /* The drift will be neglible at 5 times the max sigma */
    ib1 = (int)(5*2*sqrt(kT_fac/mass_min)/resolution) + 1;
    while (ib1 - ib0 > 1)
    {
        ib = (ib0 + ib1)/2;
        rb = ib*resolution;
        rl = max(ir->rvdw, ir->rcoulomb) + rb;

        /* Calculate the average energy drift at the last step
         * of the nstlist steps at which the pair-list is used.
         */
        drift = ener_drift(att, natt, &mtop->ffparams,
                           kT_fac,
                           md_ljd, md_ljr, md_el, dd_el, rb,
                           rl, boxvol);

        /* Correct for the fact that we are using a Ni x Nj particle pair list
         * and not a 1 x 1 particle pair list. This reduces the drift.
         */
        /* We don't have a formula for 8 (yet), use 4 which is conservative */
        nb_clust_frac_pairs_not_in_list_at_cutoff =
            surface_frac(min(list_setup->cluster_size_i, 4),
                         particle_distance, rl)*
            surface_frac(min(list_setup->cluster_size_j, 4),
                         particle_distance, rl);
        drift *= nb_clust_frac_pairs_not_in_list_at_cutoff;

        /* Convert the drift to drift per unit time per atom */
        drift /= ir->nstlist*ir->delta_t*mtop->natoms;

        if (debug)
        {
            fprintf(debug, "ib %3d %3d %3d rb %.3f %dx%d fac %.3f drift %f\n",
                    ib0, ib, ib1, rb,
                    list_setup->cluster_size_i, list_setup->cluster_size_j,
                    nb_clust_frac_pairs_not_in_list_at_cutoff,
                    drift);
        }

        /* Too much drift -> need a larger buffer; otherwise shrink */
        if (fabs(drift) > drift_target)
        {
            ib0 = ib;
        }
        else
        {
            ib1 = ib;
        }
    }

    sfree(att);

    /* ib1 is the smallest buffer index meeting the drift target */
    *rlist = max(ir->rvdw, ir->rcoulomb) + ib1*resolution;
}
/* pmetest driver: computes PME energies for the tpr frame and optionally
 * every frame of a trajectory, with optional MPI parallelism.
 * Master reads input; data is broadcast/sent to the other ranks.
 */
int main(int argc,char *argv[])
{
  static char *desc[] = {
    "The [TT]pmetest[tt] program tests the scaling of the PME code. When only given",
    "a [TT].tpr[tt] file it will compute PME for one frame. When given a trajectory",
    "it will do so for all the frames in the trajectory. Before the PME",
    "routine is called the coordinates are sorted along the X-axis.[PAR]",
    "As an extra service to the public the program can also compute",
    "long-range Coulomb energies for components of the system. When the",
    "[TT]-groups[tt] flag is given to the program the energy groups",
    "from the [TT].tpr[tt] file will be read, and half an energy matrix computed."
  };
  t_commrec    *cr,*mcr;
  static t_filenm fnm[] = {
    { efTPX, NULL,      NULL,       ffREAD  },
    { efTRN, "-o",      NULL,       ffWRITE },
    { efLOG, "-g",      "pme",      ffWRITE },
    { efTRX, "-f",      NULL,       ffOPTRD },
    { efXVG, "-x",      "ener-pme", ffWRITE }
  };
#define NFILE asize(fnm)

  /* Command line options ! */
  static gmx_bool bVerbose=FALSE;
  static gmx_bool bOptFFT=FALSE;
  static gmx_bool bSort=FALSE;
  static int  ewald_geometry=eewg3D;
  static int  nnodes=1;
  static int  pme_order=0;
  static rvec grid = { -1, -1, -1 };
  static real rc   = 0.0;
  static real dtol = 0.0;
  static gmx_bool bGroups = FALSE;
  static t_pargs pa[] = {
    { "-np",     FALSE, etINT, {&nnodes},
      "Number of nodes, must be the same as used for [TT]grompp[tt]" },
    { "-v",      FALSE, etBOOL,{&bVerbose},
      "Be loud and noisy" },
    { "-sort",   FALSE, etBOOL,{&bSort},
      "Sort coordinates. Crucial for domain decomposition." },
    { "-grid",   FALSE, etRVEC,{&grid},
      "Number of grid cells in X, Y, Z dimension (if -1 use from [TT].tpr[tt])" },
    { "-order",  FALSE, etINT, {&pme_order},
      "Order of the PME spreading algorithm" },
    { "-groups", FALSE, etBOOL, {&bGroups},
      "Compute half an energy matrix based on the energy groups in your [TT].tpr[tt] file" },
    { "-rc",     FALSE, etREAL, {&rc},
      "Rcoulomb for Ewald summation" },
    { "-tol",    FALSE, etREAL, {&dtol},
      "Tolerance for Ewald summation" }
  };
  FILE        *fp;
  t_inputrec  *ir;
  t_topology  top;
  t_tpxheader tpx;
  t_nrnb      nrnb;
  t_nsborder  *nsb;
  t_forcerec  *fr;
  t_mdatoms   *mdatoms;
  char        title[STRLEN];
  int         natoms,step,status,i,ncg,root;
  real        t,lambda,ewaldcoeff,qtot;
  rvec        *x,*f,*xbuf;
  int         *index;
  gmx_bool    bCont;
  real        *charge,*qbuf,*qqbuf;
  matrix      box;

  /* Start the actual parallel code if necessary */
  cr   = init_par(&argc,&argv);
  root = 0;

  if (MASTER(cr))
    CopyRight(stderr,argv[0]);

  /* Parse command line on all processors, arguments are passed on in
   * init_par (see above)
   */
  parse_common_args(&argc,argv,
                    PCA_KEEP_ARGS | PCA_NOEXIT_ON_ARGS | PCA_BE_NICE |
                    PCA_CAN_SET_DEFFNM | (MASTER(cr) ? 0 : PCA_QUIET),
                    NFILE,fnm,asize(pa),pa,asize(desc),desc,0,NULL);

#ifndef GMX_MPI
  if (nnodes > 1)
    gmx_fatal(FARGS,"GROMACS compiled without MPI support - can't do parallel runs");
#endif

  /* Open log files on all processors */
  open_log(ftp2fn(efLOG,NFILE,fnm),cr);
  snew(ir,1);

  if (MASTER(cr)) {
    /* Read tpr file etc. (master only; data is distributed below) */
    read_tpxheader(ftp2fn(efTPX,NFILE,fnm),&tpx,FALSE,NULL,NULL);
    snew(x,tpx.natoms);
    read_tpx(ftp2fn(efTPX,NFILE,fnm),&step,&t,&lambda,ir,
             box,&natoms,x,NULL,NULL,&top);
    /* Charges: copy from topology and accumulate the total charge */
    qtot = 0;
    snew(charge,natoms);
    for(i=0; (i<natoms); i++) {
      charge[i] = top.atoms.atom[i].q;
      qtot += charge[i];
    }

    /* Grid stuff: command-line grid overrides the tpr values */
    if (opt2parg_bSet("-grid",asize(pa),pa)) {
      ir->nkx = grid[XX];
      ir->nky = grid[YY];
      ir->nkz = grid[ZZ];
    }
    /* Check command line parameters for consistency */
    if ((ir->nkx <= 0) || (ir->nky <= 0) || (ir->nkz <= 0))
      gmx_fatal(FARGS,"PME grid = %d %d %d",ir->nkx,ir->nky,ir->nkz);
    if (opt2parg_bSet("-rc",asize(pa),pa))
      ir->rcoulomb = rc;
    if (ir->rcoulomb <= 0)
      gmx_fatal(FARGS,"rcoulomb should be > 0 (not %f)",ir->rcoulomb);
    if (opt2parg_bSet("-order",asize(pa),pa))
      ir->pme_order = pme_order;
    if (ir->pme_order <= 0)
      gmx_fatal(FARGS,"pme_order should be > 0 (not %d)",ir->pme_order);
    if (opt2parg_bSet("-tol",asize(pa),pa))
      ir->ewald_rtol = dtol;
    if (ir->ewald_rtol <= 0)
      gmx_fatal(FARGS,"ewald_tol should be > 0 (not %f)",ir->ewald_rtol);
  }
  else {
    /* Non-master ranks start with an empty topology */
    init_top(&top);
  }

  /* Add parallellization code here */
  snew(nsb,1);
  if (MASTER(cr)) {
    /* Divide the charge groups roughly evenly over the nodes */
    ncg = top.blocks[ebCGS].multinr[0];
    for(i=0; (i<cr->nnodes-1); i++)
      top.blocks[ebCGS].multinr[i] = min(ncg,(ncg*(i+1))/cr->nnodes);
    for( ; (i<MAXNODES); i++)
      top.blocks[ebCGS].multinr[i] = ncg;
  }
  if (PAR(cr)) {
    /* Set some variables to zero to avoid core dumps */
    ir->opts.ngtc = ir->opts.ngacc = ir->opts.ngfrz = ir->opts.ngener = 0;
#ifdef GMX_MPI
    /* Distribute the data over processors */
    MPI_Bcast(&natoms,1,MPI_INT,root,MPI_COMM_WORLD);
    MPI_Bcast(ir,sizeof(*ir),MPI_BYTE,root,MPI_COMM_WORLD);
    MPI_Bcast(&qtot,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
#endif

    /* Call some dedicated communication routines, master sends n-1 times */
    if (MASTER(cr)) {
      for(i=1; (i<cr->nnodes); i++) {
        mv_block(i,&(top.blocks[ebCGS]));
        mv_block(i,&(top.atoms.excl));
      }
    }
    else {
      ld_block(root,&(top.blocks[ebCGS]));
      ld_block(root,&(top.atoms.excl));
    }
    if (!MASTER(cr)) {
      /* Slaves still need local buffers before the broadcast below */
      snew(charge,natoms);
      snew(x,natoms);
    }
#ifdef GMX_MPI
    MPI_Bcast(charge,natoms,GMX_MPI_REAL,root,MPI_COMM_WORLD);
#endif
  }
  ewaldcoeff = calc_ewaldcoeff(ir->rcoulomb,ir->ewald_rtol);

  if (bVerbose)
    pr_inputrec(stdlog,0,"Inputrec",ir);

  /* Allocate memory for temp arrays etc. */
  snew(xbuf,natoms);
  snew(f,natoms);
  snew(qbuf,natoms);
  snew(qqbuf,natoms);
  snew(index,natoms);

  /* Initialize the PME code */
  init_pme(stdlog,cr,ir->nkx,ir->nky,ir->nkz,ir->pme_order,
           natoms,FALSE,bOptFFT,ewald_geometry);

  /* MFlops accounting */
  init_nrnb(&nrnb);

  /* Initialize the work division */
  calc_nsb(stdlog,&(top.blocks[ebCGS]),cr->nnodes,nsb,0);
  nsb->nodeid = cr->nodeid;
  print_nsb(stdlog,"pmetest",nsb);

  /* Initiate forcerec */
  mdatoms = atoms2md(stdlog,&top.atoms,ir->opts.nFreeze,ir->eI,
                     ir->delta_t,0,ir->opts.tau_t,FALSE,FALSE);
  snew(fr,1);
  init_forcerec(stdlog,fr,ir,&top,cr,mdatoms,nsb,box,FALSE,NULL,NULL,FALSE);

  /* First do PME based on coordinates in tpr file, send them to
   * other processors if needed.
   */
  if (MASTER(cr))
    fprintf(stdlog,"-----\n"
            "Results based on tpr file %s\n",ftp2fn(efTPX,NFILE,fnm));
#ifdef GMX_MPI
  if (PAR(cr)) {
    MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
    MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
  }
#endif
  do_my_pme(stdlog,0,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,
            cr,nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,NULL,
            bGroups ? ir->opts.ngener : 1,mdatoms->cENER);

  /* If we have a trajectry file, we will read the frames in it and compute
   * the PME energy.
   */
  if (ftp2bSet(efTRX,NFILE,fnm)) {
    fprintf(stdlog,"-----\n"
            "Results based on trx file %s\n",ftp2fn(efTRX,NFILE,fnm));
    if (MASTER(cr)) {
      sfree(x);
      /* Read the first frame; coordinates replace the tpr ones */
      natoms = read_first_x(&status,ftp2fn(efTRX,NFILE,fnm),&t,&x,box);
      if (natoms != top.atoms.nr)
        gmx_fatal(FARGS,"natoms in trx = %d, in tpr = %d",natoms,top.atoms.nr);
      fp = xvgropen(ftp2fn(efXVG,NFILE,fnm),"PME Energy","Time (ps)","E (kJ/mol)");
    }
    else
      fp = NULL;
    do {
      /* Send coordinates, box and time to the other nodes */
#ifdef GMX_MPI
      if (PAR(cr)) {
        MPI_Bcast(x[0],natoms*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
        MPI_Bcast(box[0],DIM*DIM,GMX_MPI_REAL,root,MPI_COMM_WORLD);
        MPI_Bcast(&t,1,GMX_MPI_REAL,root,MPI_COMM_WORLD);
      }
#endif
      rm_pbc(&top.idef,nsb->natoms,box,x,x);
      /* Call the PME wrapper function */
      do_my_pme(stdlog,t,bVerbose,ir,x,xbuf,f,charge,qbuf,qqbuf,box,bSort,cr,
                nsb,&nrnb,&(top.atoms.excl),qtot,fr,index,fp,
                bGroups ? ir->opts.ngener : 1,mdatoms->cENER);
      /* Only the master processor reads more data */
      if (MASTER(cr))
        bCont = read_next_x(status,&t,natoms,x,box);
      /* Check whether we need to continue */
#ifdef GMX_MPI
      if (PAR(cr))
        MPI_Bcast(&bCont,1,MPI_INT,root,MPI_COMM_WORLD);
#endif
    } while (bCont);

    /* Finish I/O, close files */
    if (MASTER(cr)) {
      close_trx(status);
      ffclose(fp);
    }
  }

  if (bVerbose) {
    /* Do some final I/O about performance, might be useful in debugging */
    fprintf(stdlog,"-----\n");
    print_nrnb(stdlog,&nrnb);
  }

  /* Finish the parallel stuff */
  if (gmx_parallel_env_initialized())
    gmx_finalize(cr);

  /* Thank the audience, as usual */
  if (MASTER(cr))
    thanx(stderr);

  return 0;
}
/* Initialize the force record fr from the input record and topology:
 * copies cut-offs and interaction settings, sets up generalized-RF
 * parameters, allocates per-atom/shift arrays, and constructs the
 * non-bonded lookup tables (including separate 1-4 tables for
 * long-range electrostatics).
 */
void init_forcerec(FILE *fp,
                   t_forcerec *fr,
                   t_inputrec *ir,
                   t_topology *top,
                   t_commrec  *cr,
                   t_mdatoms  *mdatoms,
                   t_nsborder *nsb,
                   matrix     box,
                   bool       bMolEpot,
                   char       *tabfn,
                   bool       bNoSolvOpt)
{
  int     i,j,m,natoms,ngrp,tabelemsize;
  real    q,zsq,nrdf,T;
  rvec    box_size;
  double  rtab;
  t_block *mols,*cgs;
  t_idef  *idef;

  /* check_box() returns a non-NULL message on error, used directly here */
  if (check_box(box))
    fatal_error(0,check_box(box));

  cgs  = &(top->blocks[ebCGS]);
  mols = &(top->blocks[ebMOLS]);
  idef = &(top->idef);

  natoms = mdatoms->nr;

  /* Shell stuff */
  fr->fc_stepsize = ir->fc_stepsize;

  /* Free energy */
  fr->efep      = ir->efep;
  fr->sc_alpha  = ir->sc_alpha;
  fr->sc_sigma6 = pow(ir->sc_sigma,6);

  /* Neighbour searching stuff */
  fr->bGrid     = (ir->ns_type == ensGRID);
  fr->ndelta    = ir->ndelta;
  fr->ePBC      = ir->ePBC;
  fr->rlist     = ir->rlist;
  fr->rlistlong = max(ir->rlist,max(ir->rcoulomb,ir->rvdw));
  fr->eeltype   = ir->coulombtype;
  fr->vdwtype   = ir->vdwtype;

  /* Derived flags controlling table use and twin-range setup */
  fr->bTwinRange = fr->rlistlong > fr->rlist;
  fr->bEwald     = fr->eeltype==eelPME || fr->eeltype==eelEWALD;
  fr->bvdwtab    = fr->vdwtype != evdwCUT;
  fr->bRF        = (fr->eeltype==eelRF || fr->eeltype==eelGRF) &&
                   fr->vdwtype==evdwCUT;
  fr->bcoultab   = (fr->eeltype!=eelCUT && !fr->bRF) || fr->bEwald;

#ifndef SPEC_CPU
  /* Environment override: force tabulated interactions for debugging */
  if (getenv("GMX_FORCE_TABLES")) {
    fr->bvdwtab  = TRUE;
    fr->bcoultab = TRUE;
  }
#endif
  if (fp) {
    fprintf(fp,"Table routines are used for coulomb: %s\n",bool_names[fr->bcoultab]);
    fprintf(fp,"Table routines are used for vdw: %s\n",bool_names[fr->bvdwtab ]);
  }

  /* Tables are used for direct ewald sum */
  if(fr->bEwald) {
    fr->ewaldcoeff=calc_ewaldcoeff(ir->rcoulomb, ir->ewald_rtol);
    if (fp)
      fprintf(fp,"Using a Gaussian width (1/beta) of %g nm for Ewald\n",
              1/fr->ewaldcoeff);
  }

  /* Domain decomposition parallellism... */
  fr->bDomDecomp = ir->bDomDecomp;
  fr->Dimension  = ir->decomp_dir;

  /* Electrostatics */
  fr->epsilon_r       = ir->epsilon_r;
  fr->fudgeQQ         = ir->fudgeQQ;
  fr->rcoulomb_switch = ir->rcoulomb_switch;
  fr->rcoulomb        = ir->rcoulomb;

#ifndef SPEC_CPU
  if (bNoSolvOpt || getenv("GMX_NO_SOLV_OPT"))
    fr->bSolvOpt = FALSE;
  else
#endif
    fr->bSolvOpt = TRUE;

  /* Parameters for generalized RF */
  fr->zsquare = 0.0;
  fr->temp    = 0.0;

  if (fr->eeltype == eelGRF) {
    /* Sum |q| per charge group for the generalized RF correction */
    zsq = 0.0;
    for (i=0; (i<cgs->nr); i++) {
      q = 0;
      for(j=cgs->index[i]; (j<cgs->index[i+1]); j++)
        q+=mdatoms->chargeT[cgs->a[j]];
      if (fabs(q) > GMX_REAL_MIN)
        /* Changed from square to fabs 990314 DvdS
         * Does not make a difference for monovalent ions, but does for
         * divalent ions (Ca2+!!)
         */
        zsq += fabs(q);
    }
    fr->zsquare = zsq;

    /* Degrees-of-freedom weighted average temperature */
    T    = 0.0;
    nrdf = 0.0;
    for(i=0; (i<ir->opts.ngtc); i++) {
      nrdf += ir->opts.nrdf[i];
      T    += (ir->opts.nrdf[i] * ir->opts.ref_t[i]);
    }
    if (nrdf < GMX_REAL_MIN)
      fatal_error(0,"No degrees of freedom!");
    fr->temp = T/nrdf;
  }
  else if (EEL_LR(fr->eeltype) || (fr->eeltype == eelSHIFT) ||
           (fr->eeltype == eelUSER) || (fr->eeltype == eelSWITCH)) {
    /* We must use the long range cut-off for neighboursearching...
     * An extra range of e.g. 0.1 nm (half the size of a charge group)
     * is necessary for neighboursearching. This allows diffusion
     * into the cut-off range (between neighborlist updates),
     * and gives more accurate forces because all atoms within the short-range
     * cut-off rc must be taken into account, while the ns criterium takes
     * only those with the center of geometry within the cut-off.
     * (therefore we have to add half the size of a charge group, plus
     * something to account for diffusion if we have nstlist > 1)
     */
    for(m=0; (m<DIM); m++)
      box_size[m]=box[m][m];

    if (fr->phi == NULL)
      snew(fr->phi,mdatoms->nr);

    if ((fr->eeltype==eelPPPM) || (fr->eeltype==eelPOISSON) ||
        (fr->eeltype == eelSHIFT && fr->rcoulomb > fr->rcoulomb_switch))
      set_shift_consts(fp,fr->rcoulomb_switch,fr->rcoulomb,box_size,fr);
  }

  /* Initiate arrays */
  if (fr->bTwinRange) {
    snew(fr->f_twin,natoms);
    snew(fr->fshift_twin,SHIFTS);
  }

  if (EEL_LR(fr->eeltype)) {
    snew(fr->f_pme,natoms);
  }

  /* Mask that says whether or not this NBF list should be computed */
  /*  if (fr->bMask == NULL) {
      ngrp = ir->opts.ngener*ir->opts.ngener;
      snew(fr->bMask,ngrp);*/
  /* Defaults to always */
  /*    for(i=0; (i<ngrp); i++)
        fr->bMask[i] = TRUE;
        }*/

  if (fr->cg_cm == NULL)
    snew(fr->cg_cm,cgs->nr);
  if (fr->shift_vec == NULL)
    snew(fr->shift_vec,SHIFTS);

  if (fr->fshift == NULL)
    snew(fr->fshift,SHIFTS);

  /* Optional per-molecule energy bookkeeping */
  if (bMolEpot && (fr->nmol==0)) {
    fr->nmol=mols->nr;
    fr->mol_nr=make_invblock(mols,natoms);
    snew(fr->mol_epot,fr->nmol);
    fr->nstcalc=ir->nstenergy;
  }

  if (fr->nbfp == NULL) {
    fr->ntype = idef->atnr;
    fr->bBHAM = (idef->functype[0] == F_BHAM);
    fr->nbfp  = mk_nbfp(idef,fr->bBHAM);
  }
  /* Copy the energy group exclusions */
  fr->eg_excl = ir->opts.eg_excl;

  /* Van der Waals stuff */
  fr->rvdw        = ir->rvdw;
  fr->rvdw_switch = ir->rvdw_switch;
  if ((fr->vdwtype != evdwCUT) && (fr->vdwtype != evdwUSER) && !fr->bBHAM) {
    if (fr->rvdw_switch >= fr->rvdw)
      fatal_error(0,"rvdw_switch (%g) must be < rvdw (%g)",
                  fr->rvdw_switch,fr->rvdw);
    if (fp)
      fprintf(fp,"Using %s Lennard-Jones, switch between %g and %g nm\n",
              (fr->eeltype==eelSWITCH) ? "switched":"shifted",
              fr->rvdw_switch,fr->rvdw);
  }

  if (fp)
    fprintf(fp,"Cut-off's: NS: %g Coulomb: %g %s: %g\n",
            fr->rlist,fr->rcoulomb,fr->bBHAM ? "BHAM":"LJ",fr->rvdw);

  if (ir->eDispCorr != edispcNO)
    set_avcsix(fp,fr,mdatoms);
  if (fr->bBHAM)
    set_bham_b_max(fp,fr,mdatoms);

  /* Now update the rest of the vars */
  update_forcerec(fp,fr,box);

  /* if we are using LR electrostatics, and they are tabulated,
   * the tables will contain shifted coulomb interactions.
   * Since we want to use the non-shifted ones for 1-4
   * coulombic interactions, we must have an extra set of
   * tables. This should be done in tables.c, instead of this
   * ugly hack, but it works for now...
   */
#define MAX_14_DIST 1.0
  /* Shell to account for the maximum chargegroup radius (2*0.2 nm)
   * and diffusion during nstlist steps (0.2 nm) */
#define TAB_EXT 0.6

  /* Construct tables.
   * A little unnecessary to make both vdw and coul tables sometimes,
   * but what the heck... */
  if (fr->bcoultab || fr->bvdwtab) {
    if (EEL_LR(fr->eeltype)) {
      bool bcoulsave,bvdwsave;
      /* generate extra tables for 1-4 interactions only
       * fake the forcerec so make_tables thinks it should
       * just create the non shifted version
       */
      bcoulsave=fr->bcoultab;
      bvdwsave=fr->bvdwtab;
      fr->bcoultab=FALSE;
      fr->bvdwtab=FALSE;
      fr->rtab=MAX_14_DIST;
      make_tables(fp,fr,MASTER(cr),tabfn);
      /* restore the real flags and keep the 1-4 table separately */
      fr->bcoultab=bcoulsave;
      fr->bvdwtab=bvdwsave;
      fr->coulvdw14tab=fr->coulvdwtab;
      fr->coulvdwtab=NULL;
    }
    fr->rtab = max(fr->rlistlong+TAB_EXT,MAX_14_DIST);
  }
  else if (fr->efep != efepNO) {
    if (fr->rlistlong < GMX_REAL_MIN) {
      char *ptr,*envvar="FEP_TABLE_LENGTH";
      fr->rtab = 5;
#ifdef SPEC_CPU
      ptr = NULL;
#else
      ptr = getenv(envvar);
#endif
      if (ptr) {
        sscanf(ptr,"%lf",&rtab);
        fr->rtab = rtab;
      }
      if (fp)
        fprintf(fp,"\nNote: Setting the free energy table length to %g nm\n"
                " You can set this value with the environment variable %s"
                "\n\n",fr->rtab,envvar);
    }
    else
      fr->rtab = max(fr->rlistlong+TAB_EXT,MAX_14_DIST);
  }
  else
    fr->rtab = MAX_14_DIST;

  /* make tables for ordinary interactions */
  make_tables(fp,fr,MASTER(cr),tabfn);
  if(!(EEL_LR(fr->eeltype) && (fr->bcoultab || fr->bvdwtab)))
    fr->coulvdw14tab=fr->coulvdwtab;

  /* Copy the contents of the table to separate coulomb and LJ
   * tables too, to improve cache performance.
   * NOTE(review): snew() appears to take an element count elsewhere in
   * this file (e.g. snew(x,natoms)); multiplying by sizeof(real) here
   * looks like an over-allocation — harmless but wasteful. Confirm
   * against snew's definition before changing.
   */
  tabelemsize=fr->bBHAM ? 16 : 12;
  snew(fr->coultab,4*(fr->ntab+1)*sizeof(real));
  snew(fr->vdwtab,(tabelemsize-4)*(fr->ntab+1)*sizeof(real));
  for(i=0; i<=fr->ntab; i++) {
    for(j=0; j<4; j++)
      fr->coultab[4*i+j]=fr->coulvdwtab[tabelemsize*i+j];
    for(j=0; j<tabelemsize-4; j++)
      fr->vdwtab[(tabelemsize-4)*i+j]=fr->coulvdwtab[tabelemsize*i+4+j];
  }

  if (!fr->mno_index)
    check_solvent(fp,top,fr,mdatoms,nsb);
}