/* Read in the tpr file and save information we need later in info */
static void read_tpr_file(const char *fn_sim_tpr, t_inputinfo *info, t_state *state, gmx_mtop_t *mtop, t_inputrec *ir, real user_beta, real fracself)
{
    read_tpx_state(fn_sim_tpr, ir, state, NULL, mtop);

    /* The values of the original tpr input file are saved in the first
     * place [0] of the arrays */
    info->orig_sim_steps = ir->nsteps;
    info->pme_order[0]   = ir->pme_order;
    info->rcoulomb[0]    = ir->rcoulomb;
    info->rvdw[0]        = ir->rvdw;
    info->nkx[0]         = ir->nkx;
    info->nky[0]         = ir->nky;
    info->nkz[0]         = ir->nkz;
    info->ewald_rtol[0]  = ir->ewald_rtol;
    info->fracself       = fracself;
    if (user_beta > 0)
    {
        info->ewald_beta[0] = user_beta;
    }
    else
    {
        info->ewald_beta[0] = calc_ewaldcoeff_q(info->rcoulomb[0], info->ewald_rtol[0]);
    }

    /* Check if PME was chosen */
    if (!EEL_PME(ir->coulombtype))
    {
        gmx_fatal(FARGS, "Can only do optimizations for simulations with PME");
    }

    /* Check if rcoulomb == rlist, which is necessary for PME */
    if (ir->rcoulomb != ir->rlist)
    {
        gmx_fatal(FARGS, "PME requires rcoulomb (%f) to be equal to rlist (%f).",
                  ir->rcoulomb, ir->rlist);
    }
}
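/* Illustrative sketch, not part of the original source: calc_ewaldcoeff_q()
 * above picks the Ewald splitting parameter beta such that the direct-space
 * potential erfc(beta*r)/r has decayed to roughly ewald_rtol times the bare
 * Coulomb potential at r = rcoulomb, i.e. erfc(beta*rc) ~ ewald_rtol.
 * Since erfc(beta*rc) decreases monotonically in beta, a simple bisection
 * suffices; the helper name and loop bounds here are hypothetical, the real
 * implementation lives elsewhere in the tree.
 */
static real illustrate_ewaldcoeff_q(real rc, real rtol)
{
    real low = 0, high = 50/rc, beta = 0;
    int  i;

    for (i = 0; i < 100; i++)
    {
        beta = 0.5*(low + high);
        if (gmx_erfc(beta*rc) > rtol)
        {
            low = beta;  /* direct-space tail still too strong: raise beta */
        }
        else
        {
            high = beta; /* tail below tolerance: beta can be lowered */
        }
    }

    return beta;
}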
void calc_verlet_buffer_size(const gmx_mtop_t *mtop, real boxvol,
                             const t_inputrec *ir,
                             real reference_temperature,
                             const verletbuf_list_setup_t *list_setup,
                             int *n_nonlin_vsite,
                             real *rlist)
{
    double                resolution;
    char                 *env;

    real                  particle_distance;
    real                  nb_clust_frac_pairs_not_in_list_at_cutoff;

    verletbuf_atomtype_t *att  = NULL;
    int                   natt = -1, i;
    double                reppow;
    real                  md1_ljd, d2_ljd, md3_ljd;
    real                  md1_ljr, d2_ljr, md3_ljr;
    real                  md1_el,  d2_el;
    real                  elfac;
    real                  kT_fac, mass_min;
    int                   ib0, ib1, ib;
    real                  rb, rl;
    real                  drift;

    if (reference_temperature < 0)
    {
        if (EI_MD(ir->eI) && ir->etc == etcNO)
        {
            /* This case should be handled outside calc_verlet_buffer_size */
            gmx_incons("calc_verlet_buffer_size called with an NVE ensemble and reference_temperature < 0");
        }

        /* We use the maximum temperature with multiple T-coupl groups.
         * We could use a per particle temperature, but since particles
         * interact, this might underestimate the buffer size.
         */
        reference_temperature = 0;
        for (i = 0; i < ir->opts.ngtc; i++)
        {
            if (ir->opts.tau_t[i] >= 0)
            {
                reference_temperature = max(reference_temperature,
                                            ir->opts.ref_t[i]);
            }
        }
    }

    /* Resolution of the buffer size */
    resolution = 0.001;

    env = getenv("GMX_VERLET_BUFFER_RES");
    if (env != NULL)
    {
        sscanf(env, "%lf", &resolution);
    }

    /* In an atom-wise pair-list there would be no pairs in the list
     * beyond the pair-list cut-off.
     * However, we use a pair-list of groups vs groups of atoms.
     * For groups of 4 atoms, matching the parallelism of SSE instructions,
     * only 10% of the atom pairs are not in the list just beyond the cut-off.
     * As this percentage increases slowly compared to the decrease of the
     * Gaussian displacement distribution over this range, we can simply
     * reduce the drift by this fraction.
     * For larger groups, e.g. of 8 atoms, this fraction will be lower,
     * so then the buffer size will be on the conservative (large) side.
     *
     * Note that the formulas used here do not take into account
     * cancellation of errors which could occur by missing both
     * attractive and repulsive interactions.
     *
     * The only major assumption is homogeneous particle distribution.
     * For an inhomogeneous system, such as a liquid-vapor system,
     * the buffer will be underestimated. The actual energy drift
     * will be higher by the factor: local/homogeneous particle density.
     *
     * The results of this estimate have been checked against simulations.
     * In most cases the real drift differs by less than a factor 2.
     */

    /* Worst case assumption: HCP packing of particles gives largest distance */
    particle_distance = pow(boxvol*sqrt(2)/mtop->natoms, 1.0/3.0);

    get_verlet_buffer_atomtypes(mtop, &att, &natt, n_nonlin_vsite);
    assert(att != NULL && natt >= 0);

    if (debug)
    {
        fprintf(debug, "particle distance assuming HCP packing: %f nm\n",
                particle_distance);
        fprintf(debug, "energy drift atom types: %d\n", natt);
    }

    reppow  = mtop->ffparams.reppow;
    md1_ljd = 0;
    d2_ljd  = 0;
    md3_ljd = 0;
    md1_ljr = 0;
    d2_ljr  = 0;
    md3_ljr = 0;
    if (ir->vdwtype == evdwCUT)
    {
        real sw_range, md3_pswf;

        switch (ir->vdw_modifier)
        {
            case eintmodNONE:
            case eintmodPOTSHIFT:
                /* -dV/dr of -r^-6 and r^-reppow */
                md1_ljd = -6*pow(ir->rvdw, -7.0);
                md1_ljr = reppow*pow(ir->rvdw, -(reppow+1));
                /* The contribution of the higher derivatives is negligible */
                break;
            case eintmodFORCESWITCH:
                /* At the cut-off: V=V'=V''=0, so we use only V''' */
                md3_ljd = -md3_force_switch(6.0,    ir->rvdw_switch, ir->rvdw);
                md3_ljr =  md3_force_switch(reppow, ir->rvdw_switch, ir->rvdw);
                break;
            case eintmodPOTSWITCH:
                /* At the cut-off: V=V'=V''=0.
                 * V''' is given by the original potential times
                 * the third derivative of the switch function.
                 */
                sw_range = ir->rvdw - ir->rvdw_switch;
                md3_pswf = 60.0*pow(sw_range, -3.0);

                md3_ljd = -pow(ir->rvdw, -6.0   )*md3_pswf;
                md3_ljr =  pow(ir->rvdw, -reppow)*md3_pswf;
                break;
            default:
                gmx_incons("Unimplemented VdW modifier");
        }
    }
    else if (EVDW_PME(ir->vdwtype))
    {
        real b, r, br, br2, br4, br6;

        b   = calc_ewaldcoeff_lj(ir->rvdw, ir->ewald_rtol_lj);
        r   = ir->rvdw;
        br  = b*r;
        br2 = br*br;
        br4 = br2*br2;
        br6 = br4*br2;
        /* -dV/dr of g(br)*r^-6 [where g(x) = exp(-x^2)(1+x^2+x^4/2),
         * see LJ-PME equations in manual] and r^-reppow
         */
        md1_ljd = -exp(-br2)*(br6 + 3.0*br4 + 6.0*br2 + 6.0)*pow(r, -7.0);
        md1_ljr = reppow*pow(r, -(reppow+1));
        /* The contribution of the higher derivatives is negligible */
    }
    else
    {
        gmx_fatal(FARGS, "Energy drift calculation is only implemented for plain cut-off Lennard-Jones interactions");
    }

    elfac = ONE_4PI_EPS0/ir->epsilon_r;

    /* Determine md=-dV/dr and dd=d^2V/dr^2 */
    md1_el = 0;
    d2_el  = 0;
    if (ir->coulombtype == eelCUT || EEL_RF(ir->coulombtype))
    {
        real eps_rf, k_rf;

        if (ir->coulombtype == eelCUT)
        {
            eps_rf = 1;
            k_rf   = 0;
        }
        else
        {
            eps_rf = ir->epsilon_rf/ir->epsilon_r;
            if (eps_rf != 0)
            {
                k_rf = pow(ir->rcoulomb, -3.0)*(eps_rf - ir->epsilon_r)/(2*eps_rf + ir->epsilon_r);
            }
            else
            {
                /* epsilon_rf = infinity */
                k_rf = 0.5*pow(ir->rcoulomb, -3.0);
            }
        }

        if (eps_rf > 0)
        {
            md1_el = elfac*(pow(ir->rcoulomb, -2.0) - 2*k_rf*ir->rcoulomb);
        }
        d2_el = elfac*(2*pow(ir->rcoulomb, -3.0) + 2*k_rf);
    }
    else if (EEL_PME(ir->coulombtype) || ir->coulombtype == eelEWALD)
    {
        real b, rc, br;

        b      = calc_ewaldcoeff_q(ir->rcoulomb, ir->ewald_rtol);
        rc     = ir->rcoulomb;
        br     = b*rc;
        md1_el = elfac*(b*exp(-br*br)*M_2_SQRTPI/rc + gmx_erfc(br)/(rc*rc));
        d2_el  = elfac/(rc*rc)*(2*b*(1 + br*br)*exp(-br*br)*M_2_SQRTPI + 2*gmx_erfc(br)/rc);
    }
    else
    {
        gmx_fatal(FARGS, "Energy drift calculation is only implemented for Reaction-Field and Ewald electrostatics");
    }

    /* Determine the variance of the atomic displacement
     * over nstlist-1 steps: kT_fac
     * For inertial dynamics (not Brownian dynamics) the mass factor
     * is not included in kT_fac, it is added later.
     */
    if (ir->eI == eiBD)
    {
        /* Get the displacement distribution from the random component only.
         * With accurate integration the systematic (force) displacement
         * should be negligible (unless nstlist is extremely large, which
         * you wouldn't do anyhow).
         */
        kT_fac = 2*BOLTZ*reference_temperature*(ir->nstlist-1)*ir->delta_t;
        if (ir->bd_fric > 0)
        {
            /* This is directly sigma^2 of the displacement */
            kT_fac /= ir->bd_fric;

            /* Set the masses to 1 as kT_fac is the full sigma^2,
             * but we divide by m in ener_drift().
             */
            for (i = 0; i < natt; i++)
            {
                att[i].prop.mass = 1;
            }
        }
        else
        {
            real tau_t;

            /* Per group tau_t is not implemented yet, use the maximum */
            tau_t = ir->opts.tau_t[0];
            for (i = 1; i < ir->opts.ngtc; i++)
            {
                tau_t = max(tau_t, ir->opts.tau_t[i]);
            }

            kT_fac *= tau_t;
            /* This kT_fac needs to be divided by the mass to get sigma^2 */
        }
    }
    else
    {
        kT_fac = BOLTZ*reference_temperature*sqr((ir->nstlist-1)*ir->delta_t);
    }

    mass_min = att[0].prop.mass;
    for (i = 1; i < natt; i++)
    {
        mass_min = min(mass_min, att[i].prop.mass);
    }

    if (debug)
    {
        fprintf(debug, "md1_ljd %9.2e d2_ljd %9.2e md3_ljd %9.2e\n",
                md1_ljd, d2_ljd, md3_ljd);
        fprintf(debug, "md1_ljr %9.2e d2_ljr %9.2e md3_ljr %9.2e\n",
                md1_ljr, d2_ljr, md3_ljr);
        fprintf(debug, "md1_el %9.2e d2_el %9.2e\n", md1_el, d2_el);
        fprintf(debug, "sqrt(kT_fac) %f\n", sqrt(kT_fac));
        fprintf(debug, "mass_min %f\n", mass_min);
    }

    /* Search using bisection */
    ib0 = -1;
    /* The drift will be negligible at 5 times the max sigma */
    ib1 = (int)(5*2*sqrt(kT_fac/mass_min)/resolution) + 1;
    while (ib1 - ib0 > 1)
    {
        ib = (ib0 + ib1)/2;
        rb = ib*resolution;
        rl = max(ir->rvdw, ir->rcoulomb) + rb;

        /* Calculate the average energy drift at the last step
         * of the nstlist steps at which the pair-list is used.
         */
        drift = ener_drift(att, natt, &mtop->ffparams,
                           kT_fac,
                           md1_ljd, d2_ljd, md3_ljd,
                           md1_ljr, d2_ljr, md3_ljr,
                           md1_el,  d2_el,
                           rb, rl, boxvol);

        /* Correct for the fact that we are using a Ni x Nj particle pair list
         * and not a 1 x 1 particle pair list. This reduces the drift.
         */
        /* We don't have a formula for 8 (yet), use 4 which is conservative */
        nb_clust_frac_pairs_not_in_list_at_cutoff =
            surface_frac(min(list_setup->cluster_size_i, 4),
                         particle_distance, rl)*
            surface_frac(min(list_setup->cluster_size_j, 4),
                         particle_distance, rl);
        drift *= nb_clust_frac_pairs_not_in_list_at_cutoff;

        /* Convert the drift to drift per unit time per atom */
        drift /= ir->nstlist*ir->delta_t*mtop->natoms;

        if (debug)
        {
            fprintf(debug, "ib %3d %3d %3d rb %.3f %dx%d fac %.3f drift %.1e\n",
                    ib0, ib, ib1, rb,
                    list_setup->cluster_size_i, list_setup->cluster_size_j,
                    nb_clust_frac_pairs_not_in_list_at_cutoff,
                    drift);
        }

        if (fabs(drift) > ir->verletbuf_tol)
        {
            ib0 = ib;
        }
        else
        {
            ib1 = ib;
        }
    }

    sfree(att);

    *rlist = max(ir->rvdw, ir->rcoulomb) + ib1*resolution;
}
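/* Worked example, not part of the original source: for inertial dynamics the
 * bisection above uses a one-dimensional displacement variance of
 * sigma^2 = kT_fac/m = kB*T*((nstlist-1)*dt)^2/m. The helper name and the
 * numbers in the comment below are illustrative assumptions only.
 */
static real illustrate_buffer_sigma(real temperature, int nstlist, real delta_t,
                                    real mass)
{
    /* e.g. T = 300 K, nstlist = 10, dt = 0.002 ps and m = 1 amu give
     * sigma = sqrt(0.00831*300*(9*0.002)^2/1) ~ 0.028 nm, so the search
     * interval 0..5*2*sigma above spans roughly 0.28 nm of buffer.
     */
    return sqrt(BOLTZ*temperature*sqr((nstlist - 1)*delta_t)/mass);
}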
int mdrunner(gmx_hw_opt_t *hw_opt,
             FILE *fplog, t_commrec *cr, int nfile,
             const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
             gmx_bool bCompact, int nstglobalcomm,
             ivec ddxyz, int dd_node_order, real rdd, real rconstr,
             const char *dddlb_opt, real dlb_scale,
             const char *ddcsx, const char *ddcsy, const char *ddcsz,
             const char *nbpu_opt, int nstlist_cmdline,
             gmx_int64_t nsteps_cmdline, int nstepout, int resetstep,
             int gmx_unused nmultisim, int repl_ex_nst, int repl_ex_nex,
             int repl_ex_seed, real pforce, real cpt_period, real max_hours,
             int imdport, unsigned long Flags)
{
    gmx_bool                  bForceUseGPU, bTryUseGPU, bRerunMD;
    t_inputrec               *inputrec;
    t_state                  *state = NULL;
    matrix                    box;
    gmx_ddbox_t               ddbox = {0};
    int                       npme_major, npme_minor;
    t_nrnb                   *nrnb;
    gmx_mtop_t               *mtop          = NULL;
    t_mdatoms                *mdatoms       = NULL;
    t_forcerec               *fr            = NULL;
    t_fcdata                 *fcd           = NULL;
    real                      ewaldcoeff_q  = 0;
    real                      ewaldcoeff_lj = 0;
    struct gmx_pme_t        **pmedata       = NULL;
    gmx_vsite_t              *vsite         = NULL;
    gmx_constr_t              constr;
    int                       nChargePerturbed = -1, nTypePerturbed = 0, status;
    gmx_wallcycle_t           wcycle;
    gmx_bool                  bReadEkin;
    gmx_walltime_accounting_t walltime_accounting = NULL;
    int                       rc;
    gmx_int64_t               reset_counters;
    gmx_edsam_t               ed           = NULL;
    int                       nthreads_pme = 1;
    int                       nthreads_pp  = 1;
    gmx_membed_t              membed       = NULL;
    gmx_hw_info_t            *hwinfo       = NULL;
    /* The master rank decides early on bUseGPU and broadcasts this later */
    gmx_bool                  bUseGPU      = FALSE;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    snew(inputrec, 1);
    snew(mtop, 1);

    if (Flags & MD_APPENDFILES)
    {
        fplog = NULL;
    }

    bRerunMD     = (Flags & MD_RERUN);
    bForceUseGPU = (strncmp(nbpu_opt, "gpu", 3) == 0);
    bTryUseGPU   = (strncmp(nbpu_opt, "auto", 4) == 0) || bForceUseGPU;

    /* Detect hardware, gather information. This is an operation that is
     * global for this process (MPI rank). */
    hwinfo = gmx_detect_hardware(fplog, cr, bTryUseGPU);

    gmx_print_detected_hardware(fplog, cr, hwinfo);

    if (fplog != NULL)
    {
        /* Print references after all software/hardware printing */
        please_cite(fplog, "Abraham2015");
        please_cite(fplog, "Pall2015");
        please_cite(fplog, "Pronk2013");
        please_cite(fplog, "Hess2008b");
        please_cite(fplog, "Spoel2005a");
        please_cite(fplog, "Lindahl2001a");
        please_cite(fplog, "Berendsen95a");
    }

    snew(state, 1);
    if (SIMMASTER(cr))
    {
        /* Read (nearly) all data required for the simulation */
        read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, state, NULL, mtop);

        if (inputrec->cutoff_scheme == ecutsVERLET)
        {
            /* Here the master rank decides if all ranks will use GPUs */
            bUseGPU = (hwinfo->gpu_info.n_dev_compatible > 0 ||
                       getenv("GMX_EMULATE_GPU") != NULL);

            /* TODO add GPU kernels for this and replace this check by:
             * (bUseGPU && (ir->vdwtype == evdwPME &&
             *              ir->ljpme_combination_rule == eljpmeLB))
             * update the message text and the content of nbnxn_acceleration_supported.
             */
            if (bUseGPU &&
                !nbnxn_gpu_acceleration_supported(fplog, cr, inputrec, bRerunMD))
            {
                /* Fallback message printed by nbnxn_acceleration_supported */
                if (bForceUseGPU)
                {
                    gmx_fatal(FARGS, "GPU acceleration requested, but not supported with the given input settings");
                }
                bUseGPU = FALSE;
            }

            prepare_verlet_scheme(fplog, cr,
                                  inputrec, nstlist_cmdline, mtop, state->box,
                                  bUseGPU);
        }
        else
        {
            if (nstlist_cmdline > 0)
            {
                gmx_fatal(FARGS, "Can not set nstlist with the group cut-off scheme");
            }

            if (hwinfo->gpu_info.n_dev_compatible > 0)
            {
                md_print_warn(cr, fplog,
                              "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
                              "      To use a GPU, set the mdp option: cutoff-scheme = Verlet\n");
            }

            if (bForceUseGPU)
            {
                gmx_fatal(FARGS, "GPU requested, but can't be used without cutoff-scheme=Verlet");
            }

#ifdef GMX_TARGET_BGQ
            md_print_warn(cr, fplog,
                          "NOTE: There is no SIMD implementation of the group scheme kernels on\n"
                          "      BlueGene/Q. You will observe better performance from using the\n"
                          "      Verlet cut-off scheme.\n");
#endif
        }

        if (inputrec->eI == eiSD2)
        {
            md_print_warn(cr, fplog, "The stochastic dynamics integrator %s is deprecated, since\n"
                          "it is slower than integrator %s and is slightly less accurate\n"
                          "with constraints. Use the %s integrator.",
                          ei_names[inputrec->eI], ei_names[eiSD1], ei_names[eiSD1]);
        }
    }

    /* Check and update the hardware options for internal consistency */
    check_and_update_hw_opt_1(hw_opt, cr);

    /* Early check for externally set process affinity. */
    gmx_check_thread_affinity_set(fplog, cr,
                                  hw_opt, hwinfo->nthreads_hw_avail, FALSE);

#ifdef GMX_THREAD_MPI
    if (SIMMASTER(cr))
    {
        if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
        }

        /* Since the master knows the cut-off scheme, update hw_opt for this.
         * This is done later for normal MPI and also once more with tMPI
         * for all tMPI ranks.
         */
        check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme);

        /* NOW the threads will be started: */
        hw_opt->nthreads_tmpi = get_nthreads_mpi(hwinfo,
                                                 hw_opt,
                                                 inputrec, mtop,
                                                 cr, fplog, bUseGPU);

        if (hw_opt->nthreads_tmpi > 1)
        {
            t_commrec *cr_old = cr;
            /* now start the threads. */
            cr = mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm,
                                        oenv, bVerbose, bCompact, nstglobalcomm,
                                        ddxyz, dd_node_order, rdd, rconstr,
                                        dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
                                        nbpu_opt, nstlist_cmdline,
                                        nsteps_cmdline, nstepout, resetstep, nmultisim,
                                        repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
                                        cpt_period, max_hours,
                                        Flags);
            /* the main thread continues here with a new cr. We don't deallocate
               the old cr because other threads may still be reading it. */
            if (cr == NULL)
            {
                gmx_comm("Failed to spawn threads");
            }
        }
    }
#endif
    /* END OF CAUTION: cr is now reliable */

    /* g_membed initialisation
     *
     * Because we change the mtop, init_membed is called before the init_parallel
     *
     * (in case we ever want to make it run in parallel)
     */
    if (opt2bSet("-membed", nfile, fnm))
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "Initializing membed");
        }
        membed = init_membed(fplog, nfile, fnm, mtop, inputrec, state, cr, &cpt_period);
    }

    if (PAR(cr))
    {
        /* now broadcast everything to the non-master nodes/threads: */
        init_parallel(cr, inputrec, mtop);

        /* The master rank decided on the use of GPUs,
         * broadcast this information to all ranks.
         */
        gmx_bcast_sim(sizeof(bUseGPU), &bUseGPU, cr);
    }

    if (fplog != NULL)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
        fprintf(fplog, "\n");
    }

    /* now make sure the state is initialized and propagated */
    set_state_entries(state, inputrec);

    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) &&
        (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
    {
        gmx_fatal(FARGS,
                  "The -dd or -npme options request a parallel simulation, "
#ifndef GMX_MPI
                  "but %s was compiled without threads or MPI enabled"
#else
#ifdef GMX_THREAD_MPI
                  "but the number of threads (option -nt) is 1"
#else
                  "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
#endif
#endif
                  , output_env_get_program_display_name(oenv)
                  );
    }

    if (bRerunMD &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }

    if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
    {
        gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
    }

    if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
    {
        if (cr->npmenodes > 0)
        {
            gmx_fatal_collective(FARGS, cr, NULL,
                                 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
        }

        cr->npmenodes = 0;
    }

    if (bUseGPU && cr->npmenodes < 0)
    {
        /* With GPUs we don't automatically use PME-only ranks. PME ranks can
         * improve performance with many threads per GPU, since our OpenMP
         * scaling is bad, but it's difficult to automate the setup.
         */
        cr->npmenodes = 0;
    }

#ifdef GMX_FAHCORE
    if (MASTER(cr))
    {
        fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
    }
#endif

    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */
    snew(fcd, 1);

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, mtop, inputrec, cr, fcd, state, repl_ex_nst > 0);

    init_orires(fplog, mtop, state->x, inputrec, cr, &(fcd->orires),
                state);

    if (DEFORM(*inputrec))
    {
        /* Store the deform reference box before reading the checkpoint */
        if (SIMMASTER(cr))
        {
            copy_mat(state->box, box);
        }
        if (PAR(cr))
        {
            gmx_bcast(sizeof(box), box, cr);
        }
        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
    }

    if (opt2bSet("-cpi", nfile, fnm))
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first
         * and subsequent runs...
         */
        if (gmx_fexist_master(opt2fn_master("-cpi", nfile, fnm, cr), cr))
        {
            load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                            cr, ddxyz,
                            inputrec, state, &bReadEkin,
                            (Flags & MD_APPENDFILES),
                            (Flags & MD_APPENDFILESSET));

            if (bReadEkin)
            {
                Flags |= MD_READ_EKIN;
            }
        }
    }

    if (MASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr,
                     Flags, &fplog);
    }

    /* override nsteps with value from cmdline */
    override_nsteps_cmdline(fplog, nsteps_cmdline, inputrec, cr);

    if (SIMMASTER(cr))
    {
        copy_mat(state->box, box);
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(box), box, cr);
    }

    /* Essential dynamics */
    if (opt2bSet("-ei", nfile, fnm))
    {
        /* Open input and output files, allocate space for ED data structure */
        ed = ed_open(mtop->natoms, &state->edsamstate, nfile, fnm, Flags, oenv, cr);
    }

    if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        cr->dd = init_domain_decomposition(fplog, cr, Flags, ddxyz, rdd, rconstr,
                                           dddlb_opt, dlb_scale,
                                           ddcsx, ddcsy, ddcsz,
                                           mtop, inputrec,
                                           box, state->x,
                                           &ddbox, &npme_major, &npme_minor);

        make_dd_communicators(fplog, cr, dd_node_order);

        /* Set overallocation to avoid frequent reallocation of arrays */
        set_over_alloc_dd(TRUE);
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->npmenodes = 0;
        cr->duty      = (DUTY_PP | DUTY_PME);
        npme_major    = 1;
        npme_minor    = 1;

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);
        }
    }

    if (PAR(cr))
    {
        /* After possible communicator splitting in make_dd_communicators,
         * we can set up the intra/inter node communication.
         */
        gmx_setup_nodecomm(fplog, cr);
    }

    /* Initialize per-physical-node MPI process/thread ID and counters. */
    gmx_init_intranode_counters(cr);

#ifdef GMX_MPI
    if (MULTISIM(cr))
    {
        md_print_info(cr, fplog,
                      "This is simulation %d out of %d running as a composite GROMACS\n"
                      "multi-simulation job. Setup for this simulation:\n\n",
                      cr->ms->sim, cr->ms->nsim);
    }
    md_print_info(cr, fplog, "Using %d MPI %s\n",
                  cr->nnodes,
#ifdef GMX_THREAD_MPI
                  cr->nnodes == 1 ? "thread" : "threads"
#else
                  cr->nnodes == 1 ?
"process" : "processes" #endif ); fflush(stderr); #endif /* Check and update hw_opt for the cut-off scheme */ check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme); /* Check and update hw_opt for the number of MPI ranks */ check_and_update_hw_opt_3(hw_opt); gmx_omp_nthreads_init(fplog, cr, hwinfo->nthreads_hw_avail, hw_opt->nthreads_omp, hw_opt->nthreads_omp_pme, (cr->duty & DUTY_PP) == 0, inputrec->cutoff_scheme == ecutsVERLET); #ifndef NDEBUG if (integrator[inputrec->eI].func != do_tpi && inputrec->cutoff_scheme == ecutsVERLET) { gmx_feenableexcept(); } #endif if (bUseGPU) { /* Select GPU id's to use */ gmx_select_gpu_ids(fplog, cr, &hwinfo->gpu_info, bForceUseGPU, &hw_opt->gpu_opt); } else { /* Ignore (potentially) manually selected GPUs */ hw_opt->gpu_opt.n_dev_use = 0; } /* check consistency across ranks of things like SIMD * support and number of GPUs selected */ gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt, bUseGPU); /* Now that we know the setup is consistent, check for efficiency */ check_resource_division_efficiency(hwinfo, hw_opt, Flags & MD_NTOMPSET, cr, fplog); if (DOMAINDECOMP(cr)) { /* When we share GPUs over ranks, we need to know this for the DLB */ dd_setup_dlb_resource_sharing(cr, hwinfo, hw_opt); } /* getting number of PP/PME threads PME: env variable should be read only on one node to make sure it is identical everywhere; */ /* TODO nthreads_pp is only used for pinning threads. * This is a temporary solution until we have a hw topology library. */ nthreads_pp = gmx_omp_nthreads_get(emntNonbonded); nthreads_pme = gmx_omp_nthreads_get(emntPME); wcycle = wallcycle_init(fplog, resetstep, cr, nthreads_pp, nthreads_pme); if (PAR(cr)) { /* Master synchronizes its value of reset_counters with all nodes * including PME only nodes */ reset_counters = wcycle_get_reset_counters(wcycle); gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr); wcycle_set_reset_counters(wcycle, reset_counters); } snew(nrnb, 1); if (cr->duty & DUTY_PP) { bcast_state(cr, state); /* Initiate forcerecord */ fr = mk_forcerec(); fr->hwinfo = hwinfo; fr->gpu_opt = &hw_opt->gpu_opt; init_forcerec(fplog, oenv, fr, fcd, inputrec, mtop, cr, box, opt2fn("-table", nfile, fnm), opt2fn("-tabletf", nfile, fnm), opt2fn("-tablep", nfile, fnm), opt2fn("-tableb", nfile, fnm), nbpu_opt, FALSE, pforce); /* version for PCA_NOT_READ_NODE (see md.c) */ /*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE, "nofile","nofile","nofile","nofile",FALSE,pforce); */ /* Initialize QM-MM */ if (fr->bQMMM) { init_QMMMrec(cr, mtop, inputrec, fr); } /* Initialize the mdatoms structure. * mdatoms is not filled with atom data, * as this can not be done now with domain decomposition. */ mdatoms = init_mdatoms(fplog, mtop, inputrec->efep != efepNO); /* Initialize the virtual site communication */ vsite = init_vsite(mtop, cr, FALSE); calc_shifts(box, fr->shift_vec); /* With periodic molecules the charge groups should be whole at start up * and the virtual sites should not be far from their proper positions. */ if (!inputrec->bContinuation && MASTER(cr) && !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols)) { /* Make molecules whole at start of run */ if (fr->ePBC != epbcNONE) { do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, state->x); } if (vsite) { /* Correct initial vsite positions are required * for the initial distribution in the domain decomposition * and for the initial shell prediction. 
                 */
                construct_vsites_mtop(vsite, mtop, state->x);
            }
        }

        if (EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype))
        {
            ewaldcoeff_q  = fr->ewaldcoeff_q;
            ewaldcoeff_lj = fr->ewaldcoeff_lj;
            pmedata       = &fr->pmedata;
        }
        else
        {
            pmedata = NULL;
        }
    }
    else
    {
        /* This is a PME only node */

        /* We don't need the state */
        done_state(state);

        ewaldcoeff_q  = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
        ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
        snew(pmedata, 1);
    }

    if (hw_opt->thread_affinity != threadaffOFF)
    {
        /* Before setting affinity, check whether the affinity has changed,
         * which indicates that probably the OpenMP library has changed it
         * since we first checked.
         */
        gmx_check_thread_affinity_set(fplog, cr,
                                      hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        /* Set the CPU affinity */
        gmx_set_thread_affinity(fplog, cr, hw_opt, hwinfo);
    }

    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
    {
        if (mdatoms)
        {
            nChargePerturbed = mdatoms->nChargePerturbed;
            if (EVDW_PME(inputrec->vdwtype))
            {
                nTypePerturbed = mdatoms->nTypePerturbed;
            }
        }
        if (cr->npmenodes > 0)
        {
            /* The PME only nodes need to know nChargePerturbed (FEP on Q)
             * and nTypePerturbed (FEP on LJ) */
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
            gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
        }

        if (cr->duty & DUTY_PME)
        {
            status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
                                  mtop ? mtop->natoms : 0, nChargePerturbed, nTypePerturbed,
                                  (Flags & MD_REPRODUCIBLE), nthreads_pme);
            if (status != 0)
            {
                gmx_fatal(FARGS, "Error %d initializing PME", status);
            }
        }
    }

    if (integrator[inputrec->eI].func == do_md)
    {
        /* Turn on signal handling on all nodes */
        /* A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.
         */
        signal_handler_install();
    }

    if (cr->duty & DUTY_PP)
    {
        /* Assumes uniform use of the number of OpenMP threads */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));

        if (inputrec->bPull)
        {
            /* Initialize pull code */
            inputrec->pull_work =
                init_pull(fplog, inputrec->pull, inputrec, nfile, fnm,
                          mtop, cr, oenv, inputrec->fepvals->init_lambda,
                          EI_DYNAMICS(inputrec->eI) && MASTER(cr), Flags);
        }

        if (inputrec->bRot)
        {
            /* Initialize enforced rotation code */
            init_rot(fplog, inputrec, nfile, fnm, cr, state->x, box, mtop, oenv,
                     bVerbose, Flags);
        }

        if (inputrec->eSwapCoords != eswapNO)
        {
            /* Initialize ion swapping code */
            init_swapcoords(fplog, bVerbose, inputrec, opt2fn_master("-swap", nfile, fnm, cr),
                            mtop, state->x, state->box, &state->swapstate, cr, oenv, Flags);
        }

        constr = init_constraints(fplog, mtop, inputrec, ed, state, cr);

        if (DOMAINDECOMP(cr))
        {
            GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
            dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
                            Flags & MD_DDBONDCHECK, fr->cginfo_mb);

            set_dd_parameters(fplog, cr->dd, dlb_scale, inputrec, &ddbox);

            setup_dd_grid(fplog, cr->dd);
        }

        /* Now do whatever the user wants us to do (how flexible...)
         */
        integrator[inputrec->eI].func(fplog, cr, nfile, fnm,
                                      oenv, bVerbose, bCompact,
                                      nstglobalcomm,
                                      vsite, constr,
                                      nstepout, inputrec, mtop,
                                      fcd, state,
                                      mdatoms, nrnb, wcycle, ed, fr,
                                      repl_ex_nst, repl_ex_nex, repl_ex_seed,
                                      membed,
                                      cpt_period, max_hours,
                                      imdport,
                                      Flags,
                                      walltime_accounting);

        if (inputrec->bPull)
        {
            finish_pull(inputrec->pull_work);
        }

        if (inputrec->bRot)
        {
            finish_rot(inputrec->rot);
        }
    }
    else
    {
        GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
        /* do PME only */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
        gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting,
                    ewaldcoeff_q, ewaldcoeff_lj, inputrec);
    }

    wallcycle_stop(wcycle, ewcRUN);

    /* Finish up, write some stuff
     * if rerunMD, don't write last frame again
     */
    finish_run(fplog, cr,
               inputrec, nrnb, wcycle, walltime_accounting,
               fr ? fr->nbv : NULL,
               EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));

    /* Free GPU memory and context */
    free_gpu_resources(fr, cr, &hwinfo->gpu_info, fr ? fr->gpu_opt : NULL);

    if (opt2bSet("-membed", nfile, fnm))
    {
        sfree(membed);
    }

    gmx_hardware_info_free(hwinfo);

    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
    walltime_accounting_destroy(walltime_accounting);

    /* PLUMED */
    if (plumedswitch)
    {
        plumed_finalize(plumedmain);
    }
    /* END PLUMED */

    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_close(fplog);
    }

    rc = (int)gmx_get_stop_condition();

    done_ed(&ed);

#ifdef GMX_THREAD_MPI
    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that. */
    if (PAR(cr) && MASTER(cr))
    {
        tMPI_Finalize();
    }
#endif

    return rc;
}
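/* Call-pattern sketch, not part of the original source: mdrunner() is invoked
 * once per rank. With thread-MPI the master rank enters it first, spawns the
 * remaining ranks inside mdrunner_start_threads() (each of which re-enters
 * mdrunner() with its own t_commrec), and joins them via tMPI_Finalize() at
 * the end above. A hypothetical caller, with all arguments assumed to be
 * filled in by command-line parsing as in mdrun, would look like:
 *
 *     rc = mdrunner(&hw_opt, fplog, cr, nfile, fnm, oenv, bVerbose, bCompact,
 *                   nstglobalcomm, ddxyz, dd_node_order, rdd, rconstr,
 *                   dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz, nbpu_opt,
 *                   nstlist_cmdline, nsteps_cmdline, nstepout, resetstep,
 *                   nmultisim, repl_ex_nst, repl_ex_nex, repl_ex_seed,
 *                   pforce, cpt_period, max_hours, imdport, Flags);
 */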