static void check_eg_vs_cg(gmx_mtop_t *mtop)
{
    int            astart, mb, m, cg, j, firstj;
    unsigned char  firsteg, eg;
    gmx_moltype_t *molt;

    /* Go through all the charge groups and make sure all their
     * atoms are in the same energy group.
     */
    astart = 0;
    for (mb = 0; mb < mtop->nmolblock; mb++) {
        molt = &mtop->moltype[mtop->molblock[mb].type];
        for (m = 0; m < mtop->molblock[mb].nmol; m++) {
            for (cg = 0; cg < molt->cgs.nr; cg++) {
                /* Get the energy group of the first atom in this charge group */
                firstj  = astart + molt->cgs.index[cg];
                firsteg = ggrpnr(&mtop->groups, egcENER, firstj);
                for (j = molt->cgs.index[cg]+1; j < molt->cgs.index[cg+1]; j++) {
                    eg = ggrpnr(&mtop->groups, egcENER, astart+j);
                    if (eg != firsteg) {
                        gmx_fatal(FARGS, "atoms %d and %d in charge group %d of molecule type '%s' are in different energy groups",
                                  firstj+1, astart+j+1, cg+1, *molt->name);
                    }
                }
            }
            astart += molt->atoms.nr;
        }
    }
}
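/* Every call site below goes through ggrpnr() to look up an atom's group
 * index within one of the egc* categories. As an illustration only (the real
 * lookup is defined in the topology headers and may differ in detail), it
 * amounts to a fallback to group 0 when no per-atom table is stored:
 */
static unsigned char ggrpnr_sketch(const gmx_groups_t *groups, int egc, int atomIndex)
{
    /* grpnr[egc] is NULL when all atoms are in the default group 0 */
    return (groups->grpnr[egc] != NULL) ? groups->grpnr[egc][atomIndex] : 0;
}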
static void cmp_groups(FILE *fp, gmx_groups_t *g0, gmx_groups_t *g1,
                       int natoms0, int natoms1)
{
    int  i, j, ndiff;
    char buf[32];

    fprintf(fp, "comparing groups\n");

    for (i = 0; i < egcNR; i++) {
        sprintf(buf, "grps[%d].nr", i);
        cmp_int(fp, buf, -1, g0->grps[i].nr, g1->grps[i].nr);
        if (g0->grps[i].nr == g1->grps[i].nr) {
            for (j = 0; j < g0->grps[i].nr; j++) {
                sprintf(buf, "grps[%d].name[%d]", i, j);
                cmp_str(fp, buf, -1,
                        *g0->grpname[g0->grps[i].nm_ind[j]],
                        *g1->grpname[g1->grps[i].nm_ind[j]]);
            }
        }
        cmp_int(fp, "ngrpnr", i, g0->ngrpnr[i], g1->ngrpnr[i]);
        if (g0->ngrpnr[i] == g1->ngrpnr[i] && natoms0 == natoms1 &&
            (g0->grpnr[i] != NULL || g1->grpnr[i] != NULL)) {
            for (j = 0; j < natoms0; j++) {
                cmp_int(fp, gtypes[i], j, ggrpnr(g0, i, j), ggrpnr(g1, i, j));
            }
        }
    }
    /* We have compared the names in the groups lists,
     * so we can skip the grpname list comparison.
     */
}
static void init_grpstat(FILE *log,
                         gmx_mtop_t *mtop, int ngacc, t_grp_acc gstat[])
{
    gmx_groups_t           *groups;
    gmx_mtop_atomloop_all_t aloop;
    int                     i, grp;
    t_atom                 *atom;

    if (ngacc > 0) {
        groups = &mtop->groups;
        aloop  = gmx_mtop_atomloop_all_init(mtop);
        while (gmx_mtop_atomloop_all_next(aloop, &i, &atom)) {
            grp = ggrpnr(groups, egcACC, i);
            if ((grp < 0) || (grp >= ngacc)) {
                gmx_incons("Input for acceleration groups wrong");
            }
            gstat[grp].nat++;
            /* This will not work for integrator BD */
            gstat[grp].mA += atom->m;
            gstat[grp].mB += atom->mB;
        }
    }
}
t_mdatoms *init_mdatoms(FILE *fp, gmx_mtop_t *mtop, gmx_bool bFreeEnergy)
{
    int                     mb, a, g, nmol;
    double                  tmA, tmB;
    t_atom                 *atom;
    t_mdatoms              *md;
    gmx_mtop_atomloop_all_t aloop;
    t_ilist                *ilist;

    snew(md, 1);

    md->nenergrp = mtop->groups.grps[egcENER].nr;
    md->bVCMgrps = FALSE;
    tmA          = 0.0;
    tmB          = 0.0;

    aloop = gmx_mtop_atomloop_all_init(mtop);
    while (gmx_mtop_atomloop_all_next(aloop, &a, &atom)) {
        if (ggrpnr(&mtop->groups, egcVCM, a) > 0) {
            md->bVCMgrps = TRUE;
        }

        if (bFreeEnergy && PERTURBED(*atom)) {
            md->nPerturbed++;
            if (atom->mB != atom->m) {
                md->nMassPerturbed++;
            }
            if (atom->qB != atom->q) {
                md->nChargePerturbed++;
            }
            if (atom->typeB != atom->type) {
                md->nTypePerturbed++;
            }
        }

        tmA += atom->m;
        tmB += atom->mB;
    }

    md->tmassA = tmA;
    md->tmassB = tmB;

    if (bFreeEnergy && fp) {
        fprintf(fp,
                "There are %d atoms and %d charges for free energy perturbation\n",
                md->nPerturbed, md->nChargePerturbed);
    }

    md->bOrires = gmx_mtop_ftype_count(mtop, F_ORIRES);

    return md;
}
static void list_tpx(const char *fn, gmx_bool bShowNumbers, const char *mdpfn,
                     gmx_bool bSysTop)
{
    FILE         *gp;
    int           fp, indent, i, j, **gcount, atot;
    t_state       state;
    rvec         *f = NULL;
    t_inputrec    ir;
    t_tpxheader   tpx;
    gmx_mtop_t    mtop;
    gmx_groups_t *groups;
    t_topology    top;

    read_tpxheader(fn, &tpx, TRUE, NULL, NULL);
    read_tpx_state(fn,
                   tpx.bIr  ? &ir  : NULL,
                   &state,
                   tpx.bF   ? f    : NULL,
                   tpx.bTop ? &mtop : NULL);

    if (mdpfn && tpx.bIr) {
        gp = gmx_fio_fopen(mdpfn, "w");
        pr_inputrec(gp, 0, NULL, &(ir), TRUE);
        gmx_fio_fclose(gp);
    }

    if (!mdpfn) {
        if (bSysTop) {
            top = gmx_mtop_t_to_t_topology(&mtop);
        }

        if (available(stdout, &tpx, 0, fn)) {
            indent = 0;
            indent = pr_title(stdout, indent, fn);
            pr_inputrec(stdout, 0, "inputrec", tpx.bIr ? &(ir) : NULL, FALSE);

            indent = 0;
            pr_header(stdout, indent, "header", &(tpx));

            if (!bSysTop) {
                pr_mtop(stdout, indent, "topology", &(mtop), bShowNumbers);
            }
            else {
                pr_top(stdout, indent, "topology", &(top), bShowNumbers);
            }

            pr_rvecs(stdout, indent, "box", tpx.bBox ? state.box : NULL, DIM);
            pr_rvecs(stdout, indent, "box_rel", tpx.bBox ? state.box_rel : NULL, DIM);
            pr_rvecs(stdout, indent, "boxv", tpx.bBox ? state.boxv : NULL, DIM);
            pr_rvecs(stdout, indent, "pres_prev", tpx.bBox ? state.pres_prev : NULL, DIM);
            pr_rvecs(stdout, indent, "svir_prev", tpx.bBox ? state.svir_prev : NULL, DIM);
            pr_rvecs(stdout, indent, "fvir_prev", tpx.bBox ? state.fvir_prev : NULL, DIM);
            /* leave nosehoover_xi in for now to match the tpr version */
            pr_doubles(stdout, indent, "nosehoover_xi", state.nosehoover_xi, state.ngtc);
            /*pr_doubles(stdout,indent,"nosehoover_vxi",state.nosehoover_vxi,state.ngtc);*/
            /*pr_doubles(stdout,indent,"therm_integral",state.therm_integral,state.ngtc);*/
            pr_rvecs(stdout, indent, "x", tpx.bX ? state.x : NULL, state.natoms);
            pr_rvecs(stdout, indent, "v", tpx.bV ? state.v : NULL, state.natoms);
            if (tpx.bF) {
                pr_rvecs(stdout, indent, "f", f, state.natoms);
            }
        }

        groups = &mtop.groups;

        snew(gcount, egcNR);
        for (i = 0; (i < egcNR); i++) {
            snew(gcount[i], groups->grps[i].nr);
        }

        for (i = 0; (i < mtop.natoms); i++) {
            for (j = 0; (j < egcNR); j++) {
                gcount[j][ggrpnr(groups, j, i)]++;
            }
        }
        printf("Group statistics\n");
        for (i = 0; (i < egcNR); i++) {
            atot = 0;
            printf("%-12s: ", gtypes[i]);
            for (j = 0; (j < groups->grps[i].nr); j++) {
                printf("  %5d", gcount[i][j]);
                atot += gcount[i][j];
            }
            printf("  (total %d atoms)\n", atot);
            sfree(gcount[i]);
        }
        sfree(gcount);
    }
    done_state(&state);
    sfree(f);
}
void init_QMMMrec(t_commrec *cr, matrix box, gmx_mtop_t *mtop, t_inputrec *ir,
                  t_forcerec *fr)
{
    /* We put the atom numbers of atoms that belong to the QMMM group in
     * an array that will be copied later to QMMMrec->indexQM[..]. Also
     * it will be used to create a QMMMrec->bQMMM index array that
     * simply contains true/false for QM and MM (the other) atoms.
     */
    gmx_groups_t            *groups;
    atom_id                 *qm_arr = NULL, vsite, ai, aj;
    int                      qm_max = 0, qm_nr = 0, i, j, jmax, k, l, nrvsite2 = 0;
    t_QMMMrec               *qr;
    t_MMrec                 *mm;
    t_iatom                 *iatoms;
    real                     c12au, c6au;
    gmx_mtop_atomloop_all_t  aloop;
    t_atom                  *atom;
    gmx_mtop_ilistloop_all_t iloop;
    int                      a_offset;
    t_ilist                 *ilist_mol;

    c6au  = (HARTREE2KJ*AVOGADRO*pow(BOHR2NM, 6));
    c12au = (HARTREE2KJ*AVOGADRO*pow(BOHR2NM, 12));
    fprintf(stderr, "there we go!\n");

    /* Make a local copy of the QMMMrec */
    qr = fr->qr;

    /* bQMMM[..] is an array containing TRUE/FALSE for atoms that are
     * QM/not QM. We first set all elements to FALSE. Afterwards we use
     * the qm_arr (=MMrec->indexQM) to change the elements
     * corresponding to the QM atoms to TRUE.
     */
    qr->QMMMscheme = ir->QMMMscheme;

    /* We take into account the possibility that a user has
     * defined more than one QM group:
     */
    /* An ugly work-around in case there is only one group. In this case
     * the whole system is treated as QM. Otherwise the second group is
     * always the rest of the total system and is treated as MM.
     */

    /* Small problem if there is only QM... so no MM. */
    jmax = ir->opts.ngQM;

    if (qr->QMMMscheme == eQMMMschemeoniom) {
        qr->nrQMlayers = jmax;
    }
    else {
        qr->nrQMlayers = 1;
    }

    groups = &mtop->groups;

    /* There are jmax groups of QM atoms. In case of multiple QM groups
     * I assume that the user wants to do ONIOM. However, maybe it
     * should also be possible to define more than one QM subsystem with
     * independent neighbourlists. I have to think about
     * that.. 11-11-2003
     */
    snew(qr->qm, jmax);
    for (j = 0; j < jmax; j++) {
        /* new layer */
        aloop = gmx_mtop_atomloop_all_init(mtop);
        while (gmx_mtop_atomloop_all_next(aloop, &i, &atom)) {
            if (qm_nr >= qm_max) {
                qm_max += 1000;
                srenew(qm_arr, qm_max);
            }
            if (ggrpnr(groups, egcQMMM, i) == j) {
                /* hack for tip4p */
                qm_arr[qm_nr++] = i;
            }
        }
        if (qr->QMMMscheme == eQMMMschemeoniom) {
            /* add the atoms to the bQMMM array */

            /* I assume that users specify the QM groups from small to
             * big(ger) in the mdp file
             */
            qr->qm[j] = mk_QMrec();
            /* We need to throw out link atoms that existed in the previous
             * layer to separate this QM layer from the previous QM layer.
             * We use the iatoms array in the idef for that purpose. If all
             * atoms defining the current Link Atom (Dummy2) are part of the
             * current QM layer it needs to be removed from qm_arr[].
             */
            iloop = gmx_mtop_ilistloop_all_init(mtop);
            while (gmx_mtop_ilistloop_all_next(iloop, &ilist_mol, &a_offset)) {
                nrvsite2 = ilist_mol[F_VSITE2].nr;
                iatoms   = ilist_mol[F_VSITE2].iatoms;

                for (k = 0; k < nrvsite2; k += 4) {
                    vsite = a_offset + iatoms[k+1]; /* the vsite         */
                    ai    = a_offset + iatoms[k+2]; /* constructing atom */
                    aj    = a_offset + iatoms[k+3]; /* constructing atom */

                    if (ggrpnr(groups, egcQMMM, vsite) == ggrpnr(groups, egcQMMM, ai) &&
                        ggrpnr(groups, egcQMMM, vsite) == ggrpnr(groups, egcQMMM, aj)) {
                        /* This dummy link atom needs to be removed from qm_arr
                         * before making the QMrec of this layer!
                         */
                        for (i = 0; i < qm_nr; i++) {
                            if (qm_arr[i] == vsite) {
                                /* drop the element */
                                for (l = i; l < qm_nr; l++) {
                                    qm_arr[l] = qm_arr[l+1];
                                }
                                qm_nr--;
                            }
                        }
                    }
                }
            }

            /* store QM atoms in this layer in the QMrec and initialise layer */
            init_QMrec(j, qr->qm[j], qm_nr, qm_arr, mtop, ir);

            /* We now store the LJ C6 and C12 parameters in the QMrec in case
             * we need to do an optimization
             */
            if (qr->qm[j]->bOPT || qr->qm[j]->bTS) {
                for (i = 0; i < qm_nr; i++) {
                    gmx_mtop_atomnr_to_atom(mtop, qm_arr[i], &atom);
                    qr->qm[j]->c6[i]  = C6(fr->nbfp, mtop->ffparams.atnr,
                                           atom->type, atom->type)/c6au;
                    qr->qm[j]->c12[i] = C12(fr->nbfp, mtop->ffparams.atnr,
                                            atom->type, atom->type)/c12au;
                }
            }
            /* Now we check for frontier QM atoms. These occur in pairs that
             * construct the vsite
             */
            iloop = gmx_mtop_ilistloop_all_init(mtop);
            while (gmx_mtop_ilistloop_all_next(iloop, &ilist_mol, &a_offset)) {
                nrvsite2 = ilist_mol[F_VSITE2].nr;
                iatoms   = ilist_mol[F_VSITE2].iatoms;

                for (k = 0; k < nrvsite2; k += 4) {
                    vsite = a_offset + iatoms[k+1]; /* the vsite         */
                    ai    = a_offset + iatoms[k+2]; /* constructing atom */
                    aj    = a_offset + iatoms[k+3]; /* constructing atom */

                    if (ggrpnr(groups, egcQMMM, ai) < (groups->grps[egcQMMM].nr-1) &&
                        (ggrpnr(groups, egcQMMM, aj) >= (groups->grps[egcQMMM].nr-1))) {
                        /* mark ai as frontier atom */
                        for (i = 0; i < qm_nr; i++) {
                            if ((qm_arr[i] == ai) || (qm_arr[i] == vsite)) {
                                qr->qm[j]->frontatoms[i] = TRUE;
                            }
                        }
                    }
                    else if (ggrpnr(groups, egcQMMM, aj) < (groups->grps[egcQMMM].nr-1) &&
                             (ggrpnr(groups, egcQMMM, ai) >= (groups->grps[egcQMMM].nr-1))) {
                        /* mark aj as frontier atom */
                        for (i = 0; i < qm_nr; i++) {
                            if ((qm_arr[i] == aj) || (qm_arr[i] == vsite)) {
                                qr->qm[j]->frontatoms[i] = TRUE;
                            }
                        }
                    }
                }
            }
        }
    }
    if (qr->QMMMscheme != eQMMMschemeoniom) {
        /* Standard QMMM, all layers are merged together so there is one QM
         * subsystem and one MM subsystem.
         * Also we set the charges to zero in the md->charge arrays to prevent
         * the innerloops from doubly counting the electrostatic QM MM interaction.
         */
        for (k = 0; k < qm_nr; k++) {
            gmx_mtop_atomnr_to_atom(mtop, qm_arr[k], &atom);
            atom->q  = 0.0;
            atom->qB = 0.0;
        }
        qr->qm[0] = mk_QMrec();
        /* store QM atoms in the QMrec and initialise */
        init_QMrec(0, qr->qm[0], qm_nr, qm_arr, mtop, ir);
        if (qr->qm[0]->bOPT || qr->qm[0]->bTS) {
            for (i = 0; i < qm_nr; i++) {
                gmx_mtop_atomnr_to_atom(mtop, qm_arr[i], &atom);
                qr->qm[0]->c6[i]  = C6(fr->nbfp, mtop->ffparams.atnr,
                                       atom->type, atom->type)/c6au;
                qr->qm[0]->c12[i] = C12(fr->nbfp, mtop->ffparams.atnr,
                                        atom->type, atom->type)/c12au;
            }
        }

        /* Find frontier atoms and mark them true in the frontieratoms array. */
        for (i = 0; i < qm_nr; i++) {
            gmx_mtop_atomnr_to_ilist(mtop, qm_arr[i], &ilist_mol, &a_offset);
            nrvsite2 = ilist_mol[F_VSITE2].nr;
            iatoms   = ilist_mol[F_VSITE2].iatoms;

            for (k = 0; k < nrvsite2; k += 4) {
                vsite = a_offset + iatoms[k+1]; /* the vsite         */
                ai    = a_offset + iatoms[k+2]; /* constructing atom */
                aj    = a_offset + iatoms[k+3]; /* constructing atom */

                if (ggrpnr(groups, egcQMMM, ai) < (groups->grps[egcQMMM].nr-1) &&
                    (ggrpnr(groups, egcQMMM, aj) >= (groups->grps[egcQMMM].nr-1))) {
                    /* mark ai as frontier atom */
                    if ((qm_arr[i] == ai) || (qm_arr[i] == vsite)) {
                        qr->qm[0]->frontatoms[i] = TRUE;
                    }
                }
                else if (ggrpnr(groups, egcQMMM, aj) < (groups->grps[egcQMMM].nr-1) &&
                         (ggrpnr(groups, egcQMMM, ai) >= (groups->grps[egcQMMM].nr-1))) {
                    /* mark aj as frontier atom */
                    if ((qm_arr[i] == aj) || (qm_arr[i] == vsite)) {
                        qr->qm[0]->frontatoms[i] = TRUE;
                    }
                }
            }
        }
        /* MM rec creation */
        mm              = mk_MMrec();
        mm->scalefactor = ir->scalefactor;
        mm->nrMMatoms   = (mtop->natoms)-(qr->qm[0]->nrQMatoms); /* rest of the atoms */
        qr->mm          = mm;
    }
    else { /* ONIOM */
        /* MM rec creation */
        mm              = mk_MMrec();
        mm->scalefactor = ir->scalefactor;
        mm->nrMMatoms   = 0;
        qr->mm          = mm;
    }

    /* These variables get updated in the update QMMMrec */
    if (qr->nrQMlayers == 1) {
        /* With only one layer there is only one initialisation
         * needed. Multilayer is a bit more complicated as it requires
         * re-initialisation at every step of the simulation. This is due
         * to the use of COMMON blocks in the fortran QM subroutines.
         */
        if (qr->qm[0]->QMmethod < eQMmethodRHF) {
#ifdef GMX_QMMM_MOPAC
            /* semi-empirical 1-layer ONIOM calculation requested (mopac93) */
            init_mopac(cr, qr->qm[0], qr->mm);
#else
            gmx_fatal(FARGS, "Semi-empirical QM only supported with Mopac.");
#endif
        }
        else {
            /* ab initio calculation requested (gamess/gaussian/ORCA) */
#ifdef GMX_QMMM_GAMESS
            init_gamess(cr, qr->qm[0], qr->mm);
#elif defined GMX_QMMM_GAUSSIAN
            init_gaussian(cr, qr->qm[0], qr->mm);
#elif defined GMX_QMMM_ORCA
            init_orca(cr, qr->qm[0], qr->mm);
#else
            gmx_fatal(FARGS, "Ab-initio calculation only supported with Gamess, Gaussian or ORCA.");
#endif
        }
    }
} /* init_QMMMrec */
gmx_mdoutf_t init_mdoutf(FILE *fplog, int nfile, const t_filenm fnm[],
                         int mdrun_flags, const t_commrec *cr,
                         const t_inputrec *ir, gmx_mtop_t *top_global,
                         const gmx_output_env_t *oenv, gmx_wallcycle_t wcycle)
{
    gmx_mdoutf_t of;
    char         filemode[3];
    gmx_bool     bAppendFiles, bCiteTng = FALSE;
    int          i;

    snew(of, 1);

    of->fp_trn       = NULL;
    of->fp_ene       = NULL;
    of->fp_xtc       = NULL;
    of->tng          = NULL;
    of->tng_low_prec = NULL;
    of->fp_dhdl      = NULL;
    of->fp_field     = NULL;

    of->eIntegrator             = ir->eI;
    of->bExpanded               = ir->bExpanded;
    of->elamstats               = ir->expandedvals->elamstats;
    of->simulation_part         = ir->simulation_part;
    of->x_compression_precision = static_cast<int>(ir->x_compression_precision);
    of->wcycle                  = wcycle;

    if (MASTER(cr)) {
        bAppendFiles = (mdrun_flags & MD_APPENDFILES);

        of->bKeepAndNumCPT = (mdrun_flags & MD_KEEPANDNUMCPT);

        sprintf(filemode, bAppendFiles ? "a+" : "w+");

        if ((EI_DYNAMICS(ir->eI) || EI_ENERGY_MINIMIZATION(ir->eI))
#ifndef GMX_FAHCORE
            &&
            !(EI_DYNAMICS(ir->eI) &&
              ir->nstxout == 0 &&
              ir->nstvout == 0 &&
              ir->nstfout == 0)
#endif
            )
        {
            const char *filename;
            filename = ftp2fn(efTRN, nfile, fnm);
            switch (fn2ftp(filename)) {
                case efTRR:
                case efTRN:
                    of->fp_trn = gmx_trr_open(filename, filemode);
                    break;
                case efTNG:
                    gmx_tng_open(filename, filemode[0], &of->tng);
                    if (filemode[0] == 'w') {
                        gmx_tng_prepare_md_writing(of->tng, top_global, ir);
                    }
                    bCiteTng = TRUE;
                    break;
                default:
                    gmx_incons("Invalid full precision file format");
            }
        }
        if (EI_DYNAMICS(ir->eI) && ir->nstxout_compressed > 0) {
            const char *filename;
            filename = ftp2fn(efCOMPRESSED, nfile, fnm);
            switch (fn2ftp(filename)) {
                case efXTC:
                    of->fp_xtc = open_xtc(filename, filemode);
                    break;
                case efTNG:
                    gmx_tng_open(filename, filemode[0], &of->tng_low_prec);
                    if (filemode[0] == 'w') {
                        gmx_tng_prepare_low_prec_writing(of->tng_low_prec, top_global, ir);
                    }
                    bCiteTng = TRUE;
                    break;
                default:
                    gmx_incons("Invalid reduced precision file format");
            }
        }
        if (EI_DYNAMICS(ir->eI) || EI_ENERGY_MINIMIZATION(ir->eI)) {
            of->fp_ene = open_enx(ftp2fn(efEDR, nfile, fnm), filemode);
        }
        of->fn_cpt = opt2fn("-cpo", nfile, fnm);

        if ((ir->efep != efepNO || ir->bSimTemp) && ir->fepvals->nstdhdl > 0 &&
            (ir->fepvals->separate_dhdl_file == esepdhdlfileYES) &&
            EI_DYNAMICS(ir->eI))
        {
            if (bAppendFiles) {
                of->fp_dhdl = gmx_fio_fopen(opt2fn("-dhdl", nfile, fnm), filemode);
            }
            else {
                of->fp_dhdl = open_dhdl(opt2fn("-dhdl", nfile, fnm), ir, oenv);
            }
        }

        if (opt2bSet("-field", nfile, fnm) &&
            (ir->ex[XX].n || ir->ex[YY].n || ir->ex[ZZ].n))
        {
            if (bAppendFiles) {
                of->fp_field = gmx_fio_fopen(opt2fn("-field", nfile, fnm), filemode);
            }
            else {
                of->fp_field = xvgropen(opt2fn("-field", nfile, fnm),
                                        "Applied electric field", "Time (ps)",
                                        "E (V/nm)", oenv);
            }
        }

        /* Set up atom counts so they can be passed to actual
         * trajectory-writing routines later. Also, XTC writing needs
         * to know what (and how many) atoms might be in the XTC
         * groups, and how to look up later which ones they are. */
        of->natoms_global       = top_global->natoms;
        of->groups              = &top_global->groups;
        of->natoms_x_compressed = 0;
        for (i = 0; (i < top_global->natoms); i++) {
            if (ggrpnr(of->groups, egcCompressedX, i) == 0) {
                of->natoms_x_compressed++;
            }
        }
    }

    if (bCiteTng) {
        please_cite(fplog, "Lundborg2014");
    }

    return of;
}
void mdoutf_write_to_trajectory_files(FILE *fplog, t_commrec *cr,
                                      gmx_mdoutf_t of,
                                      int mdof_flags,
                                      gmx_mtop_t *top_global,
                                      gmx_int64_t step, double t,
                                      t_state *state_local, t_state *state_global,
                                      rvec *f_local, rvec *f_global)
{
    rvec *local_v;
    rvec *global_v;

    /* MRS -- defining these variables is to manage the difference
     * between half step and full step velocities, but there must be a better way . . . */
    local_v  = state_local->v;
    global_v = state_global->v;

    if (DOMAINDECOMP(cr)) {
        if (mdof_flags & MDOF_CPT) {
            dd_collect_state(cr->dd, state_local, state_global);
        }
        else {
            if (mdof_flags & (MDOF_X | MDOF_X_COMPRESSED)) {
                dd_collect_vec(cr->dd, state_local, state_local->x,
                               state_global->x);
            }
            if (mdof_flags & MDOF_V) {
                dd_collect_vec(cr->dd, state_local, local_v,
                               global_v);
            }
        }
        if (mdof_flags & MDOF_F) {
            dd_collect_vec(cr->dd, state_local, f_local, f_global);
        }
    }
    else {
        if (mdof_flags & MDOF_CPT) {
            /* All pointers in state_local are equal to state_global,
             * but we need to copy the non-pointer entries.
             */
            state_global->lambda = state_local->lambda;
            state_global->veta   = state_local->veta;
            state_global->vol0   = state_local->vol0;
            copy_mat(state_local->box, state_global->box);
            copy_mat(state_local->boxv, state_global->boxv);
            copy_mat(state_local->svir_prev, state_global->svir_prev);
            copy_mat(state_local->fvir_prev, state_global->fvir_prev);
            copy_mat(state_local->pres_prev, state_global->pres_prev);
        }
    }

    if (MASTER(cr)) {
        if (mdof_flags & MDOF_CPT) {
            fflush_tng(of->tng);
            fflush_tng(of->tng_low_prec);
            write_checkpoint(of->fn_cpt, of->bKeepAndNumCPT,
                             fplog, cr, of->eIntegrator, of->simulation_part,
                             of->bExpanded, of->elamstats, step, t, state_global);
        }

        if (mdof_flags & (MDOF_X | MDOF_V | MDOF_F)) {
            if (of->fp_trn) {
                gmx_trr_write_frame(of->fp_trn, step, t, state_local->lambda[efptFEP],
                                    state_local->box, top_global->natoms,
                                    (mdof_flags & MDOF_X) ? state_global->x : NULL,
                                    (mdof_flags & MDOF_V) ? global_v : NULL,
                                    (mdof_flags & MDOF_F) ? f_global : NULL);
                if (gmx_fio_flush(of->fp_trn) != 0) {
                    gmx_file("Cannot write trajectory; maybe you are out of disk space?");
                }
            }

            gmx_fwrite_tng(of->tng, FALSE, step, t, state_local->lambda[efptFEP],
                           state_local->box,
                           top_global->natoms,
                           (mdof_flags & MDOF_X) ? state_global->x : NULL,
                           (mdof_flags & MDOF_V) ? global_v : NULL,
                           (mdof_flags & MDOF_F) ? f_global : NULL);
        }
        if (mdof_flags & MDOF_X_COMPRESSED) {
            rvec *xxtc = NULL;

            if (of->natoms_x_compressed == of->natoms_global) {
                /* We are writing the positions of all of the atoms to
                 * the compressed output */
                xxtc = state_global->x;
            }
            else {
                /* We are writing the positions of only a subset of
                 * the atoms to the compressed output, so we have to
                 * make a copy of the subset of coordinates. */
                int i, j;

                snew(xxtc, of->natoms_x_compressed);
                for (i = 0, j = 0; (i < of->natoms_global); i++) {
                    if (ggrpnr(of->groups, egcCompressedX, i) == 0) {
                        copy_rvec(state_global->x[i], xxtc[j++]);
                    }
                }
            }
            if (write_xtc(of->fp_xtc, of->natoms_x_compressed, step, t,
                          state_local->box, xxtc, of->x_compression_precision) == 0) {
                gmx_fatal(FARGS, "XTC error - maybe you are out of disk space?");
            }
            gmx_fwrite_tng(of->tng_low_prec,
                           TRUE,
                           step,
                           t,
                           state_local->lambda[efptFEP],
                           state_local->box,
                           of->natoms_x_compressed,
                           xxtc,
                           NULL,
                           NULL);
            if (of->natoms_x_compressed != of->natoms_global) {
                sfree(xxtc);
            }
        }
    }
}
/* Create a TNG molecule representing the selection groups
 * to write */
static void add_selection_groups(tng_trajectory_t  tng,
                                 const gmx_mtop_t *mtop)
{
    const gmx_moltype_t *molType;
    const t_atoms       *atoms;
    const t_atom        *at;
    const t_resinfo     *resInfo;
    const t_ilist       *ilist;
    int                  nAtoms = 0, i = 0, j, molIt, atomIt, nameIndex;
    int                  atom_offset = 0;
    tng_molecule_t       mol, iterMol;
    tng_chain_t          chain;
    tng_residue_t        res;
    tng_atom_t           atom;
    tng_bond_t           tngBond;
    gmx_int64_t          nMols;
    char                *groupName;

    /* The name of the TNG molecule containing the selection group is the
     * same as the name of the selection group. */
    nameIndex = *mtop->groups.grps[egcCompressedX].nm_ind;
    groupName = *mtop->groups.grpname[nameIndex];

    tng_molecule_alloc(tng, &mol);
    tng_molecule_name_set(tng, mol, groupName);
    tng_molecule_chain_add(tng, mol, "", &chain);
    for (molIt = 0; molIt < mtop->nmolblock; molIt++) {
        molType = &mtop->moltype[mtop->molblock[molIt].type];

        atoms = &molType->atoms;

        for (j = 0; j < mtop->molblock[molIt].nmol; j++) {
            bool bAtomsAdded = FALSE;
            for (atomIt = 0; atomIt < atoms->nr; atomIt++, i++) {
                char *res_name;
                int   res_id;

                if (ggrpnr(&mtop->groups, egcCompressedX, i) != 0) {
                    continue;
                }
                at = &atoms->atom[atomIt];
                if (atoms->nres > 0) {
                    resInfo = &atoms->resinfo[at->resind];
                    /* FIXME: When TNG supports both residue index and
                     * residue number the latter should be used. */
                    res_name = *resInfo->name;
                    res_id   = at->resind + 1;
                }
                else {
                    res_name = (char *)"";
                    res_id   = 0;
                }
                if (tng_chain_residue_find(tng, chain, res_name, res_id, &res)
                    != TNG_SUCCESS) {
                    /* Since there is ONE chain for selection groups do not
                     * keep the original residue IDs - otherwise there might
                     * be conflicts. */
                    tng_chain_residue_add(tng, chain, res_name, &res);
                }
                tng_residue_atom_w_id_add(tng, res, *(atoms->atomname[atomIt]),
                                          *(atoms->atomtype[atomIt]),
                                          atom_offset + atomIt, &atom);
                nAtoms++;
                bAtomsAdded = TRUE;
            }
            /* Add bonds. */
            if (bAtomsAdded) {
                for (int k = 0; k < F_NRE; k++) {
                    if (IS_CHEMBOND(k)) {
                        ilist = &molType->ilist[k];
                        if (ilist) {
                            int l = 1;
                            while (l < ilist->nr) {
                                int atom1, atom2;
                                atom1 = ilist->iatoms[l] + atom_offset;
                                atom2 = ilist->iatoms[l+1] + atom_offset;
                                if (ggrpnr(&mtop->groups, egcCompressedX, atom1) == 0 &&
                                    ggrpnr(&mtop->groups, egcCompressedX, atom2) == 0) {
                                    tng_molecule_bond_add(tng, mol, ilist->iatoms[l],
                                                          ilist->iatoms[l+1], &tngBond);
                                }
                                l += 3;
                            }
                        }
                    }
                }
                /* Settle is described using three atoms */
                ilist = &molType->ilist[F_SETTLE];
                if (ilist) {
                    int l = 1;
                    while (l < ilist->nr) {
                        int atom1, atom2, atom3;
                        atom1 = ilist->iatoms[l] + atom_offset;
                        atom2 = ilist->iatoms[l+1] + atom_offset;
                        atom3 = ilist->iatoms[l+2] + atom_offset;
                        if (ggrpnr(&mtop->groups, egcCompressedX, atom1) == 0) {
                            if (ggrpnr(&mtop->groups, egcCompressedX, atom2) == 0) {
                                tng_molecule_bond_add(tng, mol, atom1,
                                                      atom2, &tngBond);
                            }
                            if (ggrpnr(&mtop->groups, egcCompressedX, atom3) == 0) {
                                tng_molecule_bond_add(tng, mol, atom1,
                                                      atom3, &tngBond);
                            }
                        }
                        l += 4;
                    }
                }
            }
            atom_offset += atoms->nr;
        }
    }
    if (nAtoms != i) {
        tng_molecule_existing_add(tng, &mol);
        tng_molecule_cnt_set(tng, mol, 1);
        tng_num_molecule_types_get(tng, &nMols);
        for (gmx_int64_t k = 0; k < nMols; k++) {
            tng_molecule_of_index_get(tng, k, &iterMol);
            if (iterMol == mol) {
                continue;
            }
            tng_molecule_cnt_set(tng, iterMol, 0);
        }
    }
    else {
        tng_molecule_free(tng, &mol);
    }
}
void init_orires(FILE *fplog, const gmx_mtop_t *mtop,
                 rvec xref[],
                 const t_inputrec *ir,
                 const gmx_multisim_t *ms, t_oriresdata *od,
                 t_state *state)
{
    int                     i, j, d, ex, nmol, nr, *nr_ex;
    double                  mtot;
    rvec                    com;
    gmx_mtop_ilistloop_t    iloop;
    t_ilist                *il;
    gmx_mtop_atomloop_all_t aloop;
    t_atom                 *atom;

    od->fc  = ir->orires_fc;
    od->nex = 0;
    od->S   = NULL;

    od->nr = gmx_mtop_ftype_count(mtop, F_ORIRES);
    if (od->nr == 0) {
        return;
    }

    nr_ex = NULL;

    iloop = gmx_mtop_ilistloop_init(mtop);
    while (gmx_mtop_ilistloop_next(iloop, &il, &nmol)) {
        for (i = 0; i < il[F_ORIRES].nr; i += 3) {
            ex = mtop->ffparams.iparams[il[F_ORIRES].iatoms[i]].orires.ex;
            if (ex >= od->nex) {
                srenew(nr_ex, ex+1);
                for (j = od->nex; j < ex+1; j++) {
                    nr_ex[j] = 0;
                }
                od->nex = ex+1;
            }
            nr_ex[ex]++;
        }
    }
    snew(od->S, od->nex);
    /* When not doing time averaging, the instantaneous and time averaged data
     * are identical and the pointers can point to the same memory.
     */
    snew(od->Dinsl, od->nr);
    if (ms) {
        snew(od->Dins, od->nr);
    }
    else {
        od->Dins = od->Dinsl;
    }

    if (ir->orires_tau == 0) {
        od->Dtav = od->Dins;
        od->edt  = 0.0;
        od->edt1 = 1.0;
    }
    else {
        snew(od->Dtav, od->nr);
        od->edt  = exp(-ir->delta_t/ir->orires_tau);
        od->edt1 = 1.0 - od->edt;

        /* Extend the state with the orires history */
        state->flags           |= (1<<estORIRE_INITF);
        state->hist.orire_initf = 1;
        state->flags           |= (1<<estORIRE_DTAV);
        state->hist.norire_Dtav = od->nr*5;
        snew(state->hist.orire_Dtav, state->hist.norire_Dtav);
    }

    snew(od->oinsl, od->nr);
    if (ms) {
        snew(od->oins, od->nr);
    }
    else {
        od->oins = od->oinsl;
    }
    if (ir->orires_tau == 0) {
        od->otav = od->oins;
    }
    else {
        snew(od->otav, od->nr);
    }
    snew(od->tmp, od->nex);
    snew(od->TMP, od->nex);
    for (ex = 0; ex < od->nex; ex++) {
        snew(od->TMP[ex], 5);
        for (i = 0; i < 5; i++) {
            snew(od->TMP[ex][i], 5);
        }
    }

    od->nref = 0;
    for (i = 0; i < mtop->natoms; i++) {
        if (ggrpnr(&mtop->groups, egcORFIT, i) == 0) {
            od->nref++;
        }
    }
    snew(od->mref, od->nref);
    snew(od->xref, od->nref);
    snew(od->xtmp, od->nref);

    snew(od->eig, od->nex*12);

    /* Determine the reference structure on the master node.
     * Copy it to the other nodes after checking multi compatibility,
     * so we are sure the subsystems match before copying.
     */
    clear_rvec(com);
    mtot  = 0.0;
    j     = 0;
    aloop = gmx_mtop_atomloop_all_init(mtop);
    while (gmx_mtop_atomloop_all_next(aloop, &i, &atom)) {
        if (mtop->groups.grpnr[egcORFIT] == NULL ||
            mtop->groups.grpnr[egcORFIT][i] == 0) {
            /* Not correct for free-energy with changing masses */
            od->mref[j] = atom->m;
            if (ms == NULL || MASTERSIM(ms)) {
                copy_rvec(xref[i], od->xref[j]);
                for (d = 0; d < DIM; d++) {
                    com[d] += od->mref[j]*xref[i][d];
                }
            }
            mtot += od->mref[j];
            j++;
        }
    }
    svmul(1.0/mtot, com, com);
    if (ms == NULL || MASTERSIM(ms)) {
        for (j = 0; j < od->nref; j++) {
            rvec_dec(od->xref[j], com);
        }
    }

    fprintf(fplog, "Found %d orientation experiments\n", od->nex);
    for (i = 0; i < od->nex; i++) {
        fprintf(fplog, " experiment %d has %d restraints\n", i+1, nr_ex[i]);
    }

    sfree(nr_ex);

    fprintf(fplog, " the fit group consists of %d atoms and has total mass %g\n",
            od->nref, mtot);

    if (ms) {
        fprintf(fplog, " the orientation restraints are ensemble averaged over %d systems\n", ms->nsim);

        check_multi_int(fplog, ms, od->nr,
                        "the number of orientation restraints");
        check_multi_int(fplog, ms, od->nref,
                        "the number of fit atoms for orientation restraining");
        check_multi_int(fplog, ms, ir->nsteps, "nsteps");
        /* Copy the reference coordinates from the master to the other nodes */
        gmx_sum_sim(DIM*od->nref, od->xref[0], ms);
    }

    please_cite(fplog, "Hess2003");
}
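/* Illustration only: the edt/edt1 factors computed in init_orires() above are
 * the weights of an exponential running average with time constant
 * orires_tau. A scalar version of such an update (a hypothetical helper; the
 * actual restraint code averages order-parameter tensors) would look like:
 */
static double exp_running_average(double previousAverage, double currentValue,
                                  double edt, double edt1)
{
    /* edt = exp(-delta_t/tau), edt1 = 1 - edt */
    return edt*previousAverage + edt1*currentValue;
}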
void atoms2md(const gmx_mtop_t *mtop, const t_inputrec *ir,
              int nindex, const int *index,
              int homenr,
              t_mdatoms *md)
{
    gmx_bool              bLJPME;
    gmx_mtop_atomlookup_t alook;
    int                   i;
    const t_grpopts      *opts;
    const gmx_groups_t   *groups;
    int                   nthreads gmx_unused;
    const real            oneOverSix = 1.0 / 6.0;

    bLJPME = EVDW_PME(ir->vdwtype);

    opts = &ir->opts;

    groups = &mtop->groups;

    /* Index==NULL indicates no DD (unless we have a DD node with no
     * atoms), so also check for homenr. This should be
     * signaled properly with an extra parameter or nindex==-1.
     */
    if (index == NULL && (homenr > 0)) {
        md->nr = mtop->natoms;
    }
    else {
        md->nr = nindex;
    }

    if (md->nr > md->nalloc) {
        md->nalloc = over_alloc_dd(md->nr);

        if (md->nMassPerturbed) {
            srenew(md->massA, md->nalloc);
            srenew(md->massB, md->nalloc);
        }
        srenew(md->massT, md->nalloc);
        srenew(md->invmass, md->nalloc);
        srenew(md->chargeA, md->nalloc);
        srenew(md->typeA, md->nalloc);
        if (md->nPerturbed) {
            srenew(md->chargeB, md->nalloc);
            srenew(md->typeB, md->nalloc);
        }
        if (bLJPME) {
            srenew(md->sqrt_c6A, md->nalloc);
            srenew(md->sigmaA, md->nalloc);
            srenew(md->sigma3A, md->nalloc);
            if (md->nPerturbed) {
                srenew(md->sqrt_c6B, md->nalloc);
                srenew(md->sigmaB, md->nalloc);
                srenew(md->sigma3B, md->nalloc);
            }
        }
        srenew(md->ptype, md->nalloc);
        if (opts->ngtc > 1) {
            srenew(md->cTC, md->nalloc);
            /* We always copy cTC with domain decomposition */
        }
        srenew(md->cENER, md->nalloc);
        if (opts->ngacc > 1) {
            srenew(md->cACC, md->nalloc);
        }
        if (opts->nFreeze &&
            (opts->ngfrz > 1 ||
             opts->nFreeze[0][XX] || opts->nFreeze[0][YY] || opts->nFreeze[0][ZZ])) {
            srenew(md->cFREEZE, md->nalloc);
        }
        if (md->bVCMgrps) {
            srenew(md->cVCM, md->nalloc);
        }
        if (md->bOrires) {
            srenew(md->cORF, md->nalloc);
        }
        if (md->nPerturbed) {
            srenew(md->bPerturbed, md->nalloc);
        }

        /* Note that these user t_mdatoms array pointers are NULL
         * when there is only one group present.
         * Therefore, when adding code, the user should use something like:
         * gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
         */
        if (mtop->groups.grpnr[egcUser1] != NULL) {
            srenew(md->cU1, md->nalloc);
        }
        if (mtop->groups.grpnr[egcUser2] != NULL) {
            srenew(md->cU2, md->nalloc);
        }

        if (ir->bQMMM) {
            srenew(md->bQM, md->nalloc);
        }
        if (ir->bAdress) {
            srenew(md->wf, md->nalloc);
            srenew(md->tf_table_index, md->nalloc);
        }
    }

    alook = gmx_mtop_atomlookup_init(mtop);

    // cppcheck-suppress unreadVariable
    nthreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (i = 0; i < md->nr; i++) {
        try {
            int     g, ag;
            real    mA, mB, fac;
            real    c6, c12;
            t_atom *atom;

            if (index == NULL) {
                ag = i;
            }
            else {
                ag = index[i];
            }
            gmx_mtop_atomnr_to_atom(alook, ag, &atom);

            if (md->cFREEZE) {
                md->cFREEZE[i] = ggrpnr(groups, egcFREEZE, ag);
            }
            if (EI_ENERGY_MINIMIZATION(ir->eI)) {
                /* Displacement is proportional to F, masses used for constraints */
                mA = 1.0;
                mB = 1.0;
            }
            else if (ir->eI == eiBD) {
                /* With BD the physical masses are irrelevant.
                 * To keep the code simple we use most of the normal MD code path
                 * for BD. Thus for constraining the masses should be proportional
                 * to the friction coefficient. We set the absolute value such that
                 * m/2<(dx/dt)^2> = m/2*2kT/fric*dt = kT/2 => m=fric*dt/2
                 * Then if we set the (meaningless) velocity to v=dx/dt, we get the
                 * correct kinetic energy and temperature using the usual code path.
                 * Thus with BD v*dt will give the displacement and the reported
                 * temperature can signal bad integration (too large time step).
                 */
                if (ir->bd_fric > 0) {
                    mA = 0.5*ir->bd_fric*ir->delta_t;
                    mB = 0.5*ir->bd_fric*ir->delta_t;
                }
                else {
                    /* The friction coefficient is mass/tau_t */
                    fac = ir->delta_t/opts->tau_t[md->cTC ? groups->grpnr[egcTC][ag] : 0];
                    mA  = 0.5*atom->m*fac;
                    mB  = 0.5*atom->mB*fac;
                }
            }
            else {
                mA = atom->m;
                mB = atom->mB;
            }
            if (md->nMassPerturbed) {
                md->massA[i] = mA;
                md->massB[i] = mB;
            }
            md->massT[i] = mA;
            if (mA == 0.0) {
                md->invmass[i] = 0;
            }
            else if (md->cFREEZE) {
                g = md->cFREEZE[i];
                if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ]) {
                    /* Set the mass of completely frozen particles to ALMOST_ZERO
                     * instead of 0 to avoid div by zero in lincs or shake.
                     * Note that constraints can still move a partially frozen particle.
                     */
                    md->invmass[i] = ALMOST_ZERO;
                }
                else {
                    md->invmass[i] = 1.0/mA;
                }
            }
            else {
                md->invmass[i] = 1.0/mA;
            }
            md->chargeA[i] = atom->q;
            md->typeA[i]   = atom->type;
            if (bLJPME) {
                c6  = mtop->ffparams.iparams[atom->type*(mtop->ffparams.atnr+1)].lj.c6;
                c12 = mtop->ffparams.iparams[atom->type*(mtop->ffparams.atnr+1)].lj.c12;
                md->sqrt_c6A[i] = sqrt(c6);
                if (c6 == 0.0 || c12 == 0) {
                    md->sigmaA[i] = 1.0;
                }
                else {
                    md->sigmaA[i] = pow(c12/c6, oneOverSix);
                }
                md->sigma3A[i] = 1/(md->sigmaA[i]*md->sigmaA[i]*md->sigmaA[i]);
            }
            if (md->nPerturbed) {
                md->bPerturbed[i] = PERTURBED(*atom);
                md->chargeB[i]    = atom->qB;
                md->typeB[i]      = atom->typeB;
                if (bLJPME) {
                    c6  = mtop->ffparams.iparams[atom->typeB*(mtop->ffparams.atnr+1)].lj.c6;
                    c12 = mtop->ffparams.iparams[atom->typeB*(mtop->ffparams.atnr+1)].lj.c12;
                    md->sqrt_c6B[i] = sqrt(c6);
                    if (c6 == 0.0 || c12 == 0) {
                        md->sigmaB[i] = 1.0;
                    }
                    else {
                        md->sigmaB[i] = pow(c12/c6, oneOverSix);
                    }
                    md->sigma3B[i] = 1/(md->sigmaB[i]*md->sigmaB[i]*md->sigmaB[i]);
                }
            }
            md->ptype[i] = atom->ptype;
            if (md->cTC) {
                md->cTC[i] = groups->grpnr[egcTC][ag];
            }
            md->cENER[i] = (groups->grpnr[egcENER] ? groups->grpnr[egcENER][ag] : 0);
            if (md->cACC) {
                md->cACC[i] = groups->grpnr[egcACC][ag];
            }
            if (md->cVCM) {
                md->cVCM[i] = groups->grpnr[egcVCM][ag];
            }
            if (md->cORF) {
                md->cORF[i] = groups->grpnr[egcORFIT][ag];
            }

            if (md->cU1) {
                md->cU1[i] = groups->grpnr[egcUser1][ag];
            }
            if (md->cU2) {
                md->cU2[i] = groups->grpnr[egcUser2][ag];
            }

            if (ir->bQMMM) {
                if (groups->grpnr[egcQMMM] == 0 ||
                    groups->grpnr[egcQMMM][ag] < groups->grps[egcQMMM].nr-1) {
                    md->bQM[i] = TRUE;
                }
                else {
                    md->bQM[i] = FALSE;
                }
            }
            /* Initialize AdResS weighting functions to adressw */
            if (ir->bAdress) {
                md->wf[i] = 1.0;
                /* if no tf table groups specified, use default table */
                md->tf_table_index[i] = DEFAULT_TF_TABLE;
                if (ir->adress->n_tf_grps > 0) {
                    /* if tf table groups are specified, tf is only applied to those energy groups */
                    md->tf_table_index[i] = NO_TF_TABLE;
                    /* check whether the atom is in one of the relevant energy groups and assign a table index */
                    for (g = 0; g < ir->adress->n_tf_grps; g++) {
                        if (md->cENER[i] == ir->adress->tf_table_index[g]) {
                            md->tf_table_index[i] = g;
                        }
                    }
                }
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    gmx_mtop_atomlookup_destroy(alook);

    md->homenr = homenr;
    md->lambda = 0;
}
void atoms2md(gmx_mtop_t *mtop, t_inputrec *ir,
              int nindex, int *index,
              int start, int homenr,
              t_mdatoms *md)
{
    t_atoms        *atoms_mol;
    int             i, g, ag, as, ae, molb;
    real            mA, mB, fac;
    t_atom         *atom;
    t_grpopts      *opts;
    gmx_groups_t   *groups;
    gmx_molblock_t *molblock;

    opts = &ir->opts;

    groups = &mtop->groups;

    molblock = mtop->molblock;

    if (index == NULL) {
        md->nr = mtop->natoms;
    }
    else {
        md->nr = nindex;
    }

    if (md->nr > md->nalloc) {
        md->nalloc = over_alloc_dd(md->nr);

        if (md->nMassPerturbed) {
            srenew(md->massA, md->nalloc);
            srenew(md->massB, md->nalloc);
        }
        srenew(md->massT, md->nalloc);
        srenew(md->invmass, md->nalloc);
        srenew(md->chargeA, md->nalloc);
        if (md->nPerturbed) {
            srenew(md->chargeB, md->nalloc);
        }
        srenew(md->typeA, md->nalloc);
        if (md->nPerturbed) {
            srenew(md->typeB, md->nalloc);
        }
        srenew(md->ptype, md->nalloc);
        if (opts->ngtc > 1) {
            srenew(md->cTC, md->nalloc);
            /* We always copy cTC with domain decomposition */
        }
        srenew(md->cENER, md->nalloc);
        if (opts->ngacc > 1) {
            srenew(md->cACC, md->nalloc);
        }
        if (opts->nFreeze &&
            (opts->ngfrz > 1 ||
             opts->nFreeze[0][XX] || opts->nFreeze[0][YY] || opts->nFreeze[0][ZZ])) {
            srenew(md->cFREEZE, md->nalloc);
        }
        if (md->bVCMgrps) {
            srenew(md->cVCM, md->nalloc);
        }
        if (md->bOrires) {
            srenew(md->cORF, md->nalloc);
        }
        if (md->nPerturbed) {
            srenew(md->bPerturbed, md->nalloc);
        }

        /* Note that these user t_mdatoms array pointers are NULL
         * when there is only one group present.
         * Therefore, when adding code, the user should use something like:
         * gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
         */
        if (mtop->groups.grpnr[egcUser1] != NULL) {
            srenew(md->cU1, md->nalloc);
        }
        if (mtop->groups.grpnr[egcUser2] != NULL) {
            srenew(md->cU2, md->nalloc);
        }

        if (ir->bQMMM) {
            srenew(md->bQM, md->nalloc);
        }
    }

    for (i = 0; (i < md->nr); i++) {
        if (index == NULL) {
            ag = i;
            gmx_mtop_atomnr_to_atom(mtop, ag, &atom);
        }
        else {
            ag   = index[i];
            molb = -1;
            ae   = 0;
            do {
                molb++;
                as = ae;
                ae = as + molblock[molb].nmol*molblock[molb].natoms_mol;
            }
            while (ag >= ae);
            atoms_mol = &mtop->moltype[molblock[molb].type].atoms;
            atom      = &atoms_mol->atom[(ag - as) % atoms_mol->nr];
        }

        if (md->cFREEZE) {
            md->cFREEZE[i] = ggrpnr(groups, egcFREEZE, ag);
        }
        if (EI_ENERGY_MINIMIZATION(ir->eI)) {
            mA = 1.0;
            mB = 1.0;
        }
        else if (ir->eI == eiBD) {
            /* Make the mass proportional to the friction coefficient for BD.
             * This is necessary for the constraint algorithms.
             */
            if (ir->bd_fric) {
                mA = ir->bd_fric*ir->delta_t;
                mB = ir->bd_fric*ir->delta_t;
            }
            else {
                fac = ir->delta_t/opts->tau_t[md->cTC ? groups->grpnr[egcTC][ag] : 0];
                mA  = atom->m*fac;
                mB  = atom->mB*fac;
            }
        }
        else {
            mA = atom->m;
            mB = atom->mB;
        }
        if (md->nMassPerturbed) {
            md->massA[i] = mA;
            md->massB[i] = mB;
        }
        md->massT[i] = mA;
        if (mA == 0.0) {
            md->invmass[i] = 0;
        }
        else if (md->cFREEZE) {
            g = md->cFREEZE[i];
            if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ]) {
                /* Set the mass of completely frozen particles to ALMOST_ZERO
                 * instead of 0 to avoid div by zero in lincs or shake.
                 * Note that constraints can still move a partially frozen particle.
                 */
                md->invmass[i] = ALMOST_ZERO;
            }
            else {
                md->invmass[i] = 1.0/mA;
            }
        }
        else {
            md->invmass[i] = 1.0/mA;
        }
        md->chargeA[i] = atom->q;
        md->typeA[i]   = atom->type;
        if (md->nPerturbed) {
            md->chargeB[i]    = atom->qB;
            md->typeB[i]      = atom->typeB;
            md->bPerturbed[i] = PERTURBED(*atom);
        }
        md->ptype[i] = atom->ptype;
        if (md->cTC) {
            md->cTC[i] = groups->grpnr[egcTC][ag];
        }
        md->cENER[i] = (groups->grpnr[egcENER] ? groups->grpnr[egcENER][ag] : 0);
        if (md->cACC) {
            md->cACC[i] = groups->grpnr[egcACC][ag];
        }
        if (md->cVCM) {
            md->cVCM[i] = groups->grpnr[egcVCM][ag];
        }
        if (md->cORF) {
            md->cORF[i] = groups->grpnr[egcORFIT][ag];
        }

        if (md->cU1) {
            md->cU1[i] = groups->grpnr[egcUser1][ag];
        }
        if (md->cU2) {
            md->cU2[i] = groups->grpnr[egcUser2][ag];
        }

        if (ir->bQMMM) {
            if (groups->grpnr[egcQMMM] == 0 ||
                groups->grpnr[egcQMMM][ag] < groups->grps[egcQMMM].nr-1) {
                md->bQM[i] = TRUE;
            }
            else {
                md->bQM[i] = FALSE;
            }
        }
    }

    md->start  = start;
    md->homenr = homenr;
    md->lambda = 0;
}
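/* Illustration only, following the comment in atoms2md() about the optional
 * user group arrays: code that consumes t_mdatoms should guard against the
 * pointers that stay NULL when only one group is present. A hypothetical
 * helper for the user1 groups:
 */
static int mdatoms_user1_group(const t_mdatoms *md, int localAtomIndex)
{
    /* cU1 is NULL when every atom is in user1 group 0 */
    return (md->cU1 == NULL) ? 0 : md->cU1[localAtomIndex];
}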
static void init_pull_group_index(FILE *fplog, t_commrec *cr,
                                  int start, int end,
                                  int g, t_pull_group *pg, ivec pulldims,
                                  gmx_mtop_t *mtop, t_inputrec *ir, real lambda)
{
    int                   i, ii, d, nfrozen, ndim;
    real                  m, w, mbd;
    double                tmass, wmass, wwmass;
    gmx_bool              bDomDec;
    gmx_ga2la_t           ga2la = NULL;
    gmx_groups_t         *groups;
    gmx_mtop_atomlookup_t alook;
    t_atom               *atom;

    bDomDec = (cr && DOMAINDECOMP(cr));
    if (bDomDec) {
        ga2la = cr->dd->ga2la;
    }

    if (EI_ENERGY_MINIMIZATION(ir->eI) || ir->eI == eiBD) {
        /* There are no masses in the integrator.
         * But we still want to have the correct mass-weighted COMs.
         * So we store the real masses in the weights.
         * We do not set nweight, so these weights do not end up in the tpx file.
         */
        if (pg->nweight == 0) {
            snew(pg->weight, pg->nat);
        }
    }

    if (cr && PAR(cr)) {
        pg->nat_loc    = 0;
        pg->nalloc_loc = 0;
        pg->ind_loc    = NULL;
        pg->weight_loc = NULL;
    }
    else {
        pg->nat_loc = pg->nat;
        pg->ind_loc = pg->ind;
        if (pg->epgrppbc == epgrppbcCOS) {
            snew(pg->weight_loc, pg->nat);
        }
        else {
            pg->weight_loc = pg->weight;
        }
    }

    groups = &mtop->groups;

    alook = gmx_mtop_atomlookup_init(mtop);

    nfrozen = 0;
    tmass   = 0;
    wmass   = 0;
    wwmass  = 0;
    for (i = 0; i < pg->nat; i++) {
        ii = pg->ind[i];
        gmx_mtop_atomnr_to_atom(alook, ii, &atom);
        if (cr && PAR(cr) && !bDomDec && ii >= start && ii < end) {
            pg->ind_loc[pg->nat_loc++] = ii;
        }
        if (ir->opts.nFreeze) {
            for (d = 0; d < DIM; d++) {
                if (pulldims[d] && ir->opts.nFreeze[ggrpnr(groups, egcFREEZE, ii)][d]) {
                    nfrozen++;
                }
            }
        }
        if (ir->efep == efepNO) {
            m = atom->m;
        }
        else {
            m = (1 - lambda)*atom->m + lambda*atom->mB;
        }
        if (pg->nweight > 0) {
            w = pg->weight[i];
        }
        else {
            w = 1;
        }
        if (EI_ENERGY_MINIMIZATION(ir->eI)) {
            /* Move the mass to the weight */
            w            *= m;
            m             = 1;
            pg->weight[i] = w;
        }
        else if (ir->eI == eiBD) {
            if (ir->bd_fric) {
                mbd = ir->bd_fric*ir->delta_t;
            }
            else {
                if (groups->grpnr[egcTC] == NULL) {
                    mbd = ir->delta_t/ir->opts.tau_t[0];
                }
                else {
                    mbd = ir->delta_t/ir->opts.tau_t[groups->grpnr[egcTC][ii]];
                }
            }
            w            *= m/mbd;
            m             = mbd;
            pg->weight[i] = w;
        }
        tmass  += m;
        wmass  += m*w;
        wwmass += m*w*w;
    }

    gmx_mtop_atomlookup_destroy(alook);

    if (wmass == 0) {
        gmx_fatal(FARGS, "The total%s mass of pull group %d is zero",
                  pg->weight ? " weighted" : "", g);
    }
    if (fplog) {
        fprintf(fplog,
                "Pull group %d: %5d atoms, mass %9.3f", g, pg->nat, tmass);
        if (pg->weight || EI_ENERGY_MINIMIZATION(ir->eI) || ir->eI == eiBD) {
            fprintf(fplog, ", weighted mass %9.3f", wmass*wmass/wwmass);
        }
        if (pg->epgrppbc == epgrppbcCOS) {
            fprintf(fplog, ", cosine weighting will be used");
        }
        fprintf(fplog, "\n");
    }

    if (nfrozen == 0) {
        /* A value > 0 signals not frozen, it is updated later */
        pg->invtm = 1.0;
    }
    else {
        ndim = 0;
        for (d = 0; d < DIM; d++) {
            ndim += pulldims[d]*pg->nat;
        }
        if (fplog && nfrozen > 0 && nfrozen < ndim) {
            fprintf(fplog,
                    "\nWARNING: In pull group %d some, but not all of the degrees of freedom\n"
                    "         that are subject to pulling are frozen.\n"
                    "         For pulling the whole group will be frozen.\n\n",
                    g);
        }
        pg->invtm  = 0.0;
        pg->wscale = 1.0;
    }
}