/* Sum the nr gmx_large_int_t values in r[] across the master ranks of all
 * simulations in a multi-simulation run; every rank ends up with the totals.
 * In a build without MPI this is an invalid call and aborts via gmx_call().
 */
void gmx_sumli_sim(int nr, gmx_large_int_t r[], const gmx_multisim_t *ms)
{
#ifndef GMX_MPI
    gmx_call("gmx_sumli_sim");
#else
#if defined(MPI_IN_PLACE_EXISTS) || defined(GMX_THREAD_MPI)
    /* In-place reduction avoids a separate receive buffer. */
    MPI_Allreduce(MPI_IN_PLACE, r, nr, GMX_MPI_LARGE_INT, MPI_SUM,
                  ms->mpi_comm_masters);
#else
    /* this is thread-unsafe, but it will do for now: */
    int i;

    /* Grow the shared communication buffer if this request is larger
     * than anything seen before. */
    if (nr > ms->mpb->libuf_alloc)
    {
        ms->mpb->libuf_alloc = nr;
        srenew(ms->mpb->libuf, ms->mpb->libuf_alloc);
    }
    /* Reduce into the scratch buffer, then copy the sums back into r. */
    MPI_Allreduce(r, ms->mpb->libuf, nr, GMX_MPI_LARGE_INT, MPI_SUM,
                  ms->mpi_comm_masters);
    for (i = 0; i < nr; i++)
    {
        r[i] = ms->mpb->libuf[i];
    }
#endif
#endif
}
/* Shut down the MPI environment at the end of the run.
 * Synchronizes all ranks first, then calls MPI_Finalize.
 * Without MPI support this is an invalid call and aborts via gmx_call().
 */
void gmx_finalize(const t_commrec *cr)
{
    int ret; /* NOTE(review): unused in the non-MPI build */
#ifndef GMX_MPI
    gmx_call("gmx_finalize");
#else
    /* We sync the processes here to try to avoid problems
     * with buggy MPI implementations that could cause
     * unfinished processes to terminate.
     */
    MPI_Barrier(MPI_COMM_WORLD);

    /* Communicator cleanup intentionally disabled; kept for reference:
       if (DOMAINDECOMP(cr)) {
       if (cr->npmenodes > 0 || cr->dd->bCartesian)
        MPI_Comm_free(&cr->mpi_comm_mygroup);
       if (cr->dd->bCartesian)
        MPI_Comm_free(&cr->mpi_comm_mysim);
       }
     */

    /* Apparently certain mpich implementations cause problems
     * with MPI_Finalize. In that case comment out MPI_Finalize.
     */
    if (debug)
        fprintf(debug, "Will call MPI_Finalize now\n");
    ret = MPI_Finalize();
    if (debug)
        fprintf(debug, "Return code from MPI_Finalize = %d\n", ret);
#endif
}
/* Abort the (possibly parallel) run with error code errorno.
 * Prints a halting message to stderr, credits via thanx(), and terminates.
 * With thread-MPI a plain exit() suffices (all threads share the process);
 * with library MPI, MPI_Abort tears down the other ranks as well.
 */
void gmx_abort(int noderank, int nnodes, int errorno)
{
#ifndef GMX_MPI
    gmx_call("gmx_abort");
#else
#ifdef GMX_THREAD_MPI
    fprintf(stderr, "Halting program %s\n", ShortProgram());
    thanx(stderr);
    exit(1);
#else
    if (nnodes > 1)
    {
        fprintf(stderr, "Halting parallel program %s on CPU %d out of %d\n",
                ShortProgram(), noderank, nnodes);
    }
    else
    {
        fprintf(stderr, "Halting program %s\n", ShortProgram());
    }
    thanx(stderr);
    /* MPI_Abort terminates all ranks in MPI_COMM_WORLD; exit(1) is a
     * fallback in case MPI_Abort returns. */
    MPI_Abort(MPI_COMM_WORLD, errorno);
    exit(1);
#endif
#endif
}
/* Sum the nr ints in r[] over all ranks of the communication record cr;
 * on return every rank holds the totals in r[].
 * NOTE(review): uses static scratch buffers, so this is not thread-safe
 * and the buffer is never freed (grows to the largest nr seen).
 */
void gmx_sumi(int nr, int r[], const t_commrec *cr)
{
#ifndef GMX_MPI
    gmx_call("gmx_sumi");
#else
    static int *buf = NULL;
    static int  nalloc = 0;
    int         i;

    if (nr > nalloc)
    {
        nalloc = nr;
        srenew(buf, nalloc);
    }
    if (cr->nc.bUse)
    {
        /* Use two step summing: first within each intra-node group,
         * then the intra-rank-0 ranks sum across nodes, and the result
         * is broadcast back within each node. */
        MPI_Allreduce(r, buf, nr, MPI_INT, MPI_SUM, cr->nc.comm_intra);
        if (cr->nc.rank_intra == 0)
        {
            /* Sum with the buffers reversed */
            MPI_Allreduce(buf, r, nr, MPI_INT, MPI_SUM, cr->nc.comm_inter);
        }
        MPI_Bcast(r, nr, MPI_INT, 0, cr->nc.comm_intra);
    }
    else
    {
        /* Single-step summing into the scratch buffer, then copy back. */
        MPI_Allreduce(r, buf, nr, MPI_INT, MPI_SUM, cr->mpi_comm_mygroup);
        for (i = 0; i < nr; i++)
        {
            r[i] = buf[i];
        }
    }
#endif
}
/* Broadcast nbytes bytes at b from the simulation master rank
 * (MASTERRANK(cr)) to all ranks of this simulation (mpi_comm_mysim).
 * Without MPI support this is an invalid call and aborts via gmx_call().
 */
void gmx_bcast_sim(int nbytes, void *b, const t_commrec *cr)
{
#ifndef GMX_MPI
    /* Fixed: the error message previously said "gmx_bast" (typo). */
    gmx_call("gmx_bcast_sim");
#else
    MPI_Bcast(b, nbytes, MPI_BYTE, MASTERRANK(cr), cr->mpi_comm_mysim);
#endif
}
/* Block until every rank in this rank's group communicator
 * (cr->mpi_comm_mygroup) has reached this barrier.
 * In a non-MPI build the call is invalid and aborts via gmx_call().
 */
void gmx_barrier(const t_commrec *cr)
{
#ifndef GMX_MPI
    gmx_call("gmx_barrier");
#else
    MPI_Barrier(cr->mpi_comm_mygroup);
#endif
}
/* Sum the nr floats in r[] across the master ranks of all simulations
 * in a multi-simulation run, delegating to gmx_sumf_comm on the
 * masters communicator. Invalid (aborts) in a non-MPI build.
 */
void gmx_sumf_sim(int nr, float r[], const gmx_multisim_t *ms)
{
#ifndef GMX_MPI
    gmx_call("gmx_sumf_sim");
#else
    gmx_sumf_comm(nr, r, ms->mpi_comm_masters);
#endif
}
/* Sum the nr doubles in r[] across the master ranks of all simulations
 * in a multi-simulation run, delegating to gmx_sumd_comm on the
 * masters communicator. Invalid (aborts) in a non-MPI build.
 */
void gmx_sumd_sim(int nr, double r[], const gmx_multisim_t *ms)
{
#ifndef GMX_MPI
    /* Fixed: the error message previously named "gmx_sumd" instead of
     * this function, which would mislead anyone debugging the abort. */
    gmx_call("gmx_sumd_sim");
#else
    gmx_sumd_comm(nr, r, ms->mpi_comm_masters);
#endif
}
/* Sum the nr ints in r[] over all ranks of cr's group; every rank ends up
 * with the totals in r[]. Uses MPI_IN_PLACE when available, otherwise a
 * scratch buffer in cr->mpb. With intra/inter node communicators
 * (cr->nc.bUse) a two-step reduction is used to reduce inter-node traffic.
 */
void gmx_sumi(int nr, int r[], const t_commrec *cr)
{
#ifndef GMX_MPI
    gmx_call("gmx_sumi");
#else
#if defined(MPI_IN_PLACE_EXISTS) || defined(GMX_THREAD_MPI)
    if (cr->nc.bUse)
    {
        /* Use two step summing */
        if (cr->nc.rank_intra == 0)
        {
            /* Intra-node root receives the node-local sum in place... */
            MPI_Reduce(MPI_IN_PLACE, r, nr, MPI_INT, MPI_SUM, 0,
                       cr->nc.comm_intra);
            /* Sum with the buffers reversed */
            MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_INT, MPI_SUM,
                          cr->nc.comm_inter);
        }
        else
        {
            /* This is here because of the silly MPI specification
               that MPI_IN_PLACE should be put in sendbuf instead of recvbuf */
            MPI_Reduce(r, NULL, nr, MPI_INT, MPI_SUM, 0, cr->nc.comm_intra);
        }
        /* Distribute the global sums back to all ranks on the node. */
        MPI_Bcast(r, nr, MPI_INT, 0, cr->nc.comm_intra);
    }
    else
    {
        MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_INT, MPI_SUM,
                      cr->mpi_comm_mygroup);
    }
#else
    int i;

    /* No MPI_IN_PLACE: use (and grow) the shared scratch buffer. */
    if (nr > cr->mpb->ibuf_alloc)
    {
        cr->mpb->ibuf_alloc = nr;
        srenew(cr->mpb->ibuf, cr->mpb->ibuf_alloc);
    }
    if (cr->nc.bUse)
    {
        /* Use two step summing */
        MPI_Allreduce(r, cr->mpb->ibuf, nr, MPI_INT, MPI_SUM,
                      cr->nc.comm_intra);
        if (cr->nc.rank_intra == 0)
        {
            /* Sum with the buffers reversed */
            MPI_Allreduce(cr->mpb->ibuf, r, nr, MPI_INT, MPI_SUM,
                          cr->nc.comm_inter);
        }
        MPI_Bcast(r, nr, MPI_INT, 0, cr->nc.comm_intra);
    }
    else
    {
        MPI_Allreduce(r, cr->mpb->ibuf, nr, MPI_INT, MPI_SUM,
                      cr->mpi_comm_mygroup);
        for (i = 0; i < nr; i++)
        {
            r[i] = cr->mpb->ibuf[i];
        }
    }
#endif
#endif
}
/*!
 * \param[in] sym Symbol to query.
 * \returns The method associated with \p sym, or NULL if \p sym is not a
 *   \ref SYMBOL_METHOD symbol.
 */
struct gmx_ana_selmethod_t *
_gmx_sel_sym_value_method(gmx_sel_symrec_t *sym)
{
    /* Only method symbols carry a method pointer. */
    if (sym->type == SYMBOL_METHOD)
    {
        return sym->u.meth;
    }
    gmx_call("symbol is not a method symbol");
    return NULL;
}
/*!
 * \param[in] sym Symbol to query.
 * \returns The variable expression associated with \p sym, or NULL if
 *   \p sym is not a \ref SYMBOL_VARIABLE symbol.
 */
struct t_selelem *
_gmx_sel_sym_value_var(gmx_sel_symrec_t *sym)
{
    /* Only variable symbols carry a variable expression pointer. */
    if (sym->type == SYMBOL_VARIABLE)
    {
        return sym->u.var;
    }
    gmx_call("symbol is not a variable symbol");
    return NULL;
}
/*!
 * \param[in]  d        Trajectory analysis data structure.
 * \param[out] nanagrps Number of analysis groups specified by the user.
 * \returns 0 on success, a non-zero error code on error.
 *
 * If a specific number (not -1) of analysis groups has been set with
 * gmx_ana_set_nanagrps(), the value is always the same value.
 * Hence, you only need to call this function if gmx_ana_set_nanagrps() has
 * been called with \p nanagrps set to -1.
 *
 * Should only be called after gmx_ana_init_selections().
 */
int
gmx_ana_get_nanagrps(gmx_ana_traj_t *d, int *nanagrps)
{
    /* Success path first: a resolved count is anything other than -1. */
    if (d->nanagrps != -1)
    {
        *nanagrps = d->nanagrps;
        return 0;
    }
    *nanagrps = 0;
    gmx_call("gmx_ana_init_selections() not called");
    return EINVAL;
}
/*!
 * \param[in]  d        Trajectory analysis data structure.
 * \param[out] grpnames Array of selection names.
 * \returns 0 on success, a non-zero error code on error.
 *
 * The pointer returned in \p *grpnames should not be freed.
 * Should only be called after gmx_ana_init_selections().
 */
int
gmx_ana_get_grpnames(gmx_ana_traj_t *d, char ***grpnames)
{
    /* Success path first: the names array exists once selections
     * have been initialized. */
    if (d->grpnames)
    {
        *grpnames = d->grpnames;
        return 0;
    }
    *grpnames = NULL;
    gmx_call("gmx_ana_init_selections() not called");
    return EINVAL;
}
/*!
 * \param[in,out] d       Trajectory analysis data structure.
 * \param[in]     frflags Flags for what to read from the trajectory file.
 * \returns 0 on success, an error code on error.
 *
 * The TRX_NEED_X flag is always set.
 * If the analysis tools needs some other information (velocities, forces),
 * it can call this function to load additional information from the
 * trajectory.
 */
int
gmx_ana_set_frflags(gmx_ana_traj_t *d, int frflags)
{
    if (d->fr)
    {
        gmx_call("cannot set trajectory flags after the first frame has been read");
        /* Consistency fix: sibling accessors in this module return EINVAL
         * (still non-zero) rather than a bare -1. */
        return EINVAL;
    }
    /* Coordinates are always required for analysis. */
    frflags   |= TRX_NEED_X;
    d->frflags = frflags;
    return 0;
}
/*!
 * \param[in]  d   Trajectory analysis data structure.
 * \param[in]  i   Ordinal number of the reference selection to get.
 * \param[out] sel Selection object for the \p i'th reference group.
 * \returns 0 on success, a non-zero error code on error.
 *
 * The pointer returned in \p *sel should not be freed.
 * Should only be called after gmx_ana_init_selections().
 */
int
gmx_ana_get_refsel(gmx_ana_traj_t *d, int i, gmx_ana_selection_t **sel)
{
    /* Validate the requested index before touching the collection. */
    if (i < 0 || i >= d->nrefgrps)
    {
        *sel = NULL;
        gmx_call("invalid reference group number");
        return EINVAL;
    }
    *sel = gmx_ana_selcollection_get_selection(d->sc, i);
    if (*sel == NULL)
    {
        /* A valid index with no selection means init was skipped. */
        gmx_incons("gmx_ana_init_selections() not called");
        return EINVAL;
    }
    return 0;
}
/* Sum the nr ints in r[] across the master ranks of all simulations in a
 * multi-simulation run; every participating rank ends up with the totals.
 * NOTE(review): uses static scratch buffers, so this is not thread-safe
 * and the buffer is never freed (grows to the largest nr seen).
 */
void gmx_sumi_sim(int nr, int r[], const gmx_multisim_t *ms)
{
#ifndef GMX_MPI
    /* Fixed: the error message previously named "gmx_sumd" instead of
     * this function, which would mislead anyone debugging the abort. */
    gmx_call("gmx_sumi_sim");
#else
    static int *buf = NULL;
    static int  nalloc = 0;
    int         i;

    if (nr > nalloc)
    {
        nalloc = nr;
        srenew(buf, nalloc);
    }
    /* Reduce into the scratch buffer, then copy the sums back into r. */
    MPI_Allreduce(r, buf, nr, MPI_INT, MPI_SUM, ms->mpi_comm_masters);
    for (i = 0; i < nr; i++)
    {
        r[i] = buf[i];
    }
#endif
}
/*!
 * \param[in,out] sel   Selection element to set the type for.
 * \param[in]     vtype Value type for the selection element.
 * \returns 0 on success, EINVAL if the value type is invalid.
 *
 * If the new type is \ref GROUP_VALUE or \ref POS_VALUE, the
 * \ref SEL_ALLOCDATA flag is also set.
 *
 * This function should only be called at most once for each element,
 * preferably right after calling _gmx_selelem_create().
 */
int
_gmx_selelem_set_vtype(t_selelem *sel, e_selvalue_t vtype)
{
    /* Boolean elements can only ever evaluate to groups. */
    if (sel->type == SEL_BOOLEAN && vtype != GROUP_VALUE)
    {
        gmx_bug("internal error");
        return EINVAL;
    }
    /* Reject repeated calls that try to change an already-set type. */
    if (sel->v.type != NO_VALUE && vtype != sel->v.type)
    {
        gmx_call("_gmx_selelem_set_vtype() called more than once");
        return EINVAL;
    }
    sel->v.type = vtype;
    /* Group and position values own their data. */
    if (vtype == GROUP_VALUE || vtype == POS_VALUE)
    {
        sel->flags |= SEL_ALLOCDATA;
    }
    return 0;
}
/* Initialize MPI (for library-MPI builds), query the world size and this
 * rank's id, and return the rank id. *nnodes is set to the number of ranks.
 * In a non-MPI build the call is invalid; gmx_call() aborts and 0 is
 * returned only formally.
 */
int gmx_setup(int *argc, char **argv, int *nnodes)
{
#ifndef GMX_MPI
    gmx_call("gmx_setup");
    return 0;
#else
    /* NOTE(review): buf, i and flag are declared but unused here. */
    char   buf[256];
    int    resultlen;             /* actual length of node name */
    int    i, flag;
    int    mpi_num_nodes;
    int    mpi_my_rank;
    char   mpi_hostname[MPI_MAX_PROCESSOR_NAME];

    /* Call the MPI routines */
#ifdef GMX_LIB_MPI
#ifdef GMX_FAHCORE
    /* Folding@home builds use their own MPI init wrapper. */
    (void) fah_MPI_Init(argc, &argv);
#else
    (void) MPI_Init(argc, &argv);
#endif
#endif
    (void) MPI_Comm_size( MPI_COMM_WORLD, &mpi_num_nodes );
    (void) MPI_Comm_rank( MPI_COMM_WORLD, &mpi_my_rank );
    (void) MPI_Get_processor_name( mpi_hostname, &resultlen );

#ifdef GMX_LIB_MPI
    if (debug)
    {
        fprintf(debug, "NNODES=%d, MYRANK=%d, HOSTNAME=%s\n",
                mpi_num_nodes, mpi_my_rank, mpi_hostname);
    }
#endif

    *nnodes = mpi_num_nodes;

    return mpi_my_rank;
#endif
}
/* Send the PME-computed forces back to every PP rank, then send the virial,
 * energies, dV/dlambda terms, cycle count and stop condition to the peer PP
 * rank. All sends are nonblocking (MPI_Isend) and completed with a single
 * MPI_Waitall at the end, so cve must stay alive until then (it does: it is
 * a local that outlives the Waitall).
 */
void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
                                 rvec gmx_unused *f,
                                 matrix vir_q, real energy_q,
                                 matrix vir_lj, real energy_lj,
                                 real dvdlambda_q, real dvdlambda_lj,
                                 float cycles)
{
#ifdef GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;

    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    /* Each PP rank gets the contiguous slice of f for its atoms. */
    for (int receiver = 0; receiver < pme_pp->nnode; receiver++)
    {
        ind_start = ind_end;
        ind_end   = ind_start + pme_pp->nat[receiver];
        if (MPI_Isend(f[ind_start], (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
                      pme_pp->node[receiver], 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
        {
            gmx_comm("MPI_Isend failed in do_pmeonly");
        }
    }

    /* send virial and energy to our last PP node */
    copy_mat(vir_q, cve.vir_q);
    copy_mat(vir_lj, cve.vir_lj);
    cve.energy_q     = energy_q;
    cve.energy_lj    = energy_lj;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->node_peer);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->node_peer, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the forces to arrive */
    MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
#else
    gmx_call("MPI not enabled");
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(f);
    GMX_UNUSED_VALUE(vir_q);
    GMX_UNUSED_VALUE(energy_q);
    GMX_UNUSED_VALUE(vir_lj);
    GMX_UNUSED_VALUE(energy_lj);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}
/*! \brief Send the PME mesh force, virial and energy to the PP-only ranks.
 *
 * Forces are sent to each PP rank as a contiguous slice of \p f via
 * nonblocking MPI_Isend; the virial/energy/dvdlambda/stop-condition payload
 * (cve) goes to the peer PP rank. A single MPI_Waitall completes all sends,
 * so cve (a local) must remain alive until then — it does.
 */
static void gmx_pme_send_force_vir_ener(gmx_pme_pp *pme_pp,
                                        const rvec *f,
                                        matrix vir_q, real energy_q,
                                        matrix vir_lj, real energy_lj,
                                        real dvdlambda_q, real dvdlambda_lj,
                                        float cycles)
{
#if GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;

    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    for (const auto &receiver : pme_pp->ppRanks)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;
        /* const_cast is needed because MPI_Isend takes a non-const send
         * buffer even though it does not modify it. */
        if (MPI_Isend(const_cast<void *>(static_cast<const void *>(f[ind_start])),
                      (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
                      receiver.rankId, 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
        {
            gmx_comm("MPI_Isend failed in do_pmeonly");
        }
    }

    /* send virial and energy to our last PP node */
    copy_mat(vir_q, cve.vir_q);
    copy_mat(vir_lj, cve.vir_lj);
    cve.energy_q     = energy_q;
    cve.energy_lj    = energy_lj;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->peerRankId);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->peerRankId, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the forces to arrive */
    MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
#else
    gmx_call("MPI not enabled");
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(f);
    GMX_UNUSED_VALUE(vir_q);
    GMX_UNUSED_VALUE(energy_q);
    GMX_UNUSED_VALUE(vir_lj);
    GMX_UNUSED_VALUE(energy_lj);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}
/*!
 * \param[in,out] d Trajectory analysis data structure.
 * \returns 0 on success, a non-zero error code on error.
 *
 * Initializes the selection data in \c gmx_ana_traj_t based on
 * the selection options and/or index files provided on the command line.
 *
 * This function is called automatically by parse_trjana_args() and should
 * not be called directly unless \ref ANA_USER_SELINIT is specified.
 *
 * \see ANA_USER_SELINIT
 */
int gmx_ana_init_selections(gmx_ana_traj_t *d)
{
    int                  rc;
    int                  i;
    int                  nr;
    gmx_ana_indexgrps_t *grps;
    int                  natoms;
    bool                 bStdIn;
    bool                 bInteractive;
    bool                 bOk; /* NOTE(review): declared but never used */

    /* Guard against double initialization. */
    if (d->sel)
    {
        gmx_call("init_selections called more than once\n"
                 "perhaps you forgot ANA_USER_SELINIT");
        return -1;
    }
    /* Check if we need some information from the topology */
    if (gmx_ana_selcollection_requires_top(d->sc))
    {
        rc = load_topology(d, TRUE);
        if (rc != 0)
        {
            return rc;
        }
    }
    /* Load the topology and init the index groups */
    gmx_ana_indexgrps_init(&grps, d->top, d->ndxfile);
    /* Parse the selection */
    rc = gmx_ana_selmethod_register_defaults(d->sc);
    if (rc != 0)
    {
        gmx_fatal(FARGS, "default selection method registration failed");
        return rc;
    }
    /* Selections come from stdin when the file is "-", the inline selection
     * string is empty, or neither a file nor a string was given. */
    bStdIn = (d->selfile && d->selfile[0] == '-' && d->selfile[1] == 0)
        || (d->selection && d->selection[0] == 0)
        || (!d->selfile && !d->selection);
    bInteractive = bStdIn && isatty(fileno(stdin));
    if (bStdIn && bInteractive)
    {
        /* Parse from stdin */
        /* First we parse the reference groups if there are any */
        if (d->nrefgrps > 0)
        {
            fprintf(stderr, "\nSpecify ");
            if (d->nrefgrps == 1)
            {
                fprintf(stderr, "a reference selection");
            }
            else
            {
                fprintf(stderr, "%d reference selections", d->nrefgrps);
            }
            fprintf(stderr, ":\n");
            fprintf(stderr, "(one selection per line, use \\ for line continuation)\n");
            rc = gmx_ana_selcollection_parse_stdin(d->sc, d->nrefgrps, grps, TRUE);
            nr = gmx_ana_selcollection_get_count(d->sc);
            if (rc != 0 || nr != d->nrefgrps)
            {
                gmx_ana_traj_free(d);
                gmx_input("unrecoverable error in selection parsing");
                return rc;
            }
        }
        /* Then, we parse the analysis groups */
        fprintf(stderr, "\nSpecify ");
        if (d->nanagrps == 1)
        {
            fprintf(stderr, "a selection");
        }
        else if (d->nanagrps == -1)
        {
            fprintf(stderr, "any number of selections");
        }
        else
        {
            fprintf(stderr, "%d selections", d->nanagrps);
        }
        fprintf(stderr, " for analysis:\n");
        fprintf(stderr, "(one selection per line, use \\ for line continuation%s)\n",
                d->nanagrps == -1 ? ", Ctrl-D to end" : "");
        rc = gmx_ana_selcollection_parse_stdin(d->sc, d->nanagrps, grps, TRUE);
        fprintf(stderr, "\n");
    }
    else if (bStdIn)
    {
        /* Non-interactive stdin: read all selections without prompting. */
        rc = gmx_ana_selcollection_parse_stdin(d->sc, -1, grps, FALSE);
    }
    else if (d->selection)
    {
        rc = gmx_ana_selcollection_parse_str(d->sc, d->selection, grps);
    }
    else
    {
        rc = gmx_ana_selcollection_parse_file(d->sc, d->selfile, grps);
    }
    gmx_ana_indexgrps_free(grps);
    if (rc != 0)
    {
        /* Free memory for memory leak checking */
        gmx_ana_traj_free(d);
        gmx_input("selection(s) could not be parsed");
        return rc;
    }
    /* Check the number of groups */
    nr = gmx_ana_selcollection_get_count(d->sc);
    if (nr <= d->nrefgrps)
    {
        gmx_input("selection does not specify enough index groups");
        return -1;
    }
    if (d->nanagrps <= 0)
    {
        /* "Any number" was requested: everything beyond the reference
         * groups counts as analysis groups. */
        d->nanagrps = nr - d->nrefgrps;
    }
    else if (nr != d->nrefgrps + d->nanagrps)
    {
        gmx_input("selection does not specify the correct number of index groups");
        return -1;
    }
    if (d->flags & ANA_DEBUG_SELECTION)
    {
        gmx_ana_selcollection_print_tree(stderr, d->sc, FALSE);
    }
    /* Parsing may have introduced selections that need the topology. */
    if (gmx_ana_selcollection_requires_top(d->sc))
    {
        rc = load_topology(d, TRUE);
        if (rc != 0)
        {
            return rc;
        }
    }
    if (d->top)
    {
        natoms = -1;
    }
    else
    {
        /* Without a topology, get the atom count from the first frame. */
        rc = init_first_frame(d);
        if (rc != 0)
        {
            return rc;
        }
        natoms = d->fr->natoms;
    }
    gmx_ana_selcollection_set_topology(d->sc, d->top, natoms);
    rc = gmx_ana_selcollection_compile(d->sc);
    if (rc != 0)
    {
        /* Free memory for memory leak checking */
        gmx_ana_traj_free(d);
        gmx_input("selection could not be compiled");
        return rc;
    }
    /* Create the selection array */
    d->ngrps = gmx_ana_selcollection_get_count(d->sc);
    if (!(d->flags & ANA_USE_FULLGRPS))
    {
        d->ngrps -= d->nrefgrps;
    }
    snew(d->sel, d->ngrps);
    for (i = 0; i < d->ngrps; ++i)
    {
        if (d->flags & ANA_USE_FULLGRPS)
        {
            d->sel[i] = gmx_ana_selcollection_get_selection(d->sc, i);
        }
        else
        {
            /* Skip the reference groups at the start of the collection. */
            d->sel[i] = gmx_ana_selcollection_get_selection(d->sc, i + d->nrefgrps);
        }
    }
    if (d->flags & ANA_DEBUG_SELECTION)
    {
        fprintf(stderr, "\n");
        gmx_ana_selcollection_print_tree(stderr, d->sc, FALSE);
        fprintf(stderr, "\n");
        gmx_ana_poscalc_coll_print_tree(stderr, d->pcc);
        fprintf(stderr, "\n");
    }
    /* Initialize the position evaluation */
    gmx_ana_poscalc_init_eval(d->pcc);
    if (d->flags & ANA_DEBUG_SELECTION)
    {
        gmx_ana_poscalc_coll_print_tree(stderr, d->pcc);
        fprintf(stderr, "\n");
    }
    /* Check that dynamic selections are not provided if not allowed */
    if (d->flags & ANA_NO_DYNSEL)
    {
        for (i = 0; i < d->nrefgrps + d->nanagrps; ++i)
        {
            gmx_ana_selection_t *sel;

            sel = gmx_ana_selcollection_get_selection(d->sc, i);
            if (sel->bDynamic)
            {
                gmx_fatal(FARGS, "%s does not support dynamic selections",
                          ShortProgram());
                return -1;
            }
        }
    }
    /* Check that non-atom positions are not provided if not allowed.
     * TODO: It would be better to have these checks in the parser. */
    if (d->flags & ANA_ONLY_ATOMPOS)
    {
        for (i = 0; i < d->nanagrps; ++i)
        {
            gmx_ana_selection_t *sel;

            sel = gmx_ana_selcollection_get_selection(d->sc, i + d->nrefgrps);
            if (sel->p.m.type != INDEX_ATOM)
            {
                gmx_fatal(FARGS, "%s does not support non-atom positions",
                          ShortProgram());
                return -1;
            }
        }
    }
    /* Create the names array */
    snew(d->grpnames, d->ngrps);
    for (i = 0; i < d->ngrps; ++i)
    {
        d->grpnames[i] = gmx_ana_selection_name(d->sel[i]);
    }
    return 0;
}
/* Write the ORCA input files for a QM/MM step: the main <basename>.inp file,
 * and (when electrostatic embedding is used) the point-charge file
 * <basename>.pc plus LJ coefficient files for optimizations.
 * Coordinates are converted from nm to Angstrom by dividing by 0.1.
 * NOTE(review): fopen results are not checked, and the snew'd name/buffer
 * strings are never freed (leak per call) — TODO confirm whether the
 * project's conventions accept this here.
 */
void write_orca_input(int step, t_forcerec *fr, t_QMrec *qm, t_MMrec *mm)
{
    int i;
    t_QMMMrec *QMMMrec;
    FILE *out, *pcFile, *addInputFile, *LJCoeff;
    char *buf, *orcaInput, *addInputFilename, *LJCoeffFilename,
         *pcFilename, *exclInName, *exclOutName;

    QMMMrec = fr->qr;
    /* write the first part of the input-file */
    snew(orcaInput, 200);
    sprintf(orcaInput, "%s.inp", qm->orca_basename);
    out = fopen(orcaInput, "w");
    snew(addInputFilename, 200);
    sprintf(addInputFilename, "%s.ORCAINFO", qm->orca_basename);
    addInputFile = fopen(addInputFilename, "r");
    fprintf(out, "#input-file generated by gromacs\n");
    /* Job type: TS search, geometry optimization, or single-point
     * energy+gradient. */
    if (qm->bTS)
    {
        fprintf(out, "!QMMMOpt TightSCF\n");
        fprintf(out, "%s\n", "%geom TS_Search EF end");
    }
    else if (qm->bOPT)
    {
        fprintf(out, "!QMMMOpt TightSCF\n");
    }
    else
    {
        fprintf(out, "!EnGrad TightSCF\n");
    }
    /* here we include the insertion of the additional orca-input */
    snew(buf, 200);
    if (addInputFile != NULL)
    {
        while (!feof(addInputFile))
        {
            if (fgets(buf, 200, addInputFile) != NULL)
                fputs(buf, out);
        }
    }
    else
    {
        /* gmx_call() reports the missing .ORCAINFO file as a fatal error. */
        fprintf(stderr, "No information on the calculation given in <%s>\n",
                addInputFilename);
        gmx_call("qm_orca.c");
    }
    fclose(addInputFile);
    if (qm->bTS || qm->bOPT)
    {
        /* freeze the frontier QM atoms and Link atoms. This is
         * important only if a full QM subsystem optimization is done
         * with a frozen MM environment. For dynamics, or gromacs's own
         * optimization routines this is not important.
         */
        /* ORCA reads the exclusions from LJCoeffFilename.Excl,
         * so we have to rename the file */
        int didStart = 0;
        for (i = 0; i < qm->nrQMatoms; i++)
        {
            if (qm->frontatoms[i])
            {
                /* Emit the %geom Constraints header only once, lazily. */
                if (!didStart)
                {
                    fprintf(out, "%s\n", "%geom");
                    fprintf(out, " Constraints \n");
                    didStart = 1;
                }
                fprintf(out, " {C %d C}\n", i); /* counting from 0 */
            }
        }
        if (didStart)
            fprintf(out, " end\n end\n");
        /* make a file with information on the C6 and C12 coefficients */
        if (QMMMrec->QMMMscheme != eQMMMschemeoniom && mm->nrMMatoms)
        {
            snew(exclInName, 200);
            snew(exclOutName, 200);
            sprintf(exclInName, "QMMMexcl.dat");
            sprintf(exclOutName, "%s.LJ.Excl", qm->orca_basename);
            rename(exclInName, exclOutName);
            snew(LJCoeffFilename, 200);
            sprintf(LJCoeffFilename, "%s.LJ", qm->orca_basename);
            fprintf(out, "%s%s%s\n", "%LJCOEFFICIENTS \"", LJCoeffFilename, "\"");
            /* make a file with information on the C6 and C12 coefficients */
            LJCoeff = fopen(LJCoeffFilename, "w");
            fprintf(LJCoeff, "%d\n", qm->nrQMatoms);
            for (i = 0; i < qm->nrQMatoms; i++)
            {
#ifdef GMX_DOUBLE
                fprintf(LJCoeff, "%10.7lf %10.7lf\n", qm->c6[i], qm->c12[i]);
#else
                fprintf(LJCoeff, "%10.7f %10.7f\n", qm->c6[i], qm->c12[i]);
#endif
            }
            fprintf(LJCoeff, "%d\n", mm->nrMMatoms);
            for (i = 0; i < mm->nrMMatoms; i++)
            {
#ifdef GMX_DOUBLE
                fprintf(LJCoeff, "%10.7lf %10.7lf\n", mm->c6[i], mm->c12[i]);
#else
                fprintf(LJCoeff, "%10.7f %10.7f\n", mm->c6[i], mm->c12[i]);
#endif
            }
            fclose(LJCoeff);
        }
    }
    /* write charge and multiplicity */
    fprintf(out, "*xyz %2d%2d\n", qm->QMcharge, qm->multiplicity);
    /* write the QM coordinates */
    for (i = 0; i < qm->nrQMatoms; i++)
    {
        int atomNr;
        /* Link atoms (atomic number 0) are written as hydrogen. */
        if (qm->atomicnumberQM[i] == 0)
            atomNr = 1;
        else
            atomNr = qm->atomicnumberQM[i];
#ifdef GMX_DOUBLE
        fprintf(out, "%3d %10.7lf %10.7lf %10.7lf\n",
                atomNr,
                qm->xQM[i][XX]/0.1,
                qm->xQM[i][YY]/0.1,
                qm->xQM[i][ZZ]/0.1);
#else
        fprintf(out, "%3d %10.7f %10.7f %10.7f\n",
                atomNr,
                qm->xQM[i][XX]/0.1,
                qm->xQM[i][YY]/0.1,
                qm->xQM[i][ZZ]/0.1);
#endif
    }
    fprintf(out, "*\n");
    /* write the MM point charge data */
    if (QMMMrec->QMMMscheme != eQMMMschemeoniom && mm->nrMMatoms)
    {
        /* name of the point charge file */
        snew(pcFilename, 200);
        sprintf(pcFilename, "%s.pc", qm->orca_basename);
        fprintf(out, "%s%s%s\n", "%pointcharges \"", pcFilename, "\"");
        pcFile = fopen(pcFilename, "w");
        fprintf(pcFile, "%d\n", mm->nrMMatoms);
        for (i = 0; i < mm->nrMMatoms; i++)
        {
#ifdef GMX_DOUBLE
            fprintf(pcFile, "%8.4lf %10.7lf %10.7lf %10.7lf\n",
                    mm->MMcharges[i],
                    mm->xMM[i][XX]/0.1,
                    mm->xMM[i][YY]/0.1,
                    mm->xMM[i][ZZ]/0.1);
#else
            fprintf(pcFile, "%8.4f %10.7f %10.7f %10.7f\n",
                    mm->MMcharges[i],
                    mm->xMM[i][XX]/0.1,
                    mm->xMM[i][YY]/0.1,
                    mm->xMM[i][ZZ]/0.1);
#endif
        }
        fprintf(pcFile, "\n");
        fclose(pcFile);
    }
    fprintf(out, "\n");
    fclose(out);
} /* write_orca_input */
/* Initialize MPI, query world size and this rank's id, and return the rank
 * id; *nnodes receives the number of ranks. When built with USE_MPE, also
 * registers all MPE logging events and (on rank 0) their descriptions.
 * In a non-MPI build the call is invalid; gmx_call() aborts.
 */
int gmx_setup(int *argc, char **argv, int *nnodes)
{
#ifndef GMX_MPI
    gmx_call("gmx_setup");
    return 0;
#else
    /* NOTE(review): buf, i and flag are declared but unused here. */
    char   buf[256];
    int    resultlen;             /* actual length of node name */
    int    i, flag;
    int    mpi_num_nodes;
    int    mpi_my_rank;
    char   mpi_hostname[MPI_MAX_PROCESSOR_NAME];

    /* Call the MPI routines */
    (void) MPI_Init(argc, &argv);
    (void) MPI_Comm_size( MPI_COMM_WORLD, &mpi_num_nodes );
    (void) MPI_Comm_rank( MPI_COMM_WORLD, &mpi_my_rank );
    (void) MPI_Get_processor_name( mpi_hostname, &resultlen );

#ifdef USE_MPE
    /* MPE logging routines. Get event IDs from MPE: */
    /* General events */
    ev_timestep1               = MPE_Log_get_event_number( );
    ev_timestep2               = MPE_Log_get_event_number( );
    ev_force_start             = MPE_Log_get_event_number( );
    ev_force_finish            = MPE_Log_get_event_number( );
    ev_do_fnbf_start           = MPE_Log_get_event_number( );
    ev_do_fnbf_finish          = MPE_Log_get_event_number( );
    ev_ns_start                = MPE_Log_get_event_number( );
    ev_ns_finish               = MPE_Log_get_event_number( );
    ev_calc_bonds_start        = MPE_Log_get_event_number( );
    ev_calc_bonds_finish       = MPE_Log_get_event_number( );
    ev_global_stat_start       = MPE_Log_get_event_number( );
    ev_global_stat_finish      = MPE_Log_get_event_number( );
    ev_virial_start            = MPE_Log_get_event_number( );
    ev_virial_finish           = MPE_Log_get_event_number( );

    /* Shift related events */
    ev_shift_start             = MPE_Log_get_event_number( );
    ev_shift_finish            = MPE_Log_get_event_number( );
    ev_unshift_start           = MPE_Log_get_event_number( );
    ev_unshift_finish          = MPE_Log_get_event_number( );
    ev_mk_mshift_start         = MPE_Log_get_event_number( );
    ev_mk_mshift_finish        = MPE_Log_get_event_number( );

    /* PME related events */
    ev_pme_start               = MPE_Log_get_event_number( );
    ev_pme_finish              = MPE_Log_get_event_number( );
    ev_spread_on_grid_start    = MPE_Log_get_event_number( );
    ev_spread_on_grid_finish   = MPE_Log_get_event_number( );
    ev_sum_qgrid_start         = MPE_Log_get_event_number( );
    ev_sum_qgrid_finish        = MPE_Log_get_event_number( );
    ev_gmxfft3d_start          = MPE_Log_get_event_number( );
    ev_gmxfft3d_finish         = MPE_Log_get_event_number( );
    ev_solve_pme_start         = MPE_Log_get_event_number( );
    ev_solve_pme_finish        = MPE_Log_get_event_number( );
    ev_gather_f_bsplines_start = MPE_Log_get_event_number( );
    ev_gather_f_bsplines_finish= MPE_Log_get_event_number( );
    ev_reduce_start            = MPE_Log_get_event_number( );
    ev_reduce_finish           = MPE_Log_get_event_number( );
    ev_rscatter_start          = MPE_Log_get_event_number( );
    ev_rscatter_finish         = MPE_Log_get_event_number( );
    ev_alltoall_start          = MPE_Log_get_event_number( );
    ev_alltoall_finish         = MPE_Log_get_event_number( );
    ev_pmeredist_start         = MPE_Log_get_event_number( );
    ev_pmeredist_finish        = MPE_Log_get_event_number( );
    ev_init_pme_start          = MPE_Log_get_event_number( );
    ev_init_pme_finish         = MPE_Log_get_event_number( );
    ev_send_coordinates_start  = MPE_Log_get_event_number( );
    ev_send_coordinates_finish = MPE_Log_get_event_number( );
    ev_update_fr_start         = MPE_Log_get_event_number( );
    ev_update_fr_finish        = MPE_Log_get_event_number( );
    ev_clear_rvecs_start       = MPE_Log_get_event_number( );
    ev_clear_rvecs_finish      = MPE_Log_get_event_number( );
    ev_update_start            = MPE_Log_get_event_number( );
    ev_update_finish           = MPE_Log_get_event_number( );
    ev_output_start            = MPE_Log_get_event_number( );
    ev_output_finish           = MPE_Log_get_event_number( );
    ev_sum_lrforces_start      = MPE_Log_get_event_number( );
    ev_sum_lrforces_finish     = MPE_Log_get_event_number( );
    ev_sort_start              = MPE_Log_get_event_number( );
    ev_sort_finish             = MPE_Log_get_event_number( );
    /* NOTE(review): ev_sum_qgrid_start/finish are assigned a second time
     * here, discarding the IDs obtained above — looks unintentional;
     * verify against the MPE logging call sites. */
    ev_sum_qgrid_start         = MPE_Log_get_event_number( );
    ev_sum_qgrid_finish        = MPE_Log_get_event_number( );

    /* Essential dynamics related events */
    ev_edsam_start             = MPE_Log_get_event_number( );
    ev_edsam_finish            = MPE_Log_get_event_number( );
    ev_get_coords_start        = MPE_Log_get_event_number( );
    ev_get_coords_finish       = MPE_Log_get_event_number( );
    ev_ed_apply_cons_start     = MPE_Log_get_event_number( );
    ev_ed_apply_cons_finish    = MPE_Log_get_event_number( );
    ev_fit_to_reference_start  = MPE_Log_get_event_number( );
    ev_fit_to_reference_finish = MPE_Log_get_event_number( );

    /* describe events: only rank 0 registers the state descriptions */
    if ( mpi_my_rank == 0 )
    {
        /* General events */
        MPE_Describe_state(ev_timestep1, ev_timestep2, "timestep START", "magenta" );
        MPE_Describe_state(ev_force_start, ev_force_finish, "force", "cornflower blue" );
        MPE_Describe_state(ev_do_fnbf_start, ev_do_fnbf_finish, "do_fnbf", "navy" );
        MPE_Describe_state(ev_ns_start, ev_ns_finish, "neighbor search", "tomato" );
        MPE_Describe_state(ev_calc_bonds_start, ev_calc_bonds_finish, "bonded forces", "slate blue" );
        MPE_Describe_state(ev_global_stat_start, ev_global_stat_finish, "global stat", "firebrick3");
        MPE_Describe_state(ev_update_fr_start, ev_update_fr_finish, "update forcerec", "goldenrod");
        MPE_Describe_state(ev_clear_rvecs_start, ev_clear_rvecs_finish, "clear rvecs", "bisque");
        MPE_Describe_state(ev_update_start, ev_update_finish, "update", "cornsilk");
        MPE_Describe_state(ev_output_start, ev_output_finish, "output", "black");
        MPE_Describe_state(ev_virial_start, ev_virial_finish, "calc_virial", "thistle4");

        /* PME related events */
        MPE_Describe_state(ev_pme_start, ev_pme_finish, "doing PME", "grey" );
        MPE_Describe_state(ev_spread_on_grid_start, ev_spread_on_grid_finish, "spread", "dark orange" );
        MPE_Describe_state(ev_sum_qgrid_start, ev_sum_qgrid_finish, "sum qgrid", "slate blue");
        MPE_Describe_state(ev_gmxfft3d_start, ev_gmxfft3d_finish, "fft3d", "snow2" );
        MPE_Describe_state(ev_solve_pme_start, ev_solve_pme_finish, "solve PME", "indian red" );
        MPE_Describe_state(ev_gather_f_bsplines_start, ev_gather_f_bsplines_finish, "bsplines", "light sea green" );
        MPE_Describe_state(ev_reduce_start, ev_reduce_finish, "reduce", "cyan1" );
        MPE_Describe_state(ev_rscatter_start, ev_rscatter_finish, "rscatter", "cyan3" );
        MPE_Describe_state(ev_alltoall_start, ev_alltoall_finish, "alltoall", "LightCyan4" );
        MPE_Describe_state(ev_pmeredist_start, ev_pmeredist_finish, "pmeredist", "thistle" );
        MPE_Describe_state(ev_init_pme_start, ev_init_pme_finish, "init PME", "snow4");
        MPE_Describe_state(ev_send_coordinates_start, ev_send_coordinates_finish, "send_coordinates", "blue");
        MPE_Describe_state(ev_sum_lrforces_start, ev_sum_lrforces_finish, "sum_LRforces", "lime green");
        MPE_Describe_state(ev_sort_start, ev_sort_finish, "sort pme atoms", "brown");
        /* NOTE(review): ev_sum_qgrid_* described a second time with a
         * different label/color — matches the duplicate assignment above. */
        MPE_Describe_state(ev_sum_qgrid_start, ev_sum_qgrid_finish, "sum charge grid", "medium orchid");

        /* Shift related events */
        MPE_Describe_state(ev_shift_start, ev_shift_finish, "shift", "orange");
        MPE_Describe_state(ev_unshift_start, ev_unshift_finish, "unshift", "dark orange");
        MPE_Describe_state(ev_mk_mshift_start, ev_mk_mshift_finish, "mk_mshift", "maroon");

        /* Essential dynamics related events */
        MPE_Describe_state(ev_edsam_start, ev_edsam_finish, "EDSAM", "deep sky blue");
        MPE_Describe_state(ev_get_coords_start, ev_get_coords_finish, "ED get coords", "steel blue");
        MPE_Describe_state(ev_ed_apply_cons_start, ev_ed_apply_cons_finish, "ED apply constr", "forest green");
        MPE_Describe_state(ev_fit_to_reference_start, ev_fit_to_reference_finish, "ED fit to ref", "lavender");
    }
    MPE_Init_log();
#endif

    fprintf(stderr, "NNODES=%d, MYRANK=%d, HOSTNAME=%s\n",
            mpi_num_nodes, mpi_my_rank, mpi_hostname);
    *nnodes = mpi_num_nodes;

    return mpi_my_rank;
#endif
}