Example 1
/*! \brief
 * Implementation for evaluate_compare() if either value is non-integer.
 *
 * \param[in]  top   Not used.
 * \param[in]  fr    Not used.
 * \param[in]  pbc   Not used.
 * \param[in]  g     Evaluation index group.
 * \param[out] out   Output data structure (\p out->u.g is used).
 * \param[in]  data  Should point to a \c t_methoddata_compare.
 *
 * Left value is assumed to be real-valued; right value can be either.
 * This is ensured by the initialization method.
 */
static void
evaluate_compare_real(t_topology *top, t_trxframe *fr, t_pbc *pbc,
                      gmx_ana_index_t *g, gmx_ana_selvalue_t *out, void *data)
{
    t_methoddata_compare *d = (t_methoddata_compare *)data;
    int                   i, i1, i2, ig;
    real                  a, b;
    bool                  bAccept;

    GMX_UNUSED_VALUE(top);
    GMX_UNUSED_VALUE(fr);
    GMX_UNUSED_VALUE(pbc);
    for (i = i1 = i2 = ig = 0; i < g->isize; ++i)
    {
        a       = d->left.r[i1];
        b       = (d->right.flags & CMP_REALVAL) ? d->right.r[i2] : d->right.i[i2];
        bAccept = false;
        switch (d->cmpt)
        {
            case CMP_INVALID: break;
            case CMP_LESS:    bAccept = a <  b; break;
            case CMP_LEQ:     bAccept = a <= b; break;
            case CMP_GTR:     bAccept = a >  b; break;
            case CMP_GEQ:     bAccept = a >= b; break;
            case CMP_EQUAL:   bAccept =  gmx_within_tol(a, b, GMX_REAL_EPS); break;
            case CMP_NEQ:     bAccept = !gmx_within_tol(a, b, GMX_REAL_EPS); break;
        }
        if (bAccept)
        {
            out->u.g->index[ig++] = g->index[i];
        }
        if (!(d->left.flags & CMP_SINGLEVAL))
        {
            ++i1;
        }
        if (!(d->right.flags & CMP_SINGLEVAL))
        {
            ++i2;
        }
    }
    out->u.g->isize = ig;
}
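The comment above notes that the left-hand value is already real-valued, so equality and inequality are tested with a relative tolerance (gmx_within_tol with GMX_REAL_EPS) rather than a plain ==. Below is a minimal, self-contained sketch of the same dispatch; the enum and helper names are hypothetical stand-ins, not the GROMACS types.

#include <cmath>
#include <limits>

// Hypothetical stand-in for the CMP_* comparison types used above.
enum class Cmp { Less, Leq, Gtr, Geq, Equal, Neq };

// Relative-tolerance check in the spirit of gmx_within_tol(a, b, tol).
static bool withinTol(double a, double b, double tol)
{
    return std::fabs(a - b) <= tol * 0.5 * (std::fabs(a) + std::fabs(b));
}

// Sketch of the per-element dispatch; Equal/Neq use the tolerance, the
// ordering comparisons are exact.
static bool evaluateCompare(double a, double b, Cmp cmp)
{
    const double eps = std::numeric_limits<double>::epsilon();
    switch (cmp)
    {
        case Cmp::Less:  return a <  b;
        case Cmp::Leq:   return a <= b;
        case Cmp::Gtr:   return a >  b;
        case Cmp::Geq:   return a >= b;
        case Cmp::Equal: return  withinTol(a, b, eps);
        case Cmp::Neq:   return !withinTol(a, b, eps);
    }
    return false;
}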
Example 2
void fflush_tng(tng_trajectory_t tng)
{
#ifdef GMX_USE_TNG
    if (!tng)
    {
        return;
    }
    tng_frame_set_premature_write(tng, TNG_USE_HASH);
#else
    GMX_UNUSED_VALUE(tng);
#endif
}
Example 3
void gmx_tng_close(tng_trajectory_t *tng)
{
    /* We have to check that tng is set, because
     * tng_util_trajectory_close wants to write NULL back through the
     * pointer and fails with a fatal error if the pointer itself is NULL. */
#ifdef GMX_USE_TNG
    if (tng)
    {
        tng_util_trajectory_close(tng);
    }
#else
    GMX_UNUSED_VALUE(tng);
#endif
}
Example 4
float gmx_tng_get_time_of_final_frame(tng_trajectory_t tng)
{
#ifdef GMX_USE_TNG
    gmx_int64_t nFrames;
    double      time;
    float       fTime;

    tng_num_frames_get(tng, &nFrames);
    tng_util_time_of_frame_get(tng, nFrames - 1, &time);

    fTime = time / PICO;
    return fTime;
#else
    GMX_UNUSED_VALUE(tng);
    return -1.0;
#endif
}
Example 5
template <typename T>
void
ddSendrecv(const struct gmx_domdec_t *dd,
           int                        ddDimensionIndex,
           int                        direction,
           T                         *sendBuffer,
           int                        numElementsToSend,
           T                         *receiveBuffer,
           int                        numElementsToReceive)
{
#if GMX_MPI
    int           sendRank    = dd->neighbor[ddDimensionIndex][direction == dddirForward ? 0 : 1];
    int           receiveRank = dd->neighbor[ddDimensionIndex][direction == dddirForward ? 1 : 0];

    constexpr int mpiTag      = 0;
    MPI_Status    mpiStatus;
    if (numElementsToSend > 0 && numElementsToReceive > 0)
    {
        MPI_Sendrecv(sendBuffer,    numElementsToSend*sizeof(T),    MPI_BYTE,
                     sendRank,      mpiTag,
                     receiveBuffer, numElementsToReceive*sizeof(T), MPI_BYTE,
                     receiveRank,   mpiTag,
                     dd->mpi_comm_all,
                     &mpiStatus);
    }
    else if (numElementsToSend > 0)
    {
        MPI_Send(    sendBuffer,    numElementsToSend*sizeof(T),    MPI_BYTE,
                     sendRank,      mpiTag,
                     dd->mpi_comm_all);
    }
    else if (numElementsToReceive > 0)
    {
        MPI_Recv(    receiveBuffer, numElementsToReceive*sizeof(T), MPI_BYTE,
                     receiveRank,   mpiTag,
                     dd->mpi_comm_all,
                     &mpiStatus);
    }
#else // GMX_MPI
    GMX_UNUSED_VALUE(dd);
    GMX_UNUSED_VALUE(ddDimensionIndex);
    GMX_UNUSED_VALUE(direction);
    GMX_UNUSED_VALUE(sendBuffer);
    GMX_UNUSED_VALUE(numElementsToSend);
    GMX_UNUSED_VALUE(receiveBuffer);
    GMX_UNUSED_VALUE(numElementsToReceive);
#endif // GMX_MPI
}
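A hypothetical call site, assuming the GROMACS types used above (gmx_domdec_t, real, dddirForward) are available; the helper name and buffers are purely illustrative.

#include <vector>

// Hypothetical usage sketch (not from the GROMACS sources): send one batch
// of real values forward along DD dimension 0 and receive the counterpart.
static void exchangeForward(const gmx_domdec_t *dd,
                            std::vector<real>  *send,
                            std::vector<real>  *receive)
{
    ddSendrecv(dd, 0, dddirForward,
               send->data(), static_cast<int>(send->size()),
               receive->data(), static_cast<int>(receive->size()));
}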
Example 6
/*! \brief
 * Does initialization common to all merging modifiers.
 *
 * \param[in]     top   Topology data structure.
 * \param[in,out] out   Pointer to output data structure.
 * \param[in,out] data  Should point to \c t_methoddata_merge.
 */
static void
init_output_common(t_topology *top, gmx_ana_selvalue_t *out, void *data)
{
    t_methoddata_merge *d = (t_methoddata_merge *)data;

    GMX_UNUSED_VALUE(top);
    if (d->p1.m.type != d->p2.m.type)
    {
        /* TODO: Maybe we could pick something else here? */
        out->u.p->m.type = INDEX_UNKNOWN;
    }
    else
    {
        out->u.p->m.type = d->p1.m.type;
    }
    gmx_ana_pos_reserve_for_append(out->u.p, d->p1.count() + d->p2.count(),
                                   d->p1.m.b.nra + d->p2.m.b.nra,
                                   d->p1.v != NULL, d->p1.f != NULL);
    gmx_ana_pos_empty_init(out->u.p);
}
Example 7
gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr)
{
    struct gmx_pme_pp *pme_pp;

    snew(pme_pp, 1);

#ifdef GMX_MPI
    int rank;

    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
    get_pme_ddnodes(cr, rank, &pme_pp->nnode, &pme_pp->node, &pme_pp->node_peer);
    snew(pme_pp->nat, pme_pp->nnode);
    snew(pme_pp->req, eCommType_NR*pme_pp->nnode);
    snew(pme_pp->stat, eCommType_NR*pme_pp->nnode);
    pme_pp->nalloc       = 0;
    pme_pp->flags_charge = 0;
#else
    GMX_UNUSED_VALUE(cr);
#endif

    return pme_pp;
}
Example 8
gmx_bool gmx_get_tng_data_block_types_of_next_frame(tng_trajectory_t     input,
                                                    int                  frame,
                                                    int                  nRequestedIds,
                                                    gmx_int64_t         *requestedIds,
                                                    gmx_int64_t         *nextFrame,
                                                    gmx_int64_t         *nBlocks,
                                                    gmx_int64_t        **blockIds)
{
#if GMX_USE_TNG
    tng_function_status stat;

    stat = tng_util_trajectory_next_frame_present_data_blocks_find(input, frame,
                                                                   nRequestedIds, requestedIds,
                                                                   nextFrame,
                                                                   nBlocks, blockIds);

    if (stat == TNG_CRITICAL)
    {
        gmx_file("Cannot read TNG file. Cannot find data blocks of next frame.");
    }
    else if (stat == TNG_FAILURE)
    {
        return FALSE;
    }
    return TRUE;
#else
    GMX_UNUSED_VALUE(input);
    GMX_UNUSED_VALUE(frame);
    GMX_UNUSED_VALUE(nRequestedIds);
    GMX_UNUSED_VALUE(requestedIds);
    GMX_UNUSED_VALUE(nextFrame);
    GMX_UNUSED_VALUE(nBlocks);
    GMX_UNUSED_VALUE(blockIds);
    return FALSE;
#endif
}
Example 9
/*! \brief Called by PME-only ranks to receive coefficients and coordinates
 *
 * \param[in,out] pme_pp         PME-PP communication structure.
 * \param[out]    natoms         Number of received atoms.
 * \param[out]    box            System box, if received.
 * \param[out]    maxshift_x     Maximum shift in X direction, if received.
 * \param[out]    maxshift_y     Maximum shift in Y direction, if received.
 * \param[out]    lambda_q       Free-energy lambda for electrostatics, if received.
 * \param[out]    lambda_lj      Free-energy lambda for Lennard-Jones, if received.
 * \param[out]    bEnerVir       Set to true if this is an energy/virial calculation step, otherwise set to false.
 * \param[out]    step           MD integration step number.
 * \param[out]    grid_size      PME grid size, if received.
 * \param[out]    ewaldcoeff_q   Ewald cut-off parameter for electrostatics, if received.
 * \param[out]    ewaldcoeff_lj  Ewald cut-off parameter for Lennard-Jones, if received.
 * \param[out]    atomSetChanged Set to true only if the local domain atom data (charges/coefficients)
 *                               has been received (after DD) and should be reinitialized. Otherwise not changed.
 *
 * \retval pmerecvqxX             All parameters were set, chargeA and chargeB can be NULL.
 * \retval pmerecvqxFINISH        No parameters were set.
 * \retval pmerecvqxSWITCHGRID    Only grid_size and *ewaldcoeff were set.
 * \retval pmerecvqxRESETCOUNTERS *step was set.
 */
static int gmx_pme_recv_coeffs_coords(gmx_pme_pp        *pme_pp,
                                      int               *natoms,
                                      matrix             box,
                                      int               *maxshift_x,
                                      int               *maxshift_y,
                                      real              *lambda_q,
                                      real              *lambda_lj,
                                      gmx_bool          *bEnerVir,
                                      int64_t           *step,
                                      ivec              *grid_size,
                                      real              *ewaldcoeff_q,
                                      real              *ewaldcoeff_lj,
                                      bool              *atomSetChanged)
{
    int status = -1;
    int nat    = 0;

#if GMX_MPI
    unsigned int flags    = 0;
    int          messages = 0;

    do
    {
        gmx_pme_comm_n_box_t cnb;
        cnb.flags = 0;

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
                 pme_pp->peerRankId, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);

        /* We accumulate all received flags */
        flags |= cnb.flags;

        *step  = cnb.step;

        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE)        ? " charges" : "",
                    (cnb.flags & PP_PME_COORD )        ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH)        ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID)    ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }

        if (cnb.flags & PP_PME_FINISH)
        {
            status = pmerecvqxFINISH;
        }

        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, *grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            status         = pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step (set above) and return */
            status = pmerecvqxRESETCOUNTERS;
        }

        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            *atomSetChanged = true;

            /* Receive the send counts from the other PP nodes */
            for (auto &sender : pme_pp->ppRanks)
            {
                if (sender.rankId == pme_pp->peerRankId)
                {
                    sender.numAtoms = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&sender.numAtoms, sizeof(sender.numAtoms),
                              MPI_BYTE,
                              sender.rankId, eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
            messages = 0;

            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                nat += sender.numAtoms;
            }

            if (cnb.flags & PP_PME_CHARGE)
            {
                pme_pp->chargeA.resizeWithPadding(nat);
            }
            if (cnb.flags & PP_PME_CHARGEB)
            {
                pme_pp->chargeB.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6)
            {
                pme_pp->sqrt_c6A.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6B)
            {
                pme_pp->sqrt_c6B.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMA)
            {
                pme_pp->sigmaA.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMAB)
            {
                pme_pp->sigmaB.resize(nat);
            }
            pme_pp->x.resizeWithPadding(nat);
            pme_pp->f.resize(nat);

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for (int q = 0; q < eCommType_NR; q++)
            {
                real *bufferPtr;

                if (!(cnb.flags & (PP_PME_CHARGE<<q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: bufferPtr = pme_pp->chargeA.data();  break;
                    case eCommType_ChargeB: bufferPtr = pme_pp->chargeB.data();  break;
                    case eCommType_SQRTC6A: bufferPtr = pme_pp->sqrt_c6A.data(); break;
                    case eCommType_SQRTC6B: bufferPtr = pme_pp->sqrt_c6B.data(); break;
                    case eCommType_SigmaA:  bufferPtr = pme_pp->sigmaA.data();   break;
                    case eCommType_SigmaB:  bufferPtr = pme_pp->sigmaB.data();   break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (const auto &sender : pme_pp->ppRanks)
                {
                    if (sender.numAtoms > 0)
                    {
                        MPI_Irecv(bufferPtr+nat,
                                  sender.numAtoms*sizeof(real),
                                  MPI_BYTE,
                                  sender.rankId, q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += sender.numAtoms;
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n",
                                    sender.rankId, sender.numAtoms,
                                    (q == eCommType_ChargeA ||
                                     q == eCommType_ChargeB) ? "charges" : "params");
                        }
                    }
                }
            }
        }

        if (cnb.flags & PP_PME_COORD)
        {
            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box, box);
            *lambda_q       = cnb.lambda_q;
            *lambda_lj      = cnb.lambda_lj;
            *bEnerVir       = ((cnb.flags & PP_PME_ENER_VIR) != 0u);
            *step           = cnb.step;

            /* Receive the coordinates in place */
            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                if (sender.numAtoms > 0)
                {
                    MPI_Irecv(pme_pp->x[nat],
                              sender.numAtoms*sizeof(rvec),
                              MPI_BYTE,
                              sender.rankId, eCommType_COORD,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    nat += sender.numAtoms;
                    if (debug)
                    {
                        fprintf(debug, "Received from PP rank %d: %d "
                                "coordinates\n",
                                sender.rankId, sender.numAtoms);
                    }
                }
            }

            status = pmerecvqxX;
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
        messages = 0;
    }
    while (status == -1);
#else
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(bEnerVir);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);
    GMX_UNUSED_VALUE(atomSetChanged);

    status = pmerecvqxX;
#endif

    if (status == pmerecvqxX)
    {
        *natoms   = nat;
    }

    return status;
}
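The per-type receive loop above (the PP_PME_CHARGE<<q test) relies on the PP_PME_CHARGE ... PP_PME_SIGMAB flag bits being laid out in the same order as the eCommType_ChargeA ... eCommType_SigmaB values. A toy illustration of that convention, with hypothetical names and values:

// Toy illustration (hypothetical names): flag bits laid out in the same order
// as the per-type enum, so `kBaseFlag << q` tests whether type q was requested
// in the accumulated flags word.
enum CommType { TypeA = 0, TypeB, TypeC, TypeCount };
constexpr unsigned kBaseFlag = 1u << 0;

static bool typeRequested(unsigned flags, CommType q)
{
    return (flags & (kBaseFlag << q)) != 0u;
}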
Example 10
/*! \brief Helper function for parsing various input about the number
    of OpenMP threads to use in various modules and deciding what to
    do about it. */
static void manage_number_of_openmp_threads(const gmx::MDLogger &mdlog,
                                            const t_commrec     *cr,
                                            bool                 bOMP,
                                            int                  nthreads_hw_avail,
                                            int                  omp_nthreads_req,
                                            int                  omp_nthreads_pme_req,
                                            gmx_bool gmx_unused  bThisNodePMEOnly,
                                            gmx_bool             bFullOmpSupport,
                                            int                  numRanksOnThisNode,
                                            gmx_bool             bSepPME)
{
    int      nth;
    char    *env;

#if GMX_THREAD_MPI
    /* modth is shared among tMPI threads, so for thread safety, the
     * detection is done on the master only. It is not thread-safe
     * with multiple simulations, but that's anyway not supported by
     * tMPI. */
    if (!SIMMASTER(cr))
    {
        return;
    }
#else
    GMX_UNUSED_VALUE(cr);
#endif

    if (modth.initialized)
    {
        /* Just return if the initialization has already been
           done. This could only happen if gmx_omp_nthreads_init() has
           already been called. */
        return;
    }

    /* With full OpenMP support (verlet scheme) set the number of threads
     * per process / default:
     * - 1 if not compiled with OpenMP or
     * - OMP_NUM_THREADS if the env. var is set, or
     * - omp_nthreads_req = #of threads requested by the user on the mdrun
     *   command line, otherwise
     * - take the max number of available threads and distribute them
     *   on the processes/tMPI threads.
     * ~ The GMX_*_NUM_THREADS env var overrides the number of threads of
     *   the respective module and it has to be used in conjunction with
     *   OMP_NUM_THREADS.
     *
     * With the group scheme OpenMP multithreading is only supported in PME,
     * for all other modules nthreads is set to 1.
     * The number of PME threads is equal to:
     * - 1 if not compiled with OpenMP or
     * - GMX_PME_NUM_THREADS if defined, otherwise
     * - OMP_NUM_THREADS if defined, otherwise
     * - 1
     */
    nth = 1;
    if ((env = getenv("OMP_NUM_THREADS")) != nullptr)
    {
        if (!bOMP && (std::strncmp(env, "1", 1) != 0))
        {
            gmx_warning("OMP_NUM_THREADS is set, but %s was compiled without OpenMP support!",
                        gmx::getProgramContext().displayName());
        }
        else
        {
            nth = gmx_omp_get_max_threads();
        }
    }
    else if (omp_nthreads_req > 0)
    {
        nth = omp_nthreads_req;
    }
    else if (bFullOmpSupport && bOMP)
    {
        /* max available threads per node */
        nth = nthreads_hw_avail;

        /* divide the threads among the MPI ranks */
        if (nth >= numRanksOnThisNode)
        {
            nth /= numRanksOnThisNode;
        }
        else
        {
            nth = 1;
        }
    }

    /* now we have the global values, set them:
     * - 1 if not compiled with OpenMP and for the group scheme
     * - nth for the verlet scheme when compiled with OpenMP
     */
    if (bFullOmpSupport && bOMP)
    {
        modth.gnth = nth;
    }
    else
    {
        modth.gnth = 1;
    }

    if (bSepPME)
    {
        if (omp_nthreads_pme_req > 0)
        {
            modth.gnth_pme = omp_nthreads_pme_req;
        }
        else
        {
            modth.gnth_pme = nth;
        }
    }
    else
    {
        modth.gnth_pme = 0;
    }

    /* now set the per-module values */
    modth.nth[emntDefault] = modth.gnth;
    pick_module_nthreads(mdlog, emntDomdec, bFullOmpSupport, bSepPME);
    pick_module_nthreads(mdlog, emntPairsearch, bFullOmpSupport, bSepPME);
    pick_module_nthreads(mdlog, emntNonbonded, bFullOmpSupport, bSepPME);
    pick_module_nthreads(mdlog, emntBonded, bFullOmpSupport, bSepPME);
    pick_module_nthreads(mdlog, emntPME, bFullOmpSupport, bSepPME);
    pick_module_nthreads(mdlog, emntUpdate, bFullOmpSupport, bSepPME);
    pick_module_nthreads(mdlog, emntVSITE, bFullOmpSupport, bSepPME);
    pick_module_nthreads(mdlog, emntLINCS, bFullOmpSupport, bSepPME);
    pick_module_nthreads(mdlog, emntSETTLE, bFullOmpSupport, bSepPME);

    /* set the number of threads globally */
    if (bOMP)
    {
#if !GMX_THREAD_MPI
        if (bThisNodePMEOnly)
        {
            gmx_omp_set_num_threads(modth.gnth_pme);
        }
        else
#endif      /* GMX_THREAD_MPI */
        {
            if (bFullOmpSupport)
            {
                gmx_omp_set_num_threads(nth);
            }
            else
            {
                gmx_omp_set_num_threads(1);
            }
        }
    }

    modth.initialized = TRUE;
}
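The long comment in the middle of this function describes the precedence used to pick the global thread count. The following condensed, standalone sketch restates that decision order; the helper and its parameters are hypothetical, not part of GROMACS.

#include <algorithm>
#include <cstdlib>

// Standalone sketch of the precedence described above: OMP_NUM_THREADS env
// var, then the mdrun request, then the hardware threads divided over the
// ranks on this node, and 1 as the fallback (or when built without OpenMP).
static int chooseDefaultThreadCount(bool builtWithOpenMP,
                                    int  requestedThreads,
                                    int  hardwareThreadsAvailable,
                                    int  ranksOnThisNode)
{
    if (!builtWithOpenMP)
    {
        return 1;
    }
    if (const char *env = std::getenv("OMP_NUM_THREADS"))
    {
        return std::max(1, std::atoi(env));
    }
    if (requestedThreads > 0)
    {
        return requestedThreads;
    }
    return std::max(1, hardwareThreadsAvailable / ranksOnThisNode);
}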
Example 11
void gmx_tng_open(const char       *filename,
                  char              mode,
                  tng_trajectory_t *tng)
{
#ifdef GMX_USE_TNG
    /* First check whether we have to make a backup,
     * only for writing, not for read or append.
     */
    if (mode == 'w')
    {
#ifndef GMX_FAHCORE
        /* only make backups for normal gromacs */
        make_backup(filename);
#endif
    }

    /* tng must not be pointing at already allocated memory.
     * Memory will be allocated by tng_util_trajectory_open() and must
     * later on be freed by tng_util_trajectory_close(). */
    if (TNG_SUCCESS != tng_util_trajectory_open(filename, mode, tng))
    {
        /* TNG distinguishes more than one level of error, but there is
           no use case for GROMACS handling the non-fatal errors
           gracefully. */
        gmx_fatal(FARGS,
                  "%s while opening %s for %s",
                  gmx_strerror("file"),
                  filename,
                  modeToVerb(mode));
    }

    if (mode == 'w' || mode == 'a')
    {
        /* FIXME in TNG: When adding data to the header, subsequent blocks might get
         * overwritten. This could be solved by moving the first trajectory
         * frame set(s) to the end of the file. Could that cause other problems,
         * e.g. when continuing a simulation? */
        char hostname[256];
        gmx_gethostname(hostname, 256);
        if (mode == 'w')
        {
            tng_first_computer_name_set(*tng, hostname);
        }
/* TODO: This should be implemented when the above fixme is done (adding data to
 * the header). */
//         else
//         {
//             tng_last_computer_name_set(*tng, hostname);
//         }

        char        programInfo[256];
        const char *precisionString = "";
#ifdef GMX_DOUBLE
        precisionString = " (double precision)";
#endif
        sprintf(programInfo, "%.100s, %.128s%.24s",
                gmx::getProgramContext().displayName(),
                GromacsVersion(), precisionString);
        if (mode == 'w')
        {
            tng_first_program_name_set(*tng, programInfo);
        }
/* TODO: This should be implemented when the above fixme is done (adding data to
 * the header). */
//         else
//         {
//             tng_last_program_name_set(*tng, programInfo);
//         }

#ifdef HAVE_UNISTD_H
        char username[256];
        getlogin_r(username, 256);
        if (mode == 'w')
        {
            tng_first_user_name_set(*tng, username);
        }
/* TODO: This should be implemented when the above fixme is done (adding data to
 * the header). */
//         else
//         {
//             tng_last_user_name_set(*tng, username);
//         }
#endif
    }
#else
    gmx_file("GROMACS was compiled without TNG support, cannot handle this file type");
    GMX_UNUSED_VALUE(filename);
    GMX_UNUSED_VALUE(mode);
    GMX_UNUSED_VALUE(tng);
#endif
}
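Combined with fflush_tng() and gmx_tng_close() from the earlier examples, a hypothetical open-write-close sequence could look like the sketch below; the file name is illustrative only.

// Hypothetical usage sketch combining the TNG helpers shown in this section.
static void writeTngExample()
{
    tng_trajectory_t tng = nullptr;
    gmx_tng_open("traj.tng", 'w', &tng); // allocates and opens the handle
    /* ... write frames, e.g. with gmx_fwrite_tng(...) ... */
    fflush_tng(tng);                     // push the pending frame set to disk
    gmx_tng_close(&tng);                 // closes and frees the handle
}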
Example 12
void gmx_fwrite_tng(tng_trajectory_t tng,
                    const gmx_bool   bUseLossyCompression,
                    int              step,
                    real             elapsedPicoSeconds,
                    real             lambda,
                    const rvec      *box,
                    int              nAtoms,
                    const rvec      *x,
                    const rvec      *v,
                    const rvec      *f)
{
#ifdef GMX_USE_TNG
    typedef tng_function_status (*write_data_func_pointer)(tng_trajectory_t,
                                                           const gmx_int64_t,
                                                           const double,
                                                           const real*,
                                                           const gmx_int64_t,
                                                           const gmx_int64_t,
                                                           const char*,
                                                           const char,
                                                           const char);
#ifdef GMX_DOUBLE
    static write_data_func_pointer           write_data           = tng_util_generic_with_time_double_write;
#else
    static write_data_func_pointer           write_data           = tng_util_generic_with_time_write;
#endif
    double                                   elapsedSeconds = elapsedPicoSeconds * PICO;
    gmx_int64_t                              nParticles;
    char                                     compression;


    if (!tng)
    {
        /* This function might get called when the type of the
           compressed trajectory is actually XTC. So we exit and move
           on. */
        return;
    }

    tng_num_particles_get(tng, &nParticles);
    if (nAtoms != (int)nParticles)
    {
        tng_implicit_num_particles_set(tng, nAtoms);
    }

    if (bUseLossyCompression)
    {
        compression = TNG_TNG_COMPRESSION;
    }
    else
    {
        compression = TNG_GZIP_COMPRESSION;
    }

    /* The writing is done using write_data, which writes float or double
     * depending on the GROMACS compilation. */
    if (x)
    {
        GMX_ASSERT(box, "Need a non-NULL box if positions are written");

        if (write_data(tng, step, elapsedSeconds,
                       reinterpret_cast<const real *>(x),
                       3, TNG_TRAJ_POSITIONS, "POSITIONS",
                       TNG_PARTICLE_BLOCK_DATA,
                       compression) != TNG_SUCCESS)
        {
            gmx_file("Cannot write TNG trajectory frame; maybe you are out of disk space?");
        }
        /* TNG-MF1 compression only compresses positions and velocities. Use lossless
         * compression for box shape regardless of output mode */
        if (write_data(tng, step, elapsedSeconds,
                       reinterpret_cast<const real *>(box),
                       9, TNG_TRAJ_BOX_SHAPE, "BOX SHAPE",
                       TNG_NON_PARTICLE_BLOCK_DATA,
                       TNG_GZIP_COMPRESSION) != TNG_SUCCESS)
        {
            gmx_file("Cannot write TNG trajectory frame; maybe you are out of disk space?");
        }
    }

    if (v)
    {
        if (write_data(tng, step, elapsedSeconds,
                       reinterpret_cast<const real *>(v),
                       3, TNG_TRAJ_VELOCITIES, "VELOCITIES",
                       TNG_PARTICLE_BLOCK_DATA,
                       compression) != TNG_SUCCESS)
        {
            gmx_file("Cannot write TNG trajectory frame; maybe you are out of disk space?");
        }
    }

    if (f)
    {
        /* TNG-MF1 compression only compresses positions and velocities. Use lossless
         * compression for forces regardless of output mode */
        if (write_data(tng, step, elapsedSeconds,
                       reinterpret_cast<const real *>(f),
                       3, TNG_TRAJ_FORCES, "FORCES",
                       TNG_PARTICLE_BLOCK_DATA,
                       TNG_GZIP_COMPRESSION) != TNG_SUCCESS)
        {
            gmx_file("Cannot write TNG trajectory frame; maybe you are out of disk space?");
        }
    }

    /* TNG-MF1 compression only compresses positions and velocities. Use lossless
     * compression for lambdas regardless of output mode */
    if (write_data(tng, step, elapsedSeconds,
                   reinterpret_cast<const real *>(&lambda),
                   1, TNG_GMX_LAMBDA, "LAMBDAS",
                   TNG_NON_PARTICLE_BLOCK_DATA,
                   TNG_GZIP_COMPRESSION) != TNG_SUCCESS)
    {
        gmx_file("Cannot write TNG trajectory frame; maybe you are out of disk space?");
    }
#else
    GMX_UNUSED_VALUE(tng);
    GMX_UNUSED_VALUE(bUseLossyCompression);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(elapsedPicoSeconds);
    GMX_UNUSED_VALUE(lambda);
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(nAtoms);
    GMX_UNUSED_VALUE(x);
    GMX_UNUSED_VALUE(v);
    GMX_UNUSED_VALUE(f);
#endif
}
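A hypothetical per-frame call that writes positions and the box only, with lossy TNG compression; the wrapper and its arguments are illustrative, with velocities and forces skipped by passing nullptr.

// Hypothetical wrapper showing a typical per-frame call: positions and box
// only, lossy compression; v and f are skipped by passing nullptr.
static void writePositionsFrame(tng_trajectory_t tng, int step, real timeInPs,
                                real lambda, const rvec *box, int natoms,
                                const rvec *x)
{
    gmx_fwrite_tng(tng, TRUE, step, timeInPs, lambda, box, natoms, x,
                   nullptr, nullptr);
}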
Example 13
void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
                                 rvec gmx_unused *f,
                                 matrix vir_q, real energy_q,
                                 matrix vir_lj, real energy_lj,
                                 real dvdlambda_q, real dvdlambda_lj,
                                 float cycles)
{
#ifdef GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;
    cve.cycles = cycles;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    for (int receiver = 0; receiver < pme_pp->nnode; receiver++)
    {
        ind_start = ind_end;
        ind_end   = ind_start + pme_pp->nat[receiver];
        if (MPI_Isend(f[ind_start], (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
                      pme_pp->node[receiver], 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
        {
            gmx_comm("MPI_Isend failed in do_pmeonly");
        }
    }

    /* send virial and energy to our last PP node */
    copy_mat(vir_q, cve.vir_q);
    copy_mat(vir_lj, cve.vir_lj);
    cve.energy_q     = energy_q;
    cve.energy_lj    = energy_lj;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->node_peer);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->node_peer, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the forces to arrive */
    MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
#else
    gmx_call("MPI not enabled");
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(f);
    GMX_UNUSED_VALUE(vir_q);
    GMX_UNUSED_VALUE(energy_q);
    GMX_UNUSED_VALUE(vir_lj);
    GMX_UNUSED_VALUE(energy_lj);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}
Example 14
int gmx_pme_recv_coeffs_coords(struct gmx_pme_pp *pme_pp,
                               int               *natoms,
                               real             **chargeA,
                               real             **chargeB,
                               real             **sqrt_c6A,
                               real             **sqrt_c6B,
                               real             **sigmaA,
                               real             **sigmaB,
                               matrix             box,
                               rvec             **x,
                               rvec             **f,
                               int               *maxshift_x,
                               int               *maxshift_y,
                               gmx_bool          *bFreeEnergy_q,
                               gmx_bool          *bFreeEnergy_lj,
                               real              *lambda_q,
                               real              *lambda_lj,
                               gmx_bool          *bEnerVir,
                               int               *pme_flags,
                               gmx_int64_t       *step,
                               ivec               grid_size,
                               real              *ewaldcoeff_q,
                               real              *ewaldcoeff_lj)
{
    int                  nat = 0, status;

    *pme_flags = 0;
#ifdef GMX_MPI
    gmx_pme_comm_n_box_t cnb;
    int                  messages;

    cnb.flags  = 0;
    messages   = 0;
    do
    {

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
                 pme_pp->node_peer, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);

        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE)        ? " charges" : "",
                    (cnb.flags & PP_PME_COORD )        ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH)        ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID)    ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }

        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;
            return pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step and return */
            *step = cnb.step;

            return pmerecvqxRESETCOUNTERS;
        }

        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            /* Receive the send counts from the other PP nodes */
            for (int sender = 0; sender < pme_pp->nnode; sender++)
            {
                if (pme_pp->node[sender] == pme_pp->node_peer)
                {
                    pme_pp->nat[sender] = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&(pme_pp->nat[sender]), sizeof(pme_pp->nat[0]),
                              MPI_BYTE,
                              pme_pp->node[sender], eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
            messages = 0;

            nat = 0;
            for (int sender = 0; sender < pme_pp->nnode; sender++)
            {
                nat += pme_pp->nat[sender];
            }

            if (nat > pme_pp->nalloc)
            {
                pme_pp->nalloc = over_alloc_dd(nat);
                if (cnb.flags & PP_PME_CHARGE)
                {
                    srenew(pme_pp->chargeA, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_CHARGEB)
                {
                    srenew(pme_pp->chargeB, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SQRTC6)
                {
                    srenew(pme_pp->sqrt_c6A, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SQRTC6B)
                {
                    srenew(pme_pp->sqrt_c6B, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SIGMA)
                {
                    srenew(pme_pp->sigmaA, pme_pp->nalloc);
                }
                if (cnb.flags & PP_PME_SIGMAB)
                {
                    srenew(pme_pp->sigmaB, pme_pp->nalloc);
                }
                srenew(pme_pp->x, pme_pp->nalloc);
                srenew(pme_pp->f, pme_pp->nalloc);
            }

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for (int q = 0; q < eCommType_NR; q++)
            {
                real *charge_pp;

                if (!(cnb.flags & (PP_PME_CHARGE<<q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: charge_pp = pme_pp->chargeA;  break;
                    case eCommType_ChargeB: charge_pp = pme_pp->chargeB;  break;
                    case eCommType_SQRTC6A: charge_pp = pme_pp->sqrt_c6A; break;
                    case eCommType_SQRTC6B: charge_pp = pme_pp->sqrt_c6B; break;
                    case eCommType_SigmaA:  charge_pp = pme_pp->sigmaA;   break;
                    case eCommType_SigmaB:  charge_pp = pme_pp->sigmaB;   break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (int sender = 0; sender < pme_pp->nnode; sender++)
                {
                    if (pme_pp->nat[sender] > 0)
                    {
                        MPI_Irecv(charge_pp+nat,
                                  pme_pp->nat[sender]*sizeof(real),
                                  MPI_BYTE,
                                  pme_pp->node[sender], q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += pme_pp->nat[sender];
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n",
                                    pme_pp->node[sender], pme_pp->nat[sender],
                                    (q == eCommType_ChargeA ||
                                     q == eCommType_ChargeB) ? "charges" : "params");
                        }
                    }
                }
            }

            pme_pp->flags_charge = cnb.flags;
        }

        if (cnb.flags & PP_PME_COORD)
        {
            if (!(pme_pp->flags_charge & (PP_PME_CHARGE | PP_PME_SQRTC6)))
            {
                gmx_incons("PME-only rank received coordinates before charges and/or C6-values");
            }

            /* The box, FE flag and lambda are sent along with the coordinates */
            copy_mat(cnb.box, box);
            *bFreeEnergy_q  = ((cnb.flags & GMX_PME_DO_COULOMB) &&
                               (cnb.flags & PP_PME_FEP_Q));
            *bFreeEnergy_lj = ((cnb.flags & GMX_PME_DO_LJ) &&
                               (cnb.flags & PP_PME_FEP_LJ));
            *lambda_q       = cnb.lambda_q;
            *lambda_lj      = cnb.lambda_lj;
            *bEnerVir       = (cnb.flags & PP_PME_ENER_VIR);
            *pme_flags      = cnb.flags;

            if (*bFreeEnergy_q && !(pme_pp->flags_charge & PP_PME_CHARGEB))
            {
                gmx_incons("PME-only rank received free energy request, but "
                           "did not receive B-state charges");
            }

            if (*bFreeEnergy_lj && !(pme_pp->flags_charge & PP_PME_SQRTC6B))
            {
                gmx_incons("PME-only rank received free energy request, but "
                           "did not receive B-state C6-values");
            }

            /* Receive the coordinates in place */
            nat = 0;
            for (int sender = 0; sender < pme_pp->nnode; sender++)
            {
                if (pme_pp->nat[sender] > 0)
                {
                    MPI_Irecv(pme_pp->x[nat], pme_pp->nat[sender]*sizeof(rvec),
                              MPI_BYTE,
                              pme_pp->node[sender], eCommType_COORD,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    nat += pme_pp->nat[sender];
                    if (debug)
                    {
                        fprintf(debug, "Received from PP rank %d: %d "
                                "coordinates\n",
                                pme_pp->node[sender], pme_pp->nat[sender]);
                    }
                }
            }
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req, pme_pp->stat);
        messages = 0;
    }
    while (!(cnb.flags & (PP_PME_COORD | PP_PME_FINISH)));
    status = ((cnb.flags & PP_PME_FINISH) ? pmerecvqxFINISH : pmerecvqxX);

    *step = cnb.step;
#else
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(bFreeEnergy_q);
    GMX_UNUSED_VALUE(bFreeEnergy_lj);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(bEnerVir);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);

    status = pmerecvqxX;
#endif

    *natoms   = nat;
    *chargeA  = pme_pp->chargeA;
    *chargeB  = pme_pp->chargeB;
    *sqrt_c6A = pme_pp->sqrt_c6A;
    *sqrt_c6B = pme_pp->sqrt_c6B;
    *sigmaA   = pme_pp->sigmaA;
    *sigmaB   = pme_pp->sigmaB;
    *x        = pme_pp->x;
    *f        = pme_pp->f;

    return status;
}
Example 15
void gmx_tng_setup_atom_subgroup(tng_trajectory_t tng,
                                 const int        nind,
                                 const int       *ind,
                                 const char      *name)
{
#if GMX_USE_TNG
    gmx_int64_t              nAtoms, cnt, nMols;
    tng_molecule_t           mol, iterMol;
    tng_chain_t              chain;
    tng_residue_t            res;
    tng_atom_t               atom;
    tng_function_status      stat;

    tng_num_particles_get(tng, &nAtoms);

    if (nAtoms == nind)
    {
        return;
    }

    stat = tng_molecule_find(tng, name, -1, &mol);
    if (stat == TNG_SUCCESS)
    {
        tng_molecule_num_atoms_get(tng, mol, &nAtoms);
        tng_molecule_cnt_get(tng, mol, &cnt);
        if (nAtoms == nind)
        {
            stat = TNG_SUCCESS;
        }
        else
        {
            stat = TNG_FAILURE;
        }
    }
    if (stat == TNG_FAILURE)
    {
        /* The indexed atoms are added to one separate molecule. */
        tng_molecule_alloc(tng, &mol);
        tng_molecule_name_set(tng, mol, name);
        tng_molecule_chain_add(tng, mol, "", &chain);

        for (int i = 0; i < nind; i++)
        {
            char        temp_name[256], temp_type[256];

            /* Try to retrieve the residue name of the atom */
            stat = tng_residue_name_of_particle_nr_get(tng, ind[i], temp_name, 256);
            if (stat != TNG_SUCCESS)
            {
                temp_name[0] = '\0';
            }
            /* Check if the molecule of the selection already contains this residue */
            if (tng_chain_residue_find(tng, chain, temp_name, -1, &res)
                != TNG_SUCCESS)
            {
                tng_chain_residue_add(tng, chain, temp_name, &res);
            }
            /* Try to find the original name and type of the atom */
            stat = tng_atom_name_of_particle_nr_get(tng, ind[i], temp_name, 256);
            if (stat != TNG_SUCCESS)
            {
                temp_name[0] = '\0';
            }
            stat = tng_atom_type_of_particle_nr_get(tng, ind[i], temp_type, 256);
            if (stat != TNG_SUCCESS)
            {
                temp_type[0] = '\0';
            }
            tng_residue_atom_w_id_add(tng, res, temp_name, temp_type, ind[i], &atom);
        }
        tng_molecule_existing_add(tng, &mol);
    }
    /* Set the count of the molecule containing the selected atoms to 1 and all
     * other molecules to 0 */
    tng_molecule_cnt_set(tng, mol, 1);
    tng_num_molecule_types_get(tng, &nMols);
    for (gmx_int64_t k = 0; k < nMols; k++)
    {
        tng_molecule_of_index_get(tng, k, &iterMol);
        if (iterMol == mol)
        {
            continue;
        }
        tng_molecule_cnt_set(tng, iterMol, 0);
    }
#else
    GMX_UNUSED_VALUE(tng);
    GMX_UNUSED_VALUE(nind);
    GMX_UNUSED_VALUE(ind);
    GMX_UNUSED_VALUE(name);
#endif
}
Example 16
gmx_multisim_t *init_multisystem(MPI_Comm                         comm,
                                 gmx::ArrayRef<const std::string> multidirs)
{
    gmx_multisim_t *ms;
#if GMX_MPI
    MPI_Group       mpi_group_world;
    int            *rank;
#endif

    if (multidirs.empty())
    {
        return nullptr;
    }

    if (!GMX_LIB_MPI && !multidirs.empty())
    {
        gmx_fatal(FARGS, "mdrun -multidir is only supported when GROMACS has been "
                  "configured with a proper external MPI library.");
    }

    if (multidirs.size() == 1)
    {
        /* NOTE: It would be nice if this special case worked, but this requires checks/tests. */
        gmx_fatal(FARGS, "To run mdrun in multiple simulation mode, more then one "
                  "actual simulation is required. The single simulation case is not supported.");
    }

#if GMX_MPI
    int numRanks;
    MPI_Comm_size(comm, &numRanks);
    if (numRanks % multidirs.size() != 0)
    {
        gmx_fatal(FARGS, "The number of ranks (%d) is not a multiple of the number of simulations (%td)", numRanks, multidirs.size());
    }

    int numRanksPerSim = numRanks/multidirs.size();
    int rankWithinComm;
    MPI_Comm_rank(comm, &rankWithinComm);

    if (debug)
    {
        fprintf(debug, "We have %td simulations, %d ranks per simulation, local simulation is %d\n", multidirs.size(), numRanksPerSim, rankWithinComm/numRanksPerSim);
    }

    ms       = new gmx_multisim_t;
    ms->nsim = multidirs.size();
    ms->sim  = rankWithinComm/numRanksPerSim;
    /* Create a communicator for the master nodes */
    snew(rank, ms->nsim);
    for (int i = 0; i < ms->nsim; i++)
    {
        rank[i] = i*numRanksPerSim;
    }
    MPI_Comm_group(comm, &mpi_group_world);
    MPI_Group_incl(mpi_group_world, ms->nsim, rank, &ms->mpi_group_masters);
    sfree(rank);
    MPI_Comm_create(MPI_COMM_WORLD, ms->mpi_group_masters,
                    &ms->mpi_comm_masters);

#if !MPI_IN_PLACE_EXISTS
    /* initialize the MPI_IN_PLACE replacement buffers */
    snew(ms->mpb, 1);
    ms->mpb->ibuf        = NULL;
    ms->mpb->libuf       = NULL;
    ms->mpb->fbuf        = NULL;
    ms->mpb->dbuf        = NULL;
    ms->mpb->ibuf_alloc  = 0;
    ms->mpb->libuf_alloc = 0;
    ms->mpb->fbuf_alloc  = 0;
    ms->mpb->dbuf_alloc  = 0;
#endif

    // TODO This should throw upon error
    gmx_chdir(multidirs[ms->sim].c_str());
#else
    GMX_UNUSED_VALUE(comm);
    ms = nullptr;
#endif

    return ms;
}
Example 17
bool lmfit_exp(int          nfit,
               const double x[],
               const double y[],
               const double dy[],
               double       parm[], // NOLINT(readability-non-const-parameter)
               bool         bVerbose,
               int          eFitFn,
               int          nfix)
{
    if ((eFitFn < 0) || (eFitFn >= effnNR))
    {
        fprintf(stderr, "fitfn = %d, should be in the range 0..%d\n",
                eFitFn, effnNR-1);
        return false;
    }
#if HAVE_LMFIT
    double             chisq, ochisq;
    gmx_bool           bCont;
    int                j;
    int                maxiter = 100;
    lm_control_struct  control;
    lm_status_struct  *status;
    int                nparam = effnNparams(eFitFn);
    int                p2;
    gmx_bool           bSkipLast;

    /* Using default control structure for double precision fitting that
     * comes with the lmfit package (i.e. from the include file).
     */
    control            = lm_control_double;
    control.verbosity  = (bVerbose ? 1 : 0);
    control.n_maxpri   = 0;
    control.m_maxpri   = 0;

    snew(status, 1);
    /* Initial params */
    chisq  = 1e12;
    j      = 0;
    if (bVerbose)
    {
        printf("%4s  %10s  Parameters\n", "Step", "chi^2");
    }
    /* Check whether we have to skip some params */
    if (nfix > 0)
    {
        do
        {
            p2        = 1 << (nparam-1);
            bSkipLast = ((p2 & nfix) == p2);
            if (bSkipLast)
            {
                nparam--;
                nfix -= p2;
            }
        }
        while ((nparam > 0) && (bSkipLast));
        if (bVerbose)
        {
            printf("Using %d out of %d parameters\n", nparam, effnNparams(eFitFn));
        }
    }
    do
    {
        ochisq = chisq;
        gmx_lmcurve(nparam, parm, nfit, x, y, dy,
                    lmcurves[eFitFn], &control, status);
        chisq = gmx::square(status->fnorm);
        if (bVerbose)
        {
            printf("status: fnorm = %g, nfev = %d, userbreak = %d\noutcome = %s\n",
                   status->fnorm, status->nfev, status->userbreak,
                   lm_infmsg[status->outcome]);
        }
        if (bVerbose)
        {
            int mmm;
            printf("%4d  %8g", j, chisq);
            for (mmm = 0; (mmm < effnNparams(eFitFn)); mmm++)
            {
                printf("  %8g", parm[mmm]);
            }
            printf("\n");
        }
        j++;
        bCont = (fabs(ochisq - chisq) > fabs(control.ftol*chisq));
    }
    while (bCont && (j < maxiter));

    sfree(status);
#else
    gmx_fatal(FARGS, "This build of GROMACS was not configured with support "
              "for lmfit, so the requested fitting cannot be performed. "
              "See the install guide for instructions on how to build "
              "GROMACS with lmfit supported.");
    GMX_UNUSED_VALUE(nfit);
    GMX_UNUSED_VALUE(x);
    GMX_UNUSED_VALUE(y);
    GMX_UNUSED_VALUE(dy);
    GMX_UNUSED_VALUE(parm);
    GMX_UNUSED_VALUE(bVerbose);
    GMX_UNUSED_VALUE(eFitFn);
    GMX_UNUSED_VALUE(nfix);
#endif
    return true;
}
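The nfix handling above drops trailing parameters whose bits are set before fitting. A standalone sketch of that loop, with a worked example (the helper name is hypothetical):

// Standalone sketch of the nfix loop above: repeatedly test the bit of the
// highest remaining parameter and drop that parameter while the bit is set.
static int countFittedParams(int nparam, int nfix)
{
    bool skipLast = false;
    do
    {
        const int p2 = 1 << (nparam - 1);
        skipLast = ((p2 & nfix) == p2);
        if (skipLast)
        {
            nparam--;
            nfix -= p2;
        }
    }
    while (nparam > 0 && skipLast);
    return nparam;
}
// Example: with 4 parameters and nfix = 12 (binary 1100) the two highest
// parameters are dropped, so countFittedParams(4, 12) returns 2.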
Example 18
gmx_bool gmx_get_tng_data_next_frame_of_block_type(tng_trajectory_t     input,
                                                   gmx_int64_t          blockId,
                                                   real               **values,
                                                   gmx_int64_t         *frameNumber,
                                                   double              *frameTime,
                                                   gmx_int64_t         *nValuesPerFrame,
                                                   gmx_int64_t         *nAtoms,
                                                   real                *prec,
                                                   char                *name,
                                                   int                  maxLen,
                                                   gmx_bool            *bOK)
{
#if GMX_USE_TNG
    tng_function_status stat;
    char                datatype = -1;
    gmx_int64_t         codecId;
    int                 blockDependency;
    void               *data = 0;
    double              localPrec;

    stat = tng_data_block_name_get(input, blockId, name, maxLen);
    if (stat != TNG_SUCCESS)
    {
        gmx_file("Cannot read next frame of TNG file");
    }
    stat = tng_data_block_dependency_get(input, blockId, &blockDependency);
    if (stat != TNG_SUCCESS)
    {
        gmx_file("Cannot read next frame of TNG file");
    }
    if (blockDependency & TNG_PARTICLE_DEPENDENT)
    {
        tng_num_particles_get(input, nAtoms);
        stat = tng_util_particle_data_next_frame_read(input,
                                                      blockId,
                                                      &data,
                                                      &datatype,
                                                      frameNumber,
                                                      frameTime);
    }
    else
    {
        *nAtoms = 1; /* There are not actually any atoms, but it is used for
                        allocating memory */
        stat    = tng_util_non_particle_data_next_frame_read(input,
                                                             blockId,
                                                             &data,
                                                             &datatype,
                                                             frameNumber,
                                                             frameTime);
    }
    if (stat == TNG_CRITICAL)
    {
        gmx_file("Cannot read next frame of TNG file");
    }
    if (stat == TNG_FAILURE)
    {
        *bOK = TRUE;
        return FALSE;
    }

    stat = tng_data_block_num_values_per_frame_get(input, blockId, nValuesPerFrame);
    if (stat != TNG_SUCCESS)
    {
        gmx_file("Cannot read next frame of TNG file");
    }
    snew(*values, sizeof(real) * *nValuesPerFrame * *nAtoms);
    convert_array_to_real_array(data,
                                *values,
                                getDistanceScaleFactor(input),
                                *nAtoms,
                                *nValuesPerFrame,
                                datatype);

    tng_util_frame_current_compression_get(input, blockId, &codecId, &localPrec);

    /* This must be updated if/when more lossy compression methods are added */
    if (codecId != TNG_TNG_COMPRESSION)
    {
        *prec = -1.0;
    }
    else
    {
        *prec = localPrec;
    }

    *bOK = TRUE;
    return TRUE;
#else
    GMX_UNUSED_VALUE(input);
    GMX_UNUSED_VALUE(blockId);
    GMX_UNUSED_VALUE(values);
    GMX_UNUSED_VALUE(frameNumber);
    GMX_UNUSED_VALUE(frameTime);
    GMX_UNUSED_VALUE(nValuesPerFrame);
    GMX_UNUSED_VALUE(nAtoms);
    GMX_UNUSED_VALUE(prec);
    GMX_UNUSED_VALUE(name);
    GMX_UNUSED_VALUE(maxLen);
    GMX_UNUSED_VALUE(bOK);
    return FALSE;
#endif
}
Example 19
void gmx_print_tng_molecule_system(tng_trajectory_t input,
                                   FILE            *stream)
{
#if GMX_USE_TNG
    gmx_int64_t        nMolecules, nChains, nResidues, nAtoms, *molCntList;
    tng_molecule_t     molecule;
    tng_chain_t        chain;
    tng_residue_t      residue;
    tng_atom_t         atom;
    char               str[256], varNAtoms;

    tng_num_molecule_types_get(input, &nMolecules);
    tng_molecule_cnt_list_get(input, &molCntList);
    /* Can the number of particles change in the trajectory or is it constant? */
    tng_num_particles_variable_get(input, &varNAtoms);

    for (gmx_int64_t i = 0; i < nMolecules; i++)
    {
        tng_molecule_of_index_get(input, i, &molecule);
        tng_molecule_name_get(input, molecule, str, 256);
        if (varNAtoms == TNG_CONSTANT_N_ATOMS)
        {
            if ((int)molCntList[i] == 0)
            {
                continue;
            }
            fprintf(stream, "Molecule: %s, count: %d\n", str, (int)molCntList[i]);
        }
        else
        {
            fprintf(stream, "Molecule: %s\n", str);
        }
        tng_molecule_num_chains_get(input, molecule, &nChains);
        if (nChains > 0)
        {
            for (gmx_int64_t j = 0; j < nChains; j++)
            {
                tng_molecule_chain_of_index_get(input, molecule, j, &chain);
                tng_chain_name_get(input, chain, str, 256);
                fprintf(stream, "\tChain: %s\n", str);
                tng_chain_num_residues_get(input, chain, &nResidues);
                for (gmx_int64_t k = 0; k < nResidues; k++)
                {
                    tng_chain_residue_of_index_get(input, chain, k, &residue);
                    tng_residue_name_get(input, residue, str, 256);
                    fprintf(stream, "\t\tResidue: %s\n", str);
                    tng_residue_num_atoms_get(input, residue, &nAtoms);
                    for (gmx_int64_t l = 0; l < nAtoms; l++)
                    {
                        tng_residue_atom_of_index_get(input, residue, l, &atom);
                        tng_atom_name_get(input, atom, str, 256);
                        fprintf(stream, "\t\t\tAtom: %s", str);
                        tng_atom_type_get(input, atom, str, 256);
                        fprintf(stream, " (%s)\n", str);
                    }
                }
            }
        }
        /* It is possible to have a molecule without chains, in which case
         * residues in the molecule can be iterated through without going
         * through chains. */
        else
        {
            tng_molecule_num_residues_get(input, molecule, &nResidues);
            if (nResidues > 0)
            {
                for (gmx_int64_t k = 0; k < nResidues; k++)
                {
                    tng_molecule_residue_of_index_get(input, molecule, k, &residue);
                    tng_residue_name_get(input, residue, str, 256);
                    fprintf(stream, "\t\tResidue: %s\n", str);
                    tng_residue_num_atoms_get(input, residue, &nAtoms);
                    for (gmx_int64_t l = 0; l < nAtoms; l++)
                    {
                        tng_residue_atom_of_index_get(input, residue, l, &atom);
                        tng_atom_name_get(input, atom, str, 256);
                        fprintf(stream, "\t\t\tAtom: %s", str);
                        tng_atom_type_get(input, atom, str, 256);
                        fprintf(stream, " (%s)\n", str);
                    }
                }
            }
            else
            {
                tng_molecule_num_atoms_get(input, molecule, &nAtoms);
                for (gmx_int64_t l = 0; l < nAtoms; l++)
                {
                    tng_molecule_atom_of_index_get(input, molecule, l, &atom);
                    tng_atom_name_get(input, atom, str, 256);
                    fprintf(stream, "\t\t\tAtom: %s", str);
                    tng_atom_type_get(input, atom, str, 256);
                    fprintf(stream, " (%s)\n", str);
                }
            }
        }
    }
#else
    GMX_UNUSED_VALUE(input);
    GMX_UNUSED_VALUE(stream);
#endif
}
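
A brief usage sketch, not part of the original sources: it uses the companion gmx_tng_open()/gmx_tng_close() helpers to dump the molecule hierarchy of an existing trajectory to stdout. The file name and the mode character 'r' (assumed to select read mode) are placeholders.

    tng_trajectory_t tng = NULL;

    gmx_tng_open("traj.tng", 'r', &tng);   /* 'r' assumed to mean read-only */
    gmx_print_tng_molecule_system(tng, stdout);
    gmx_tng_close(&tng);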
Example #20
void gmx_prepare_tng_writing(const char              *filename,
                             char                     mode,
                             tng_trajectory_t        *input,
                             tng_trajectory_t        *output,
                             int                      nAtoms,
                             const gmx_mtop_t        *mtop,
                             const int               *index,
                             const char              *indexGroupName)
{
#if GMX_USE_TNG
    /* FIXME after 5.0: Currently only standard block types are read */
    const int           defaultNumIds              = 5;
    static gmx_int64_t  fallbackIds[defaultNumIds] =
    {
        TNG_TRAJ_BOX_SHAPE, TNG_TRAJ_POSITIONS,
        TNG_TRAJ_VELOCITIES, TNG_TRAJ_FORCES,
        TNG_GMX_LAMBDA
    };
    static char         fallbackNames[defaultNumIds][32] =
    {
        "BOX SHAPE", "POSITIONS", "VELOCITIES",
        "FORCES", "LAMBDAS"
    };

    typedef tng_function_status (*set_writing_interval_func_pointer)(tng_trajectory_t,
                                                                     const gmx_int64_t,
                                                                     const gmx_int64_t,
                                                                     const gmx_int64_t,
                                                                     const char*,
                                                                     const char,
                                                                     const char);
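
    /* TNG provides separate float (default) and double flavours of the generic
     * write-interval setter; below, the one matching the compiled precision of
     * GROMACS' `real` type is selected. */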
#if GMX_DOUBLE
    set_writing_interval_func_pointer set_writing_interval = tng_util_generic_write_interval_double_set;
#else
    set_writing_interval_func_pointer set_writing_interval = tng_util_generic_write_interval_set;
#endif

    gmx_tng_open(filename, mode, output);

    /* Do we have an input file in TNG format? If so, then there's
       more data we can copy over, rather than having to improvise. */
    if (*input)
    {
        /* Set parameters (compression, time per frame, molecule
         * information, number of frames per frame set and writing
         * intervals of positions, box shape and lambdas) of the
         * output tng container based on their respective values in
         * the input tng container */
        double      time, compression_precision;
        gmx_int64_t n_frames_per_frame_set, interval = -1;

        tng_compression_precision_get(*input, &compression_precision);
        tng_compression_precision_set(*output, compression_precision);
        // TODO make this configurable in a future version
        char compression_type = TNG_TNG_COMPRESSION;

        tng_molecule_system_copy(*input, *output);

        tng_time_per_frame_get(*input, &time);
        tng_time_per_frame_set(*output, time);

        tng_num_frames_per_frame_set_get(*input, &n_frames_per_frame_set);
        tng_num_frames_per_frame_set_set(*output, n_frames_per_frame_set);

        for (int i = 0; i < defaultNumIds; i++)
        {
            if (tng_data_get_stride_length(*input, fallbackIds[i], -1, &interval)
                == TNG_SUCCESS)
            {
                switch (fallbackIds[i])
                {
                    case TNG_TRAJ_POSITIONS:
                    case TNG_TRAJ_VELOCITIES:
                        set_writing_interval(*output, interval, 3, fallbackIds[i],
                                             fallbackNames[i], TNG_PARTICLE_BLOCK_DATA,
                                             compression_type);
                        break;
                    case TNG_TRAJ_FORCES:
                        set_writing_interval(*output, interval, 3, fallbackIds[i],
                                             fallbackNames[i], TNG_PARTICLE_BLOCK_DATA,
                                             TNG_GZIP_COMPRESSION);
                        break;
                    case TNG_TRAJ_BOX_SHAPE:
                        set_writing_interval(*output, interval, 9, fallbackIds[i],
                                             fallbackNames[i], TNG_NON_PARTICLE_BLOCK_DATA,
                                             TNG_GZIP_COMPRESSION);
                        break;
                    case TNG_GMX_LAMBDA:
                        set_writing_interval(*output, interval, 1, fallbackIds[i],
                                             fallbackNames[i], TNG_NON_PARTICLE_BLOCK_DATA,
                                             TNG_GZIP_COMPRESSION);
                        break;
                    default:
                        continue;
                }
            }
        }

    }
    else
    {
        /* TODO after trjconv is modularized: fix this so the user can
           change precision when they are doing an operation where
           this makes sense, and not otherwise.

           char compression = bUseLossyCompression ? TNG_TNG_COMPRESSION : TNG_GZIP_COMPRESSION;
           gmx_tng_set_compression_precision(*output, ndec2prec(nDecimalsOfPrecision));
         */
        gmx_tng_add_mtop(*output, mtop);
        tng_num_frames_per_frame_set_set(*output, 1);
    }

    if (index && nAtoms > 0)
    {
        gmx_tng_setup_atom_subgroup(*output, nAtoms, index, indexGroupName);
    }

    /* If for some reason there are more requested atoms than there are atoms in the
     * molecular system, create a number of implicit atoms (without atom data) to
     * compensate for that. */
    if (nAtoms >= 0)
    {
        tng_implicit_num_particles_set(*output, nAtoms);
    }
#else
    GMX_UNUSED_VALUE(filename);
    GMX_UNUSED_VALUE(mode);
    GMX_UNUSED_VALUE(input);
    GMX_UNUSED_VALUE(output);
    GMX_UNUSED_VALUE(nAtoms);
    GMX_UNUSED_VALUE(mtop);
    GMX_UNUSED_VALUE(index);
    GMX_UNUSED_VALUE(indexGroupName);
#endif
}
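
A usage sketch for the no-TNG-input path above, not part of the original sources: the variable names, the output file name, the mode character 'w' and the populated `mtop` pointer are assumptions for illustration. With a NULL input handle the molecule data come from mtop, and passing a NULL index and group name writes the whole system.

    tng_trajectory_t in  = NULL;   /* no TNG input to copy parameters from */
    tng_trajectory_t out = NULL;

    gmx_prepare_tng_writing("out.tng", 'w', &in, &out,
                            mtop->natoms, mtop,
                            NULL, NULL);       /* no index group: write all atoms */
    /* ... write frames ... */
    gmx_tng_close(&out);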
Example #21
/* TODO: If/when TNG acquires the ability to copy data blocks without
 * uncompressing them, then this implementation should be reconsidered.
 * Ideally, gmx trjconv -f a.tng -o b.tng -b 10 -e 20 would be fast
 * and lose no information. */
gmx_bool gmx_read_next_tng_frame(tng_trajectory_t            input,
                                 t_trxframe                 *fr,
                                 gmx_int64_t                *requestedIds,
                                 int                         numRequestedIds)
{
#if GMX_USE_TNG
    gmx_bool                bOK = TRUE;
    tng_function_status     stat;
    gmx_int64_t             numberOfAtoms = -1, frameNumber = -1;
    gmx_int64_t             nBlocks, blockId, *blockIds = NULL, codecId;
    char                    datatype      = -1;
    void                   *values        = NULL;
    double                  frameTime     = -1.0;
    int                     size, blockDependency;
    double                  prec;
    const int               defaultNumIds = 5;
    static gmx_int64_t      fallbackRequestedIds[defaultNumIds] =
    {
        TNG_TRAJ_BOX_SHAPE, TNG_TRAJ_POSITIONS,
        TNG_TRAJ_VELOCITIES, TNG_TRAJ_FORCES,
        TNG_GMX_LAMBDA
    };


    fr->bStep     = FALSE;
    fr->bTime     = FALSE;
    fr->bLambda   = FALSE;
    fr->bAtoms    = FALSE;
    fr->bPrec     = FALSE;
    fr->bX        = FALSE;
    fr->bV        = FALSE;
    fr->bF        = FALSE;
    fr->bBox      = FALSE;

    /* If no specific IDs were requested, read all block types that can
     * currently be interpreted. */
    if (!requestedIds || numRequestedIds == 0)
    {
        numRequestedIds = defaultNumIds;
        requestedIds    = fallbackRequestedIds;
    }

    stat = tng_num_particles_get(input, &numberOfAtoms);
    if (stat != TNG_SUCCESS)
    {
        gmx_file("Cannot determine number of atoms from TNG file.");
    }
    fr->natoms = numberOfAtoms;

    if (!gmx_get_tng_data_block_types_of_next_frame(input,
                                                    fr->step,
                                                    numRequestedIds,
                                                    requestedIds,
                                                    &frameNumber,
                                                    &nBlocks,
                                                    &blockIds))
    {
        return FALSE;
    }

    if (nBlocks == 0)
    {
        return FALSE;
    }

    for (gmx_int64_t i = 0; i < nBlocks; i++)
    {
        blockId = blockIds[i];
        tng_data_block_dependency_get(input, blockId, &blockDependency);
        if (blockDependency & TNG_PARTICLE_DEPENDENT)
        {
            stat = tng_util_particle_data_next_frame_read(input,
                                                          blockId,
                                                          &values,
                                                          &datatype,
                                                          &frameNumber,
                                                          &frameTime);
        }
        else
        {
            stat = tng_util_non_particle_data_next_frame_read(input,
                                                              blockId,
                                                              &values,
                                                              &datatype,
                                                              &frameNumber,
                                                              &frameTime);
        }
        if (stat == TNG_CRITICAL)
        {
            gmx_file("Cannot read positions from TNG file.");
            return FALSE;
        }
        else if (stat == TNG_FAILURE)
        {
            continue;
        }
        switch (blockId)
        {
            case TNG_TRAJ_BOX_SHAPE:
                switch (datatype)
                {
                    case TNG_INT_DATA:
                        size = sizeof(gmx_int64_t);
                        break;
                    case TNG_FLOAT_DATA:
                        size = sizeof(float);
                        break;
                    case TNG_DOUBLE_DATA:
                        size = sizeof(double);
                        break;
                    default:
                        gmx_incons("Illegal datatype of box shape values!");
                }
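                /* The box block holds DIM*DIM values, one DIM-long row after
                 * another; convert each row into fr->box[i], applying the
                 * TNG-to-GROMACS distance scale factor. */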
                for (int i = 0; i < DIM; i++)
                {
                    convert_array_to_real_array(reinterpret_cast<char *>(values) + size * i * DIM,
                                                reinterpret_cast<real *>(fr->box[i]),
                                                getDistanceScaleFactor(input),
                                                1,
                                                DIM,
                                                datatype);
                }
                fr->bBox = TRUE;
                break;
            case TNG_TRAJ_POSITIONS:
                srenew(fr->x, fr->natoms);
                convert_array_to_real_array(values,
                                            reinterpret_cast<real *>(fr->x),
                                            getDistanceScaleFactor(input),
                                            fr->natoms,
                                            DIM,
                                            datatype);
                fr->bX = TRUE;
                tng_util_frame_current_compression_get(input, blockId, &codecId, &prec);
                /* This must be updated if/when more lossy compression methods are added */
                if (codecId == TNG_TNG_COMPRESSION)
                {
                    fr->prec  = prec;
                    fr->bPrec = TRUE;
                }
                break;
            case TNG_TRAJ_VELOCITIES:
                srenew(fr->v, fr->natoms);
                convert_array_to_real_array(values,
                                            (real *) fr->v,
                                            getDistanceScaleFactor(input),
                                            fr->natoms,
                                            DIM,
                                            datatype);
                fr->bV = TRUE;
                tng_util_frame_current_compression_get(input, blockId, &codecId, &prec);
                /* This must be updated if/when more lossy compression methods are added */
                if (codecId == TNG_TNG_COMPRESSION)
                {
                    fr->prec  = prec;
                    fr->bPrec = TRUE;
                }
                break;
            case TNG_TRAJ_FORCES:
                srenew(fr->f, fr->natoms);
                convert_array_to_real_array(values,
                                            reinterpret_cast<real *>(fr->f),
                                            getDistanceScaleFactor(input),
                                            fr->natoms,
                                            DIM,
                                            datatype);
                fr->bF = TRUE;
                break;
            case TNG_GMX_LAMBDA:
                switch (datatype)
                {
                    case TNG_FLOAT_DATA:
                        fr->lambda = *(reinterpret_cast<float *>(values));
                        break;
                    case TNG_DOUBLE_DATA:
                        fr->lambda = *(reinterpret_cast<double *>(values));
                        break;
                    default:
                        gmx_incons("Illegal datatype lambda value!");
                }
                fr->bLambda = TRUE;
                break;
            default:
                gmx_warning("Illegal block type! Currently GROMACS tools can only handle certain data types. Skipping block.");
        }
        /* values does not have to be freed before reading next frame. It will
         * be reallocated if it is not NULL. */
    }

    fr->step  = static_cast<int>(frameNumber);
    fr->bStep = TRUE;
    // Convert the time to ps
    fr->time  = frameTime / PICO;
    fr->bTime = TRUE;

    /* values must be freed before leaving this function */
    sfree(values);

    return bOK;
#else
    GMX_UNUSED_VALUE(input);
    GMX_UNUSED_VALUE(fr);
    GMX_UNUSED_VALUE(requestedIds);
    GMX_UNUSED_VALUE(numRequestedIds);
    return FALSE;
#endif
}
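
A read-loop sketch, not part of the original sources: `tng` is assumed to be a handle previously opened for reading, the frame struct is zero-initialised so fr->step starts at 0 and all pointers start NULL, and passing NULL/0 for the requested IDs falls back to the five standard block types listed above.

    t_trxframe frame = {};       /* zero-initialised: step 0, all pointers NULL */

    while (gmx_read_next_tng_frame(tng, &frame, NULL, 0))
    {
        if (frame.bX)
        {
            /* frame.x[0 .. frame.natoms-1] holds positions for step frame.step
             * at time frame.time (ps) */
        }
        /* velocities, forces, box and lambda are available when the matching
         * bV / bF / bBox / bLambda flags are set */
    }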
Example #22
void check_resource_division_efficiency(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
                                        gmx_bool             bNtOmpOptionSet,
                                        t_commrec           *cr,
                                        FILE                *fplog)
{
#if defined GMX_OPENMP && defined GMX_MPI
    int         nth_omp_min, nth_omp_max, ngpu;
    char        buf[1000];
#ifdef GMX_THREAD_MPI
    const char *mpi_option = " (option -ntmpi)";
#else
    const char *mpi_option = "";
#endif

    /* This function should be called after thread-MPI (when configured) and
     * OpenMP have been initialized. Check that here.
     */
#ifdef GMX_THREAD_MPI
    GMX_RELEASE_ASSERT(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max, "Inconsistent OpenMP thread count default values");
    GMX_RELEASE_ASSERT(hw_opt->nthreads_tmpi >= 1, "Must have at least one thread-MPI rank");
#endif
    GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) >= 1, "Must have at least one OpenMP thread");

    nth_omp_min = gmx_omp_nthreads_get(emntDefault);
    nth_omp_max = gmx_omp_nthreads_get(emntDefault);
    ngpu        = hw_opt->gpu_opt.n_dev_use;

    /* Thread-MPI seems to have a bug with reduce on 1 node, so only reduce
     * when there is more than one rank. */
    if (cr->nnodes + cr->npmenodes > 1)
    {
        int count[3], count_max[3];
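        /* One MPI_MAX reduction is enough for both extremes: the minimum
         * OpenMP thread count is stored negated, so -count_max[0] recovers
         * the global minimum while count_max[1] and count_max[2] give the
         * maximum thread and GPU counts. */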

        count[0] = -nth_omp_min;
        count[1] =  nth_omp_max;
        count[2] =  ngpu;

        MPI_Allreduce(count, count_max, 3, MPI_INT, MPI_MAX, cr->mpi_comm_mysim);

        /* In case of an inhomogeneous run setup we use the maximum counts */
        nth_omp_min = -count_max[0];
        nth_omp_max =  count_max[1];
        ngpu        =  count_max[2];
    }

    int nthreads_omp_mpi_ok_min;

    if (ngpu == 0)
    {
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_cpu;
    }
    else
    {
        /* With GPUs we set the minimum number of OpenMP threads to 2 to catch
         * cases where the user specifies #ranks == #cores.
         */
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_gpu;
    }

    if (DOMAINDECOMP(cr) && cr->nnodes > 1)
    {
        if (nth_omp_max < nthreads_omp_mpi_ok_min ||
            (!(ngpu > 0 && !gmx_gpu_sharing_supported()) &&
             nth_omp_max > nthreads_omp_mpi_ok_max))
        {
            /* Note that we print target_max here, not ok_max */
            sprintf(buf, "Your choice of number of MPI ranks and amount of resources results in using %d OpenMP threads per rank, which is most likely inefficient. The optimum is usually between %d and %d threads per rank.",
                    nth_omp_max,
                    nthreads_omp_mpi_ok_min,
                    nthreads_omp_mpi_target_max);

            if (bNtOmpOptionSet)
            {
                md_print_warn(cr, fplog, "NOTE: %s\n", buf);
            }
            else
            {
                /* This fatal error, and the one below, is nasty, but it's
                 * probably the only way to ensure that all users don't waste
                 * a lot of resources, since many users don't read logs/stderr.
                 */
                gmx_fatal(FARGS, "%s If you want to run with this setup, specify the -ntomp option. But we suggest to change the number of MPI ranks%s.", buf, mpi_option);
            }
        }
    }
    else
    {
        /* No domain decomposition (or only one domain) */
        if (!(ngpu > 0 && !gmx_gpu_sharing_supported()) &&
            nth_omp_max > nthreads_omp_faster(hwinfo->cpuid_info, ngpu > 0))
        {
            /* To arrive here, the user/system set #ranks and/or #OMPthreads */
            gmx_bool bEnvSet;
            char     buf2[256];

            bEnvSet = (getenv("OMP_NUM_THREADS") != NULL);

            if (bNtOmpOptionSet || bEnvSet)
            {
                sprintf(buf2, "You requested %d OpenMP threads", nth_omp_max);
            }
            else
            {
                sprintf(buf2, "Your choice of %d MPI rank%s and the use of %d total threads %sleads to the use of %d OpenMP threads",
                        cr->nnodes + cr->npmenodes,
                        cr->nnodes + cr->npmenodes == 1 ? "" : "s",
                        hw_opt->nthreads_tot > 0 ? hw_opt->nthreads_tot : hwinfo->nthreads_hw_avail,
                        hwinfo->nphysicalnode > 1 ? "on a node " : "",
                        nth_omp_max);
            }
            sprintf(buf, "%s, whereas we expect the optimum to be with more MPI ranks with %d to %d OpenMP threads.",
                    buf2, nthreads_omp_mpi_ok_min, nthreads_omp_mpi_target_max);

            /* We can not quit with a fatal error when OMP_NUM_THREADS is set
             * with different values per rank or node, since in that case
             * the user can not set -ntomp to override the error.
             */
            if (bNtOmpOptionSet || (bEnvSet && nth_omp_min != nth_omp_max))
            {
                md_print_warn(cr, fplog, "NOTE: %s\n", buf);
            }
            else
            {
                gmx_fatal(FARGS, "%s If you want to run with this many OpenMP threads, specify the -ntomp option. But we suggest to increase the number of MPI ranks%s.", buf, mpi_option);
            }
        }
    }
#else /* GMX_OPENMP && GMX_MPI */
      /* No OpenMP and/or MPI: it doesn't make much sense to check */
    GMX_UNUSED_VALUE(hw_opt);
    GMX_UNUSED_VALUE(bNtOmpOptionSet);
    /* Check if we have more than 1 physical core, if detected,
     * or more than 1 hardware thread if physical cores were not detected.
     */
#if !(defined GMX_OPENMP) && !(defined GMX_MPI)
    if ((hwinfo->ncore > 1) ||
        (hwinfo->ncore == 0 && hwinfo->nthreads_hw_avail > 1))
    {
        md_print_warn(cr, fplog, "NOTE: GROMACS was compiled without OpenMP and (thread-)MPI support, can only use a single CPU core\n");
    }
#else
    GMX_UNUSED_VALUE(hwinfo);
    GMX_UNUSED_VALUE(cr);
    GMX_UNUSED_VALUE(fplog);
#endif

#endif /* GMX_OPENMP && GMX_MPI */
}
Example #23
/*! \brief Send the PME mesh force, virial and energy to the PP-only ranks. */
static void gmx_pme_send_force_vir_ener(gmx_pme_pp *pme_pp,
                                        const rvec *f,
                                        matrix vir_q, real energy_q,
                                        matrix vir_lj, real energy_lj,
                                        real dvdlambda_q, real dvdlambda_lj,
                                        float cycles)
{
#if GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    for (const auto &receiver : pme_pp->ppRanks)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;
        if (MPI_Isend(const_cast<void *>(static_cast<const void *>(f[ind_start])),
                      (ind_end-ind_start)*sizeof(rvec), MPI_BYTE,
                      receiver.rankId, 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
        {
            gmx_comm("MPI_Isend failed in do_pmeonly");
        }
    }

    /* send virial and energy to our last PP node */
    copy_mat(vir_q, cve.vir_q);
    copy_mat(vir_lj, cve.vir_lj);
    cve.energy_q     = energy_q;
    cve.energy_lj    = energy_lj;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->peerRankId);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->peerRankId, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the forces to arrive */
    MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
#else
    gmx_call("MPI not enabled");
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(f);
    GMX_UNUSED_VALUE(vir_q);
    GMX_UNUSED_VALUE(energy_q);
    GMX_UNUSED_VALUE(vir_lj);
    GMX_UNUSED_VALUE(energy_lj);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}