static int getMaxGpuUsable(FILE *fplog, const t_commrec *cr, const gmx_hw_info_t *hwinfo, int cutoff_scheme)
{
    /* This code relies on the fact that GPU are not detected when GPU
     * acceleration was disabled at run time by the user.
     */
    const int numCompatibleGpus = hwinfo->gpu_info.n_dev_compatible;

    /* GPUs are only usable with the Verlet cut-off scheme, and only
     * when at least one compatible device was detected. */
    if (cutoff_scheme != ecutsVERLET || numCompatibleGpus <= 0)
    {
        return 0;
    }

    /* Some build configurations support only one GPU per node; warn
     * once if the user has more hardware than we can use. */
    if (!gmx_multiple_gpu_per_node_supported())
    {
        if (numCompatibleGpus > 1)
        {
            md_print_warn(cr, fplog, "More than one compatible GPU is available, but GROMACS can only use one of them. Using a single thread-MPI rank.\n");
        }
        return 1;
    }

    return numCompatibleGpus;
}
/* Example #2 */
/*! \brief Builds a full mdrun command line from \c callerRef plus the
 * fixture's file names and the configured parallelism/GPU options, then
 * invokes mdrun.
 *
 * \param[in] callerRef  Base command line supplied by the test; copied,
 *                       never modified.
 * \returns the return value of gmx_mdrun() (the mdrun exit status).
 */
int
SimulationRunner::callMdrun(const CommandLine &callerRef)
{
    /* Conforming to style guide by not passing a non-const reference
       to this function. Passing a non-const reference might make it
       easier to write code that incorrectly re-uses callerRef after
       the call to this function. */

    CommandLine caller(callerRef);
    caller.addOption("-s", tprFileName_);

    /* Route the standard output files to the fixture-managed temporary
     * file names so test runs do not collide. */
    caller.addOption("-g", logFileName_);
    caller.addOption("-e", edrFileName_);
    caller.addOption("-o", fullPrecisionTrajectoryFileName_);
    caller.addOption("-x", reducedPrecisionTrajectoryFileName_);

    /* Any remaining output files (e.g. checkpoints) default to
     * temporary "state"-prefixed paths via -deffnm. */
    caller.addOption("-deffnm", fixture_->fileManager_.getTemporaryFilePath("state"));

    /* NOTE(review): -2 appears to be the "not set" sentinel for nsteps_,
     * so the .tpr step count is only overridden when the test asked for
     * a specific value — TODO confirm the member's default. */
    if (nsteps_ > -2)
    {
        caller.addOption("-nsteps", nsteps_);
    }

#if GMX_MPI
#  if GMX_GPU != GMX_GPU_NONE
#    if GMX_THREAD_MPI
    int         numGpusNeeded = g_numThreads;
#    else   /* Must be real MPI */
    int         numGpusNeeded = gmx_node_num();
#    endif
    /* One '0' per rank, i.e. every rank is mapped to GPU id 0. */
    std::string gpuIdString(numGpusNeeded, '0');
    caller.addOption("-gpu_id", gpuIdString.c_str());
#  endif
#endif

#if GMX_THREAD_MPI
    caller.addOption("-ntmpi", g_numThreads);
#endif

#if GMX_OPENMP
    caller.addOption("-ntomp", g_numOpenMPThreads);
#endif

#if GMX_GPU != GMX_GPU_NONE
    /* TODO Ideally, with real MPI, we could call
     * gmx_collect_hardware_mpi() here and find out how many nodes
     * mdrun will run on. For now, we assume that we're running on one
     * node regardless of the number of ranks, because that's true in
     * Jenkins and for most developers running the tests. */
    int numberOfNodes = 1;
#if GMX_THREAD_MPI
    /* Can't use gmx_node_num() because it is only valid after spawn of thread-MPI threads */
    int numberOfRanks = g_numThreads;
#elif GMX_LIB_MPI
    int numberOfRanks = gmx_node_num();
#else
    int numberOfRanks = 1;
#endif
    /* Builds that support only one GPU per node cannot satisfy multiple
     * ranks on our assumed single node, so fall back to CPU non-bonded
     * kernels (warning printed once, by rank 0 only). */
    if (numberOfRanks > numberOfNodes && !gmx_multiple_gpu_per_node_supported())
    {
        if (gmx_node_rank() == 0)
        {
            fprintf(stderr, "GROMACS in this build configuration cannot run on more than one GPU per node,\n so with %d ranks and %d nodes, this test will disable GPU support", numberOfRanks, numberOfNodes);
        }
        caller.addOption("-nb", "cpu");
    }
#endif
    return gmx_mdrun(caller.argc(), caller.argv());
}