Example #1
/* ArrayRef overload of ddSendrecv: unpacks the buffers and forwards to the
 * pointer + size overload. */
template <typename T>
void
ddSendrecv(const gmx_domdec_t *dd,
           int                 ddDimensionIndex,
           int                 direction,
           gmx::ArrayRef<T>    sendBuffer,
           gmx::ArrayRef<T>    receiveBuffer)
{
    ddSendrecv(dd, ddDimensionIndex, direction,
               sendBuffer.data(), sendBuffer.size(),
               receiveBuffer.data(), receiveBuffer.size());
}
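The wrapper in Example #1 only unpacks data() and size() from the ArrayRef views and forwards to a pointer + size overload. Below is a minimal, self-contained sketch of the same forwarding pattern outside GROMACS, using std::span (C++20) in place of gmx::ArrayRef; sendReceive and its raw-pointer backend are hypothetical names used only for illustration.

#include <cstddef>
#include <iostream>
#include <span>
#include <vector>

/* Hypothetical low-level backend taking raw pointers and element counts,
 * standing in for the pointer + size overload. For illustration it simply
 * copies as many elements as fit into the receive buffer. */
template <typename T>
void sendReceive(const T *sendData, std::size_t numSend,
                 T *receiveData, std::size_t numReceive)
{
    const std::size_t n = numSend < numReceive ? numSend : numReceive;
    for (std::size_t i = 0; i < n; i++)
    {
        receiveData[i] = sendData[i];
    }
}

/* View-based convenience overload, analogous to the ArrayRef wrapper above:
 * it only extracts data() and size() and forwards to the raw overload. */
template <typename T>
void sendReceive(std::span<const T> sendBuffer, std::span<T> receiveBuffer)
{
    sendReceive(sendBuffer.data(), sendBuffer.size(),
                receiveBuffer.data(), receiveBuffer.size());
}

int main()
{
    std::vector<int> toSend   = {1, 2, 3};
    std::vector<int> received(3);
    sendReceive(std::span<const int>(toSend), std::span<int>(received));
    std::cout << received[0] << ' ' << received[1] << ' ' << received[2] << '\n';
    return 0;
}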
Example #2
static void dd_collect_vec_gatherv(gmx_domdec_t                  *dd,
                                   gmx::ArrayRef<const gmx::RVec> lv,
                                   gmx::ArrayRef<gmx::RVec>       v)
{
    int *recvCounts    = nullptr;
    int *displacements = nullptr;

    /* Only the master rank needs the per-rank receive counts and displacements */
    if (DDMASTER(dd))
    {
        get_commbuffer_counts(dd->ma.get(), &recvCounts, &displacements);
    }

    /* Gather the home-atom coordinates of all ranks into the master's rvecBuffer */
    const int numHomeAtoms = dd->comm->atomRanges.numHomeAtoms();
    dd_gatherv(dd, numHomeAtoms*sizeof(rvec), lv.data(), recvCounts, displacements,
               DDMASTER(dd) ? dd->ma->rvecBuffer.data() : nullptr);

    /* On the master, copy the gathered coordinates to their global atom indices */
    if (DDMASTER(dd))
    {
        const AtomDistribution &ma     = *dd->ma;

        const t_block          &cgs_gl = dd->comm->cgs_gl;

        int                     bufferAtom = 0;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            const auto &domainGroups = ma.domainGroups[rank];
            for (const int &cg : domainGroups.atomGroups)
            {
                for (int globalAtom = cgs_gl.index[cg]; globalAtom < cgs_gl.index[cg + 1]; globalAtom++)
                {
                    copy_rvec(ma.rvecBuffer[bufferAtom++], v[globalAtom]);
                }
            }
        }
    }
}
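dd_collect_vec_gatherv relies on a collective gather with per-rank receive counts and displacements (computed by get_commbuffer_counts on the master). The following is a minimal, self-contained MPI sketch of that pattern, not GROMACS code: each rank contributes a different number of elements and the root gathers them with MPI_Gatherv.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0, size = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Each rank contributes rank + 1 elements, filled with its rank number. */
    std::vector<double> local(rank + 1, static_cast<double>(rank));

    /* Only the root needs the receive counts, displacements and gather buffer. */
    std::vector<int>    recvCounts, displacements;
    std::vector<double> gathered;
    if (rank == 0)
    {
        recvCounts.resize(size);
        displacements.resize(size);
        int offset = 0;
        for (int r = 0; r < size; r++)
        {
            recvCounts[r]    = r + 1;
            displacements[r] = offset;
            offset          += recvCounts[r];
        }
        gathered.resize(offset);
    }

    MPI_Gatherv(local.data(), static_cast<int>(local.size()), MPI_DOUBLE,
                rank == 0 ? gathered.data() : nullptr,
                rank == 0 ? recvCounts.data() : nullptr,
                rank == 0 ? displacements.data() : nullptr,
                MPI_DOUBLE, 0, MPI_COMM_WORLD);

    if (rank == 0)
    {
        std::printf("gathered %zu elements\n", gathered.size());
    }

    MPI_Finalize();
    return 0;
}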
Example #3
static void dd_collect_vec_sendrecv(gmx_domdec_t                  *dd,
                                    gmx::ArrayRef<const gmx::RVec> lv,
                                    gmx::ArrayRef<gmx::RVec>       v)
{
    /* Non-master ranks send their home-atom coordinates to the master */
    if (!DDMASTER(dd))
    {
#if GMX_MPI
        const int numHomeAtoms = dd->comm->atomRanges.numHomeAtoms();
        MPI_Send(const_cast<void *>(static_cast<const void *>(lv.data())), numHomeAtoms*sizeof(rvec), MPI_BYTE,
                 dd->masterrank, dd->rank, dd->mpi_comm_all);
#endif
    }
    else
    {
        AtomDistribution &ma = *dd->ma;

        /* Copy the master coordinates to the global array */
        const t_block &cgs_gl    = dd->comm->cgs_gl;

        int            rank      = dd->masterrank;
        int            localAtom = 0;
        for (const int &i : ma.domainGroups[rank].atomGroups)
        {
            for (int globalAtom = cgs_gl.index[i]; globalAtom < cgs_gl.index[i + 1]; globalAtom++)
            {
                copy_rvec(lv[localAtom++], v[globalAtom]);
            }
        }

        /* Receive the home-atom coordinates of every other rank and copy
         * them to their global atom indices */
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            if (rank != dd->rank)
            {
                const auto &domainGroups = ma.domainGroups[rank];

                GMX_RELEASE_ASSERT(v.data() != ma.rvecBuffer.data(), "We need different communication and return buffers");

                /* When we send/recv instead of scatter/gather, we might need
                 * to increase the communication buffer size here.
                 */
                if (static_cast<size_t>(domainGroups.numAtoms) > ma.rvecBuffer.size())
                {
                    ma.rvecBuffer.resize(domainGroups.numAtoms);
                }

#if GMX_MPI
                MPI_Recv(ma.rvecBuffer.data(), domainGroups.numAtoms*sizeof(rvec), MPI_BYTE, rank,
                         rank, dd->mpi_comm_all, MPI_STATUS_IGNORE);
#endif
                int localAtom = 0;
                for (const int &cg : domainGroups.atomGroups)
                {
                    for (int globalAtom = cgs_gl.index[cg]; globalAtom < cgs_gl.index[cg + 1]; globalAtom++)
                    {
                        copy_rvec(ma.rvecBuffer[localAtom++], v[globalAtom]);
                    }
                }
            }
        }
    }
}
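dd_collect_vec_sendrecv implements the same collection with point-to-point messages: every non-master rank sends its block, and the master copies its own data and then receives from each other rank in turn. The sketch below shows that structure with plain MPI and a fixed block size; it is an illustration, not GROMACS code.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0, size = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int           numLocal = 4; /* fixed block size for simplicity */
    std::vector<double> local(numLocal, static_cast<double>(rank));

    if (rank != 0)
    {
        /* Non-root ranks send their block to the root, using the rank as tag. */
        MPI_Send(local.data(), numLocal, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD);
    }
    else
    {
        std::vector<double> global(static_cast<size_t>(numLocal) * size);

        /* Copy the root's own data directly into the global array. */
        for (int i = 0; i < numLocal; i++)
        {
            global[i] = local[i];
        }

        /* Receive the block of every other rank into its slot. */
        for (int src = 1; src < size; src++)
        {
            MPI_Recv(global.data() + static_cast<size_t>(src) * numLocal, numLocal,
                     MPI_DOUBLE, src, src, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }

        std::printf("collected %zu elements on the root\n", global.size());
    }

    MPI_Finalize();
    return 0;
}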