Example #1
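/* Build a receive-side convertor for (buf, count, datatype). The datatype
 * is retained so it stays valid for the lifetime of the convertor. */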
static mca_pml_yalla_convertor_t *mca_pml_yalla_get_recv_convertor(void *buf, size_t count,
                                                                   ompi_datatype_t *datatype)
{
    mca_pml_yalla_convertor_t *convertor = PML_YALLA_FREELIST_GET(&ompi_pml_yalla.convs);

    convertor->datatype = datatype;
    OBJ_RETAIN(datatype);
    opal_convertor_copy_and_prepare_for_recv(ompi_proc_local_proc->proc_convertor,
                                             &datatype->super, count, buf, 0,
                                             &convertor->convertor);
    return convertor;
}
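
A hedged usage sketch (the helper below is illustrative, not part of pml/yalla): once prepared for receive, the convertor unpacks incoming wire bytes into the user buffer with opal_convertor_unpack, and the datatype reference taken by OBJ_RETAIN is dropped when done.

/* Illustrative sketch only: unpack received bytes through the prepared
 * convertor, then drop the datatype reference taken above. */
static void pml_yalla_unpack_sketch(mca_pml_yalla_convertor_t *convertor,
                                    void *packed, size_t packed_len)
{
    struct iovec iov;
    uint32_t iov_count = 1;
    size_t max_data = packed_len;

    iov.iov_base = packed;
    iov.iov_len  = packed_len;

    /* copies from the packed wire buffer into the user buffer described
     * when the convertor was prepared for receive */
    opal_convertor_unpack(&convertor->convertor, &iov, &iov_count, &max_data);

    /* matches the OBJ_RETAIN in the getter above */
    OBJ_RELEASE(convertor->datatype);
}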
Example #2
/**
 * shmem_get reads data from a remote address
 * in the symmetric heap via RDMA READ.
 * Get operation:
 * 1. Get the rkey to the remote address.
 * 2. Allocate a get request.
 * 3. Allocate a temporary pre-registered buffer
 *    to copy the data to.
 * 4. Init the request descriptor with remote side
 *    data and local side data.
 * 5. Read the remote buffer to a pre-registered
 *    buffer on the local PE using RDMA READ.
 * 6. Copy the received data to dst_addr if an
 *    intermediate pre-register buffer was used.
 * 7. Clear the request and return.
 *
 * src_addr - address on the remote PE.
 * size - the number of bytes to be read.
 * dst_addr - address on the local PE.
 * src - the PE rank of the remote process.
 */
int mca_spml_yoda_get(void* src_addr, size_t size, void* dst_addr, int src)
{
    int rc = OSHMEM_SUCCESS;
    mca_spml_mkey_t *r_mkey, *l_mkey;
    void* rva;
    unsigned ncopied = 0;
    unsigned int frag_size = 0;
    char *p_src, *p_dst;
    int i;
    int nfrags;
    mca_bml_base_btl_t* bml_btl = NULL;
    mca_btl_base_segment_t* segment;
    mca_btl_base_descriptor_t* des = NULL;
    mca_spml_yoda_rdma_frag_t* frag = NULL;
    struct mca_spml_yoda_getreq_parent get_holder;
    struct yoda_btl *ybtl;
    int btl_id = 0;
    int get_via_send;
    const opal_datatype_t *datatype = &opal_datatype_wchar;
    opal_convertor_t convertor;
    oshmem_proc_t *proc_self;
    size_t prepare_size;
    mca_mpool_base_registration_t* registration;
    mca_spml_yoda_get_request_t* getreq = NULL;

    /* If there is nothing to get, it's OK. */
    if (0 >= size) {
        return rc;
    }

    /* Find bml_btl and its global btl_id */
    bml_btl = get_next_btl(src, &btl_id);
    if (!bml_btl) {
        SPML_ERROR("cannot reach %d pe: no appropriate btl found", oshmem_my_proc_id());
        oshmem_shmem_abort(-1);
    }
    /* Check if the btl has a GET method. If it doesn't, use SEND. */
    get_via_send = ! ( (bml_btl->btl->btl_flags & (MCA_BTL_FLAGS_GET)) &&
                       (bml_btl->btl->btl_flags & (MCA_BTL_FLAGS_PUT)) );

    /* Get rkey of remote PE (src proc) which must be on memheap*/
    r_mkey = mca_memheap.memheap_get_cached_mkey(src,
                                                 src_addr,
                                                 btl_id,
                                                 &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   src, src_addr);
        oshmem_shmem_abort(-1);
    }
#if SPML_YODA_DEBUG == 1
    SPML_VERBOSE(100, "get: pe:%d src=%p -> dst: %p sz=%d. src_rva=%p, %s",
                 src, src_addr, dst_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    ybtl = &mca_spml_yoda.btl_type_map[btl_id];

    nfrags = 1;

    /* Check if we are doing a get from an shm-attached segment and, if so,
     * just do a memcpy.
     */
    if ((YODA_BTL_SM == ybtl->btl_type)
            && OPAL_LIKELY(mca_memheap.memheap_is_symmetric_addr(src_addr) && src_addr != rva)) {
        memcpy(dst_addr, (void *) rva, size);
        /* Must call progress here to avoid deadlock. Scenario:
         * pe1 polls pe2 via shm get; pe2 tries to get a static variable from
         * pe1, which goes through the sm btl. In this case pe2 is stuck
         * forever because pe1 never calls opal_progress. Maybe we do not need
         * to call progress on every get() here but rather once in a while.
         */
        opal_progress();
        return OSHMEM_SUCCESS;
    }

    l_mkey = mca_memheap.memheap_get_local_mkey(dst_addr,
                                                btl_id);
    /*
     * A copy is needed if the local memory has not been registered or
     * if we do GET via SEND.
     */
    frag_size = ncopied;
    if ((NULL == l_mkey) || get_via_send) {
        calc_nfrags(bml_btl, size, &frag_size, &nfrags, get_via_send);
    }

    p_src = (char*) (unsigned long) rva;
    p_dst = (char*) dst_addr;
    get_holder.active_count = 0;

    for (i = 0; i < nfrags; i++) {
        /**
         * Allocating a get request from a pre-allocated
         * and pre-registered free list.
         */
        getreq = mca_spml_yoda_getreq_alloc(src);
        assert(getreq);
        getreq->p_dst = NULL;
        frag = &getreq->get_frag;
        getreq->parent = &get_holder;

        ncopied = i < nfrags - 1 ? frag_size : (unsigned) ((char *) dst_addr + size - p_dst);
        frag->allocated = 0;
        /* Prepare destination descriptor*/
        assert(0 != r_mkey->len);
        memcpy(&frag->rdma_segs[0].base_seg,
                r_mkey->u.data,
                r_mkey->len);

        frag->rdma_segs[0].base_seg.seg_len = (get_via_send ? ncopied + SPML_YODA_SEND_CONTEXT_SIZE : ncopied);
        if (get_via_send) {
            frag->use_send = 1;
            frag->allocated = 1;
            /**
             * Allocate a temporary buffer on the local PE.
             * The local buffer will store the data read
             * from the remote address.
             */
            mca_spml_yoda_bml_alloc(bml_btl,
                                    &des,
                                    MCA_BTL_NO_ORDER,
                                    (int)frag_size,
                                    MCA_BTL_DES_SEND_ALWAYS_CALLBACK,
                                    get_via_send);
            if (OPAL_UNLIKELY(!des || !des->des_src)) {
                SPML_ERROR("shmem OOM error need %d bytes", ncopied);
                SPML_ERROR("src=%p nfrags = %d frag_size=%d",
                           src_addr, nfrags, frag_size);
                oshmem_shmem_abort(-1);
            }

            segment = des->des_src;
            spml_yoda_prepare_for_get((void*)segment->seg_addr.pval, ncopied, (void*)p_src, oshmem_my_proc_id(), (void*)p_dst, (void*) getreq);
            des->des_cbfunc = mca_spml_yoda_get_response_completion;

            OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_gets, 1);
        }
        else {
            /*
             * Register src memory if we do GET via GET.
             */
            proc_self = oshmem_proc_group_find(oshmem_group_all, oshmem_my_proc_id());
            OBJ_CONSTRUCT(&convertor, opal_convertor_t);

            prepare_size = ncopied;
            opal_convertor_copy_and_prepare_for_recv(proc_self->proc_convertor,
                                                     datatype,
                                                     prepare_size,
                                                     p_dst,
                                                     0,
                                                     &convertor);

            registration = (NULL == l_mkey ? NULL : ((mca_spml_yoda_context_t*)l_mkey->spml_context)->registration);
            des = ybtl->btl->btl_prepare_dst(ybtl->btl,
                                             bml_btl->btl_endpoint,
                                             registration,
                                             &convertor,
                                             MCA_BTL_NO_ORDER,
                                             0,
                                             &prepare_size,
                                             0);
            if (NULL == des) {
                SPML_ERROR("%s: failed to register destination memory %p.",
                           btl_type2str(ybtl->btl_type), p_dst);
            }
            OBJ_DESTRUCT(&convertor);
            frag->rdma_segs[0].base_seg.seg_addr.lval = (uintptr_t) p_src;
            getreq->p_dst = (uint64_t*) p_dst;
            frag->size = ncopied;
            des->des_cbfunc = mca_spml_yoda_get_completion;
            des->des_src = &frag->rdma_segs[0].base_seg;

            OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_gets, 1);
        }

        /**
         * Initialize the remote data fragment
         * with remote address data required for
         * executing RDMA READ from a remote buffer.
         */

        frag->rdma_req = getreq;

        /**
         * Init remote side descriptor.
         */
        des->des_src_cnt = 1;
        des->des_cbdata = frag;

        /**
         *  Do GET operation
         */
        if (get_via_send) {
            rc = mca_bml_base_send(bml_btl, des, MCA_SPML_YODA_GET);
            if (1 == rc)
                rc = OSHMEM_SUCCESS;
        } else {
            rc = mca_bml_base_get(bml_btl, des);
        }

        if (OPAL_UNLIKELY(OSHMEM_SUCCESS != rc)) {
            if (OSHMEM_ERR_OUT_OF_RESOURCE == rc) {
                /* No free resources, Block on completion here */
                oshmem_request_wait_completion(&getreq->req_get.req_base.req_oshmem);
                return OSHMEM_SUCCESS;
            } else {
                SPML_ERROR("oshmem_get: error %d", rc);
                oshmem_shmem_abort(-1);
                return rc;
            }
        }
        p_dst += ncopied;
        p_src += ncopied;
        OPAL_THREAD_ADD32(&get_holder.active_count, 1);
    }

    /* revisit if we really need this for self and sm */
    /* if (YODA_BTL_SELF == ybtl->btl_type) */
    opal_progress();

    /* Wait for completion on request */
    while (get_holder.active_count > 0)
        oshmem_request_wait_completion(&getreq->req_get.req_base.req_oshmem);

    return rc;
}
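
The ncopied computation above gives every fragment frag_size bytes except the last, which carries the remainder. A standalone sketch of that arithmetic (all values assumed for illustration):

#include <stdio.h>

int main(void)
{
    size_t size = 10000;       /* total bytes to transfer (assumed) */
    unsigned frag_size = 4096; /* max payload per fragment (assumed) */
    int nfrags = (int)((size + frag_size - 1) / frag_size);

    for (int i = 0; i < nfrags; i++) {
        /* mirrors: ncopied = i < nfrags - 1 ? frag_size : remainder */
        unsigned ncopied = (i < nfrags - 1)
            ? frag_size
            : (unsigned)(size - (size_t)i * frag_size);
        printf("frag %d: %u bytes\n", i, ncopied);
    }
    return 0;
}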
Example #3
mca_spml_mkey_t *mca_spml_yoda_register(void* addr,
                                        size_t size,
                                        uint64_t shmid,
                                        int *count)
{
    int i;
    mca_btl_base_descriptor_t* des = NULL;
    const opal_datatype_t *datatype = &opal_datatype_wchar;
    opal_convertor_t convertor;
    mca_spml_mkey_t *mkeys;
    struct yoda_btl *ybtl;
    oshmem_proc_t *proc_self;
    mca_spml_yoda_context_t* yoda_context;
    struct iovec iov;
    uint32_t iov_count = 1;


    SPML_VERBOSE(10, "address %p len %llu", addr, (unsigned long long)size);
    *count = 0;
    /* make sure everything is initialized to 0 */
    mkeys = (mca_spml_mkey_t *) calloc(1,
                                       mca_spml_yoda.n_btls * sizeof(*mkeys));
    if (!mkeys) {
        return NULL;
    }

    proc_self = oshmem_proc_group_find(oshmem_group_all, oshmem_my_proc_id());
    /* create convertor */
    OBJ_CONSTRUCT(&convertor, opal_convertor_t);

    mca_bml.bml_register( MCA_SPML_YODA_PUT,
                          mca_yoda_put_callback,
                          NULL );
    mca_bml.bml_register( MCA_SPML_YODA_GET,
                          mca_yoda_get_callback,
                          NULL );
    mca_bml.bml_register( MCA_SPML_YODA_GET_RESPONSE,
                          mca_yoda_get_response_callback,
                          NULL );
    /* Register proc memory in every rdma BTL. */
    for (i = 0; i < mca_spml_yoda.n_btls; i++) {

        ybtl = &mca_spml_yoda.btl_type_map[i];
        mkeys[i].va_base = addr;

        if (!ybtl->use_cnt) {
            SPML_VERBOSE(10,
                         "%s: present but not in use. SKIP registration",
                         btl_type2str(ybtl->btl_type));
            continue;
        }

        /* If we have shared memory, just save its id. */
        if (YODA_BTL_SM == ybtl->btl_type
                && MEMHEAP_SHM_INVALID != (int) MEMHEAP_SHM_GET_ID(shmid)) {
            mkeys[i].u.key = shmid;
            mkeys[i].va_base = 0;
            continue;
        }

        yoda_context = calloc(1, sizeof(*yoda_context));
        mkeys[i].spml_context = yoda_context;

        yoda_context->registration = NULL;
        if (NULL != ybtl->btl->btl_prepare_src) {
            /* initialize convertor for source descriptor*/
            opal_convertor_copy_and_prepare_for_recv(proc_self->proc_convertor,
                                                     datatype,
                                                     size,
                                                     addr,
                                                     0,
                                                     &convertor);

            if (NULL != ybtl->btl->btl_mpool && NULL != ybtl->btl->btl_mpool->mpool_register) {
                iov.iov_len = size;
                iov.iov_base = NULL;

                opal_convertor_pack(&convertor, &iov, &iov_count, &size);
                ybtl->btl->btl_mpool->mpool_register(ybtl->btl->btl_mpool,
                                                     iov.iov_base, size, 0, &yoda_context->registration);
            }
            /* re-initialize convertor for the source descriptor (the pack above advanced it) */
            opal_convertor_copy_and_prepare_for_recv(proc_self->proc_convertor,
                                                     datatype,
                                                     size,
                                                     addr,
                                                     0,
                                                     &convertor);

            /* register source memory */
            des = ybtl->btl->btl_prepare_src(ybtl->btl,
                                             0,
                                             yoda_context->registration,
                                             &convertor,
                                             MCA_BTL_NO_ORDER,
                                             0,
                                             &size,
                                             0);
            if (NULL == des) {
                SPML_ERROR("%s: failed to register source memory. ",
                           btl_type2str(ybtl->btl_type));
            }

            yoda_context->btl_src_descriptor = des;
            mkeys[i].u.data = des->des_src;
            mkeys[i].len  = ybtl->btl->btl_seg_size;
        }

        SPML_VERBOSE(5,
                     "rank %d btl %s address 0x%p len %llu shmid 0x%X|0x%X",
                     oshmem_proc_local_proc->proc_name.vpid, btl_type2str(ybtl->btl_type), 
                     mkeys[i].va_base, (unsigned long long)size, MEMHEAP_SHM_GET_TYPE(shmid), MEMHEAP_SHM_GET_ID(shmid));
    }
    OBJ_DESTRUCT(&convertor);
    *count = mca_spml_yoda.n_btls;
    return mkeys;
}
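
A hedged caller sketch (the wrapper is hypothetical; field names come from the function above): one mkey per BTL is returned and *count is set to mca_spml_yoda.n_btls. The real SPML pairs registration with a matching deregister call; the plain free() is shown only because the array is calloc'ed above.

/* Hypothetical caller: register a segment, dump the per-BTL keys. */
static void register_and_dump(void *addr, size_t size, uint64_t shmid)
{
    int count = 0;
    mca_spml_mkey_t *mkeys = mca_spml_yoda_register(addr, size, shmid, &count);

    if (NULL == mkeys) {
        SPML_ERROR("registration failed for %p", addr);
        return;
    }
    for (int i = 0; i < count; i++) {
        SPML_VERBOSE(10, "btl %d: va_base=%p len=%d",
                     i, mkeys[i].va_base, (int)mkeys[i].len);
    }
    free(mkeys); /* allocated with calloc() in mca_spml_yoda_register */
}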
Example #4
/*
 * ompi_datatype_sndrcv
 *
 * Function: - copy MPI message from buffer into another
 *           - send/recv done if cannot optimize
 * Accepts:  - send buffer
 *           - send count
 *           - send datatype
 *           - receive buffer
 *           - receive count
 *           - receive datatype
 *           - tag
 *           - communicator
 * Returns:  - MPI_SUCCESS or error code
 */
int32_t ompi_datatype_sndrcv( void *sbuf, int32_t scount, const ompi_datatype_t* sdtype,
                              void *rbuf, int32_t rcount, const ompi_datatype_t* rdtype)
{
    opal_convertor_t send_convertor, recv_convertor;
    struct iovec iov;
    int length, completed;
    uint32_t iov_count;
    size_t max_data;

    /* First check if we really have something to do */
    if (0 == rcount || 0 == rdtype->super.size) {
        return ((0 == scount || 0 == sdtype->super.size) ? MPI_SUCCESS : MPI_ERR_TRUNCATE);
    }

    /* If same datatypes used, just copy. */
    if (sdtype == rdtype) {
        int32_t count = ( scount < rcount ? scount : rcount );
        opal_datatype_copy_content_same_ddt(&(rdtype->super), count, (char*)rbuf, (char*)sbuf);
        return ((scount > rcount) ? MPI_ERR_TRUNCATE : MPI_SUCCESS);
    }

    /* If receive packed. */
    if (rdtype->id == OMPI_DATATYPE_MPI_PACKED) {
        OBJ_CONSTRUCT( &send_convertor, opal_convertor_t );
        opal_convertor_copy_and_prepare_for_send( ompi_mpi_local_convertor,
                                                  &(sdtype->super), scount, sbuf, 0,
                                                  &send_convertor );

        iov_count = 1;
        iov.iov_base = (IOVBASE_TYPE*)rbuf;
        iov.iov_len = scount * sdtype->super.size;
        if( (int32_t)iov.iov_len > rcount ) iov.iov_len = rcount;

        opal_convertor_pack( &send_convertor, &iov, &iov_count, &max_data );
        OBJ_DESTRUCT( &send_convertor );
        return ((max_data < (size_t)rcount) ? MPI_ERR_TRUNCATE : MPI_SUCCESS);
    }

    /* If send packed. */
    if (sdtype->id == OMPI_DATATYPE_MPI_PACKED) {
        OBJ_CONSTRUCT( &recv_convertor, opal_convertor_t );
        opal_convertor_copy_and_prepare_for_recv( ompi_mpi_local_convertor,
                                                  &(rdtype->super), rcount, rbuf, 0,
                                                  &recv_convertor );

        iov_count = 1;
        iov.iov_base = (IOVBASE_TYPE*)sbuf;
        iov.iov_len = rcount * rdtype->super.size;
        if( (int32_t)iov.iov_len > scount ) iov.iov_len = scount;

        opal_convertor_unpack( &recv_convertor, &iov, &iov_count, &max_data );
        OBJ_DESTRUCT( &recv_convertor );
        return (((size_t)scount > max_data) ? MPI_ERR_TRUNCATE : MPI_SUCCESS);
    }

    iov.iov_len = length = 64 * 1024;
    iov.iov_base = (IOVBASE_TYPE*)malloc( length * sizeof(char) );

    OBJ_CONSTRUCT( &send_convertor, opal_convertor_t );
    opal_convertor_copy_and_prepare_for_send( ompi_mpi_local_convertor,
                                              &(sdtype->super), scount, sbuf, 0,
                                              &send_convertor );
    OBJ_CONSTRUCT( &recv_convertor, opal_convertor_t );
    opal_convertor_copy_and_prepare_for_recv( ompi_mpi_local_convertor,
                                              &(rdtype->super), rcount, rbuf, 0,
                                              &recv_convertor );

    completed = 0;
    while( !completed ) {
        iov.iov_len = length;
        iov_count = 1;
        max_data = length;
        completed |= opal_convertor_pack( &send_convertor, &iov, &iov_count, &max_data );
        completed |= opal_convertor_unpack( &recv_convertor, &iov, &iov_count, &max_data );
    }
    free( iov.iov_base );
    OBJ_DESTRUCT( &send_convertor );
    OBJ_DESTRUCT( &recv_convertor );

    return ( (scount * sdtype->super.size) <= (rcount * rdtype->super.size) ? MPI_SUCCESS : MPI_ERR_TRUNCATE );
}
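
A minimal usage sketch, assuming Open MPI's predefined ompi_mpi_int handle: with identical send and receive datatypes the call takes the same-ddt copy fast path; with different but size-compatible datatypes it would run the pack/unpack loop at the end of the function.

/* Illustrative call (assumes the predefined ompi_mpi_int datatype):
 * copy ten ints from sbuf into rbuf. */
static int copy_ints_sketch(void)
{
    int sbuf[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    int rbuf[10];

    return ompi_datatype_sndrcv(sbuf, 10, &ompi_mpi_int.dt,
                                rbuf, 10, &ompi_mpi_int.dt);
}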
Example #5
static inline int ompi_osc_rdma_gacc_master (ompi_osc_rdma_sync_t *sync, const void *source_buffer, int source_count,
                                             ompi_datatype_t *source_datatype, void *result_buffer, int result_count,
                                             ompi_datatype_t *result_datatype, ompi_osc_rdma_peer_t *peer, uint64_t target_address,
                                             mca_btl_base_registration_handle_t *target_handle, int target_count,
                                             ompi_datatype_t *target_datatype, ompi_op_t *op, ompi_osc_rdma_request_t *request)
{
    ompi_osc_rdma_module_t *module = sync->module;
    struct iovec source_iovec[OMPI_OSC_RDMA_DECODE_MAX], target_iovec[OMPI_OSC_RDMA_DECODE_MAX];
    const size_t acc_limit = (mca_osc_rdma_component.buffer_size >> 3);
    uint32_t source_primitive_count, target_primitive_count;
    opal_convertor_t source_convertor, target_convertor;
    uint32_t source_iov_count, target_iov_count;
    uint32_t source_iov_index, target_iov_index;
    ompi_datatype_t *source_primitive, *target_primitive;
    /* needed for opal_convertor_raw but not used */
    size_t source_size, target_size;
    ompi_osc_rdma_request_t *subreq;
    size_t result_position;
    ptrdiff_t lb, extent;
    int ret, acc_len;
    bool done;

    (void) ompi_datatype_get_extent (target_datatype, &lb, &extent);
    target_address += lb;

    /* fast path for accumulate on built-in types */
    if (OPAL_LIKELY((!source_count || ompi_datatype_is_predefined (source_datatype)) &&
                    ompi_datatype_is_predefined (target_datatype) &&
                    (!result_count || ompi_datatype_is_predefined (result_datatype)) &&
                    (target_datatype->super.size * target_count <= acc_limit))) {
        if (NULL == request) {
            OMPI_OSC_RDMA_REQUEST_ALLOC(module, peer, request);
            request->internal = true;
            request->type = result_datatype ? OMPI_OSC_RDMA_TYPE_GET_ACC : OMPI_OSC_RDMA_TYPE_ACC;
        }

        if (source_datatype) {
            (void) ompi_datatype_get_extent (source_datatype, &lb, &extent);
            source_buffer = (void *)((intptr_t) source_buffer + lb);
        }

        if (result_datatype) {
            (void) ompi_datatype_get_extent (result_datatype, &lb, &extent);
            result_buffer = (void *)((intptr_t) result_buffer + lb);
        }

        ret = ompi_osc_rdma_gacc_contig (sync, source_buffer, source_count, source_datatype, result_buffer,
                                         result_count, result_datatype, peer, target_address,
                                         target_handle, target_count, target_datatype, op,
                                         request);
        if (OPAL_LIKELY(OMPI_SUCCESS == ret)) {
            return OMPI_SUCCESS;
        }

        if (source_datatype) {
            /* the convertors will handle the lb */
            (void) ompi_datatype_get_extent (source_datatype, &lb, &extent);
            source_buffer = (void *)((intptr_t) source_buffer - lb);
        }

        if (result_datatype) {
            (void) ompi_datatype_get_extent (result_datatype, &lb, &extent);
            result_buffer = (void *)((intptr_t) result_buffer - lb);
        }
    }

    /* the convertor will handle lb from here */
    (void) ompi_datatype_get_extent (target_datatype, &lb, &extent);
    target_address -= lb;

    /* get the primitive datatype info */
    ret = ompi_osc_base_get_primitive_type_info (target_datatype, &target_primitive, &target_primitive_count);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        /* target datatype is not made up of a single basic datatype */
        return ret;
    }

    if (source_datatype) {
        ret = ompi_osc_base_get_primitive_type_info (source_datatype, &source_primitive, &source_primitive_count);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
            /* source datatype is not made up of a single basic datatype */
            return ret;
        }

        if (OPAL_UNLIKELY(source_primitive != target_primitive)) {
            return MPI_ERR_TYPE;
        }
    }

    /* prepare convertors for the source and target. these convertors will be used to determine the
     * contiguous segments within the source and target. */
    /* the source may be NULL if using MPI_OP_NO_OP with MPI_Get_accumulate */
    if (source_datatype) {
        OBJ_CONSTRUCT(&source_convertor, opal_convertor_t);
        ret = opal_convertor_copy_and_prepare_for_send (ompi_mpi_local_convertor, &source_datatype->super, source_count, source_buffer,
                                                       0, &source_convertor);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
            return ret;
        }
    }

    /* target_datatype can never be NULL */
    OBJ_CONSTRUCT(&target_convertor, opal_convertor_t);
    ret = opal_convertor_copy_and_prepare_for_send (ompi_mpi_local_convertor, &target_datatype->super, target_count,
                                                    (void *) (intptr_t) target_address, 0, &target_convertor);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        return ret;
    }

    if (request) {
        /* keep the request from completing until all the transfers have started */
        request->outstanding_requests = 1;
    }

    target_iov_index = 0;
    target_iov_count = 0;
    result_position = 0;

    do {
        /* decode segments of the source data */
        source_iov_count = OMPI_OSC_RDMA_DECODE_MAX;
        source_iov_index = 0;
        /* opal_convertor_raw returns done when it has reached the end of the data */
        if (!source_datatype) {
            done = true;
            source_iovec[0].iov_len = (size_t) -1;
            source_iovec[0].iov_base = NULL;
            source_iov_count = 1;
        } else {
            done = opal_convertor_raw (&source_convertor, source_iovec, &source_iov_count, &source_size);
        }

        /* loop on the target segments until we have exhausted the decoded source data */
        while (source_iov_index != source_iov_count) {
            if (target_iov_index == target_iov_count) {
                /* decode segments of the target buffer */
                target_iov_count = OMPI_OSC_RDMA_DECODE_MAX;
                target_iov_index = 0;
                (void) opal_convertor_raw (&target_convertor, target_iovec, &target_iov_count, &target_size);
            }

            /* we already checked that the target was large enough. this should be impossible */
            assert (0 != target_iov_count);

            /* determine how much to put in this operation */
            acc_len = min(target_iovec[target_iov_index].iov_len, source_iovec[source_iov_index].iov_len);
            acc_len = min((size_t) acc_len, acc_limit);

            /* execute the get */
            OMPI_OSC_RDMA_REQUEST_ALLOC(module, peer, subreq);
            subreq->internal = true;
            subreq->parent_request = request;
            if (request) {
                (void) OPAL_THREAD_ADD32 (&request->outstanding_requests, 1);
            }

            if (result_datatype) {
                /* prepare a convertor for this part of the result */
                opal_convertor_copy_and_prepare_for_recv (ompi_mpi_local_convertor, &result_datatype->super, result_count,
                                                          result_buffer, 0, &subreq->convertor);
                opal_convertor_set_position (&subreq->convertor, &result_position);
                subreq->type = OMPI_OSC_RDMA_TYPE_GET_ACC;
            } else {
                subreq->type = OMPI_OSC_RDMA_TYPE_ACC;
            }

            OPAL_OUTPUT_VERBOSE((60, ompi_osc_base_framework.framework_output,
                                 "target index = %d, target = {%p, %lu}, source_index = %d, source = {%p, %lu}, result = %p, result position = %lu, "
                                 "acc_len = %d, count = %lu",
                                 target_iov_index, target_iovec[target_iov_index].iov_base, (unsigned long) target_iovec[target_iov_index].iov_len,
                                 source_iov_index, source_iovec[source_iov_index].iov_base, (unsigned long) source_iovec[source_iov_index].iov_len,
                                 result_buffer, (unsigned long) result_position, acc_len, (unsigned long)(acc_len / target_primitive->super.size)));


            ret = ompi_osc_rdma_gacc_contig (sync, source_iovec[source_iov_index].iov_base, acc_len / target_primitive->super.size,
                                             target_primitive, NULL, 0, NULL, peer, (uint64_t) (intptr_t) target_iovec[target_iov_index].iov_base,
                                             target_handle, acc_len / target_primitive->super.size, target_primitive, op, subreq);
            if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
                if (OPAL_UNLIKELY(OMPI_ERR_OUT_OF_RESOURCE != ret)) {
                    /* something bad happened. need to figure out how to handle these errors */
                    return ret;
                }

                /* progress and try again */
                ompi_osc_rdma_progress (module);
                continue;
            }

            /* adjust io vectors */
            target_iovec[target_iov_index].iov_len -= acc_len;
            source_iovec[source_iov_index].iov_len -= acc_len;
            target_iovec[target_iov_index].iov_base = (void *)((intptr_t) target_iovec[target_iov_index].iov_base + acc_len);
            source_iovec[source_iov_index].iov_base = (void *)((intptr_t) source_iovec[source_iov_index].iov_base + acc_len);
            result_position += acc_len;

            source_iov_index += !source_datatype || (0 == source_iovec[source_iov_index].iov_len);
            target_iov_index += (0 == target_iovec[target_iov_index].iov_len);
        }
    } while (!done);

    if (request) {
        /* release our reference so the request can complete */
        (void) OPAL_THREAD_ADD32 (&request->outstanding_requests, -1);
    }

    if (source_datatype) {
        opal_convertor_cleanup (&source_convertor);
        OBJ_DESTRUCT(&source_convertor);
    }

    opal_convertor_cleanup (&target_convertor);
    OBJ_DESTRUCT(&target_convertor);

    return OMPI_SUCCESS;
}
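
The heart of the non-contiguous path above is pairing decoded source segments against decoded target segments, clamping each operation to the smaller remaining length. A self-contained sketch of that pairing, with a plain memcpy standing in for ompi_osc_rdma_gacc_contig:

#include <string.h>
#include <sys/uio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Zip two iovec lists into matched contiguous chunks, the way the
 * gacc loop above pairs source and target segments. */
static void zip_iovecs(struct iovec *src, int src_cnt,
                       struct iovec *dst, int dst_cnt)
{
    int si = 0, di = 0;

    while (si < src_cnt && di < dst_cnt) {
        size_t len = MIN(src[si].iov_len, dst[di].iov_len);

        /* stand-in for one ompi_osc_rdma_gacc_contig operation */
        memcpy(dst[di].iov_base, src[si].iov_base, len);

        /* same bookkeeping as the "adjust io vectors" step above */
        src[si].iov_base = (char *)src[si].iov_base + len;
        dst[di].iov_base = (char *)dst[di].iov_base + len;
        src[si].iov_len -= len;
        dst[di].iov_len -= len;
        si += (0 == src[si].iov_len);
        di += (0 == dst[di].iov_len);
    }
}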
Example #6
/**
 * Shared memory broadcast.
 *
 * For the root, the general algorithm is to wait for a set of
 * segments to become available.  Once it is, the root claims the set
 * by writing the current operation number and the number of processes
 * using the set to the flag.  The root then loops over the set of
 * segments; for each segment, it copies a fragment of the user's
 * buffer into the shared data segment and then writes the data size
 * into its children's control buffers.  The process is repeated until
 * all fragments have been written.
 *
 * For non-roots, for each set of buffers, they wait until the current
 * operation number appears in the in-use flag (i.e., written by the
 * root).  Then for each segment, they wait for a nonzero to appear
 * into their control buffers.  If they have children, they copy the
 * data from their parent's shared data segment into their shared data
 * segment, and write the data size into each of their children's
 * control buffers.  They then copy the data from their shared [local]
 * data segment into the user's output buffer.  The process is
 * repeated until all fragments have been received.  If they do not
 * have children, they copy the data directly from the parent's shared
 * data segment into the user's output buffer.
 */
int mca_coll_sm_bcast_intra(void *buff, int count,
                            struct ompi_datatype_t *datatype, int root,
                            struct ompi_communicator_t *comm,
                            mca_coll_base_module_t *module)
{
    struct iovec iov;
    mca_coll_sm_module_t *sm_module = (mca_coll_sm_module_t*) module;
    mca_coll_sm_comm_t *data;
    int i, ret, rank, size, num_children, src_rank;
    int flag_num, segment_num, max_segment_num;
    int parent_rank;
    size_t total_size, max_data, bytes;
    mca_coll_sm_in_use_flag_t *flag;
    opal_convertor_t convertor;
    mca_coll_sm_tree_node_t *me, *parent, **children;
    mca_coll_sm_data_index_t *index;

    /* Lazily enable the module the first time we invoke a collective
       on it */
    if (!sm_module->enabled) {
        if (OMPI_SUCCESS != (ret = ompi_coll_sm_lazy_enable(module, comm))) {
            return ret;
        }
    }
    data = sm_module->sm_comm_data;

    /* Setup some identities */

    rank = ompi_comm_rank(comm);
    size = ompi_comm_size(comm);

    OBJ_CONSTRUCT(&convertor, opal_convertor_t);
    iov.iov_len = mca_coll_sm_component.sm_fragment_size;
    bytes = 0;

    me = &data->mcb_tree[(rank + size - root) % size];
    parent = me->mcstn_parent;
    children = me->mcstn_children;
    num_children = me->mcstn_num_children;

    /* Only have one top-level decision as to whether I'm the root or
       not.  Do this at the slight expense of repeating a little logic
       -- but it's better than a conditional branch in every loop
       iteration. */

    /*********************************************************************
     * Root
     *********************************************************************/

    if (root == rank) {

        /* The root needs a send convertor to pack from the user's
           buffer to shared memory */

        if (OMPI_SUCCESS !=
            (ret =
             opal_convertor_copy_and_prepare_for_send(ompi_mpi_local_convertor,
                                                      &(datatype->super),
                                                      count,
                                                      buff,
                                                      0,
                                                      &convertor))) {
            return ret;
        }
        opal_convertor_get_packed_size(&convertor, &total_size);

        /* Main loop over sending fragments */

        do {
            flag_num = (data->mcb_operation_count++ %
                        mca_coll_sm_component.sm_comm_num_in_use_flags);

            FLAG_SETUP(flag_num, flag, data);
            FLAG_WAIT_FOR_IDLE(flag, bcast_root_label);
            FLAG_RETAIN(flag, size - 1, data->mcb_operation_count - 1);

            /* Loop over all the segments in this set */

            segment_num =
                flag_num * mca_coll_sm_component.sm_segs_per_inuse_flag;
            max_segment_num =
                (flag_num + 1) * mca_coll_sm_component.sm_segs_per_inuse_flag;
            do {
                index = &(data->mcb_data_index[segment_num]);

                /* Copy the fragment from the user buffer to my fragment
                   in the current segment */
                max_data = mca_coll_sm_component.sm_fragment_size;
                COPY_FRAGMENT_IN(convertor, index, rank, iov, max_data);
                bytes += max_data;

                /* Wait for the write to absolutely complete */
                opal_atomic_wmb();

                /* Tell my children that this fragment is ready */
                PARENT_NOTIFY_CHILDREN(children, num_children, index,
                                       max_data);

                ++segment_num;
            } while (bytes < total_size && segment_num < max_segment_num);
        } while (bytes < total_size);
    }

    /*********************************************************************
     * Non-root
     *********************************************************************/

    else {

        /* Non-root processes need a receive convertor to unpack from
           shared memory to the user's buffer */

        if (OMPI_SUCCESS !=
            (ret =
             opal_convertor_copy_and_prepare_for_recv(ompi_mpi_local_convertor,
                                                      &(datatype->super),
                                                      count,
                                                      buff,
                                                      0,
                                                      &convertor))) {
            return ret;
        }
        opal_convertor_get_packed_size(&convertor, &total_size);

        /* Loop over receiving (and possibly re-sending) the
           fragments */

        do {
            flag_num = (data->mcb_operation_count %
                        mca_coll_sm_component.sm_comm_num_in_use_flags);

            /* Wait for the root to mark this set of segments as
               ours */
            FLAG_SETUP(flag_num, flag, data);
            FLAG_WAIT_FOR_OP(flag, data->mcb_operation_count, bcast_nonroot_label1);
            ++data->mcb_operation_count;

            /* Loop over all the segments in this set */

            segment_num =
                flag_num * mca_coll_sm_component.sm_segs_per_inuse_flag;
            max_segment_num =
                (flag_num + 1) * mca_coll_sm_component.sm_segs_per_inuse_flag;
            do {

                /* Pre-calculate some values */
                parent_rank = (parent->mcstn_id + root) % size;
                index = &(data->mcb_data_index[segment_num]);

                /* Wait for my parent to tell me that the segment is ready */
                CHILD_WAIT_FOR_NOTIFY(rank, index, max_data, bcast_nonroot_label2);

                /* If I have children, send the data to them */
                if (num_children > 0) {
                    /* Copy the fragment from the parent's portion in
                       the segment to my portion in the segment. */
                    COPY_FRAGMENT_BETWEEN(parent_rank, rank, index, max_data);

                    /* Wait for the write to absolutely complete */
                    opal_atomic_wmb();

                    /* Tell my children that this fragment is ready */
                    PARENT_NOTIFY_CHILDREN(children, num_children, index,
                                           max_data);

                    /* Set the "copy from buffer" to be my local
                       segment buffer so that we don't potentially
                       incur a non-local memory copy from the parent's
                       fan out data segment [again] when copying to
                       the user's buffer */
                    src_rank = rank;
                }

                /* If I don't have any children, set the "copy from
                   buffer" to be my parent's fan out segment to copy
                   directly from my parent */

                else {
                    src_rank = parent_rank;
                }

                /* Copy to my output buffer */
                COPY_FRAGMENT_OUT(convertor, src_rank, index, iov, max_data);

                bytes += max_data;
                ++segment_num;
            } while (bytes < total_size && segment_num < max_segment_num);

            /* Wait for all copy-out writes to complete before I say
               I'm done with the segments */
            opal_atomic_wmb();

            /* We're finished with this set of segments */
            FLAG_RELEASE(flag);
        } while (bytes < total_size);
    }

    /* Kill the convertor */

    OBJ_DESTRUCT(&convertor);

    /* All done */

    return OMPI_SUCCESS;
}
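
The in-use-flag bookkeeping above reduces to modular arithmetic over the operation count: each operation claims one flag and the contiguous run of segments belonging to it. A standalone sketch with assumed component values:

#include <stdio.h>

int main(void)
{
    /* assumed stand-ins for the mca_coll_sm_component fields */
    int num_in_use_flags = 2;
    int segs_per_inuse_flag = 4;

    for (int op_count = 0; op_count < 6; op_count++) {
        int flag_num  = op_count % num_in_use_flags;
        int first_seg = flag_num * segs_per_inuse_flag;
        int last_seg  = (flag_num + 1) * segs_per_inuse_flag - 1;
        printf("op %d -> flag %d, segments %d..%d\n",
               op_count, flag_num, first_seg, last_seg);
    }
    return 0;
}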