Code Example #1
File: spml_ikrit.c Project: XuanWang1982/ompi
static int mca_spml_ikrit_get_helper(mxm_send_req_t *sreq,
                                     void *src_addr,
                                     size_t size,
                                     void *dst_addr,
                                     int src)
{
    /* shmem spec states that get() operations are blocking, so it is enough
     * to have a single mxm request. Also we count on mxm doing the copy */
    void *rva;
    sshmem_mkey_t *r_mkey;
    int ptl_id;

    ptl_id = get_ptl_id(src);
    /* already tried to send via shm and failed. go via rdma */
    if (ptl_id == MXM_PTL_SHM)
        ptl_id = MXM_PTL_RDMA;

    /**
     * Get the rkey for the remote address.
     **/
    r_mkey = mca_memheap.memheap_get_cached_mkey(src,
                                                 src_addr,
                                                 ptl_id,
                                                 &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   src, src_addr);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    SPML_VERBOSE(100,
                 "get: pe:%d ptl=%d src=%p -> dst: %p sz=%d. src_rva=%p, %s",
                 src, ptl_id, src_addr, dst_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));

    /* mxm does not really care about the get lkey */
    sreq->base.mq = mca_spml_ikrit.mxm_mq;
    sreq->base.conn = mca_spml_ikrit.mxm_peers[src]->mxm_conn;
    sreq->base.data_type = MXM_REQ_DATA_BUFFER;
    sreq->base.data.buffer.ptr = dst_addr;
    sreq->base.data.buffer.length = size;
#if MXM_API < MXM_VERSION(2,0)
    sreq->base.data.buffer.memh = NULL;
    sreq->op.mem.remote_memh = NULL;
#else
    sreq->op.mem.remote_mkey = to_mxm_mkey(r_mkey);
#endif
    sreq->opcode = MXM_REQ_OP_GET;
    sreq->op.mem.remote_vaddr = (intptr_t) rva;
    sreq->base.state = MXM_REQ_NEW;

    return OSHMEM_SUCCESS;
}
Code Example #2
File: spml_ucx.c Project: 00datman/ompi
int mca_spml_ucx_fence(void)
{
    ucs_status_t err;

    err = ucp_worker_flush(mca_spml_ucx.ucp_worker);
    if (UCS_OK != err) {
        SPML_ERROR("fence failed");
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }
    return OSHMEM_SUCCESS;
}
Code Example #3
File: spml_ucx.c Project: davideberius/ompi
int mca_spml_ucx_quiet(shmem_ctx_t ctx)
{
    int ret;
    mca_spml_ucx_ctx_t *ucx_ctx = (mca_spml_ucx_ctx_t *)ctx;

    ret = opal_common_ucx_worker_flush(ucx_ctx->ucp_worker);
    if (OMPI_SUCCESS != ret) {
        oshmem_shmem_abort(-1);
        return ret;
    }
    return OSHMEM_SUCCESS;
}
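
For context, here is a minimal sketch (not from the OMPI tree) of how an OpenSHMEM-level quiet could dispatch into this SPML hook. The MCA_SPML_CALL indirection is the usual oshmem pattern, but its use here, and the wrapper name, are assumptions:

/* Hypothetical caller sketch: route a quiet request through the SPML
 * layer via the MCA_SPML_CALL indirection (assumed usage). */
static void my_ctx_quiet(shmem_ctx_t ctx)
{
    if (OSHMEM_SUCCESS != MCA_SPML_CALL(quiet(ctx))) {
        oshmem_shmem_abort(-1);
    }
}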
Code Example #4
/* extension: used for fence implementation before fence was added to mxm */
int mca_spml_ikrit_get_async(void *src_addr,
                             size_t size,
                             void *dst_addr,
                             int src)
{
    mca_spml_ikrit_get_request_t *get_req;

    if (OSHMEM_SUCCESS == mca_spml_ikrit_get_shm(src_addr, size, dst_addr, src))
        return OSHMEM_SUCCESS;

    get_req = alloc_get_req();
    if (NULL == get_req) {
        SPML_ERROR("out of get requests - aborting");
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    if (OSHMEM_SUCCESS
            != mca_spml_ikrit_get_helper(&get_req->mxm_req,
                                         src_addr,
                                         size,
                                         dst_addr,
                                         src)) {
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

#if MXM_API < MXM_VERSION(2,0)
    get_req->mxm_req.base.flags = 0;
#else
    get_req->mxm_req.flags = 0;
#endif
    get_req->mxm_req.base.completed_cb = get_completion_cb;
    get_req->mxm_req.base.context = get_req;
    OPAL_THREAD_ADD32(&mca_spml_ikrit.n_active_gets, 1);

    SPML_IKRIT_MXM_POST_SEND(get_req->mxm_req);

    return OSHMEM_SUCCESS;
}
Code Example #5
File: spml_ikrit.c Project: XuanWang1982/ompi
int mca_spml_ikrit_get(void *src_addr, size_t size, void *dst_addr, int src)
{
    mxm_send_req_t sreq;

    if (0 >= size) {
        return OSHMEM_SUCCESS;
    }

    if (OSHMEM_SUCCESS == mca_spml_ikrit_get_shm(src_addr, size, dst_addr, src))
        return OSHMEM_SUCCESS;

    if (OSHMEM_SUCCESS
            != mca_spml_ikrit_get_helper(&sreq,
                                         src_addr,
                                         size,
                                         dst_addr,
                                         src)) {
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

#if MXM_API < MXM_VERSION(2,0)
    sreq.base.flags = MXM_REQ_FLAG_BLOCKING;
#else
    sreq.flags = MXM_REQ_SEND_FLAG_BLOCKING;
#endif
    sreq.base.completed_cb = NULL;

    mxm_req_send(&sreq);
    opal_progress();
    mca_spml_irkit_req_wait(&sreq.base);

    if (MXM_OK != sreq.base.error) {
        SPML_ERROR("get request failed: %s - aborting",
                   mxm_error_string(sreq.base.error));
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }
    return OSHMEM_SUCCESS;
}
Code Example #6
static inline int get_ptl_id(int dst)
{
    ompi_proc_t *proc;

    /* get endpoint and btl */
    proc = oshmem_proc_group_all(dst);
    if (!proc) {
        SPML_ERROR("Can not find destination proc for pe=%d", dst);
        oshmem_shmem_abort(-1);
        return -1;
    }
    return OSHMEM_PROC_DATA(proc)->transport_ids[0];
}
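
The ikrit paths above consult get_ptl_id() and then fall back from shared memory to RDMA when the shm path is unavailable or has already failed. A minimal sketch of that dispatch follows; the helper name is hypothetical:

/* Hypothetical helper mirroring the shm-then-rdma selection used in
 * mca_spml_ikrit_get_helper() in Example #1. */
static int select_rdma_ptl(int pe)
{
    int ptl_id = get_ptl_id(pe);

    if (MXM_PTL_SHM == ptl_id) {
        /* shm path already tried (or not wanted): go via rdma */
        ptl_id = MXM_PTL_RDMA;
    }
    return ptl_id;
}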
Code Example #7
File: spml_ucx.c Project: jjhursey/ompi
spml_ucx_mkey_t * mca_spml_ucx_get_mkey_slow(int pe, void *va, void **rva)
{
    sshmem_mkey_t *r_mkey;

    r_mkey = mca_memheap_base_get_cached_mkey(pe, va, 0, rva);
    if (OPAL_UNLIKELY(!r_mkey)) {
        SPML_ERROR("pe=%d: %p is not address of symmetric variable",
                   pe, va);
        oshmem_shmem_abort(-1);
        return NULL;
    }
    return (spml_ucx_mkey_t *)(r_mkey->spml_context);
}
Code Example #8
File: spml_ucx.c Project: davideberius/ompi
int mca_spml_ucx_fence(shmem_ctx_t ctx)
{
    ucs_status_t err;
    mca_spml_ucx_ctx_t *ucx_ctx = (mca_spml_ucx_ctx_t *)ctx;

    err = ucp_worker_fence(ucx_ctx->ucp_worker);
    if (UCS_OK != err) {
        SPML_UCX_ERROR("fence failed: %s", ucs_status_string(err));
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }
    return OSHMEM_SUCCESS;
}
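
Note the difference between the two UCX entry points used above: ucp_worker_flush() (Examples #2 and #3) blocks until all outstanding operations on the worker complete, giving quiet semantics, while ucp_worker_fence() only guarantees that subsequent operations are ordered after preceding ones. A hedged usage sketch, assuming a valid context as in the davideberius/ompi variants:

/* Sketch of the intended semantics: fence orders, quiet completes. */
static void order_then_complete(shmem_ctx_t ctx)
{
    mca_spml_ucx_fence(ctx); /* later puts become ordered after earlier ones */
    mca_spml_ucx_quiet(ctx); /* block until all outstanding ops complete */
}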
Code Example #9
int mca_spml_ikrit_put_nb(void* dst_addr,
                          size_t size,
                          void* src_addr,
                          int dst,
                          void **handle)
{
    int err;
    err = mca_spml_ikrit_put_internal(dst_addr, size, src_addr, dst, handle, 1);
    if (OSHMEM_SUCCESS != err) {
        SPML_ERROR("put failed - aborting");
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }
    return OSHMEM_SUCCESS;
}
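
A brief usage sketch for the non-blocking put above; the wrapper name is hypothetical, dst_buf is assumed to be a symmetric variable, and the returned handle is treated as opaque (see the TODO note in Example #29):

/* Hypothetical caller: issue a non-blocking put of one long to PE 'pe'. */
static void put_long_nb(long *dst_buf, const long *src_buf, int pe)
{
    void *handle = NULL;

    if (OSHMEM_SUCCESS != mca_spml_ikrit_put_nb(dst_buf, sizeof(*src_buf),
                                                (void *)src_buf, pe, &handle)) {
        /* unreachable in practice: put_nb aborts on failure */
    }
}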
Code Example #10
File: memheap_base_mkey.c Project: anhzhang/ompi
static void unpack_remote_mkeys(opal_buffer_t *msg, int remote_pe)
{
    int32_t cnt;
    int32_t n;
    int32_t tr_id;
    int i;
    oshmem_proc_t *proc;

    proc = oshmem_proc_group_find(oshmem_group_all, remote_pe);
    cnt = 1;
    opal_dss.unpack(msg, &n, &cnt, OPAL_UINT32);
    for (i = 0; i < n; i++) {
        cnt = 1;
        opal_dss.unpack(msg, &tr_id, &cnt, OPAL_UINT32);
        cnt = 1;
        opal_dss.unpack(msg,
                        &memheap_oob.mkeys[tr_id].va_base,
                        &cnt,
                        OPAL_UINT64);

        if (0 == memheap_oob.mkeys[tr_id].va_base) {
            cnt = 1;
            opal_dss.unpack(msg, &memheap_oob.mkeys[tr_id].u.key, &cnt, OPAL_UINT64);
            if (OPAL_PROC_ON_LOCAL_NODE(proc->super.proc_flags)) {
                memheap_attach_segment(&memheap_oob.mkeys[tr_id], tr_id);
            }
        } else {
            cnt = 1;
            opal_dss.unpack(msg, &memheap_oob.mkeys[tr_id].len, &cnt, OPAL_UINT16);
            if (0 < memheap_oob.mkeys[tr_id].len) {
                memheap_oob.mkeys[tr_id].u.data = malloc(memheap_oob.mkeys[tr_id].len);
                if (NULL == memheap_oob.mkeys[tr_id].u.data) {
                    MEMHEAP_ERROR("Failed allocate %d bytes", memheap_oob.mkeys[tr_id].len);
                    oshmem_shmem_abort(-1);
                }
                cnt = memheap_oob.mkeys[tr_id].len;
                opal_dss.unpack(msg, memheap_oob.mkeys[tr_id].u.data, &cnt, OPAL_BYTE);
                MCA_SPML_CALL(rmkey_unpack(&memheap_oob.mkeys[tr_id], remote_pe));
            } else {
                memheap_oob.mkeys[tr_id].u.key = MAP_SEGMENT_SHM_INVALID;
            }
        }

        MEMHEAP_VERBOSE(5,
                        "tr_id: %d %s",
                        tr_id, mca_spml_base_mkey2str(&memheap_oob.mkeys[tr_id]));
    }
}
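
From the sequence of unpack calls above one can read off the per-PE wire layout of an mkey message. The following summary is a reconstruction from this function, not a definition taken from the OMPI sources:

/*
 * Inferred wire format per PE (one message per segment):
 *
 *   uint32      n          number of transports
 *   n times:
 *     uint32    tr_id      transport id
 *     uint64    va_base    base va of the segment on the remote PE
 *     if va_base == 0:
 *       uint64  u.key      segment id (shared-memory attach case)
 *     else:
 *       uint16  len        length of the packed key
 *       byte[len] u.data   packed key, present only when len > 0
 */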
Code Example #11
File: scoll_mpi_module.c Project: 00datman/ompi
/*
 * Initialize module on the communicator
 */
static int mca_scoll_mpi_module_enable(mca_scoll_base_module_t *module,
                                        oshmem_group_t *osh_group)
{

    if (OSHMEM_SUCCESS != mca_scoll_mpi_save_coll_handlers(module, osh_group)){
        MPI_COLL_ERROR("MPI module enable failed - aborting to prevent inconsistent application state");
        /* There are no modules available */
        opal_show_help("help-oshmem-scoll-mpi.txt",
                       "module_enable:fatal", true,
                       "MPI module enable failed - aborting to prevent inconsistent application state");

        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    return OSHMEM_SUCCESS;
}
Code Example #12
File: scoll_fca_module.c Project: Benguang/ompi
/*
 * Initialize module on the communicator
 */
static int mca_scoll_fca_module_enable(mca_scoll_base_module_t *module,
                                       struct oshmem_group_t *comm)
{

    mca_scoll_fca_module_t *fca_module = (mca_scoll_fca_module_t*) module;
    int rc;

    fca_module->comm = comm;
    fca_module->rank = comm->my_pe;

    rc = mca_scoll_fca_get_fca_lib(comm);
    if (rc != OSHMEM_SUCCESS)
        goto exit_fatal;

    rc = _save_coll_handlers(fca_module);
    if (rc != OSHMEM_SUCCESS)
        goto exit_fatal;

    rc = _get_local_ranks(fca_module);
    if (rc != OSHMEM_SUCCESS)
        goto exit_fatal;

    rc = _create_fca_comm(fca_module);
    if (rc != OSHMEM_SUCCESS)
        goto exit_fatal;

    FCA_MODULE_VERBOSE(fca_module, 1, "FCA Module initialized");
    return OMPI_SUCCESS;

    exit_fatal:
    /* it is possible that other pe(s) successfully enabled fca,
     * so different frameworks will be used for collective ops
     */
    FCA_ERROR("FCA module enable failed - aborting to prevent inconsistent application state");
    /* There are no modules available */
    opal_show_help("help-oshmem-scoll-fca.txt",
                   "module_enable:fatal", true,
                   "FCA module enable failed - aborting to prevent inconsistent application state");
    oshmem_shmem_abort(-1);
    return OMPI_ERROR;
}
Code Example #13
File: spml_ucx.c Project: jjhursey/ompi
void mca_spml_ucx_rmkey_unpack(sshmem_mkey_t *mkey, uint32_t segno, int pe, int tr_id)
{
    spml_ucx_mkey_t   *ucx_mkey;
    ucs_status_t err;
    
    ucx_mkey = &mca_spml_ucx.ucp_peers[pe].mkeys[segno].key;

    err = ucp_ep_rkey_unpack(mca_spml_ucx.ucp_peers[pe].ucp_conn,
            mkey->u.data, 
            &ucx_mkey->rkey); 
    if (UCS_OK != err) {
        SPML_ERROR("failed to unpack rkey");
        goto error_fatal;
    }

    mkey->spml_context = ucx_mkey;
    mca_spml_ucx_cache_mkey(mkey, segno, pe);
    return;

error_fatal:
    oshmem_shmem_abort(-1);
    return;
}
Code Example #14
File: shmem_init.c Project: gpaulsen/ompi
static inline void _shmem_init(int required, int *provided)
{
    int err = OSHMEM_SUCCESS;

    if (oshmem_shmem_initialized) {
        /*
         * SPEC: If start_pes() is called multiple times, subsequent calls have no effect.
         */
        return;
    }

    err = oshmem_shmem_init(0, NULL, required, provided);
    if (OSHMEM_SUCCESS != err) {
        /* since the spec does not propagate errors to the user we can only abort */
        SHMEM_API_ERROR("SHMEM failed to initialize - aborting");
        oshmem_shmem_abort(-1);
    }

    OPAL_CR_INIT_LIBRARY();
#if HAVE_ON_EXIT
    on_exit(shmem_onexit, NULL);
#endif
}
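
For context, here is a hedged sketch of how a public entry point might layer on _shmem_init(); the wrapper name is hypothetical and SHMEM_THREAD_SINGLE is assumed to be the single-threaded level constant:

/* Hypothetical wrapper: per the spec note above, repeated calls are
 * a no-op after the first successful initialization. */
void my_start_pes(int npes)
{
    int provided;

    (void)npes; /* the npes argument is ignored by modern OpenSHMEM */
    _shmem_init(SHMEM_THREAD_SINGLE, &provided);
}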
Code Example #15
File: spml_ikrit.c Project: XuanWang1982/ompi
static inline int mca_spml_ikrit_get_shm(void *src_addr,
                                         size_t size,
                                         void *dst_addr,
                                         int src)
{
    int ptl_id;
    void *rva;
    sshmem_mkey_t *r_mkey;

    ptl_id = get_ptl_id(src);
    /**
     * Get the rkey for the remote address.
     **/
    if (ptl_id != MXM_PTL_SHM)
        return OSHMEM_ERROR;

    r_mkey = mca_memheap.memheap_get_cached_mkey(src,
                                                 src_addr,
                                                 ptl_id,
                                                 &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   src, src_addr);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    if (!mca_memheap_base_can_local_copy(r_mkey, src_addr))
        return OSHMEM_ERROR;

    SPML_VERBOSE(100,
                 "shm get: pe:%d src=%p -> dst: %p sz=%d. src_rva=%p, %s",
                 src, src_addr, dst_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
    memcpy(dst_addr, (void *) (unsigned long) rva, size);
    opal_progress();
    return OSHMEM_SUCCESS;
}
Code Example #16
File: spml_yoda_putreq.c Project: Greatrandom/ompi
void mca_spml_yoda_put_completion(mca_btl_base_module_t* btl,
                                  struct mca_btl_base_endpoint_t* ep,
                                  struct mca_btl_base_descriptor_t* des,
                                  int status)
{
    mca_spml_yoda_rdma_frag_t* frag =
            (mca_spml_yoda_rdma_frag_t*) des->des_cbdata;
    mca_spml_yoda_put_request_t* putreq =
            (mca_spml_yoda_put_request_t*) frag->rdma_req;
    mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*) des->des_context;

    OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_puts, -1);
    /* check completion status */
    if (OPAL_UNLIKELY(OSHMEM_SUCCESS != status)) {
        /* no way to propagate errors. die */
        SPML_ERROR("FATAL put completion error");
        oshmem_shmem_abort(-1);
    }

    putreq->req_put.req_base.req_spml_complete = true;
    oshmem_request_complete(&putreq->req_put.req_base.req_oshmem, 1);
    oshmem_request_free((oshmem_request_t**) &putreq);
    mca_bml_base_free(bml_btl, des);
}
Code Example #17
File: proc_group_cache.c Project: ORNL/ompi
int cache_group(oshmem_group_t *group,
                int PE_start,
                int logPE_stride,
                int PE_size)
{
    oshmem_group_cache_t *cached_group = NULL;
    cached_group = OBJ_NEW(oshmem_group_cache_t);
#if OPAL_ENABLE_DEBUG
    cached_group->item.opal_list_item_belong_to = NULL;
    cached_group->item.opal_list_item_refcount = 0;
#endif
    cached_group->group = group;
    cached_group->cache_id[0] = PE_start;
    cached_group->cache_id[1] = logPE_stride;
    cached_group->cache_id[2] = PE_size;
    if (opal_list_get_size(&oshmem_group_cache_list)
            < oshmem_group_cache_size) {
        opal_list_append(&oshmem_group_cache_list,
                         (opal_list_item_t *)cached_group);
    } else {
#if ABORT_ON_CACHE_OVERFLOW
        opal_output(0,
                    "error: group cache overflow on rank %i: cache_size = %u: try increasing the oshmem_group_cache_size mca parameter",
                    group->my_pe,
                    oshmem_group_cache_size);
        oshmem_shmem_abort(-1);
#else
        /* This code implements FIFO group cache management. Define ABORT_ON_CACHE_OVERFLOW as 0 to enable it. */
        oshmem_group_cache_t *cached_group_to_remove = (oshmem_group_cache_t *)opal_list_remove_first(&oshmem_group_cache_list);
        oshmem_proc_group_destroy(cached_group_to_remove->group);
        OBJ_RELEASE(cached_group_to_remove);
        opal_list_append(&oshmem_group_cache_list,(opal_list_item_t *)cached_group);
#endif
    }
    return OSHMEM_SUCCESS;
}
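
cache_group() keys each entry by the (PE_start, logPE_stride, PE_size) triple stored in cache_id. A hedged lookup sketch over the same list follows; the helper name is hypothetical and the usual opal_list iteration macro is assumed:

/* Hypothetical lookup matching the cache_id layout used above. */
static oshmem_group_t *find_cached_group(int PE_start,
                                         int logPE_stride,
                                         int PE_size)
{
    oshmem_group_cache_t *c;

    OPAL_LIST_FOREACH(c, &oshmem_group_cache_list, oshmem_group_cache_t) {
        if (c->cache_id[0] == PE_start &&
            c->cache_id[1] == logPE_stride &&
            c->cache_id[2] == PE_size) {
            return c->group;
        }
    }
    return NULL;
}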
Code Example #18
File: memheap_base_mkey.c Project: anhzhang/ompi
static void memheap_attach_segment(sshmem_mkey_t *mkey, int tr_id)
{
    /* handle the special case when the va was obtained using sshmem.
     * this case is notable for:
     * - key is set as (seg_id);
     * - va_base is set as 0;
     * - len is set as 0;
     */
    assert(mkey->va_base == 0);
    assert(mkey->len == 0);

    MEMHEAP_VERBOSE(5,
            "shared memory usage tr_id: %d va_base: 0x%p len: %d key %llx",
            tr_id,
            mkey->va_base, mkey->len, (unsigned long long)mkey->u.key);

    mca_sshmem_segment_attach(&(memheap_map->mem_segs[HEAP_SEG_INDEX]), mkey);

    if ((void *) -1 == (void *) mkey->va_base) {
        MEMHEAP_ERROR("tr_id: %d key %llx attach failed: errno = %d",
                tr_id, (unsigned long long)mkey->u.key, errno);
        oshmem_shmem_abort(-1);
    }
}
Code Example #19
File: spml_yoda.c Project: 00datman/ompi
static inline int mca_spml_yoda_put_internal(void *dst_addr,
                                             size_t size,
                                             void *src_addr,
                                             int dst,
                                             int is_nb)
{
    int rc = OSHMEM_SUCCESS;
    mca_spml_yoda_put_request_t *putreq = NULL;
    mca_bml_base_btl_t* bml_btl;
    mca_btl_base_descriptor_t* des = NULL;
    mca_btl_base_segment_t* segment;
    mca_spml_yoda_rdma_frag_t* frag;
    int nfrags;
    int i;
    unsigned ncopied = 0;
    unsigned int frag_size = 0;
    char *p_src, *p_dst;
    void* rva;
    sshmem_mkey_t *r_mkey;
    int btl_id = 0;
    struct yoda_btl *ybtl;
    int put_via_send;
    mca_btl_base_registration_handle_t *local_handle = NULL, *remote_handle = NULL;

    /* If there is nothing to put, it's OK. */
    if (0 >= size) {
        return OSHMEM_SUCCESS;
    }

    /* Find bml_btl and its global btl_id */
    bml_btl = get_next_btl(dst, &btl_id);
    if (!bml_btl) {
        SPML_ERROR("cannot reach %d pe: no appropriate btl found", oshmem_my_proc_id());
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }
    /* Check if btl has PUT method. If it doesn't - use SEND*/
    put_via_send = !(bml_btl->btl->btl_flags & MCA_BTL_FLAGS_PUT);

    /* Get rkey of remote PE (dst proc) which must be on memheap*/
    r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, btl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   dst, dst_addr);
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }

#if SPML_YODA_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
                 dst, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    ybtl = &mca_spml_yoda.btl_type_map[btl_id];

    if (ybtl->btl->btl_register_mem) {
        assert (r_mkey->len == ybtl->btl->btl_registration_handle_size);
        remote_handle = (mca_btl_base_registration_handle_t *) r_mkey->u.data;
    }

    /* check if we are doing a put into an shm-attached segment, and if so
     * just do a memcpy
     */
    if ((YODA_BTL_SM == ybtl->btl_type || YODA_BTL_VADER == ybtl->btl_type)
            && mca_memheap_base_can_local_copy(r_mkey, dst_addr)) {
        memcpy((void *) (unsigned long) rva, src_addr, size);
        return OSHMEM_SUCCESS;
    }

    /* We support only blocking PUT now => we always need a copy of the src buffer */
    calc_nfrags_put (bml_btl, size, &frag_size, &nfrags, put_via_send);

    p_src = (char*) src_addr;
    p_dst = (char*) (unsigned long) rva;
    for (i = 0; i < nfrags; i++) {
        /* Allocating send request from free list */
        putreq = mca_spml_yoda_putreq_alloc(dst);
        frag = &putreq->put_frag;
        ncopied = i < nfrags - 1 ? frag_size :(unsigned) ((char *) src_addr + size - p_src);

        /* Preparing source buffer */

        /* allocate buffer */
        mca_spml_yoda_bml_alloc(bml_btl,
                                &des,
                                MCA_BTL_NO_ORDER,
                                ncopied,
                                MCA_BTL_DES_SEND_ALWAYS_CALLBACK,
                                put_via_send);

        if (OPAL_UNLIKELY(!des || !des->des_segments )) {
            SPML_ERROR("src=%p nfrags = %d frag_size=%d",
                       src_addr, nfrags, frag_size);
            SPML_ERROR("shmem OOM error need %d bytes", ncopied);
            opal_show_help("help-oshmem-spml-yoda.txt",
                           "internal_oom_error",
                           true,
                           "Put", ncopied, mca_spml_yoda.bml_alloc_threshold);
            rc = OSHMEM_ERR_FATAL;
            goto exit_fatal;
        }

        /* copy data to allocated buffer*/
        segment = des->des_segments;
        spml_yoda_prepare_for_put((void*)segment->seg_addr.pval, ncopied,
                                  (void*)p_src, (void*)p_dst, put_via_send);

        if (!put_via_send && ybtl->btl->btl_register_mem) {
            local_handle = ybtl->btl->btl_register_mem (ybtl->btl, bml_btl->btl_endpoint,
                                                        segment->seg_addr.pval, ncopied, 0);
            if (NULL == local_handle) {
                /* No free resources, Block on completion here */
                SPML_ERROR("shmem error: OSHMEM_ERR_OUT_OF_RESOURCE");
                oshmem_request_wait_completion(&putreq->req_put.req_base.req_oshmem);
            }
        }

        frag->rdma_segs[0].base_seg.seg_addr.lval = (uintptr_t) p_dst;
        frag->rdma_segs[0].base_seg.seg_len = (put_via_send ?
                                                   ncopied + SPML_YODA_SEND_CONTEXT_SIZE :
                                                   ncopied);
        frag->rdma_req = putreq;

        /* initialize callback data for put*/
        des->des_cbdata = frag;
        des->des_cbfunc = mca_spml_yoda_put_completion;

        OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_puts, 1);
        /* put the data to remote side */
        if (!put_via_send) {
            rc = mca_bml_base_put (bml_btl, segment->seg_addr.pval, (uint64_t) (intptr_t) p_dst,
                                   local_handle, remote_handle, ncopied, 0, 0, mca_spml_yoda_put_completion_rdma,
                                   des);
        } else {
            rc = mca_bml_base_send(bml_btl, des, MCA_SPML_YODA_PUT);
            if (1 == rc)
                rc = OSHMEM_SUCCESS;
        }

        if (OPAL_UNLIKELY(OSHMEM_SUCCESS != rc)) {
            if (OSHMEM_ERR_OUT_OF_RESOURCE == rc) {
                /* No free resources, Block on completion here */
                SPML_ERROR("shmem error: OSHMEM_ERR_OUT_OF_RESOURCE");
                oshmem_request_wait_completion(&putreq->req_put.req_base.req_oshmem);
            } else {
                SPML_ERROR("shmem error");
            }
            /* exit with error */
            SPML_ERROR("shmem error: ret = %i, send_pe = %i, dest_pe = %i",
                       rc, oshmem_my_proc_id(), dst);
            rc = OSHMEM_ERR_FATAL;
            goto exit_fatal;
        }
        p_src += ncopied;
        p_dst += ncopied;
    }

    return rc;

exit_fatal:
    if (OSHMEM_SUCCESS != rc) {
        oshmem_shmem_abort(rc);
    }
    return rc;
}
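
The per-fragment size in the loop above follows a simple rule: every fragment carries frag_size bytes except the last, which carries the remainder. A standalone restatement with hypothetical names:

/* Hypothetical restatement of the ncopied computation in the put loop. */
static unsigned frag_bytes(int i, int nfrags, unsigned frag_size,
                           size_t total_size, size_t bytes_done)
{
    return (i < nfrags - 1) ? frag_size
                            : (unsigned)(total_size - bytes_done);
}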
Code Example #20
File: spml_yoda.c Project: 00datman/ompi
static void mca_yoda_get_callback(mca_btl_base_module_t* btl,
                                  mca_btl_base_tag_t tag,
                                  mca_btl_base_descriptor_t* des,
                                  void* cbdata )
{
    void** p, ** p_src, **p_dst;
    size_t* size;
    int* dst;
    void** p_getreq;
    mca_btl_base_descriptor_t* des_loc;
    int rc;
    mca_bml_base_btl_t* bml_btl;
    mca_spml_yoda_rdma_frag_t* frag;
    int btl_id;
    mca_spml_yoda_put_request_t *putreq;

    rc = OSHMEM_SUCCESS;
    btl_id = 0;
    putreq = NULL;

    /* Unpack data */
    p = (void **)des->des_segments->seg_addr.pval;
    p_src = (void*) p;

    size = (size_t*)((char*)p_src + sizeof(*p_src) );
    dst = (int*)( (char*)size + sizeof(*size));
    p_dst = (void*) ((char*)dst + sizeof(*dst));
    p_getreq =(void**) ( (char*)p_dst + sizeof(*p_dst));

    /* Prepare put via send*/
    bml_btl = get_next_btl(*dst, &btl_id);

    putreq = mca_spml_yoda_putreq_alloc(*dst);
    frag = &putreq->put_frag;

    mca_spml_yoda_bml_alloc(bml_btl,
                            &des_loc,
                            MCA_BTL_NO_ORDER,
                            *size,
                            MCA_BTL_DES_SEND_ALWAYS_CALLBACK,
                            1);

    if (OPAL_UNLIKELY(!des_loc || !des_loc->des_segments)) {
        SPML_ERROR("shmem OOM error need %d bytes", (int)*size);
        oshmem_shmem_abort(-1);
    }
    spml_yoda_prepare_for_get_response((void*)des_loc->des_segments->seg_addr.pval, *size, (void*)*p_src, (void*) *p_dst,(void*)*p_getreq,1);

    frag->rdma_req = putreq;

    /* Initialize callback data for put*/
    des_loc->des_cbdata = frag;
    des_loc->des_cbfunc = mca_spml_yoda_put_completion;
    des_loc->des_segment_count = 1;

    OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_puts, 1);

    /* Put via send*/
    rc = mca_bml_base_send(bml_btl, des_loc, MCA_SPML_YODA_GET_RESPONSE);
    if (1 == rc) {
        rc = OSHMEM_SUCCESS;
    }

    if (OPAL_UNLIKELY(OSHMEM_SUCCESS != rc)) {
        if (OSHMEM_ERR_OUT_OF_RESOURCE == rc) {
            /* No free resources, Block on completion here */
            SPML_ERROR("shmem error: OSHMEM_ERR_OUT_OF_RESOURCE");
            oshmem_request_wait_completion(&putreq->req_put.req_base.req_oshmem);
        } else {
            SPML_ERROR("shmem error");
        }
        /* exit with error */
        SPML_ERROR("shmem error: ret = %i, send_pe = %i, dest_pe = %i",
                   rc, oshmem_my_proc_id(), *dst);
        oshmem_shmem_abort(-1);
        rc = OSHMEM_ERROR;
    }
}
Code Example #21
File: spml_yoda.c Project: 00datman/ompi
/**
 * shmem_get reads data from a remote address
 * in the symmetric heap via RDMA READ.
 * Get operation:
 * 1. Get the rkey to the remote address.
 * 2. Allocate a get request.
 * 3. Allocate a temporary pre-registered buffer
 *    to copy the data to.
 * 4. Init the request descriptor with remote side
 *    data and local side data.
 * 5. Read the remote buffer to a pre-registered
 *    buffer on the local PE using RDMA READ.
 * 6. Copy the received data to dst_addr if an
 *    intermediate pre-registered buffer was used.
 * 7. Clear the request and return.
 *
 * src_addr - address on remote pe.
 * size - the number of bytes to be read.
 * dst_addr - address on the local pe.
 * src - the pe of remote process.
 */
int mca_spml_yoda_get(void* src_addr, size_t size, void* dst_addr, int src)
{
    int rc = OSHMEM_SUCCESS;
    sshmem_mkey_t *r_mkey, *l_mkey;
    void* rva;
    unsigned ncopied = 0;
    unsigned int frag_size = 0;
    char *p_src, *p_dst;
    int i;
    int nfrags;
    mca_bml_base_btl_t* bml_btl = NULL;
    mca_btl_base_segment_t* segment;
    mca_btl_base_descriptor_t* des = NULL;
    mca_spml_yoda_rdma_frag_t* frag = NULL;
    struct mca_spml_yoda_getreq_parent get_holder;
    struct yoda_btl *ybtl;
    int btl_id = 0;
    int get_via_send;
    mca_btl_base_registration_handle_t *local_handle, *remote_handle = NULL;
    mca_spml_yoda_get_request_t* getreq = NULL;

    /* If there is nothing to get, it's OK. */
    if (0 >= size) {
        return rc;
    }

    /* Find bml_btl and its global btl_id */
    bml_btl = get_next_btl(src, &btl_id);
    if (!bml_btl) {
        SPML_ERROR("cannot reach %d pe: no appropriate btl found", oshmem_my_proc_id());
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }
    /* Check if btl has GET method. If it doesn't - use SEND*/
    get_via_send = ! ( (bml_btl->btl->btl_flags & (MCA_BTL_FLAGS_GET)) &&
                       (bml_btl->btl->btl_flags & (MCA_BTL_FLAGS_PUT)) );

    /* Get rkey of remote PE (src proc) which must be on memheap*/
    r_mkey = mca_memheap_base_get_cached_mkey(src, src_addr, btl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   src, src_addr);
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }

#if SPML_YODA_DEBUG == 1
    SPML_VERBOSE(100, "get: pe:%d src=%p -> dst: %p sz=%d. src_rva=%p, %s",
                 src, src_addr, dst_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    ybtl = &mca_spml_yoda.btl_type_map[btl_id];

    if (ybtl->btl->btl_register_mem) {
        assert(ybtl->btl->btl_registration_handle_size == r_mkey->len);
        remote_handle = (mca_btl_base_registration_handle_t *) r_mkey->u.data;
    }

    nfrags = 1;

    /* check if we are doing a get from an shm-attached segment, and if so
     * just do a memcpy
     */
    if ((YODA_BTL_SM == ybtl->btl_type || YODA_BTL_VADER == ybtl->btl_type)
            && mca_memheap_base_can_local_copy(r_mkey, src_addr)) {
        memcpy(dst_addr, (void *) rva, size);
        /* must call progress here to avoid deadlock. Scenario:
         * pe1 polls pe2 via shm get. pe2 tries to get a static variable from node one, which goes to the sm btl.
         * In this case pe2 is stuck forever because pe1 never calls opal_progress.
         * Maybe we do not need to call progress on every get() here but rather once in a while.
         */
        opal_progress();
        return OSHMEM_SUCCESS;
    }

    l_mkey = mca_memheap.memheap_get_local_mkey(dst_addr,
                                                btl_id);
    /*
     * Need a copy if local memory has not been registered or
     * we make GET via SEND
     */
    frag_size = ncopied;
    if ((NULL == l_mkey) || get_via_send) {
        calc_nfrags_get (bml_btl, size, &frag_size, &nfrags, get_via_send);
    }

    p_src = (char*) (unsigned long) rva;
    p_dst = (char*) dst_addr;
    get_holder.active_count = 0;

    for (i = 0; i < nfrags; i++) {
        /**
         * Allocating a get request from a pre-allocated
         * and pre-registered free list.
         */
        getreq = mca_spml_yoda_getreq_alloc(src);
        assert(getreq);
        getreq->p_dst = NULL;
        frag = &getreq->get_frag;
        getreq->parent = &get_holder;

        ncopied = i < nfrags - 1 ? frag_size :(unsigned) ((char *) dst_addr + size - p_dst);
        frag->allocated = 0;
        /* Prepare destination descriptor*/
        memcpy(&frag->rdma_segs[0].base_seg,
                r_mkey->u.data,
                r_mkey->len);

        frag->rdma_segs[0].base_seg.seg_len = (get_via_send ? ncopied + SPML_YODA_SEND_CONTEXT_SIZE : ncopied);
        if (get_via_send) {
            frag->use_send = 1;
            frag->allocated = 1;
            /**
             * Allocate a temporary buffer on the local PE.
             * The local buffer will store the data read
             * from the remote address.
             */
            mca_spml_yoda_bml_alloc(bml_btl,
                                    &des,
                                    MCA_BTL_NO_ORDER,
                                    (int)frag_size,
                                    MCA_BTL_DES_SEND_ALWAYS_CALLBACK,
                                    get_via_send);
            if (OPAL_UNLIKELY(!des || !des->des_segments)) {
                SPML_ERROR("shmem OOM error need %d bytes", ncopied);
                SPML_ERROR("src=%p nfrags = %d frag_size=%d",
                           src_addr, nfrags, frag_size);
                rc = OSHMEM_ERR_FATAL;
                goto exit_fatal;
            }

            segment = des->des_segments;
            spml_yoda_prepare_for_get((void*)segment->seg_addr.pval, ncopied, (void*)p_src, oshmem_my_proc_id(), (void*)p_dst, (void*) getreq);
            des->des_cbfunc = mca_spml_yoda_get_response_completion;
            des->des_cbdata = frag;

            OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_gets, 1);
        }
        else {
            /*
             * Register local (destination) memory if we do GET via GET
             */
            if (NULL == l_mkey && ybtl->btl->btl_register_mem) {
                local_handle = ybtl->btl->btl_register_mem (ybtl->btl, bml_btl->btl_endpoint, p_dst, ncopied,
                                                            MCA_BTL_REG_FLAG_LOCAL_WRITE);

                if (NULL == local_handle) {
                    SPML_ERROR("%s: failed to register destination memory %p.",
                               btl_type2str(ybtl->btl_type), p_dst);
                }

                frag->local_handle = local_handle;
            } else {
                local_handle = ((mca_spml_yoda_context_t*)l_mkey->spml_context)->registration;
                frag->local_handle = NULL;
            }

            frag->rdma_segs[0].base_seg.seg_addr.lval = (uintptr_t) p_src;
            getreq->p_dst = (uint64_t*) p_dst;
            frag->size = ncopied;

            OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_gets, 1);
        }

        /**
         * Initialize the remote data fragment
         * with remote address data required for
         * executing RDMA READ from a remote buffer.
         */

        frag->rdma_req = getreq;

        /**
         *  Do GET operation
         */
        if (get_via_send) {
            rc = mca_bml_base_send(bml_btl, des, MCA_SPML_YODA_GET);
            if (1 == rc)
                rc = OSHMEM_SUCCESS;
        } else {
            rc = mca_bml_base_get(bml_btl, p_dst, (uint64_t) (intptr_t) p_src, local_handle,
                                  remote_handle, ncopied, 0, 0, mca_spml_yoda_get_completion, frag);
        }

        if (OPAL_UNLIKELY(OSHMEM_SUCCESS != rc)) {
            if (OSHMEM_ERR_OUT_OF_RESOURCE == rc) {
                /* No free resources, Block on completion here */
                oshmem_request_wait_completion(&getreq->req_get.req_base.req_oshmem);
                return OSHMEM_SUCCESS;
            } else {
                SPML_ERROR("oshmem_get: error %d", rc);
                goto exit_fatal;
            }
        }
        p_dst += ncopied;
        p_src += ncopied;
        OPAL_THREAD_ADD32(&get_holder.active_count, 1);
    }

    /* revisit if we really need this for self and sm */
    /* if (YODA_BTL_SELF == ybtl->btl_type) */
    opal_progress();

    /* Wait for completion on request */
    while (get_holder.active_count > 0)
        oshmem_request_wait_completion(&getreq->req_get.req_base.req_oshmem);

    return rc;

exit_fatal:
    if (OSHMEM_SUCCESS != rc) {
        oshmem_shmem_abort(rc);
    }
    return rc;
}
Code Example #22
void mca_memheap_modex_recv_all(void)
{
    int i;
    int j;
    int nprocs, my_pe;
    oshmem_proc_t *proc;
    mca_spml_mkey_t *mkey;
    void* dummy_rva;

    if (!mca_memheap_base_key_exchange)
        return;

    /* init rkey cache */
    nprocs = oshmem_num_procs();
    my_pe = oshmem_my_proc_id();

    /* Note:
     * Doing the exchange via rml until we figure out the problem with grpcomm.modex and barrier
     */
    for (i = 0; i < nprocs; i++) {
        if (i == my_pe)
            continue;

        proc = oshmem_proc_group_find(oshmem_group_all, i);
        for (j = 0; j < memheap_map->n_segments; j++) {
            mkey =
                    mca_memheap_base_get_cached_mkey(i,
                                                     memheap_map->mem_segs[j].start,
                                                     proc->transport_ids[0],
                                                     &dummy_rva);
            if (!mkey) {
                MEMHEAP_ERROR("Failed to receive mkeys");
                oshmem_shmem_abort(-1);
            }
        }

    }

    /*
     * There is an issue with orte_grpcomm.barrier usage: ess/pmi directs us to
     * use grpcomm/pmi, and in the slurm srun() case grpcomm/pmi calls PMI_Barrier(),
     * which is a function of an external library. There is no opal_progress()
     * along that path. As a result slow PEs send a request (MEMHEAP_RKEY_REQ) to
     * fast PEs waiting on the barrier and do not get a response (MEMHEAP_RKEY_RESP).
     *
     * There are the following ways to solve this:
     * 1. count the requests from remote PEs and do ORTE_PROGRESSED_WAIT waiting for the expected value;
     * 2. use shmem_barrier_all();
     * 3. rework pmi/barrier to use opal_progress();
     * 4. use orte_grpcomm.barrier carefully;
     *
     * It seems there is no need to use orte_grpcomm.barrier here
     */

    if (memheap_map->mem_segs[HEAP_SEG_INDEX].shmid != MEMHEAP_SHM_INVALID) {
        /* unfortunately we must do a barrier here to ensure that everyone is attached to our segment.
         * the good thing is that this code path is only invoked on older Linuxes (-mca shmalloc_use_hugepages 3|4),
         * so try to minimize the damage here by waiting 5 seconds and doing progress
         */
        shmem_barrier_all();
        /* keys exchanged, segments attached, now we can safely cleanup */
        if (memheap_map->mem_segs[HEAP_SEG_INDEX].type
                == MAP_SEGMENT_ALLOC_SHM) {
            shmctl(memheap_map->mem_segs[HEAP_SEG_INDEX].shmid,
                   IPC_RMID,
                   NULL );
        }
    }
}
Code Example #23
File: memheap_base_mkey.c Project: anhzhang/ompi
void mca_memheap_modex_recv_all(void)
{
    int i;
    int j;
    int nprocs, my_pe;
    opal_buffer_t *msg = NULL;
    void *send_buffer = NULL;
    char *rcv_buffer = NULL;
    int size;
    int *rcv_size = NULL;
    int *rcv_n_transports = NULL;
    int *rcv_offsets = NULL;
    int rc = OSHMEM_SUCCESS;
    size_t buffer_size;

    if (!mca_memheap_base_key_exchange) {
        oshmem_shmem_barrier();
        return;
    }

    nprocs = oshmem_num_procs();
    my_pe = oshmem_my_proc_id();

    /* buffer allocation for num_transports
     * message sizes and offsets */

    rcv_size = (int *)malloc(nprocs * sizeof(int));
    if (NULL == rcv_size) {
        MEMHEAP_ERROR("failed to get rcv_size buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    rcv_offsets = (int *)malloc(nprocs * sizeof(int));
    if (NULL == rcv_offsets) {
        MEMHEAP_ERROR("failed to get rcv_offsets buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    rcv_n_transports = (int *)malloc(nprocs * sizeof(int));
    if (NULL == rcv_n_transports) {
        MEMHEAP_ERROR("failed to get rcv_n_transports buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    /* serialize our own mkeys */
    msg = OBJ_NEW(opal_buffer_t);
    if (NULL == msg) {
        MEMHEAP_ERROR("failed to get msg buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    for (j = 0; j < memheap_map->n_segments; j++) {
        pack_local_mkeys(msg, 0, j, 1);
    }

    /* we assume here that the int32_t returned by opal_dss.unload
     * has the same size as the int we use for MPI_Allgather, MPI_Allgatherv */

    assert(sizeof(int32_t) == sizeof(int));

    /* Do allgather */
    opal_dss.unload(msg, &send_buffer, &size);
    MEMHEAP_VERBOSE(1, "local keys packed into %d bytes, %d segments", size, memheap_map->n_segments);

    /* we need to send num_transports and message sizes separately
     * since message sizes depend on types of btl used */

    rc = oshmem_shmem_allgather(&memheap_map->num_transports, rcv_n_transports, sizeof(int));
    if (MPI_SUCCESS != rc) {
        MEMHEAP_ERROR("allgather failed");
        goto exit_fatal;
    }

    rc = oshmem_shmem_allgather(&size, rcv_size, sizeof(int));
    if (MPI_SUCCESS != rc) {
        MEMHEAP_ERROR("allgather failed");
        goto exit_fatal;
    }

    /* calculating offsets (displacements) for allgatherv */

    rcv_offsets[0] = 0;
    for (i = 1; i < nprocs; i++) {
        rcv_offsets[i] = rcv_offsets[i - 1] + rcv_size[i - 1];
    }

    buffer_size = rcv_offsets[nprocs - 1] + rcv_size[nprocs - 1];

    rcv_buffer = malloc (buffer_size);
    if (NULL == rcv_buffer) {
        MEMHEAP_ERROR("failed to allocate recieve buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    rc = oshmem_shmem_allgatherv(send_buffer, rcv_buffer, size, rcv_size, rcv_offsets);
    if (MPI_SUCCESS != rc) {
        free (rcv_buffer);
        MEMHEAP_ERROR("allgatherv failed");
        goto exit_fatal;
    }

    opal_dss.load(msg, rcv_buffer, buffer_size);

    /* deserialize mkeys */
    OPAL_THREAD_LOCK(&memheap_oob.lck);
    for (i = 0; i < nprocs; i++) {
        if (i == my_pe) {
            continue;
        }

        msg->unpack_ptr = (void *)((intptr_t) msg->base_ptr + rcv_offsets[i]);

        for (j = 0; j < memheap_map->n_segments; j++) {
            map_segment_t *s;

            s = &memheap_map->mem_segs[j];
            if (NULL != s->mkeys_cache[i]) {
                MEMHEAP_VERBOSE(10, "PE%d: segment%d already exists, mkey will be replaced", i, j);
            } else {
                s->mkeys_cache[i] = (sshmem_mkey_t *) calloc(rcv_n_transports[i],
                        sizeof(sshmem_mkey_t));
                if (NULL == s->mkeys_cache[i]) {
                    MEMHEAP_ERROR("PE%d: segment%d: Failed to allocate mkeys cache entry", i, j);
                    oshmem_shmem_abort(-1);
                }
            }
            memheap_oob.mkeys = s->mkeys_cache[i];
            unpack_remote_mkeys(msg, i);
        }
    }

    OPAL_THREAD_UNLOCK(&memheap_oob.lck);

exit_fatal:
    if (rcv_size) {
        free(rcv_size);
    }
    if (rcv_offsets) {
        free(rcv_offsets);
    }
    if (rcv_n_transports) {
        free(rcv_n_transports);
    }
    if (send_buffer) {
        free(send_buffer);
    }
    if (msg) {
        OBJ_RELEASE(msg);
    }

    /* This function requires abort in any error case */
    if (OSHMEM_SUCCESS != rc) {
        oshmem_shmem_abort(rc);
    }
}
Code Example #24
File: spml_ucx.c Project: davideberius/ompi
sshmem_mkey_t *mca_spml_ucx_register(void* addr,
                                         size_t size,
                                         uint64_t shmid,
                                         int *count)
{
    sshmem_mkey_t *mkeys;
    ucs_status_t status;
    spml_ucx_mkey_t   *ucx_mkey;
    size_t len;
    ucp_mem_map_params_t mem_map_params;
    int segno;
    map_segment_t *mem_seg;
    unsigned flags;
    int my_pe = oshmem_my_proc_id();

    *count = 0;
    mkeys = (sshmem_mkey_t *) calloc(1, sizeof(*mkeys));
    if (!mkeys) {
        return NULL;
    }

    segno   = memheap_find_segnum(addr);
    mem_seg = memheap_find_seg(segno);

    ucx_mkey = &mca_spml_ucx_ctx_default.ucp_peers[my_pe].mkeys[segno].key;
    mkeys[0].spml_context = ucx_mkey;

    /* if possible use mem handle already created by ucx allocator */
    if (MAP_SEGMENT_ALLOC_UCX != mem_seg->type) {
        flags = 0;
        if (mca_spml_ucx.heap_reg_nb && memheap_is_va_in_segment(addr, HEAP_SEG_INDEX)) {
            flags = UCP_MEM_MAP_NONBLOCK;
        }

        mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                    UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                    UCP_MEM_MAP_PARAM_FIELD_FLAGS;
        mem_map_params.address    = addr;
        mem_map_params.length     = size;
        mem_map_params.flags      = flags;

        status = ucp_mem_map(mca_spml_ucx.ucp_context, &mem_map_params, &ucx_mkey->mem_h);
        if (UCS_OK != status) {
            goto error_out;
        }

    } else {
        ucx_mkey->mem_h = (ucp_mem_h)mem_seg->context;
    }

    status = ucp_rkey_pack(mca_spml_ucx.ucp_context, ucx_mkey->mem_h, 
                           &mkeys[0].u.data, &len); 
    if (UCS_OK != status) {
        goto error_unmap;
    }
    if (len >= 0xffff) {
        SPML_UCX_ERROR("packed rkey is too long: %llu >= %d",
                (unsigned long long)len,
                0xffff);
        oshmem_shmem_abort(-1);
    }

    status = ucp_ep_rkey_unpack(mca_spml_ucx_ctx_default.ucp_peers[oshmem_group_self->my_pe].ucp_conn,
                                mkeys[0].u.data,
                                &ucx_mkey->rkey);
    if (UCS_OK != status) {
        SPML_UCX_ERROR("failed to unpack rkey");
        goto error_unmap;
    }

    mkeys[0].len     = len;
    mkeys[0].va_base = addr;
    *count = 1;
    mca_spml_ucx_cache_mkey(&mca_spml_ucx_ctx_default, &mkeys[0], segno, my_pe);
    return mkeys;

error_unmap:
    ucp_mem_unmap(mca_spml_ucx.ucp_context, ucx_mkey->mem_h);
error_out:
    free(mkeys);

    return NULL ;
}
Code Example #25
File: scoll_fca_module.c Project: Benguang/ompi
/*
 * Invoked when there's a new communicator that has been created.
 * Look at the communicator and decide which set of functions and
 * priority we want to return.
 */
mca_scoll_base_module_t *
mca_scoll_fca_comm_query(struct oshmem_group_t *comm, int *priority)
{
    mca_scoll_base_module_t *module;
    int size = comm->proc_count;
    int local_peers = 0;

    mca_scoll_fca_module_t *fca_module;

    *priority = 0;
    module = NULL;

    if (!mca_scoll_fca_component.fca_enable) {
        FCA_VERBOSE(20, "FCA is disabled on user request => exiting");
        goto exit;
    }

    if (mca_memheap.memheap_component == NULL ) {
        FCA_VERBOSE(20, "No memheap => exiting");
        goto exit;
    }

    if (NULL == mca_scoll_fca_component.ret) {
        MCA_MEMHEAP_CALL(private_alloc(sizeof(int),(void **)&mca_scoll_fca_component.ret));
        MCA_MEMHEAP_CALL(private_alloc(oshmem_group_all->proc_count*sizeof(*mca_scoll_fca_component.rcounts), (void **)&mca_scoll_fca_component.rcounts ));
        MCA_MEMHEAP_CALL(private_alloc(/*info_size*/20,&mca_scoll_fca_component.my_info_exchangeable));
        MCA_MEMHEAP_CALL(private_alloc(sizeof(fca_comm_desc_t), &mca_scoll_fca_component.fca_comm_desc_exchangeable));
    }
    if (size < mca_scoll_fca_component.fca_np) {
        FCA_VERBOSE(20,
                    "size(%d) < fca_np(%d)",
                    size, mca_scoll_fca_component.fca_np);
        goto exit;
    }

    if (size < 2) {
        FCA_VERBOSE(20, "size(%d) < 2", size);
        goto exit;
    }

    if (!have_remote_peers(comm,
                           size,
                           &local_peers) /* || OMPI_COMM_IS_INTER(comm)*/) {
        FCA_VERBOSE(1,
                    "all peers in group are on the same node, fca disabled\n");
        goto exit;
    }

    fca_module = OBJ_NEW(mca_scoll_fca_module_t);
    if (!fca_module) {
        goto exit_fatal;
    }
    fca_module->super.scoll_module_enable = mca_scoll_fca_module_enable;
    fca_module->super.scoll_collect =
            mca_scoll_fca_component.fca_enable_allgather ?
                    mca_scoll_fca_collect : NULL;
    fca_module->super.scoll_reduce =
            mca_scoll_fca_component.fca_enable_allreduce ?
                    mca_scoll_fca_reduce : NULL;
    fca_module->super.scoll_barrier =
            mca_scoll_fca_component.fca_enable_barrier ? mca_scoll_fca_barrier :
                                                         NULL;
    fca_module->super.scoll_broadcast =
            mca_scoll_fca_component.fca_enable_bcast ? mca_scoll_fca_broadcast :
                                                       NULL;

    *priority = mca_scoll_fca_component.fca_priority;
    module = &fca_module->super;

    exit:
    FCA_VERBOSE(4,
                "Query FCA module for comm %p size %d rank %d local_peers=%d: priority=%d %s",
                (void *)comm, size, comm->my_pe, local_peers, *priority, module ? "enabled" : "disabled");
    return module;

    exit_fatal:
    /* it is possible that other pe(s) successfully initialized fca,
     * so different frameworks will be used for collective ops
     */
    FCA_ERROR("FCA module query failed - aborting");
    oshmem_shmem_abort(-1);
    return NULL ;
}
Code Example #26
File: atomic_mxm_fadd.c Project: Cai900205/test
int mca_atomic_mxm_fadd(void *target,
                        void *prev,
                        const void *value,
                        size_t nlong,
                        int pe,
                        struct oshmem_op_t *op)
{
    unsigned my_pe;
    uint8_t nlong_order;
    void *remote_addr;
    int ptl_id;
    mxm_send_req_t sreq;
    mxm_error_t mxm_err;
    sshmem_mkey_t *r_mkey;
    static char dummy_buf[8];

    my_pe = oshmem_my_proc_id();
    ptl_id = -1;
    mxm_err = MXM_OK;

    if (!target || !value) {
        ATOMIC_ERROR("[#%d] target or value are not defined", my_pe);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERR_BAD_PARAM;
    }

    if ((pe < 0) || (pe >= oshmem_num_procs())) {
        ATOMIC_ERROR("[#%d] PE=%d not valid", my_pe, pe);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERR_BAD_PARAM;
    }

    switch (nlong) {
    case 1:
        nlong_order = 0;
        break;
    case 2:
        nlong_order = 1;
        break;
    case 4:
        nlong_order = 2;
        break;
    case 8:
        nlong_order = 3;
        break;
    default:
        ATOMIC_ERROR("[#%d] Type size must be 1/2/4 or 8 bytes.", my_pe);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERR_BAD_PARAM;
    }

    ptl_id = oshmem_proc_group_all(pe)->transport_ids[0];
    if (MXM_PTL_SHM == ptl_id) {
        ptl_id = MXM_PTL_RDMA;
    }
    r_mkey = mca_memheap.memheap_get_cached_mkey(pe,
                                             target,
                                             ptl_id,
                                             &remote_addr);
    if (!r_mkey) {
        ATOMIC_ERROR("[#%d] %p is not address of symmetric variable",
                     my_pe, target);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERR_BAD_PARAM;
    }

    /* mxm request init */
    sreq.base.state = MXM_REQ_NEW;
    sreq.base.mq = mca_spml_self->mxm_mq;
    sreq.base.conn = mca_spml_self->mxm_peers[pe]->mxm_hw_rdma_conn;
    sreq.base.completed_cb = NULL;
    sreq.base.data_type = MXM_REQ_DATA_BUFFER;

    sreq.op.atomic.remote_vaddr = (uintptr_t) remote_addr;
#if MXM_API < MXM_VERSION(2,0)
    sreq.op.atomic.remote_memh  = MXM_INVALID_MEM_HANDLE;
    memcpy(&sreq.op.atomic.value8, value, nlong);
#else
    sreq.op.atomic.remote_mkey = to_mxm_mkey(r_mkey);
    memcpy(&sreq.op.atomic.value, value, nlong);
#endif
    sreq.op.atomic.order = nlong_order;

    /* Do we need atomic 'add' or atomic 'fetch and add'? */
    if (NULL == prev) {
        sreq.base.data.buffer.ptr = dummy_buf;
        sreq.base.data.buffer.length = nlong;
        sreq.base.data.buffer.memh = MXM_INVALID_MEM_HANDLE;
#if MXM_API < MXM_VERSION(2,0)
        sreq.base.flags = MXM_REQ_FLAG_SEND_SYNC;
        sreq.opcode = MXM_REQ_OP_ATOMIC_ADD;
#else
        sreq.flags = 0;
        sreq.opcode = MXM_REQ_OP_ATOMIC_FADD;
#endif
    } else {
        sreq.base.data.buffer.ptr = prev;
        sreq.base.data.buffer.length = nlong;
        sreq.base.data.buffer.memh = MXM_INVALID_MEM_HANDLE;
#if MXM_API < MXM_VERSION(2,0)
        sreq.base.flags = 0;
#else
        sreq.flags = 0;
#endif

        sreq.opcode = MXM_REQ_OP_ATOMIC_FADD;
    }

    if (MXM_OK != (mxm_err = mxm_req_send(&sreq))) {
        ATOMIC_ERROR("[#%d] mxm_req_send failed, mxm_error = %d",
                     my_pe, mxm_err);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    mxm_req_wait(&sreq.base);
    if (MXM_OK != sreq.base.error) {
        ATOMIC_ERROR("[#%d] mxm_req_wait got non MXM_OK error: %d",
                     my_pe, sreq.base.error);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    return OSHMEM_SUCCESS;
}
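
Per the prev branch above, the same entry point covers both a plain atomic add (prev == NULL, the fetched value is discarded into dummy_buf) and a fetch-and-add (prev != NULL). A usage sketch with hypothetical variables; the op argument is unused by this mxm path:

/* Hypothetical caller: 64-bit plain add vs. fetch-and-add on a symmetric
 * variable 'target' at PE 'pe'. */
static void add_examples(long *target, int pe)
{
    long old_val;
    long one = 1;

    mca_atomic_mxm_fadd(target, &old_val, &one, sizeof(long), pe, NULL); /* fetch-and-add */
    mca_atomic_mxm_fadd(target, NULL, &one, sizeof(long), pe, NULL);     /* plain add */
}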
Code Example #27
File: spml_ucx.c Project: 00datman/ompi
sshmem_mkey_t *mca_spml_ucx_register(void* addr,
                                         size_t size,
                                         uint64_t shmid,
                                         int *count)
{
    sshmem_mkey_t *mkeys;
    ucs_status_t err;
    spml_ucx_mkey_t   *ucx_mkey;
    size_t len;

    *count = 0;
    mkeys = (sshmem_mkey_t *) calloc(1, sizeof(*mkeys));
    if (!mkeys) {
        return NULL ;
    }

    ucx_mkey = (spml_ucx_mkey_t *)malloc(sizeof(*ucx_mkey));
    if (!ucx_mkey) {
        goto error_out;
    }

    mkeys[0].spml_context = ucx_mkey;
    err = ucp_mem_map(mca_spml_ucx.ucp_context, 
            &addr, size, 0, &ucx_mkey->mem_h);
    if (UCS_OK != err) {
        goto error_out1;
    }

    err = ucp_rkey_pack(mca_spml_ucx.ucp_context, ucx_mkey->mem_h, 
            &mkeys[0].u.data, &len); 
    if (UCS_OK != err) {
        goto error_unmap;
    }
    if (len >= 0xffff) {
        SPML_ERROR("packed rkey is too long: %llu >= %d",
                (unsigned long long)len,
                0xffff);
        oshmem_shmem_abort(-1);
    }

    err = ucp_ep_rkey_unpack(mca_spml_ucx.ucp_peers[oshmem_group_self->my_pe].ucp_conn,
                             mkeys[0].u.data,
                             &ucx_mkey->rkey);
    if (UCS_OK != err) {
        SPML_ERROR("failed to unpack rkey");
        goto error_unmap;
    }

    mkeys[0].len     = len;
    mkeys[0].va_base = addr;
    *count = 1;
    return mkeys;

error_unmap:
    ucp_mem_unmap(mca_spml_ucx.ucp_context, ucx_mkey->mem_h);
error_out1:
    free(ucx_mkey);
error_out:
    free(mkeys);

    return NULL ;
}
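
The 0xffff guard above ties back to the mkey exchange: Example #10 unpacks the key length as OPAL_UINT16, so sshmem_mkey_t.len effectively travels as a 16-bit field and a longer packed rkey could not be exchanged. A restated sketch of the same check, with a hypothetical helper name:

/* Restated guard: sshmem_mkey_t.len travels as a 16-bit quantity in the
 * modex exchange (see unpack_remote_mkeys), hence the hard cap. */
static int rkey_len_fits(size_t len)
{
    return len < 0xffff; /* must fit a uint16 length field */
}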
Code Example #28
static void memheap_attach_segment(mca_spml_mkey_t *mkey, int tr_id)
{
    /* handle the special case when the va was obtained using shmget(IPC_PRIVATE)
     * this case is notable for:
     * - key is set as (type|shmid);
     * - va_base is set as 0;
     */
    if (!mkey->va_base
            && ((int) MEMHEAP_SHM_GET_ID(mkey->key) != MEMHEAP_SHM_INVALID)) {
        MEMHEAP_VERBOSE(5,
                        "shared memory usage tr_id: %d key %llx base_va %p shmid 0x%X|0x%X",
                        tr_id,
                        (unsigned long long)mkey->key,
                        mkey->va_base,
                        MEMHEAP_SHM_GET_TYPE(mkey->key),
                        MEMHEAP_SHM_GET_ID(mkey->key));

        if (MEMHEAP_SHM_GET_TYPE(mkey->key) == MAP_SEGMENT_ALLOC_SHM) {
            mkey->va_base = shmat(MEMHEAP_SHM_GET_ID(mkey->key), 0, 0);
        } else if (MEMHEAP_SHM_GET_TYPE(mkey->key) == MAP_SEGMENT_ALLOC_IBV) {
#if defined(MPAGE_ENABLE) && (MPAGE_ENABLE > 0)
            openib_device_t *device = NULL;
            struct ibv_mr *ib_mr;
            void *addr;
            static int mr_count;

            int access_flag = IBV_ACCESS_LOCAL_WRITE |
                              IBV_ACCESS_REMOTE_WRITE |
                              IBV_ACCESS_REMOTE_READ |
                              IBV_ACCESS_NO_RDMA;

            device = (openib_device_t *)memheap_map->mem_segs[HEAP_SEG_INDEX].context;
            assert(device);

            /* workaround mtt problem - request aligned addresses */
            ++mr_count;
            addr = (void *)(mca_memheap_base_start_address + mca_memheap_base_mr_interleave_factor*1024ULL*1024ULL*1024ULL*mr_count);
            ib_mr = ibv_reg_shared_mr(MEMHEAP_SHM_GET_ID(mkey->key),
                    device->ib_pd, addr, access_flag);
            if (NULL == ib_mr)
            {
                mkey->va_base = (void*)-1;
                MEMHEAP_ERROR("error to ibv_reg_shared_mr() errno says %d: %s",
                        errno, strerror(errno));
            }
            else
            {
                if (ib_mr->addr != addr) {
                    MEMHEAP_WARN("Failed to map shared region to address %p got addr %p. Try to increase 'memheap_mr_interleave_factor' from %d", addr, ib_mr->addr, mca_memheap_base_mr_interleave_factor);
                }

                opal_value_array_append_item(&device->ib_mr_array, &ib_mr);
                mkey->va_base = ib_mr->addr;
            }
#endif /* MPAGE_ENABLE */
        } else {
            MEMHEAP_ERROR("tr_id: %d key %llx attach failed: incorrect shmid 0x%X|0x%X",
                          tr_id, 
                          (unsigned long long)mkey->key,
                          MEMHEAP_SHM_GET_TYPE(mkey->key),
                          MEMHEAP_SHM_GET_ID(mkey->key));
            oshmem_shmem_abort(-1);
        }

        if ((void *) -1 == (void *) mkey->va_base) {
            MEMHEAP_ERROR("tr_id: %d key %llx attach failed: errno = %d",
                          tr_id, (unsigned long long)mkey->key, errno);
            oshmem_shmem_abort(-1);
        }
    }
}
Code Example #29
/**
 * TODO: using put request as handle is not good.
 */
static inline int mca_spml_ikrit_put_internal(void* dst_addr,
                                              size_t size,
                                              void* src_addr,
                                              int dst,
                                              void **handle,
                                              int zcopy)
{
    void *rva;
    mca_spml_ikrit_put_request_t *put_req;
    int ptl_id;
    sshmem_mkey_t *r_mkey;
    static int count;
    int need_progress = 0;

    if (0 >= size) {
        return OSHMEM_SUCCESS;
    }

    ptl_id = get_ptl_id(dst);
    /* Get rkey of remote PE (dst proc) which must be on memheap  */
    r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, ptl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   dst, dst_addr);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

#if SPML_IKRIT_PUT_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d ptl=%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
            dst, ptl_id, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif
    if (ptl_id == MXM_PTL_SHM) {

        if (mca_memheap_base_can_local_copy(r_mkey, dst_addr)) {
            memcpy((void *) (unsigned long) rva, src_addr, size);
            /* call progress as often as we would have with regular put */
            if (++count % SPML_IKRIT_PACKETS_PER_SYNC == 0)
                mxm_progress(mca_spml_ikrit.mxm_context);
            return OSHMEM_SUCCESS;
        }
        /* segment not mapped - fall back to RDMA */
        ptl_id = MXM_PTL_RDMA;
        r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, ptl_id, &rva);
        if (!r_mkey) {
            SPML_ERROR("pe=%d: %p is not address of shared variable",
                       dst, dst_addr);
            oshmem_shmem_abort(-1);
            return OSHMEM_ERROR;
        }
    }

#if SPML_IKRIT_PUT_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d ptl=%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
            dst, ptl_id, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    put_req = alloc_put_req();
    if (NULL == put_req) {
        SPML_ERROR("out of put requests - aborting");
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }
    if (handle)
        *handle = put_req;

    /* fill out request */
    put_req->mxm_req.base.mq = mca_spml_ikrit.mxm_mq;
    /* Request an immediate response if we are getting low on send buffers;
     * otherwise the remote side only responds on ack timeout.
     * Also request an explicit ack once in a while. */
#if MXM_API < MXM_VERSION(2,0)
    put_req->mxm_req.opcode = MXM_REQ_OP_PUT;
    if (mca_spml_ikrit.free_list_max - mca_spml_ikrit.n_active_puts <= SPML_IKRIT_PUT_LOW_WATER ||
            (mca_spml_ikrit.mxm_peers[dst]->n_active_puts + 1) % SPML_IKRIT_PACKETS_PER_SYNC == 0) {
        put_req->mxm_req.base.flags = MXM_REQ_FLAG_SEND_SYNC;
        need_progress = 1;
    } else  {
        put_req->mxm_req.base.flags = MXM_REQ_FLAG_SEND_LAZY|MXM_REQ_FLAG_SEND_SYNC;
    }
#else
    put_req->mxm_req.flags = 0;
    if (mca_spml_ikrit.free_list_max - mca_spml_ikrit.n_active_puts <= SPML_IKRIT_PUT_LOW_WATER ||
            (int)opal_list_get_size(&mca_spml_ikrit.active_peers) > mca_spml_ikrit.unsync_conn_max ||
            (mca_spml_ikrit.mxm_peers[dst]->n_active_puts + 1) % SPML_IKRIT_PACKETS_PER_SYNC == 0) {
        need_progress = 1;
        put_req->mxm_req.opcode = MXM_REQ_OP_PUT_SYNC;
    } else  {
        put_req->mxm_req.opcode = MXM_REQ_OP_PUT;
    }
    if (!zcopy) {
        if (size < mca_spml_ikrit.put_zcopy_threshold) {
            put_req->mxm_req.flags |= MXM_REQ_SEND_FLAG_BLOCKING;
        } else {
            put_req->mxm_req.opcode = MXM_REQ_OP_PUT_SYNC;
        }
    }
#endif

    put_req->mxm_req.base.conn = mca_spml_ikrit.mxm_peers[dst]->mxm_conn;
    put_req->mxm_req.base.data_type = MXM_REQ_DATA_BUFFER;
    put_req->mxm_req.base.data.buffer.ptr = src_addr;
    put_req->mxm_req.base.data.buffer.length = size;
    put_req->mxm_req.base.completed_cb = put_completion_cb;
    put_req->mxm_req.base.context = put_req;
    put_req->mxm_req.op.mem.remote_vaddr = (intptr_t) rva;
    put_req->mxm_req.base.state = MXM_REQ_NEW;
    put_req->pe = dst;

#if MXM_API < MXM_VERSION(2,0)
    put_req->mxm_req.base.data.buffer.memh = NULL;
    put_req->mxm_req.op.mem.remote_memh = NULL;
#else
    put_req->mxm_req.op.mem.remote_mkey = to_mxm_mkey(r_mkey);
#endif

    OPAL_THREAD_ADD32(&mca_spml_ikrit.n_active_puts, 1);
    if (mca_spml_ikrit.mxm_peers[dst]->need_fence == 0) {
        opal_list_append(&mca_spml_ikrit.active_peers,
                         &mca_spml_ikrit.mxm_peers[dst]->super);
        mca_spml_ikrit.mxm_peers[dst]->need_fence = 1;
    }

    mca_spml_ikrit.mxm_peers[dst]->n_active_puts++;

    SPML_IKRIT_MXM_POST_SEND(put_req->mxm_req);

    if (need_progress)
        mxm_progress(mca_spml_ikrit.mxm_context);

    return OSHMEM_SUCCESS;
}
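
The flow-control decision spread across the #if branches above boils down to one predicate. The helper below is a distilled sketch of the MXM >= 2.0 branch, not part of the component; it assumes the component's SPML_IKRIT_PUT_LOW_WATER and SPML_IKRIT_PACKETS_PER_SYNC constants are in scope.

/* Distilled sketch of the sync-vs-lazy decision above (MXM >= 2.0 branch).
 * Returns nonzero when the put should be issued as MXM_REQ_OP_PUT_SYNC
 * and progress should be driven afterwards. */
static inline int demo_put_needs_sync(int free_list_max,
                                      int n_active_puts,
                                      int n_active_peers,
                                      int unsync_conn_max,
                                      int peer_active_puts)
{
    /* Low on send buffers: force an immediate remote response. */
    if (free_list_max - n_active_puts <= SPML_IKRIT_PUT_LOW_WATER)
        return 1;
    /* Too many peers with unacked traffic: cap fence/quiet cost. */
    if (n_active_peers > unsync_conn_max)
        return 1;
    /* Request an explicit ack periodically per peer. */
    return ((peer_active_puts + 1) % SPML_IKRIT_PACKETS_PER_SYNC) == 0;
}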
Code example #30
/* Simple buffered put implementation. NOT IN USE.
 * Problems:
 * - slightly worse performance than the implementation based on non-buffered put;
 * - fence complexity is O(n_active_connections) instead of O(n_connections_with_outstanding_puts).
 *   The latter is bounded by the network RTT and the mxm ack timer.
 */
int mca_spml_ikrit_put_simple(void* dst_addr,
                              size_t size,
                              void* src_addr,
                              int dst)
{
    void *rva;
    mxm_send_req_t mxm_req;
    mxm_wait_t wait;
    int ptl_id;
    sshmem_mkey_t *r_mkey;
    static int count;

    ptl_id = get_ptl_id(dst);
    /* Get rkey of remote PE (dst proc) which must be on memheap  */
    r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, ptl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   dst, dst_addr);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

#if SPML_IKRIT_PUT_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d ptl=%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
            dst, ptl_id, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif
    if (ptl_id == MXM_PTL_SHM) {

        if (mca_memheap_base_can_local_copy(r_mkey, dst_addr)) {
            memcpy((void *) (unsigned long) rva, src_addr, size);
            /* call progress as often as we would have with regular put */
            if (++count % SPML_IKRIT_PACKETS_PER_SYNC == 0)
                mxm_progress(mca_spml_ikrit.mxm_context);
            return OSHMEM_SUCCESS;
        }
        /* segment not mapped - fall back to RDMA */
        ptl_id = MXM_PTL_RDMA;
        r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, ptl_id, &rva);
        if (!r_mkey) {
            SPML_ERROR("pe=%d: %p is not address of shared variable",
                       dst, dst_addr);
            oshmem_shmem_abort(-1);
            return OSHMEM_ERROR;
        }
    }

#if SPML_IKRIT_PUT_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d ptl=%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
            dst, ptl_id, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    /* fill out request */
    mxm_req.base.mq = mca_spml_ikrit.mxm_mq;
#if MXM_API < MXM_VERSION(2,0)
    mxm_req.base.flags = MXM_REQ_FLAG_BLOCKING;
#else
    mxm_req.flags = MXM_REQ_SEND_FLAG_BLOCKING;
#endif
    mxm_req.base.conn = mca_spml_ikrit.mxm_peers[dst]->mxm_conn;
    mxm_req.base.data_type = MXM_REQ_DATA_BUFFER;
    mxm_req.base.data.buffer.ptr = src_addr;
    mxm_req.base.data.buffer.length = size;
    mxm_req.base.completed_cb = NULL;
    mxm_req.base.context = NULL;
    mxm_req.opcode = MXM_REQ_OP_PUT;
    mxm_req.op.mem.remote_vaddr = (intptr_t) rva;
    mxm_req.base.state = MXM_REQ_NEW;
    mxm_req.base.error = MXM_OK;

#if MXM_API < MXM_VERSION(2, 0)
    mxm_req.base.data.buffer.memh = NULL;
    mxm_req.op.mem.remote_memh = NULL;
#else
    mxm_req.op.mem.remote_mkey = to_mxm_mkey(r_mkey);
#endif

    if (mca_spml_ikrit.mxm_peers[dst]->need_fence == 0) {
        opal_list_append(&mca_spml_ikrit.active_peers,
                         &mca_spml_ikrit.mxm_peers[dst]->super);
        mca_spml_ikrit.mxm_peers[dst]->need_fence = 1;
    }

    SPML_IKRIT_MXM_POST_SEND(mxm_req);

    wait.req = &mxm_req.base;
    wait.state = (mxm_req_state_t)(MXM_REQ_SENT | MXM_REQ_COMPLETED);
    wait.progress_cb = NULL;
    wait.progress_arg = NULL;
    mxm_wait(&wait);

    return OSHMEM_SUCCESS;
}
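
Because mca_spml_ikrit_put_simple() waits until MXM reports MXM_REQ_SENT | MXM_REQ_COMPLETED, a caller needs no completion handle. The hypothetical wrapper below shows that usage; in the real tree the OpenSHMEM API dispatches to the SPML through the module interface rather than calling this function directly.

/* Hypothetical caller of the buffered put above. Illustration only:
 * the OpenSHMEM layer normally goes through the SPML module interface
 * instead of calling mca_spml_ikrit_put_simple() directly. */
static void demo_blocking_put(void *dst_addr, const void *src_addr,
                              size_t size, int pe)
{
    /* put_simple() returns only after MXM reports the request both
     * sent and completed, so the data is already delivered here. */
    if (OSHMEM_SUCCESS !=
            mca_spml_ikrit_put_simple(dst_addr, size, (void *)src_addr, pe)) {
        oshmem_shmem_abort(-1);
    }
}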