Code example #1
File: spml_ikrit.c Project: XuanWang1982/ompi
int mca_spml_ikrit_del_procs(oshmem_proc_t** procs, size_t nprocs)
{
    size_t i, n;
    int my_rank = oshmem_my_proc_id();

    oshmem_shmem_barrier();
#if MXM_API >= MXM_VERSION(2,0)
    if (mca_spml_ikrit.bulk_disconnect) {
        mxm_ep_powerdown(mca_spml_ikrit.mxm_ep);
    }
#endif

    while (NULL != opal_list_remove_first(&mca_spml_ikrit.active_peers)) {
    }
    OBJ_DESTRUCT(&mca_spml_ikrit.active_peers);

    for (n = 0; n < nprocs; n++) {
        i = (my_rank + n) % nprocs;
        if (mca_spml_ikrit.mxm_peers[i]->mxm_conn) {
            mxm_ep_disconnect(mca_spml_ikrit.mxm_peers[i]->mxm_conn);
        }
        if (mca_spml_ikrit.hw_rdma_channel && mca_spml_ikrit.mxm_peers[i]->mxm_hw_rdma_conn) {
            mxm_ep_disconnect(mca_spml_ikrit.mxm_peers[i]->mxm_hw_rdma_conn);
        }
        destroy_ptl_idx(i);
        if (mca_spml_ikrit.mxm_peers[i]) {
            OBJ_RELEASE(mca_spml_ikrit.mxm_peers[i]);
        }
    }
    if (mca_spml_ikrit.mxm_peers)
        free(mca_spml_ikrit.mxm_peers);

    return OSHMEM_SUCCESS;
}
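
Note: several teardown and setup loops in these examples walk the peer table as i = (my_rank + n) % nprocs rather than i = n, so each rank starts at its own index and the ranks do not all hit peer 0 at once. A standalone sketch of that rotation (hypothetical names, not project code):

#include <stdio.h>

/* Visit all peers in a rank-specific order: rank r sees r, r+1, ...,
 * wrapping around, which spreads connect/disconnect traffic across the job. */
static void visit_peers(int my_rank, int nprocs)
{
    for (int n = 0; n < nprocs; n++) {
        int i = (my_rank + n) % nprocs;
        printf("rank %d visits peer %d\n", my_rank, i);
    }
}

int main(void)
{
    visit_peers(1, 4); /* visits peers 1, 2, 3, 0 */
    return 0;
}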
Code example #2
static int __reg_segment(map_segment_t *s, int *num_btl)
{
    int rc = OSHMEM_SUCCESS;
    int my_pe;
    int nprocs;

    nprocs = oshmem_num_procs();
    my_pe = oshmem_my_proc_id();

    s->mkeys_cache = (mca_spml_mkey_t **) calloc(nprocs,
                                                 sizeof(mca_spml_mkey_t *));
    if (NULL == s->mkeys_cache) {
        MEMHEAP_ERROR("Failed to allocate memory for remote segments");
        rc = OSHMEM_ERROR;
    }

    if (!rc) {
        s->mkeys = MCA_SPML_CALL(register((void *)(unsigned long)s->start,
                        s->end - s->start,
                        MEMHEAP_SHM_CODE(s->type, s->shmid),
                        num_btl));
        if (NULL == s->mkeys) {
            free(s->mkeys_cache);
            s->mkeys_cache = NULL;

            MEMHEAP_ERROR("Failed to register segment");
            rc = OSHMEM_ERROR;
        }
    }

    return rc;
}
Code example #3
File: spml_ikrit.c Project: gpaulsen/ompi
/* for now only do blocking copy send */
int mca_spml_ikrit_send(void* buf,
                        size_t size,
                        int dst,
                        mca_spml_base_put_mode_t mode)
{
    mxm_send_req_t req;
    char dummy_buf[1];

    SPML_VERBOSE(100,
                 "sending %p size %d to %d, mode %d",
                 buf, (int)size, dst, (int)mode);
    req.opcode = MXM_REQ_OP_SEND;

    req.op.send.tag = oshmem_my_proc_id();

    req.base.state = MXM_REQ_NEW;
    req.base.mq = mca_spml_ikrit.mxm_mq;
    req.base.conn = mca_spml_ikrit.mxm_peers[dst].mxm_conn;
    req.flags             = MXM_REQ_SEND_FLAG_BLOCKING;
    req.base.completed_cb = NULL;

    req.base.data_type = MXM_REQ_DATA_BUFFER;
    req.base.data.buffer.ptr = buf == NULL ? dummy_buf : buf;
    req.base.data.buffer.length = size == 0 ? sizeof(dummy_buf) : size;
    req.base.data.buffer.memh = NULL;

    SPML_IKRIT_MXM_POST_SEND(req);

    mca_spml_irkit_req_wait(&req.base);
    if (req.base.error != MXM_OK) {
        return OSHMEM_ERROR;
    }

    return OSHMEM_SUCCESS;
}
Code example #4
File: oshmem_shmem_init.c Project: davideberius/ompi
int oshmem_shmem_preconnect_all(void)
{
    int rc = OSHMEM_SUCCESS;

    /* force qp creation and rkey exchange for memheap. Does not force exchange of static vars */
    if (oshmem_preconnect_all) {
        long val;
        int nproc;
        int my_pe;
        int i;

        val = 0xdeadbeaf;

        if (!preconnect_value) {
            rc =
                    MCA_MEMHEAP_CALL(private_alloc(sizeof(long), (void **)&preconnect_value));
        }
        if (!preconnect_value || (rc != OSHMEM_SUCCESS)) {
            SHMEM_API_ERROR("shmem_preconnect_all failed");
            return OSHMEM_ERR_OUT_OF_RESOURCE;
        }

        nproc = oshmem_num_procs();
        my_pe = oshmem_my_proc_id();
        for (i = 0; i < nproc; i++) {
            shmem_long_p(preconnect_value, val, (my_pe + i) % nproc);
        }
        shmem_barrier_all();
        SHMEM_API_VERBOSE(5, "Preconnected all PEs");
    }

    return OSHMEM_SUCCESS;
}
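
Note: the loop above forces connection setup by writing one long to every PE. A minimal user-level sketch of the same warm-up idiom, using only standard OpenSHMEM calls; the symmetric variable warm is illustrative, not from the project:

#include <shmem.h>

static long warm; /* global data is symmetric, so every PE can target it */

int main(void)
{
    shmem_init();

    int me = shmem_my_pe();
    int np = shmem_n_pes();

    /* One tiny put per PE, rotated so the ranks do not all hit PE 0 first. */
    for (int i = 0; i < np; i++) {
        shmem_long_p(&warm, 1, (me + i) % np);
    }
    shmem_barrier_all(); /* all connections are established past this point */

    shmem_finalize();
    return 0;
}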
Code example #5
File: spml_ucx.c Project: davideberius/ompi
int mca_spml_ucx_deregister(sshmem_mkey_t *mkeys)
{
    spml_ucx_mkey_t   *ucx_mkey;
    map_segment_t *mem_seg;
    int segno;
    int my_pe = oshmem_my_proc_id();

    MCA_SPML_CALL(quiet(oshmem_ctx_default));
    if (!mkeys)
        return OSHMEM_SUCCESS;

    if (!mkeys[0].spml_context)
        return OSHMEM_SUCCESS;

    mem_seg  = memheap_find_va(mkeys[0].va_base);
    ucx_mkey = (spml_ucx_mkey_t*)mkeys[0].spml_context;

    if (OPAL_UNLIKELY(NULL == mem_seg)) {
        return OSHMEM_ERROR;
    }
    
    if (MAP_SEGMENT_ALLOC_UCX != mem_seg->type) {
        ucp_mem_unmap(mca_spml_ucx.ucp_context, ucx_mkey->mem_h);
    }
    ucp_rkey_destroy(ucx_mkey->rkey);
    ucx_mkey->rkey = NULL;

    if (0 < mkeys[0].len) {
        ucp_rkey_buffer_release(mkeys[0].u.data);
    }

    free(mkeys);

    return OSHMEM_SUCCESS;
}
Code example #6
File: spml_ikrit.c Project: gpaulsen/ompi
int mca_spml_ikrit_del_procs(ompi_proc_t** procs, size_t nprocs)
{
    size_t i, n;
    int my_rank = oshmem_my_proc_id();

    oshmem_shmem_barrier();
    if (mca_spml_ikrit.bulk_disconnect) {
        mxm_ep_powerdown(mca_spml_ikrit.mxm_ep);
    }

    while (NULL != opal_list_remove_first(&mca_spml_ikrit.active_peers)) {
    }
    OBJ_DESTRUCT(&mca_spml_ikrit.active_peers);

    for (n = 0; n < nprocs; n++) {
        i = (my_rank + n) % nprocs;
        mxm_ep_disconnect(mca_spml_ikrit.mxm_peers[i].mxm_conn);
        if (mca_spml_ikrit.hw_rdma_channel) {
            assert(mca_spml_ikrit.mxm_peers[i].mxm_hw_rdma_conn != mca_spml_ikrit.mxm_peers[i].mxm_conn);
            mxm_ep_disconnect(mca_spml_ikrit.mxm_peers[i].mxm_hw_rdma_conn);
        }
        mxm_peer_destruct(&mca_spml_ikrit.mxm_peers[i]);
    }
    free(mca_spml_ikrit.mxm_peers);

    return OSHMEM_SUCCESS;
}
Code example #7
File: spml_ucx.c Project: davideberius/ompi
void mca_spml_ucx_memuse_hook(void *addr, size_t length)
{
    int my_pe;
    spml_ucx_mkey_t *ucx_mkey;
    ucp_mem_advise_params_t params;
    ucs_status_t status;

    if (!(mca_spml_ucx.heap_reg_nb && memheap_is_va_in_segment(addr, HEAP_SEG_INDEX))) {
        return;
    }

    my_pe    = oshmem_my_proc_id();
    ucx_mkey = &mca_spml_ucx_ctx_default.ucp_peers[my_pe].mkeys[HEAP_SEG_INDEX].key;

    params.field_mask = UCP_MEM_ADVISE_PARAM_FIELD_ADDRESS |
                        UCP_MEM_ADVISE_PARAM_FIELD_LENGTH |
                        UCP_MEM_ADVISE_PARAM_FIELD_ADVICE;

    params.address = addr;
    params.length  = length;
    params.advice  = UCP_MADV_WILLNEED;

    status = ucp_mem_advise(mca_spml_ucx.ucp_context, ucx_mkey->mem_h, &params);
    if (UCS_OK != status) {
        SPML_UCX_ERROR("ucp_mem_advise failed addr %p len %llu : %s",
                       addr, (unsigned long long)length, ucs_status_string(status));
    }
}
Code example #8
static int __dereg_segment(map_segment_t *s)
{
    int rc = OSHMEM_SUCCESS;
    int j;
    int nprocs, my_pe;

    nprocs = oshmem_num_procs();
    my_pe = oshmem_my_proc_id();

    MCA_SPML_CALL(deregister(s->mkeys));

    if (s->mkeys_cache) {
        for (j = 0; j < nprocs; j++) {
            if (j == my_pe)
                continue;
            if (s->mkeys_cache[j]) {
                free(s->mkeys_cache[j]);
                s->mkeys_cache[j] = NULL;
            }
        }
        free(s->mkeys_cache);
        s->mkeys_cache = NULL;
    }

    s->is_active = 0;

    return rc;
}
Code example #9
File: spml_ucx.c Project: jjhursey/ompi
int mca_spml_ucx_del_procs(ompi_proc_t** procs, size_t nprocs)
{
    int my_rank = oshmem_my_proc_id();
    size_t num_reqs, max_reqs;
    void *dreq, **dreqs;
    ucp_ep_h ep;
    size_t i, n;

    oshmem_shmem_barrier();

    if (!mca_spml_ucx.ucp_peers) {
        return OSHMEM_SUCCESS;
    }

    max_reqs = mca_spml_ucx.num_disconnect;
    if (max_reqs > nprocs) {
        max_reqs = nprocs;
    }

    dreqs = malloc(sizeof(*dreqs) * max_reqs);
    if (dreqs == NULL) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    num_reqs = 0;

    for (i = 0; i < nprocs; ++i) {
        n  = (i + my_rank) % nprocs;
        ep = mca_spml_ucx.ucp_peers[n].ucp_conn;
        if (ep == NULL) {
            continue;
        }

        SPML_VERBOSE(10, "disconnecting from peer %d", (int)n);
        dreq = ucp_disconnect_nb(ep);
        if (dreq != NULL) {
            if (UCS_PTR_IS_ERR(dreq)) {
                SPML_ERROR("ucp_disconnect_nb(%d) failed: %s", n,
                           ucs_status_string(UCS_PTR_STATUS(dreq)));
            } else {
                dreqs[num_reqs++] = dreq;
            }
        }

        mca_spml_ucx.ucp_peers[n].ucp_conn = NULL;

        if ((int)num_reqs >= mca_spml_ucx.num_disconnect) {
            mca_spml_ucx_waitall(dreqs, &num_reqs);
        }
    }

    mca_spml_ucx_waitall(dreqs, &num_reqs);
    free(dreqs);

    opal_pmix.fence(NULL, 0);
    free(mca_spml_ucx.ucp_peers);
    return OSHMEM_SUCCESS;
}
Code example #10
File: spml_ucx.c Project: 00datman/ompi
static void dump_address(int pe, char *addr, size_t len)
{
#ifdef SPML_UCX_DEBUG
    int my_rank = oshmem_my_proc_id();
    unsigned  i;

    printf("me=%d dest_pe=%d addr=%p len=%d\n", my_rank, pe, addr, len);
    for (i = 0; i < len; i++) {
        printf("%02X ", (unsigned)0xFF&addr[i]);
    }
    printf("\n");
#endif
}
Code example #11
File: memheap_base_mkey.c Project: urids/XSCALAMPI
sshmem_mkey_t * mca_memheap_base_get_cached_mkey(int pe,
                                                   void* va,
                                                   int btl_id,
                                                   void** rva)
{
    map_segment_t *s;
    int rc;
    sshmem_mkey_t *mkey;

    MEMHEAP_VERBOSE_FASTPATH(10, "rkey: pe=%d va=%p", pe, va);
    s = __find_va(va);
    if (NULL == s)
        return NULL;

    if (!MAP_SEGMENT_IS_VALID(s))
        return NULL;

    if (pe == oshmem_my_proc_id()) {
        *rva = va;
        MEMHEAP_VERBOSE_FASTPATH(10, "rkey: pe=%d va=%p -> (local) %lx %p", pe, va,
                s->mkeys[btl_id].u.key, *rva);
        return &s->mkeys[btl_id];
    }

    if (OPAL_LIKELY(s->mkeys_cache[pe])) {
        mkey = &s->mkeys_cache[pe][btl_id];
        *rva = va2rva(va, s->seg_base_addr, mkey->va_base);
        MEMHEAP_VERBOSE_FASTPATH(10, "rkey: pe=%d va=%p -> (cached) %lx %p", pe, (void *)va, mkey->u.key, (void *)*rva);
        return mkey;
    }

    s->mkeys_cache[pe] = (sshmem_mkey_t *) calloc(memheap_map->num_transports,
                                                    sizeof(sshmem_mkey_t));
    if (!s->mkeys_cache[pe])
        return NULL;

    rc = memheap_oob_get_mkeys(pe,
                               s - memheap_map->mem_segs,
                               s->mkeys_cache[pe]);
    if (OSHMEM_SUCCESS != rc)
        return NULL;

    mkey = &s->mkeys_cache[pe][btl_id];
    *rva = va2rva(va, s->seg_base_addr, mkey->va_base);

    MEMHEAP_VERBOSE_FASTPATH(5, "rkey: pe=%d va=%p -> (remote lookup) %lx %p", pe, (void *)va, mkey->u.key, (void *)*rva);
    return mkey;
}
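
Note: the cached path above translates a local symmetric address with va2rva(va, s->seg_base_addr, mkey->va_base). A compilable stand-in for what that translation presumably computes (names are illustrative): preserve the offset within the segment and rebase it onto the remote PE's recorded base address.

#include <stdint.h>
#include <stdio.h>

static void *sketch_va2rva(void *va, void *local_base, void *remote_base)
{
    uintptr_t offset = (uintptr_t)va - (uintptr_t)local_base; /* offset in segment */
    return (void *)((uintptr_t)remote_base + offset);         /* rebased address */
}

int main(void)
{
    char local_seg[64], remote_seg[64];
    void *rva = sketch_va2rva(&local_seg[16], local_seg, remote_seg);
    printf("offset preserved: %d\n", (int)((char *)rva - remote_seg)); /* 16 */
    return 0;
}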
Code example #12
File: atomic_basic_module.c Project: wuqunyong/ompi
void atomic_basic_lock(int pe)
{
    int index = -1;
    int me = oshmem_my_proc_id();
    int num_pe = oshmem_num_procs();
    char lock_required = ATOMIC_LOCK_WAITING;
    char lock_active = ATOMIC_LOCK_ACTIVE;
    int root_pe = pe;

    do {
        /* announce that we need the resource */
        do {
            MCA_SPML_CALL(put((void*)(atomic_lock_sync + me), sizeof(lock_required), (void*)&lock_required, root_pe));
            MCA_SPML_CALL(get((void*)atomic_lock_sync, num_pe * sizeof(*atomic_lock_sync), (void*)local_lock_sync, root_pe));
        } while (local_lock_sync[me] != lock_required);

        MCA_SPML_CALL(get((void*)atomic_lock_turn, sizeof(index), (void*)&index, root_pe));
        while (index != me) {
            if (local_lock_sync[index] != ATOMIC_LOCK_IDLE) {
                MCA_SPML_CALL(get((void*)atomic_lock_turn, sizeof(index), (void*)&index, root_pe));
                MCA_SPML_CALL(get((void*)atomic_lock_sync, num_pe * sizeof(*atomic_lock_sync), (void*)local_lock_sync, root_pe));
            } else {
                index = (index + 1) % num_pe;
            }
        }

        /* now tentatively claim the resource */
        do {
            MCA_SPML_CALL(put((void*)(atomic_lock_sync + me), sizeof(lock_active), (void*)&lock_active, root_pe));
            MCA_SPML_CALL(get((void*)atomic_lock_sync, num_pe * sizeof(*atomic_lock_sync), (void*)local_lock_sync, root_pe));
        } while (local_lock_sync[me] != lock_active);

        index = 0;
        while ((index < num_pe)
                && ((index == me)
                        || (local_lock_sync[index] != ATOMIC_LOCK_ACTIVE))) {
            index = index + 1;
        }

        MCA_SPML_CALL(get((void*)atomic_lock_turn, sizeof(*atomic_lock_turn), (void*)local_lock_turn, root_pe));
    } while (!((index >= num_pe)
            && ((*local_lock_turn == me)
                    || (local_lock_sync[*local_lock_turn] == ATOMIC_LOCK_IDLE))));

    MCA_SPML_CALL(put((void*)atomic_lock_turn, sizeof(me), (void*)&me, root_pe));
}
Code example #13
File: memheap_base_mkey.c Project: anhzhang/ompi
uint64_t mca_memheap_base_find_offset(int pe,
                                      int tr_id,
                                      void* va,
                                      void* rva)
{
    map_segment_t *s;
    int my_pe = oshmem_my_proc_id();

    s = memheap_find_va(va);

    if (my_pe == pe) {
        return (uintptr_t)va - (uintptr_t)s->seg_base_addr;
    }
    else {
        return ((s && MAP_SEGMENT_IS_VALID(s)) ? ((uintptr_t)rva - (uintptr_t)(s->mkeys_cache[pe][tr_id].va_base)) : 0);
    }
}
Code example #14
static int create_ptl_idx(int dst_pe)
{
    ompi_proc_t *proc;

    proc = oshmem_proc_group_find(oshmem_group_all, dst_pe);

    OSHMEM_PROC_DATA(proc)->transport_ids = (char *) malloc(MXM_PTL_LAST * sizeof(char));
    if (NULL == OSHMEM_PROC_DATA(proc)->transport_ids)
        return OSHMEM_ERROR;

    OSHMEM_PROC_DATA(proc)->num_transports = 1;
#if MXM_API < MXM_VERSION(2,0)
    if (oshmem_my_proc_id() == dst_pe)
        OSHMEM_PROC_DATA(proc)->transport_ids[0] = MXM_PTL_SELF;
    else
#endif
        OSHMEM_PROC_DATA(proc)->transport_ids[0] = MXM_PTL_RDMA;
    return OSHMEM_SUCCESS;
}
Code example #15
File: spml_ucx.c Project: 00datman/ompi
int mca_spml_ucx_del_procs(oshmem_proc_t** procs, size_t nprocs)
{
    size_t i, n;
    int my_rank = oshmem_my_proc_id();

    oshmem_shmem_barrier();

    if (!mca_spml_ucx.ucp_peers) {
        return OSHMEM_SUCCESS;
    }

     for (n = 0; n < nprocs; n++) {
         i = (my_rank + n) % nprocs;
         if (mca_spml_ucx.ucp_peers[i].ucp_conn) {
             ucp_ep_destroy(mca_spml_ucx.ucp_peers[i].ucp_conn);
         }
     }

    free(mca_spml_ucx.ucp_peers);
    return OSHMEM_SUCCESS;
}
Code example #16
File: spml_ucx.c Project: davideberius/ompi
int mca_spml_ucx_del_procs(ompi_proc_t** procs, size_t nprocs)
{
    opal_common_ucx_del_proc_t *del_procs;
    size_t i;
    int ret;

    oshmem_shmem_barrier();

    if (!mca_spml_ucx_ctx_default.ucp_peers) {
        return OSHMEM_SUCCESS;
    }

    del_procs = malloc(sizeof(*del_procs) * nprocs);
    if (del_procs == NULL) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    for (i = 0; i < nprocs; ++i) {
        del_procs[i].ep   = mca_spml_ucx_ctx_default.ucp_peers[i].ucp_conn;
        del_procs[i].vpid = i;

        /* mark peer as disconnected */
        mca_spml_ucx_ctx_default.ucp_peers[i].ucp_conn = NULL;
    }

    ret = opal_common_ucx_del_procs(del_procs, nprocs, oshmem_my_proc_id(),
                                    mca_spml_ucx.num_disconnect,
                                    mca_spml_ucx_ctx_default.ucp_worker);

    free(del_procs);
    free(mca_spml_ucx.remote_addrs_tbl);
    free(mca_spml_ucx_ctx_default.ucp_peers);

    mca_spml_ucx_ctx_default.ucp_peers = NULL;

    opal_common_ucx_mca_proc_added();

    return ret;
}
Code example #17
File: atomic_basic_module.c Project: wuqunyong/ompi
void atomic_basic_unlock(int pe)
{
    int index = -1;
    int me = oshmem_my_proc_id();
    int num_pe = oshmem_num_procs();
    char lock_idle = ATOMIC_LOCK_IDLE;
    int root_pe = pe;

    MCA_SPML_CALL(get((void*)atomic_lock_sync, num_pe * sizeof(*atomic_lock_sync), (void*)local_lock_sync, root_pe));
    MCA_SPML_CALL(get((void*)atomic_lock_turn, sizeof(index), (void*)&index, root_pe));

    do {
        index = (index + 1) % num_pe;
    } while (local_lock_sync[index] == ATOMIC_LOCK_IDLE);

    MCA_SPML_CALL(put((void*)atomic_lock_turn, sizeof(index), (void*)&index, root_pe));

    do {
        MCA_SPML_CALL(put((void*)(atomic_lock_sync + me), sizeof(lock_idle), (void*)&lock_idle, root_pe));
        MCA_SPML_CALL(get((void*)atomic_lock_sync, num_pe * sizeof(*atomic_lock_sync), (void*)local_lock_sync, root_pe));
    } while (local_lock_sync[me] != lock_idle);
}
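
Note: atomic_basic_lock (example #12) and atomic_basic_unlock above keep the lock_sync and lock_turn arrays on a single root PE, so both calls must name the same pe around a critical section. A hypothetical caller, sketched against the prototypes shown:

/* Prototypes as they appear in the examples above. */
void atomic_basic_lock(int pe);
void atomic_basic_unlock(int pe);

/* Illustrative only: serialize access to a resource owned by root_pe. */
static void sketch_critical_section(int root_pe)
{
    atomic_basic_lock(root_pe);
    /* ... operate on the shared resource guarded by root_pe ... */
    atomic_basic_unlock(root_pe);
}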
Code example #18
File: memheap_base_mkey.c Project: anhzhang/ompi
void mca_memheap_modex_recv_all(void)
{
    int i;
    int j;
    int nprocs, my_pe;
    opal_buffer_t *msg = NULL;
    void *send_buffer = NULL;
    char *rcv_buffer = NULL;
    int size;
    int *rcv_size = NULL;
    int *rcv_n_transports = NULL;
    int *rcv_offsets = NULL;
    int rc = OSHMEM_SUCCESS;
    size_t buffer_size;

    if (!mca_memheap_base_key_exchange) {
        oshmem_shmem_barrier();
        return;
    }

    nprocs = oshmem_num_procs();
    my_pe = oshmem_my_proc_id();

    /* buffer allocation for num_transports
     * message sizes and offsets */

    rcv_size = (int *)malloc(nprocs * sizeof(int));
    if (NULL == rcv_size) {
        MEMHEAP_ERROR("failed to get rcv_size buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    rcv_offsets = (int *)malloc(nprocs * sizeof(int));
    if (NULL == rcv_offsets) {
        MEMHEAP_ERROR("failed to get rcv_offsets buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    rcv_n_transports = (int *)malloc(nprocs * sizeof(int));
    if (NULL == rcv_n_transports) {
        MEMHEAP_ERROR("failed to get rcv_n_transports buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    /* serialize our own mkeys */
    msg = OBJ_NEW(opal_buffer_t);
    if (NULL == msg) {
        MEMHEAP_ERROR("failed to get msg buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    for (j = 0; j < memheap_map->n_segments; j++) {
        pack_local_mkeys(msg, 0, j, 1);
    }

    /* we assume here that the int32_t returned by opal_dss.unload
     * has the same size as the int we use for MPI_Allgather and MPI_Allgatherv */

    assert(sizeof(int32_t) == sizeof(int));

    /* Do allgather */
    opal_dss.unload(msg, &send_buffer, &size);
    MEMHEAP_VERBOSE(1, "local keys packed into %d bytes, %d segments", size, memheap_map->n_segments);

    /* we need to send num_transports and message sizes separately
     * since message sizes depend on the types of btl used */

    rc = oshmem_shmem_allgather(&memheap_map->num_transports, rcv_n_transports, sizeof(int));
    if (MPI_SUCCESS != rc) {
        MEMHEAP_ERROR("allgather failed");
        goto exit_fatal;
    }

    rc = oshmem_shmem_allgather(&size, rcv_size, sizeof(int));
    if (MPI_SUCCESS != rc) {
        MEMHEAP_ERROR("allgather failed");
        goto exit_fatal;
    }

    /* calculating offsets (displacements) for allgatherv */

    rcv_offsets[0] = 0;
    for (i = 1; i < nprocs; i++) {
        rcv_offsets[i] = rcv_offsets[i - 1] + rcv_size[i - 1];
    }

    buffer_size = rcv_offsets[nprocs - 1] + rcv_size[nprocs - 1];

    rcv_buffer = malloc (buffer_size);
    if (NULL == rcv_buffer) {
        MEMHEAP_ERROR("failed to allocate recieve buffer");
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto exit_fatal;
    }

    rc = oshmem_shmem_allgatherv(send_buffer, rcv_buffer, size, rcv_size, rcv_offsets);
    if (MPI_SUCCESS != rc) {
        free (rcv_buffer);
        MEMHEAP_ERROR("allgatherv failed");
        goto exit_fatal;
    }

    opal_dss.load(msg, rcv_buffer, buffer_size);

    /* deserialize mkeys */
    OPAL_THREAD_LOCK(&memheap_oob.lck);
    for (i = 0; i < nprocs; i++) {
        if (i == my_pe) {
            continue;
        }

        msg->unpack_ptr = (void *)((intptr_t) msg->base_ptr + rcv_offsets[i]);

        for (j = 0; j < memheap_map->n_segments; j++) {
            map_segment_t *s;

            s = &memheap_map->mem_segs[j];
            if (NULL != s->mkeys_cache[i]) {
                MEMHEAP_VERBOSE(10, "PE%d: segment%d already exists, mkey will be replaced", i, j);
            } else {
                s->mkeys_cache[i] = (sshmem_mkey_t *) calloc(rcv_n_transports[i],
                        sizeof(sshmem_mkey_t));
                if (NULL == s->mkeys_cache[i]) {
                    MEMHEAP_ERROR("PE%d: segment%d: Failed to allocate mkeys cache entry", i, j);
                    oshmem_shmem_abort(-1);
                }
            }
            memheap_oob.mkeys = s->mkeys_cache[i];
            unpack_remote_mkeys(msg, i);
        }
    }

    OPAL_THREAD_UNLOCK(&memheap_oob.lck);

exit_fatal:
    if (rcv_size) {
        free(rcv_size);
    }
    if (rcv_offsets) {
        free(rcv_offsets);
    }
    if (rcv_n_transports) {
        free(rcv_n_transports);
    }
    if (send_buffer) {
        free(send_buffer);
    }
    if (msg) {
        OBJ_RELEASE(msg);
    }

    /* This function requires an abort in any error case */
    if (OSHMEM_SUCCESS != rc) {
        oshmem_shmem_abort(rc);
    }
}
Code example #19
File: spml_ucx.c Project: davideberius/ompi
sshmem_mkey_t *mca_spml_ucx_register(void* addr,
                                         size_t size,
                                         uint64_t shmid,
                                         int *count)
{
    sshmem_mkey_t *mkeys;
    ucs_status_t status;
    spml_ucx_mkey_t   *ucx_mkey;
    size_t len;
    ucp_mem_map_params_t mem_map_params;
    int segno;
    map_segment_t *mem_seg;
    unsigned flags;
    int my_pe = oshmem_my_proc_id();

    *count = 0;
    mkeys = (sshmem_mkey_t *) calloc(1, sizeof(*mkeys));
    if (!mkeys) {
        return NULL;
    }

    segno   = memheap_find_segnum(addr);
    mem_seg = memheap_find_seg(segno);

    ucx_mkey = &mca_spml_ucx_ctx_default.ucp_peers[my_pe].mkeys[segno].key;
    mkeys[0].spml_context = ucx_mkey;

    /* if possible use mem handle already created by ucx allocator */
    if (MAP_SEGMENT_ALLOC_UCX != mem_seg->type) {
        flags = 0;
        if (mca_spml_ucx.heap_reg_nb && memheap_is_va_in_segment(addr, HEAP_SEG_INDEX)) {
            flags = UCP_MEM_MAP_NONBLOCK;
        }

        mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                    UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                    UCP_MEM_MAP_PARAM_FIELD_FLAGS;
        mem_map_params.address    = addr;
        mem_map_params.length     = size;
        mem_map_params.flags      = flags;

        status = ucp_mem_map(mca_spml_ucx.ucp_context, &mem_map_params, &ucx_mkey->mem_h);
        if (UCS_OK != status) {
            goto error_out;
        }

    } else {
        ucx_mkey->mem_h = (ucp_mem_h)mem_seg->context;
    }

    status = ucp_rkey_pack(mca_spml_ucx.ucp_context, ucx_mkey->mem_h, 
                           &mkeys[0].u.data, &len); 
    if (UCS_OK != status) {
        goto error_unmap;
    }
    if (len >= 0xffff) {
        SPML_UCX_ERROR("packed rkey is too long: %llu >= %d",
                (unsigned long long)len,
                0xffff);
        oshmem_shmem_abort(-1);
    }

    status = ucp_ep_rkey_unpack(mca_spml_ucx_ctx_default.ucp_peers[oshmem_group_self->my_pe].ucp_conn,
                                mkeys[0].u.data,
                                &ucx_mkey->rkey);
    if (UCS_OK != status) {
        SPML_UCX_ERROR("failed to unpack rkey");
        goto error_unmap;
    }

    mkeys[0].len     = len;
    mkeys[0].va_base = addr;
    *count = 1;
    mca_spml_ucx_cache_mkey(&mca_spml_ucx_ctx_default, &mkeys[0], segno, my_pe);
    return mkeys;

error_unmap:
    ucp_mem_unmap(mca_spml_ucx.ucp_context, ucx_mkey->mem_h);
error_out:
    free(mkeys);

    return NULL;
}
Code example #20
File: spml_ikrit.c Project: gpaulsen/ompi
sshmem_mkey_t *mca_spml_ikrit_register(void* addr,
                                         size_t size,
                                         uint64_t shmid,
                                         int *count)
{
    int i;
    sshmem_mkey_t *mkeys;
    mxm_error_t err;
    mxm_mem_key_t *m_key;
    int my_rank = oshmem_my_proc_id();

    *count = 0;
    mkeys = (sshmem_mkey_t *) calloc(1, MXM_PTL_LAST * sizeof(*mkeys));
    if (!mkeys) {
        return NULL;
    }

    for (i = 0; i < MXM_PTL_LAST; i++) {
        mkeys[i].u.key = MAP_SEGMENT_SHM_INVALID;
        switch (i) {
        case MXM_PTL_SHM:
            if ((int)shmid != MAP_SEGMENT_SHM_INVALID) {
                mkeys[i].u.key = shmid;
                mkeys[i].va_base = 0;
            } else {
                mkeys[i].len = 0;
                mkeys[i].va_base = addr;
            }
            mkeys[i].spml_context = 0;
            break;
        case MXM_PTL_RDMA:
            mkeys[i].va_base = addr;
            mkeys[i].spml_context = 0;

            if (mca_spml_ikrit.ud_only) {
                mkeys[i].len = 0;
                break;
            }

            err = mxm_mem_map(mca_spml_ikrit.mxm_context, &addr, &size, 0, 0, 0);
            if (MXM_OK != err) {
                SPML_ERROR("Failed to register memory: %s", mxm_error_string(err));
                goto error_out;
            }
            mkeys[i].spml_context = (void *)(unsigned long)size;

            m_key = malloc(sizeof(*m_key));
            if (NULL == m_key) {
                SPML_ERROR("Failed to allocate m_key memory");
                goto error_out;
            }
            mkeys[i].len = sizeof(*m_key);
            mkeys[i].u.data = m_key;

            err = mxm_mem_get_key(mca_spml_ikrit.mxm_context, addr, m_key);
            if (MXM_OK != err) {
                SPML_ERROR("Failed to get memory key: %s", mxm_error_string(err));
                goto error_out;
            }
            break;

        default:
            SPML_ERROR("unsupported PTL: %d", i);
            goto error_out;
        }
        SPML_VERBOSE(5,
                     "rank %d ptl %d addr %p size %llu %s",
                     my_rank, i, addr, (unsigned long long)size,
                     mca_spml_base_mkey2str(&mkeys[i]));

        mca_spml_ikrit_cache_mkeys(&mkeys[i], memheap_find_segnum(addr), my_rank, i);
    }
    *count = MXM_PTL_LAST;

    return mkeys;

error_out:
    mca_spml_ikrit_deregister(mkeys);

    return NULL;
}
Code example #21
File: spml_ikrit.c Project: gpaulsen/ompi
int mca_spml_ikrit_add_procs(ompi_proc_t** procs, size_t nprocs)
{
    spml_ikrit_mxm_ep_conn_info_t *ep_info = NULL;
    spml_ikrit_mxm_ep_conn_info_t *ep_hw_rdma_info = NULL;
    spml_ikrit_mxm_ep_conn_info_t my_ep_info;
    size_t mxm_addr_len = MXM_MAX_ADDR_LEN;
    mxm_error_t err;
    size_t i, n;
    int rc = OSHMEM_ERROR;
    ompi_proc_t *proc_self;
    int my_rank = oshmem_my_proc_id();

    OBJ_CONSTRUCT(&mca_spml_ikrit.active_peers, opal_list_t);
    /* Allocate connection requests */
    ep_info = calloc(sizeof(spml_ikrit_mxm_ep_conn_info_t), nprocs);
    if (NULL == ep_info) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }

    if (mca_spml_ikrit.hw_rdma_channel) {
        ep_hw_rdma_info = calloc(sizeof(spml_ikrit_mxm_ep_conn_info_t), nprocs);
        if (NULL == ep_hw_rdma_info) {
            rc = OSHMEM_ERR_OUT_OF_RESOURCE;
            goto bail;
        }
    }

    mca_spml_ikrit.mxm_peers = (mxm_peer_t *) calloc(nprocs, sizeof(mxm_peer_t));
    if (NULL == mca_spml_ikrit.mxm_peers) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }

    memset(&my_ep_info, 0, sizeof(my_ep_info));

    if (mca_spml_ikrit.hw_rdma_channel) {
        err = mxm_ep_get_address(mca_spml_ikrit.mxm_hw_rdma_ep, &my_ep_info.addr.ep_addr, &mxm_addr_len);
        if (MXM_OK != err) {
            orte_show_help("help-oshmem-spml-ikrit.txt", "unable to get endpoint address", true,
                    mxm_error_string(err));
            rc = OSHMEM_ERROR;
            goto bail;
        }
        oshmem_shmem_allgather(&my_ep_info, ep_hw_rdma_info,
                sizeof(spml_ikrit_mxm_ep_conn_info_t));
    }
    err = mxm_ep_get_address(mca_spml_ikrit.mxm_ep, &my_ep_info.addr.ep_addr, &mxm_addr_len);
    if (MXM_OK != err) {
        orte_show_help("help-oshmem-spml-ikrit.txt", "unable to get endpoint address", true,
                mxm_error_string(err));
        rc = OSHMEM_ERROR;
        goto bail;
    }

    oshmem_shmem_allgather(&my_ep_info, ep_info,
                           sizeof(spml_ikrit_mxm_ep_conn_info_t));

    opal_progress_register(spml_ikrit_progress);

    /* Get the EP connection requests for all the processes from modex */
    for (n = 0; n < nprocs; ++n) {

        /* mxm 2.0 keeps its connections on a list. Make sure
         * that the list has a different order on every rank */
        i = (my_rank + n) % nprocs;
        mxm_peer_construct(&mca_spml_ikrit.mxm_peers[i]);

        err = mxm_ep_connect(mca_spml_ikrit.mxm_ep, ep_info[i].addr.ep_addr, &mca_spml_ikrit.mxm_peers[i].mxm_conn);
        if (MXM_OK != err) {
            SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
            goto bail;
        }
        mxm_conn_ctx_set(mca_spml_ikrit.mxm_peers[i].mxm_conn, &mca_spml_ikrit.mxm_peers[i]);
        if (mca_spml_ikrit.hw_rdma_channel) {
            err = mxm_ep_connect(mca_spml_ikrit.mxm_hw_rdma_ep, ep_hw_rdma_info[i].addr.ep_addr, &mca_spml_ikrit.mxm_peers[i].mxm_hw_rdma_conn);
            if (MXM_OK != err) {
                SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
                goto bail;
            }
        } else {
            mca_spml_ikrit.mxm_peers[i].mxm_hw_rdma_conn = mca_spml_ikrit.mxm_peers[i].mxm_conn;
        }
    }

    if (ep_info)
        free(ep_info);
    if (ep_hw_rdma_info)
        free(ep_hw_rdma_info);

    if (mca_spml_ikrit.bulk_connect) {
        /* Need a barrier to ensure remote peers already created connection */
        oshmem_shmem_barrier();
        mxm_ep_wireup(mca_spml_ikrit.mxm_ep);
    }

    proc_self = oshmem_proc_group_find(oshmem_group_all, my_rank);
    /* identify local processes and change transport to SHM */
    for (i = 0; i < nprocs; i++) {
        if (procs[i]->super.proc_name.jobid != proc_self->super.proc_name.jobid ||
            !OPAL_PROC_ON_LOCAL_NODE(procs[i]->super.proc_flags)) {
            continue;
        }
        if (procs[i] == proc_self)
            continue;

        /* use zcopy for put/get via sysv shared memory with fallback to RDMA */
        mca_spml_ikrit.mxm_peers[i].ptl_id = MXM_PTL_SHM;
    }

    SPML_VERBOSE(50, "*** ADDED PROCS ***");
    return OSHMEM_SUCCESS;

bail:
    if (ep_info)
        free(ep_info);
    if (ep_hw_rdma_info)
        free(ep_hw_rdma_info);
    SPML_ERROR("add procs FAILED rc=%d", rc);

    return rc;
}
Code example #22
File: spml_ucx.c Project: davideberius/ompi
int mca_spml_ucx_ctx_create(long options, shmem_ctx_t *ctx)
{
    mca_spml_ucx_ctx_t *ucx_ctx;
    ucp_worker_params_t params;
    ucp_ep_params_t ep_params;
    size_t i, j, nprocs = oshmem_num_procs();
    ucs_status_t err;
    int my_pe = oshmem_my_proc_id();
    size_t len;
    spml_ucx_mkey_t *ucx_mkey;
    sshmem_mkey_t *mkey;
    int rc = OSHMEM_ERROR;

    ucx_ctx = malloc(sizeof(mca_spml_ucx_ctx_t));
    if (NULL == ucx_ctx) {
        return OSHMEM_ERR_OUT_OF_RESOURCE;
    }
    ucx_ctx->options = options;

    params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    if (oshmem_mpi_thread_provided == SHMEM_THREAD_SINGLE || options & SHMEM_CTX_PRIVATE || options & SHMEM_CTX_SERIALIZED) {
        params.thread_mode = UCS_THREAD_MODE_SINGLE;
    } else {
        params.thread_mode = UCS_THREAD_MODE_MULTI;
    }

    err = ucp_worker_create(mca_spml_ucx.ucp_context, &params,
                            &ucx_ctx->ucp_worker);
    if (UCS_OK != err) {
        free(ucx_ctx);
        return OSHMEM_ERROR;
    }

    ucx_ctx->ucp_peers = (ucp_peer_t *) calloc(nprocs, sizeof(*(ucx_ctx->ucp_peers)));
    if (NULL == ucx_ctx->ucp_peers) {
        goto error;
    }

    if (mca_spml_ucx.active_array.ctxs_count == 0) {
        opal_progress_register(spml_ucx_ctx_progress);
    }

    for (i = 0; i < nprocs; i++) {
        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address    = (ucp_address_t *)(mca_spml_ucx.remote_addrs_tbl[i]);
        err = ucp_ep_create(ucx_ctx->ucp_worker, &ep_params,
                            &ucx_ctx->ucp_peers[i].ucp_conn);
        if (UCS_OK != err) {
            SPML_ERROR("ucp_ep_create(proc=%d/%d) failed: %s", i, nprocs,
                       ucs_status_string(err));
            goto error2;
        }

        for (j = 0; j < MCA_MEMHEAP_SEG_COUNT; j++) {
            mkey = &memheap_map->mem_segs[j].mkeys_cache[i][0];
            ucx_mkey = &ucx_ctx->ucp_peers[i].mkeys[j].key;
            err = ucp_ep_rkey_unpack(ucx_ctx->ucp_peers[i].ucp_conn,
                                     mkey->u.data,
                                     &ucx_mkey->rkey);
            if (UCS_OK != err) {
                SPML_UCX_ERROR("failed to unpack rkey");
                goto error2;
            }
            mca_spml_ucx_cache_mkey(ucx_ctx, mkey, j, i);
        }
    }

    SHMEM_MUTEX_LOCK(mca_spml_ucx.internal_mutex);
    _ctx_add(&mca_spml_ucx.active_array, ucx_ctx);
    SHMEM_MUTEX_UNLOCK(mca_spml_ucx.internal_mutex);

    (*ctx) = (shmem_ctx_t)ucx_ctx;
    return OSHMEM_SUCCESS;

 error2:
    for (i = 0; i < nprocs; i++) {
        if (ucx_ctx->ucp_peers[i].ucp_conn) {
            ucp_ep_destroy(ucx_ctx->ucp_peers[i].ucp_conn);
        }
    }

    if (ucx_ctx->ucp_peers)
        free(ucx_ctx->ucp_peers);

 error:
    ucp_worker_destroy(ucx_ctx->ucp_worker);
    free(ucx_ctx);
    rc = OSHMEM_ERR_OUT_OF_RESOURCE;
    SPML_ERROR("ctx create FAILED rc=%d", rc);
    return rc;
}
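
Note: at the user level, the routine above backs the OpenSHMEM 1.4 context API. A short sketch of that API, assuming a 1.4-conformant library and trimming error handling:

#include <shmem.h>

static void sketch_ctx_usage(long *sym_counter, int pe)
{
    shmem_ctx_t ctx;

    /* A private context gets its own worker and endpoints in the UCX SPML. */
    if (shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx) != 0) {
        return; /* creation may fail; a caller could fall back to SHMEM_CTX_DEFAULT */
    }

    shmem_ctx_long_p(ctx, sym_counter, 1, pe);
    shmem_ctx_quiet(ctx); /* completes only this context's outstanding puts */
    shmem_ctx_destroy(ctx);
}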
Code example #23
int mca_spml_ikrit_add_procs(ompi_proc_t** procs, size_t nprocs)
{
    spml_ikrit_mxm_ep_conn_info_t *ep_info = NULL;
    spml_ikrit_mxm_ep_conn_info_t *ep_hw_rdma_info = NULL;
    spml_ikrit_mxm_ep_conn_info_t my_ep_info = {{0}};
#if MXM_API < MXM_VERSION(2,0)
    mxm_conn_req_t *conn_reqs;
    int timeout;
#else
    size_t mxm_addr_len = MXM_MAX_ADDR_LEN;
#endif
    mxm_error_t err;
    size_t i, n;
    int rc = OSHMEM_ERROR;
    ompi_proc_t *proc_self;
    int my_rank = oshmem_my_proc_id();

    OBJ_CONSTRUCT(&mca_spml_ikrit.active_peers, opal_list_t);
    /* Allocate connection requests */
#if MXM_API < MXM_VERSION(2,0)
    conn_reqs = malloc(nprocs * sizeof(mxm_conn_req_t));
    if (NULL == conn_reqs) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }
    memset(conn_reqs, 0x0, nprocs * sizeof(mxm_conn_req_t));
#endif
    ep_info = calloc(sizeof(spml_ikrit_mxm_ep_conn_info_t), nprocs);
    if (NULL == ep_info) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }

    if (mca_spml_ikrit.hw_rdma_channel) {
        ep_hw_rdma_info = calloc(sizeof(spml_ikrit_mxm_ep_conn_info_t), nprocs);
        if (NULL == ep_hw_rdma_info) {
            rc = OSHMEM_ERR_OUT_OF_RESOURCE;
            goto bail;
        }
    }

    mca_spml_ikrit.mxm_peers = (mxm_peer_t **) malloc(nprocs
            * sizeof(*(mca_spml_ikrit.mxm_peers)));
    if (NULL == mca_spml_ikrit.mxm_peers) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }

#if MXM_API < MXM_VERSION(2,0)
    if (OSHMEM_SUCCESS
            != spml_ikrit_get_ep_address(&my_ep_info, MXM_PTL_SELF)) {
        rc = OSHMEM_ERROR;
        goto bail;
    }
    if (OSHMEM_SUCCESS
            != spml_ikrit_get_ep_address(&my_ep_info, MXM_PTL_RDMA)) {
        rc = OSHMEM_ERROR;
        goto bail;
    }
#else
    if (mca_spml_ikrit.hw_rdma_channel) {
        err = mxm_ep_get_address(mca_spml_ikrit.mxm_hw_rdma_ep, &my_ep_info.addr.ep_addr, &mxm_addr_len);
        if (MXM_OK != err) {
            orte_show_help("help-oshmem-spml-ikrit.txt", "unable to get endpoint address", true,
                    mxm_error_string(err));
            rc = OSHMEM_ERROR;
            goto bail;
        }
        oshmem_shmem_allgather(&my_ep_info, ep_hw_rdma_info,
                sizeof(spml_ikrit_mxm_ep_conn_info_t));
    }
    err = mxm_ep_get_address(mca_spml_ikrit.mxm_ep, &my_ep_info.addr.ep_addr, &mxm_addr_len);
    if (MXM_OK != err) {
        orte_show_help("help-oshmem-spml-ikrit.txt", "unable to get endpoint address", true,
                mxm_error_string(err));
        rc = OSHMEM_ERROR;
        goto bail;
    }
#endif
    oshmem_shmem_allgather(&my_ep_info, ep_info,
                           sizeof(spml_ikrit_mxm_ep_conn_info_t));

    opal_progress_register(spml_ikrit_progress);

    /* Get the EP connection requests for all the processes from modex */
    for (n = 0; n < nprocs; ++n) {

        /* mxm 2.0 keeps its connections on a list. Make sure
         * that the list has a different order on every rank */
        i = (my_rank + n) % nprocs;
        mca_spml_ikrit.mxm_peers[i] = OBJ_NEW(mxm_peer_t);
        if (NULL == mca_spml_ikrit.mxm_peers[i]) {
            rc = OSHMEM_ERR_OUT_OF_RESOURCE;
            goto bail;
        }
        mca_spml_ikrit.mxm_peers[i]->pe = i;

#if MXM_API < MXM_VERSION(2,0)
        conn_reqs[i].ptl_addr[MXM_PTL_SELF] =
                (struct sockaddr *) &ep_info[i].addr.ptl_addr[MXM_PTL_SELF];
        conn_reqs[i].ptl_addr[MXM_PTL_SHM] = NULL;
        conn_reqs[i].ptl_addr[MXM_PTL_RDMA] =
                (struct sockaddr *) &ep_info[i].addr.ptl_addr[MXM_PTL_RDMA];
#else
        err = mxm_ep_connect(mca_spml_ikrit.mxm_ep, ep_info[i].addr.ep_addr, &mca_spml_ikrit.mxm_peers[i]->mxm_conn);
        if (MXM_OK != err) {
            SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
            goto bail;
        }
        if (OSHMEM_SUCCESS != create_ptl_idx(i))
            goto bail;
        mxm_conn_ctx_set(mca_spml_ikrit.mxm_peers[i]->mxm_conn, mca_spml_ikrit.mxm_peers[i]);
        if (mca_spml_ikrit.hw_rdma_channel) {
            err = mxm_ep_connect(mca_spml_ikrit.mxm_hw_rdma_ep, ep_hw_rdma_info[i].addr.ep_addr, &mca_spml_ikrit.mxm_peers[i]->mxm_hw_rdma_conn);
            if (MXM_OK != err) {
                SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
                goto bail;
            }
        } else {
            mca_spml_ikrit.mxm_peers[i]->mxm_hw_rdma_conn = mca_spml_ikrit.mxm_peers[i]->mxm_conn;
        }
#endif
    }

#if MXM_API < MXM_VERSION(2,0)
    /* Connect to remote peers */
    if (mxm_get_version() < MXM_VERSION(1,5)) {
        timeout = 1000;
    } else {
        timeout = -1;
    }
    err = mxm_ep_connect(mca_spml_ikrit.mxm_ep, conn_reqs, nprocs, timeout);
    if (MXM_OK != err) {
        SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
        for (i = 0; i < nprocs; ++i) {
            if (MXM_OK != conn_reqs[i].error) {
                SPML_ERROR("MXM EP connect to %s error: %s\n",
                           procs[i]->proc_hostname, mxm_error_string(conn_reqs[i].error));
            }
        }
        rc = OSHMEM_ERR_CONNECTION_FAILED;
        goto bail;
    }

    /* Save returned connections */
    for (i = 0; i < nprocs; ++i) {
        mca_spml_ikrit.mxm_peers[i]->mxm_conn = conn_reqs[i].conn;
        if (OSHMEM_SUCCESS != create_ptl_idx(i)) {
            rc = OSHMEM_ERR_CONNECTION_FAILED;
            goto bail;
        }

        mxm_conn_ctx_set(conn_reqs[i].conn, mca_spml_ikrit.mxm_peers[i]);
    }

    if (conn_reqs)
        free(conn_reqs);
#endif
    if (ep_info)
        free(ep_info);
    if (ep_hw_rdma_info)
        free(ep_hw_rdma_info);

#if MXM_API >= MXM_VERSION(2,0)
    if (mca_spml_ikrit.bulk_connect) {
        /* Need a barrier to ensure remote peers already created connection */
        oshmem_shmem_barrier();
        mxm_ep_wireup(mca_spml_ikrit.mxm_ep);
    }
#endif

    proc_self = oshmem_proc_group_find(oshmem_group_all, my_rank);
    /* identify local processes and change transport to SHM */
    for (i = 0; i < nprocs; i++) {
        if (procs[i]->super.proc_name.jobid != proc_self->super.proc_name.jobid ||
            !OPAL_PROC_ON_LOCAL_NODE(procs[i]->super.proc_flags)) {
            continue;
        }
        if (procs[i] == proc_self)
            continue;

        /* use zcopy for put/get via sysv shared memory */
        OSHMEM_PROC_DATA(procs[i])->transport_ids[0] = MXM_PTL_SHM;
        OSHMEM_PROC_DATA(procs[i])->transport_ids[1] = MXM_PTL_RDMA;
        OSHMEM_PROC_DATA(procs[i])->num_transports = 2;
    }

    SPML_VERBOSE(50, "*** ADDED PROCS ***");
    return OSHMEM_SUCCESS;

bail:
#if MXM_API < MXM_VERSION(2,0)
    if (conn_reqs)
        free(conn_reqs);
#endif
    if (ep_info)
        free(ep_info);
    if (ep_hw_rdma_info)
        free(ep_hw_rdma_info);
    SPML_ERROR("add procs FAILED rc=%d", rc);

    return rc;
}
Code example #24
File: spml_yoda.c Project: 00datman/ompi
static void mca_yoda_get_callback(mca_btl_base_module_t* btl,
                                  mca_btl_base_tag_t tag,
                                  mca_btl_base_descriptor_t* des,
                                  void* cbdata )
{
    void** p, ** p_src, **p_dst;
    size_t* size;
    int* dst;
    void** p_getreq;
    mca_btl_base_descriptor_t* des_loc;
    int rc;
    mca_bml_base_btl_t* bml_btl;
    mca_spml_yoda_rdma_frag_t* frag;
    int btl_id;
    mca_spml_yoda_put_request_t *putreq;

    rc = OSHMEM_SUCCESS;
    btl_id = 0;
    putreq = NULL;

    /* Unpack data */
    p = (void **)des->des_segments->seg_addr.pval;
    p_src = (void*) p;

    size = (size_t*)((char*)p_src + sizeof(*p_src) );
    dst = (int*)( (char*)size + sizeof(*size));
    p_dst = (void*) ((char*)dst + sizeof(*dst));
    p_getreq =(void**) ( (char*)p_dst + sizeof(*p_dst));

    /* Prepare put via send*/
    bml_btl = get_next_btl(*dst, &btl_id);

    putreq = mca_spml_yoda_putreq_alloc(*dst);
    frag = &putreq->put_frag;

    mca_spml_yoda_bml_alloc(bml_btl,
                            &des_loc,
                            MCA_BTL_NO_ORDER,
                            *size,
                            MCA_BTL_DES_SEND_ALWAYS_CALLBACK,
                            1);

    if (OPAL_UNLIKELY(!des_loc || !des_loc->des_segments)) {
        SPML_ERROR("shmem OOM error need %d bytes", (int)*size);
        oshmem_shmem_abort(-1);
    }
    spml_yoda_prepare_for_get_response((void*)des_loc->des_segments->seg_addr.pval, *size, (void*)*p_src, (void*) *p_dst,(void*)*p_getreq,1);

    frag->rdma_req = putreq;

    /* Initialize callback data for put*/
    des_loc->des_cbdata = frag;
    des_loc->des_cbfunc = mca_spml_yoda_put_completion;
    des_loc->des_segment_count = 1;

    OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_puts, 1);

    /* Put via send*/
    rc = mca_bml_base_send(bml_btl, des_loc, MCA_SPML_YODA_GET_RESPONSE);
    if (1 == rc) {
        rc = OSHMEM_SUCCESS;
    }

    if (OPAL_UNLIKELY(OSHMEM_SUCCESS != rc)) {
        if (OSHMEM_ERR_OUT_OF_RESOURCE == rc) {
            /* No free resources, Block on completion here */
            SPML_ERROR("shmem error: OSHMEM_ERR_OUT_OF_RESOURCE");
            oshmem_request_wait_completion(&putreq->req_put.req_base.req_oshmem);
        } else {
            SPML_ERROR("shmem error");
        }
        /* exit with error */
        SPML_ERROR("shmem error: ret = %i, send_pe = %i, dest_pe = %i",
                   rc, oshmem_my_proc_id(), *dst);
        oshmem_shmem_abort(-1);
        rc = OSHMEM_ERROR;
    }
}
Code example #25
File: atomic_mxm_fadd.c Project: Cai900205/test
int mca_atomic_mxm_fadd(void *target,
                        void *prev,
                        const void *value,
                        size_t nlong,
                        int pe,
                        struct oshmem_op_t *op)
{
    unsigned my_pe;
    uint8_t nlong_order;
    void *remote_addr;
    int ptl_id;
    mxm_send_req_t sreq;
    mxm_error_t mxm_err;
    sshmem_mkey_t *r_mkey;
    static char dummy_buf[8];

    my_pe = oshmem_my_proc_id();
    ptl_id = -1;
    mxm_err = MXM_OK;

    if (!target || !value) {
        ATOMIC_ERROR("[#%d] target or value are not defined", my_pe);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERR_BAD_PARAM;
    }

    if ((pe < 0) || (pe >= oshmem_num_procs())) {
        ATOMIC_ERROR("[#%d] PE=%d not valid", my_pe, pe);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERR_BAD_PARAM;
    }

    switch (nlong) {
    case 1:
        nlong_order = 0;
        break;
    case 2:
        nlong_order = 1;
        break;
    case 4:
        nlong_order = 2;
        break;
    case 8:
        nlong_order = 3;
        break;
    default:
        ATOMIC_ERROR("[#%d] Type size must be 1/2/4 or 8 bytes.", my_pe);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERR_BAD_PARAM;
    }

    ptl_id = oshmem_proc_group_all(pe)->transport_ids[0];
    if (MXM_PTL_SHM == ptl_id) {
        ptl_id = MXM_PTL_RDMA;
    }
    r_mkey = mca_memheap.memheap_get_cached_mkey(pe,
                                             target,
                                             ptl_id,
                                             &remote_addr);
    if (!r_mkey) {
        ATOMIC_ERROR("[#%d] %p is not address of symmetric variable",
                     my_pe, target);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERR_BAD_PARAM;
    }

    /* mxm request init */
    sreq.base.state = MXM_REQ_NEW;
    sreq.base.mq = mca_spml_self->mxm_mq;
    sreq.base.conn = mca_spml_self->mxm_peers[pe]->mxm_hw_rdma_conn;
    sreq.base.completed_cb = NULL;
    sreq.base.data_type = MXM_REQ_DATA_BUFFER;

    sreq.op.atomic.remote_vaddr = (uintptr_t) remote_addr;
#if MXM_API < MXM_VERSION(2,0)
    sreq.op.atomic.remote_memh  = MXM_INVALID_MEM_HANDLE;
    memcpy(&sreq.op.atomic.value8, value, nlong);
#else
    sreq.op.atomic.remote_mkey = to_mxm_mkey(r_mkey);
    memcpy(&sreq.op.atomic.value, value, nlong);
#endif
    sreq.op.atomic.order = nlong_order;

    /* Do we need atomic 'add' or atomic 'fetch and add'? */
    if (NULL == prev) {
        sreq.base.data.buffer.ptr = dummy_buf;
        sreq.base.data.buffer.length = nlong;
        sreq.base.data.buffer.memh = MXM_INVALID_MEM_HANDLE;
#if MXM_API < MXM_VERSION(2,0)
        sreq.base.flags = MXM_REQ_FLAG_SEND_SYNC;
        sreq.opcode = MXM_REQ_OP_ATOMIC_ADD;
#else
        sreq.flags = 0;
        sreq.opcode = MXM_REQ_OP_ATOMIC_FADD;
#endif
    } else {
        sreq.base.data.buffer.ptr = prev;
        sreq.base.data.buffer.length = nlong;
        sreq.base.data.buffer.memh = MXM_INVALID_MEM_HANDLE;
#if MXM_API < MXM_VERSION(2,0)
        sreq.base.flags = 0;
#else
        sreq.flags = 0;
#endif

        sreq.opcode = MXM_REQ_OP_ATOMIC_FADD;
    }

    if (MXM_OK != (mxm_err = mxm_req_send(&sreq))) {
        ATOMIC_ERROR("[#%d] mxm_req_send failed, mxm_error = %d",
                     my_pe, mxm_err);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    mxm_req_wait(&sreq.base);
    if (MXM_OK != sreq.base.error) {
        ATOMIC_ERROR("[#%d] mxm_req_wait got non MXM_OK error: %d",
                     my_pe, sreq.base.error);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    return OSHMEM_SUCCESS;
}
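
Note: the prev argument drives the branch above: NULL selects a plain atomic add whose fetched result is discarded into dummy_buf, while a non-NULL prev performs fetch-and-add. A hypothetical caller, passing op as NULL on the assumption it is unused for add/fadd:

#include <stddef.h>

struct oshmem_op_t;

/* Signature as shown in the example above. */
int mca_atomic_mxm_fadd(void *target, void *prev, const void *value,
                        size_t nlong, int pe, struct oshmem_op_t *op);

static void sketch_fadd_usage(void *target, int pe)
{
    long value = 1;
    long old = 0;

    /* Plain atomic add: no previous value wanted. */
    mca_atomic_mxm_fadd(target, NULL, &value, sizeof(long), pe, NULL);

    /* Fetch-and-add: the previous remote value lands in 'old'. */
    mca_atomic_mxm_fadd(target, &old, &value, sizeof(long), pe, NULL);
    (void)old;
}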
Code example #26
File: spml_ucx.c Project: 00datman/ompi
int mca_spml_ucx_add_procs(oshmem_proc_t** procs, size_t nprocs)
{
    size_t i, n;
    int rc = OSHMEM_ERROR;
    int my_rank = oshmem_my_proc_id();
    ucs_status_t err;
    ucp_address_t *wk_local_addr;
    size_t wk_addr_len;
    int *wk_roffs, *wk_rsizes;
    char *wk_raddrs;


    mca_spml_ucx.ucp_peers = (ucp_peer_t *) calloc(nprocs, sizeof(*(mca_spml_ucx.ucp_peers)));
    if (NULL == mca_spml_ucx.ucp_peers) {
        goto error;
    }

    err = ucp_worker_get_address(mca_spml_ucx.ucp_worker, &wk_local_addr, &wk_addr_len);
    if (err != UCS_OK) {
        goto error;
    }
    dump_address(my_rank, (char *)wk_local_addr, wk_addr_len);

    rc = oshmem_shmem_xchng(wk_local_addr, wk_addr_len, nprocs,
            (void **)&wk_raddrs, &wk_roffs, &wk_rsizes);
    if (rc != OSHMEM_SUCCESS) {
        goto error;
    }

    opal_progress_register(spml_ucx_progress);

    /* Get the EP connection requests for all the processes from modex */
    for (n = 0; n < nprocs; ++n) {
        i = (my_rank + n) % nprocs;
        dump_address(i, (char *)(wk_raddrs + wk_roffs[i]), wk_rsizes[i]);
        err = ucp_ep_create(mca_spml_ucx.ucp_worker, 
                (ucp_address_t *)(wk_raddrs + wk_roffs[i]),
                &mca_spml_ucx.ucp_peers[i].ucp_conn);
        if (UCS_OK != err) {
            SPML_ERROR("ucp_ep_create failed!!!\n");
            goto error2;
        }
        procs[i]->num_transports = 1;
        procs[i]->transport_ids = spml_ucx_transport_ids;
    }

    ucp_worker_release_address(mca_spml_ucx.ucp_worker, wk_local_addr);
    free(wk_raddrs);
    free(wk_rsizes);
    free(wk_roffs);

    SPML_VERBOSE(50, "*** ADDED PROCS ***");
    return OSHMEM_SUCCESS;

error2:
    for (i = 0; i < nprocs; ++i) {
        if (mca_spml_ucx.ucp_peers[i].ucp_conn) {
            ucp_ep_destroy(mca_spml_ucx.ucp_peers[i].ucp_conn);
        }
    }
    if (mca_spml_ucx.ucp_peers)
        free(mca_spml_ucx.ucp_peers);
    if (wk_raddrs)
        free(wk_raddrs);
    if (wk_rsizes)
        free(wk_rsizes);
    if (wk_roffs)
        free(wk_roffs);
error:
    rc = OSHMEM_ERR_OUT_OF_RESOURCE;
    SPML_ERROR("add procs FAILED rc=%d", rc);
    return rc;
}
Code example #27
File: spml_yoda.c Project: 00datman/ompi
/**
 * shmem_get reads data from a remote address
 * in the symmetric heap via RDMA READ.
 * Get operation:
 * 1. Get the rkey to the remote address.
 * 2. Allocate a get request.
 * 3. Allocate a temporary pre-registered buffer
 *    to copy the data to.
 * 4. Init the request descriptor with remote side
 *    data and local side data.
 * 5. Read the remote buffer to a pre-registered
 *    buffer on the local PE using RDMA READ.
 * 6. Copy the received data to dst_addr if an
 *    intermediate pre-register buffer was used.
 * 7. Clear the request and return.
 *
 * src_addr - address on the remote pe.
 * size - the number of bytes to be read.
 * dst_addr - address on the local pe.
 * src - the pe of the remote process.
 */
int mca_spml_yoda_get(void* src_addr, size_t size, void* dst_addr, int src)
{
    int rc = OSHMEM_SUCCESS;
    sshmem_mkey_t *r_mkey, *l_mkey;
    void* rva;
    unsigned ncopied = 0;
    unsigned int frag_size = 0;
    char *p_src, *p_dst;
    int i;
    int nfrags;
    mca_bml_base_btl_t* bml_btl = NULL;
    mca_btl_base_segment_t* segment;
    mca_btl_base_descriptor_t* des = NULL;
    mca_spml_yoda_rdma_frag_t* frag = NULL;
    struct mca_spml_yoda_getreq_parent get_holder;
    struct yoda_btl *ybtl;
    int btl_id = 0;
    int get_via_send;
    mca_btl_base_registration_handle_t *local_handle, *remote_handle = NULL;
    mca_spml_yoda_get_request_t* getreq = NULL;

    /* If there is nothing to get, it's OK. */
    if (0 >= size) {
        return rc;
    }

    /* Find bml_btl and its global btl_id */
    bml_btl = get_next_btl(src, &btl_id);
    if (!bml_btl) {
        SPML_ERROR("cannot reach %d pe: no appropriate btl found", oshmem_my_proc_id());
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }
    /* Check if the btl has a GET method; if it doesn't, use SEND */
    get_via_send = ! ( (bml_btl->btl->btl_flags & (MCA_BTL_FLAGS_GET)) &&
                       (bml_btl->btl->btl_flags & (MCA_BTL_FLAGS_PUT)) );

    /* Get rkey of remote PE (src proc) which must be on memheap*/
    r_mkey = mca_memheap_base_get_cached_mkey(src, src_addr, btl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   src, src_addr);
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }

#if SPML_YODA_DEBUG == 1
    SPML_VERBOSE(100, "get: pe:%d src=%p -> dst: %p sz=%d. src_rva=%p, %s",
                 src, src_addr, dst_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    ybtl = &mca_spml_yoda.btl_type_map[btl_id];

    if (ybtl->btl->btl_register_mem) {
        assert(ybtl->btl->btl_registration_handle_size == r_mkey->len);
        remote_handle = (mca_btl_base_registration_handle_t *) r_mkey->u.data;
    }

    nfrags = 1;

    /* check if we are doing a get into an shm-attached segment and,
     * if so, just do memcpy
     */
    if ((YODA_BTL_SM == ybtl->btl_type || YODA_BTL_VADER == ybtl->btl_type)
            && mca_memheap_base_can_local_copy(r_mkey, src_addr)) {
        memcpy(dst_addr, (void *) rva, size);
        /* must call progress here to avoid deadlock. Scenario:
         * pe1 polls pe2 via shm get. pe2 tries to get a static variable from node one, which goes to the sm btl.
         * In this case pe2 is stuck forever because pe1 never calls opal_progress.
         * Maybe we do not need to call progress on every get() here but rather once in a while.
         */
        opal_progress();
        return OSHMEM_SUCCESS;
    }

    l_mkey = mca_memheap.memheap_get_local_mkey(dst_addr,
                                                btl_id);
    /*
     * Need a copy if local memory has not been registered or
     * we do the GET via SEND
     */
    frag_size = ncopied;
    if ((NULL == l_mkey) || get_via_send) {
        calc_nfrags_get (bml_btl, size, &frag_size, &nfrags, get_via_send);
    }

    p_src = (char*) (unsigned long) rva;
    p_dst = (char*) dst_addr;
    get_holder.active_count = 0;

    for (i = 0; i < nfrags; i++) {
        /**
         * Allocating a get request from a pre-allocated
         * and pre-registered free list.
         */
        getreq = mca_spml_yoda_getreq_alloc(src);
        assert(getreq);
        getreq->p_dst = NULL;
        frag = &getreq->get_frag;
        getreq->parent = &get_holder;

        ncopied = i < nfrags - 1 ? frag_size :(unsigned) ((char *) dst_addr + size - p_dst);
        frag->allocated = 0;
        /* Prepare destination descriptor*/
        memcpy(&frag->rdma_segs[0].base_seg,
                r_mkey->u.data,
                r_mkey->len);

        frag->rdma_segs[0].base_seg.seg_len = (get_via_send ? ncopied + SPML_YODA_SEND_CONTEXT_SIZE : ncopied);
        if (get_via_send) {
            frag->use_send = 1;
            frag->allocated = 1;
            /**
             * Allocate a temporary buffer on the local PE.
             * The local buffer will store the data read
             * from the remote address.
             */
            mca_spml_yoda_bml_alloc(bml_btl,
                                    &des,
                                    MCA_BTL_NO_ORDER,
                                    (int)frag_size,
                                    MCA_BTL_DES_SEND_ALWAYS_CALLBACK,
                                    get_via_send);
            if (OPAL_UNLIKELY(!des || !des->des_segments)) {
                SPML_ERROR("shmem OOM error need %d bytes", ncopied);
                SPML_ERROR("src=%p nfrags = %d frag_size=%d",
                           src_addr, nfrags, frag_size);
                rc = OSHMEM_ERR_FATAL;
                goto exit_fatal;
            }

            segment = des->des_segments;
            spml_yoda_prepare_for_get((void*)segment->seg_addr.pval, ncopied, (void*)p_src, oshmem_my_proc_id(), (void*)p_dst, (void*) getreq);
            des->des_cbfunc = mca_spml_yoda_get_response_completion;
            des->des_cbdata = frag;

            OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_gets, 1);
        }
        else {
            /*
             * Register src memory if do GET via GET
             */
            if (NULL == l_mkey && ybtl->btl->btl_register_mem) {
                local_handle = ybtl->btl->btl_register_mem (ybtl->btl, bml_btl->btl_endpoint, p_dst, ncopied,
                                                            MCA_BTL_REG_FLAG_LOCAL_WRITE);

                if (NULL == local_handle) {
                    SPML_ERROR("%s: failed to register destination memory %p.",
                               btl_type2str(ybtl->btl_type), p_dst);
                }

                frag->local_handle = local_handle;
            } else {
                local_handle = ((mca_spml_yoda_context_t*)l_mkey->spml_context)->registration;
                frag->local_handle = NULL;
            }

            frag->rdma_segs[0].base_seg.seg_addr.lval = (uintptr_t) p_src;
            getreq->p_dst = (uint64_t*) p_dst;
            frag->size = ncopied;

            OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_gets, 1);
        }

        /**
         * Initialize the remote data fragment
         * with remote address data required for
         * executing RDMA READ from a remote buffer.
         */

        frag->rdma_req = getreq;

        /**
         *  Do GET operation
         */
        if (get_via_send) {
            rc = mca_bml_base_send(bml_btl, des, MCA_SPML_YODA_GET);
            if (1 == rc)
                rc = OSHMEM_SUCCESS;
        } else {
            rc = mca_bml_base_get(bml_btl, p_dst, (uint64_t) (intptr_t) p_src, local_handle,
                                  remote_handle, ncopied, 0, 0, mca_spml_yoda_get_completion, frag);
        }

        if (OPAL_UNLIKELY(OSHMEM_SUCCESS != rc)) {
            if (OSHMEM_ERR_OUT_OF_RESOURCE == rc) {
                /* No free resources, Block on completion here */
                oshmem_request_wait_completion(&getreq->req_get.req_base.req_oshmem);
                return OSHMEM_SUCCESS;
            } else {
                SPML_ERROR("oshmem_get: error %d", rc);
                goto exit_fatal;
            }
        }
        p_dst += ncopied;
        p_src += ncopied;
        OPAL_THREAD_ADD32(&get_holder.active_count, 1);
    }

    /* revisit if we really need this for self and sm */
    /* if (YODA_BTL_SELF == ybtl->btl_type) */
    opal_progress();

    /* Wait for completion on request */
    while (get_holder.active_count > 0)
        oshmem_request_wait_completion(&getreq->req_get.req_base.req_oshmem);

    return rc;

exit_fatal:
    if (OSHMEM_SUCCESS != rc) {
        oshmem_shmem_abort(rc);
    }
    return rc;
}
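The loop above splits one logical GET into nfrags fragments of at most frag_size bytes; every fragment but the last is full-sized, and the last one takes whatever remains. Below is a minimal, self-contained sketch of that arithmetic. calc_nfrags_demo and its ceiling division are illustrative assumptions, not the actual calc_nfrags_get() helper from spml_yoda.c.

#include <stdio.h>

/* Hypothetical stand-in for the fragment-size bookkeeping used by the
 * GET/PUT loops: nfrags = ceil(size / frag_size), and the last fragment
 * receives the remainder. */
static void calc_nfrags_demo(size_t size, unsigned frag_size)
{
    unsigned nfrags = (unsigned) ((size + frag_size - 1) / frag_size);
    size_t offset = 0;
    unsigned i;

    for (i = 0; i < nfrags; i++) {
        /* same ternary pattern as in the loops above */
        unsigned ncopied = (i < nfrags - 1) ? frag_size
                                            : (unsigned) (size - offset);
        printf("frag %u: copy %u bytes at offset %zu\n", i, ncopied, offset);
        offset += ncopied;
    }
}

int main(void)
{
    calc_nfrags_demo(10000, 4096); /* prints 4096, 4096, 1808 */
    return 0;
}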
Code example #28
mca_spml_mkey_t *mca_spml_yoda_register(void* addr,
                                        size_t size,
                                        uint64_t shmid,
                                        int *count)
{
    int i;
    mca_btl_base_descriptor_t* des = NULL;
    const opal_datatype_t *datatype = &opal_datatype_wchar;
    opal_convertor_t convertor;
    mca_spml_mkey_t *mkeys;
    struct yoda_btl *ybtl;
    oshmem_proc_t *proc_self;
    mca_spml_yoda_context_t* yoda_context;
    struct iovec iov;
    uint32_t iov_count = 1;


    SPML_VERBOSE(10, "address %p len %llu", addr, (unsigned long long)size);
    *count = 0;
    /* make sure everything is initialized to 0 */
    mkeys = (mca_spml_mkey_t *) calloc(1,
                                       mca_spml_yoda.n_btls * sizeof(*mkeys));
    if (!mkeys) {
        return NULL;
    }

    proc_self = oshmem_proc_group_find(oshmem_group_all, oshmem_my_proc_id());
    /* create convertor */
    OBJ_CONSTRUCT(&convertor, opal_convertor_t);

    mca_bml.bml_register( MCA_SPML_YODA_PUT,
                          mca_yoda_put_callback,
                          NULL );
    mca_bml.bml_register( MCA_SPML_YODA_GET,
                          mca_yoda_get_callback,
                          NULL );
    mca_bml.bml_register( MCA_SPML_YODA_GET_RESPONSE,
                          mca_yoda_get_response_callback,
                          NULL );
    /* Register proc memory in every rdma BTL. */
    for (i = 0; i < mca_spml_yoda.n_btls; i++) {

        ybtl = &mca_spml_yoda.btl_type_map[i];
        mkeys[i].va_base = addr;

        if (!ybtl->use_cnt) {
            SPML_VERBOSE(10,
                         "%s: present but not in use. SKIP registration",
                         btl_type2str(ybtl->btl_type));
            continue;
        }

        /* If we have shared memory, just save its id */
        if (YODA_BTL_SM == ybtl->btl_type
                && MEMHEAP_SHM_INVALID != (int) MEMHEAP_SHM_GET_ID(shmid)) {
            mkeys[i].u.key = shmid;
            mkeys[i].va_base = 0;
            continue;
        }

        yoda_context = calloc(1, sizeof(*yoda_context));
        mkeys[i].spml_context = yoda_context;

        yoda_context->registration = NULL;
        if (NULL != ybtl->btl->btl_prepare_src) {
            /* initialize convertor for source descriptor*/
            opal_convertor_copy_and_prepare_for_recv(proc_self->proc_convertor,
                                                     datatype,
                                                     size,
                                                     addr,
                                                     0,
                                                     &convertor);

            if (NULL != ybtl->btl->btl_mpool && NULL != ybtl->btl->btl_mpool->mpool_register) {
                iov.iov_len = size;
                iov.iov_base = NULL;

                opal_convertor_pack(&convertor, &iov, &iov_count, &size);
                ybtl->btl->btl_mpool->mpool_register(ybtl->btl->btl_mpool,
                                                     iov.iov_base, size, 0, &yoda_context->registration);
            }
            /* re-initialize the convertor: the opal_convertor_pack() call above consumed it */
            opal_convertor_copy_and_prepare_for_recv(proc_self->proc_convertor,
                                                     datatype,
                                                     size,
                                                     addr,
                                                     0,
                                                     &convertor);

            /* register source memory */
            des = ybtl->btl->btl_prepare_src(ybtl->btl,
                                             0,
                                             yoda_context->registration,
                                             &convertor,
                                             MCA_BTL_NO_ORDER,
                                             0,
                                             &size,
                                             0);
            if (NULL == des) {
                SPML_ERROR("%s: failed to register source memory.",
                           btl_type2str(ybtl->btl_type));
                /* skip this BTL rather than dereference a NULL descriptor below */
                continue;
            }

            yoda_context->btl_src_descriptor = des;
            mkeys[i].u.data = des->des_src;
            mkeys[i].len  = ybtl->btl->btl_seg_size;
        }

        SPML_VERBOSE(5,
                     "rank %d btl %s address 0x%p len %llu shmid 0x%X|0x%X",
                     oshmem_proc_local_proc->proc_name.vpid, btl_type2str(ybtl->btl_type),
                     mkeys[i].va_base, (unsigned long long)size, MEMHEAP_SHM_GET_TYPE(shmid), MEMHEAP_SHM_GET_ID(shmid));
    }
    OBJ_DESTRUCT(&convertor);
    *count = mca_spml_yoda.n_btls;
    return mkeys;
}
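mca_spml_yoda_register() hands back one mkey per BTL (count is set to mca_spml_yoda.n_btls), with per-BTL state parked in spml_context. A hedged sketch of how a caller might walk and release that array; release_mkeys_sketch is an assumed name, the project headers are assumed for mca_spml_mkey_t, and the real cleanup lives in the SPML's deregister entry point, which also releases BTL descriptors and registrations.

#include <stdlib.h>

/* Illustrative only: iterate the per-BTL mkeys and free the calloc'ed
 * yoda contexts. Does NOT release btl_src_descriptor or the mpool
 * registration -- the actual deregister path handles those. */
static void release_mkeys_sketch(mca_spml_mkey_t *mkeys, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        free(mkeys[i].spml_context); /* NULL-safe: free(NULL) is a no-op */
    }
    free(mkeys);
}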
Code example #29
File: spml_yoda.c  Project: 00datman/ompi
static inline int mca_spml_yoda_put_internal(void *dst_addr,
                                             size_t size,
                                             void *src_addr,
                                             int dst,
                                             int is_nb)
{
    int rc = OSHMEM_SUCCESS;
    mca_spml_yoda_put_request_t *putreq = NULL;
    mca_bml_base_btl_t* bml_btl;
    mca_btl_base_descriptor_t* des = NULL;
    mca_btl_base_segment_t* segment;
    mca_spml_yoda_rdma_frag_t* frag;
    int nfrags;
    int i;
    unsigned ncopied = 0;
    unsigned int frag_size = 0;
    char *p_src, *p_dst;
    void* rva;
    sshmem_mkey_t *r_mkey;
    int btl_id = 0;
    struct yoda_btl *ybtl;
    int put_via_send;
    mca_btl_base_registration_handle_t *local_handle = NULL, *remote_handle = NULL;

    /* If there is nothing to put, it's OK. */
    if (0 >= size) {
        return OSHMEM_SUCCESS;
    }

    /* Find bml_btl and its global btl_id */
    bml_btl = get_next_btl(dst, &btl_id);
    if (!bml_btl) {
        SPML_ERROR("cannot reach %d pe: no appropriate btl found", oshmem_my_proc_id());
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }
    /* Check if btl has PUT method. If it doesn't - use SEND*/
    put_via_send = !(bml_btl->btl->btl_flags & MCA_BTL_FLAGS_PUT);

    /* Get rkey of remote PE (dst proc) which must be on memheap*/
    r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, btl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   dst, dst_addr);
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }

#if SPML_YODA_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
                 dst, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    ybtl = &mca_spml_yoda.btl_type_map[btl_id];

    if (ybtl->btl->btl_register_mem) {
        assert (r_mkey->len == ybtl->btl->btl_registration_handle_size);
        remote_handle = (mca_btl_base_registration_handle_t *) r_mkey->u.data;
    }

    /* Check whether the put targets an shm-attached segment;
     * if so, just do a memcpy.
     */
    if ((YODA_BTL_SM == ybtl->btl_type || YODA_BTL_VADER == ybtl->btl_type)
            && mca_memheap_base_can_local_copy(r_mkey, dst_addr)) {
        memcpy((void *) (unsigned long) rva, src_addr, size);
        return OSHMEM_SUCCESS;
    }

    /* Only blocking PUT is supported for now => the src buffer always needs to be copied */
    calc_nfrags_put (bml_btl, size, &frag_size, &nfrags, put_via_send);

    p_src = (char*) src_addr;
    p_dst = (char*) (unsigned long) rva;
    for (i = 0; i < nfrags; i++) {
        /* Allocating send request from free list */
        putreq = mca_spml_yoda_putreq_alloc(dst);
        frag = &putreq->put_frag;
        /* every fragment is frag_size bytes except the last, which takes the remainder */
        ncopied = i < nfrags - 1 ? frag_size : (unsigned) ((char *) src_addr + size - p_src);

        /* Preparing source buffer */

        /* allocate buffer */
        mca_spml_yoda_bml_alloc(bml_btl,
                                &des,
                                MCA_BTL_NO_ORDER,
                                ncopied,
                                MCA_BTL_DES_SEND_ALWAYS_CALLBACK,
                                put_via_send);

        if (OPAL_UNLIKELY(!des || !des->des_segments)) {
            SPML_ERROR("src=%p nfrags = %d frag_size=%d",
                       src_addr, nfrags, frag_size);
            SPML_ERROR("shmem OOM error need %d bytes", ncopied);
            opal_show_help("help-oshmem-spml-yoda.txt",
                           "internal_oom_error",
                           true,
                           "Put", ncopied, mca_spml_yoda.bml_alloc_threshold);
            rc = OSHMEM_ERR_FATAL;
            goto exit_fatal;
        }

        /* copy data to allocated buffer*/
        segment = des->des_segments;
        spml_yoda_prepare_for_put((void*)segment->seg_addr.pval, ncopied,
                                  (void*)p_src, (void*)p_dst, put_via_send);

        if (!put_via_send && ybtl->btl->btl_register_mem) {
            local_handle = ybtl->btl->btl_register_mem (ybtl->btl, bml_btl->btl_endpoint,
                                                        segment->seg_addr.pval, ncopied, 0);
            if (NULL == local_handle) {
                /* No free resources, Block on completion here */
                SPML_ERROR("shmem error: OSHMEM_ERR_OUT_OF_RESOURCE");
                oshmem_request_wait_completion(&putreq->req_put.req_base.req_oshmem);
            }
        }

        frag->rdma_segs[0].base_seg.seg_addr.lval = (uintptr_t) p_dst;
        frag->rdma_segs[0].base_seg.seg_len = (put_via_send ?
                                                   ncopied + SPML_YODA_SEND_CONTEXT_SIZE :
                                                   ncopied);
        frag->rdma_req = putreq;

        /* initialize callback data for put*/
        des->des_cbdata = frag;
        des->des_cbfunc = mca_spml_yoda_put_completion;

        OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_puts, 1);
        /* put the data to remote side */
        if (!put_via_send) {
            rc = mca_bml_base_put (bml_btl, segment->seg_addr.pval, (uint64_t) (intptr_t) p_dst,
                                   local_handle, remote_handle, ncopied, 0, 0, mca_spml_yoda_put_completion_rdma,
                                   des);
        } else {
            rc = mca_bml_base_send(bml_btl, des, MCA_SPML_YODA_PUT);
            if (1 == rc)
                rc = OSHMEM_SUCCESS;
        }

        if (OPAL_UNLIKELY(OSHMEM_SUCCESS != rc)) {
            if (OSHMEM_ERR_OUT_OF_RESOURCE == rc) {
                /* No free resources, Block on completion here */
                SPML_ERROR("shmem error: OSHMEM_ERR_OUT_OF_RESOURCE");
                oshmem_request_wait_completion(&putreq->req_put.req_base.req_oshmem);
            } else {
                SPML_ERROR("shmem error");
            }
            /* exit with error */
            SPML_ERROR("shmem error: ret = %i, send_pe = %i, dest_pe = %i",
                       rc, oshmem_my_proc_id(), dst);
            rc = OSHMEM_ERR_FATAL;
            goto exit_fatal;
        }
        p_src += ncopied;
        p_dst += ncopied;
    }

    return rc;

exit_fatal:
    if (OSHMEM_SUCCESS != rc) {
        oshmem_shmem_abort(rc);
    }
    return rc;
}
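Because mca_spml_yoda_put_internal() takes an is_nb flag, the public put entry points are presumably thin wrappers around it. A sketch under that assumption; the wrapper names follow the usual SPML convention, and the handle parameter of the non-blocking variant is an assumption, not verified against this revision of spml_yoda.c.

/* Blocking put: delegate with is_nb = 0 */
int mca_spml_yoda_put(void *dst_addr, size_t size, void *src_addr, int dst)
{
    return mca_spml_yoda_put_internal(dst_addr, size, src_addr, dst, 0);
}

/* Non-blocking put: same call with is_nb = 1 (handle unused in this sketch) */
int mca_spml_yoda_put_nb(void *dst_addr, size_t size, void *src_addr, int dst,
                         void **handle)
{
    return mca_spml_yoda_put_internal(dst_addr, size, src_addr, dst, 1);
}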
Code example #30
int my_pe(void)
{
    RUNTIME_CHECK_INIT();
    return oshmem_my_proc_id();
}
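my_pe() is the legacy SGI SHMEM query for the calling PE's rank, which OSHMEM keeps for backward compatibility. A minimal usage sketch, assuming the classic start_pes()/num_pes() API alongside it:

#include <stdio.h>
#include <shmem.h>

int main(void)
{
    start_pes(0); /* legacy initializer; shmem_init() is the modern form */
    printf("hello from pe %d of %d\n", my_pe(), num_pes());
    return 0;
}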