Example #1
int mca_spml_ikrit_fence(shmem_ctx_t ctx)
{
    mxm_peer_t *peer;
    opal_list_item_t *item;

    SPML_VERBOSE(20,
                 "Into fence with %d active puts on %d pes",
                 mca_spml_ikrit.n_active_puts, (int)opal_list_get_size(&mca_spml_ikrit.active_peers));

    /* puts (unless they are send-sync) are completed lazily by the remote
     * side, i.e. only when the remote decides to ack the window, which can
     * take hundreds of ms. Speed things up by doing an explicit fence */
    while (NULL != (item = opal_list_remove_first(&mca_spml_ikrit.active_peers))) {
        peer = spml_ikrit_container_of(item, mxm_peer_t, link);
        peer->n_active_puts = 0;
        peer->need_fence = 0;
        mca_spml_ikrit_mxm_fence(peer - mca_spml_ikrit.mxm_peers);
    }

    while (0 < mca_spml_ikrit.n_mxm_fences || 0 < mca_spml_ikrit.n_active_gets) {
        opal_progress();
    }

    SPML_VERBOSE(20, "fence completed");
    return OSHMEM_SUCCESS;
}
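
Example #1 drains an intrusive list of active peers and recovers each enclosing mxm_peer_t from its embedded link via spml_ikrit_container_of. Below is a minimal standalone sketch of that container_of pattern in plain C; the struct and helper names are hypothetical stand-ins, not the real OSHMEM types.

#include <stddef.h>
#include <stdio.h>

struct link { struct link *next; };

struct peer {
    int pe;
    int need_fence;
    struct link node;              /* embedded list linkage */
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
    struct peer peers[3] = { {0, 1, {NULL}}, {1, 1, {NULL}}, {2, 1, {NULL}} };
    struct link *head = NULL, *item;
    int i;

    for (i = 0; i < 3; i++) {          /* mark all peers "active" */
        peers[i].node.next = head;
        head = &peers[i].node;
    }
    while (NULL != (item = head)) {    /* drain, like the fence loop above */
        struct peer *p = container_of(item, struct peer, node);
        head = item->next;
        p->need_fence = 0;
        printf("fenced pe %d\n", p->pe);
    }
    return 0;
}
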
Example #2
static mca_spml_base_module_t*
mca_spml_yoda_component_init(int* priority,
                             bool enable_progress_threads,
                             bool enable_mpi_threads)
{
    SPML_VERBOSE( 10, "in yoda, my priority is %d\n", mca_spml_yoda.priority);

    /* check the incoming best priority before reporting our own */
    if ((*priority) > mca_spml_yoda.priority) {
        *priority = mca_spml_yoda.priority;
        return NULL;
    }
    *priority = mca_spml_yoda.priority;

    /* We use BML/BTL and need to start it */
    if (!mca_bml_base_inited()) {
        SPML_VERBOSE(10, "starting bml\n");
        if (OSHMEM_SUCCESS
                != mca_bml_base_init(enable_progress_threads,
                                     enable_mpi_threads)) {
            return NULL ;
        }
    }

    mca_spml_yoda.n_active_puts = 0;

    return &mca_spml_yoda.super;
}
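
The init function above participates in priority-based component selection. The sketch below illustrates the assumed handshake: the framework offers the best priority seen so far, and a component either disqualifies itself by returning NULL or reports its own priority. All names here are hypothetical illustrations of the contract, not the real MCA framework API.

#include <stdio.h>

struct module { const char *name; };

static struct module *component_init(struct module *self, int my_priority,
                                      int *priority)
{
    if (*priority > my_priority)
        return NULL;            /* a better component was already found */
    *priority = my_priority;
    return self;
}

int main(void)
{
    struct module yoda = { "yoda" }, ucx = { "ucx" };
    int best = -1;
    struct module *sel = NULL, *m;

    if ((m = component_init(&yoda, 20, &best)))
        sel = m;
    if ((m = component_init(&ucx, 21, &best)))
        sel = m;
    printf("selected %s (priority %d)\n", sel->name, best);
    return 0;
}
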
Example #3
int mca_spml_ikrit_fence(void)
{
    mxm_peer_t *peer;
    opal_list_item_t *item;

    SPML_VERBOSE(20,
                 "Into fence with %d active puts on %d pes",
                 mca_spml_ikrit.n_active_puts, (int)opal_list_get_size(&mca_spml_ikrit.active_peers));

    /* puts (unless they are send-sync) are completed lazily by the remote
     * side, i.e. only when the remote decides to ack the window, which can
     * take hundreds of ms. Speed things up by doing an explicit fence */
    while (NULL != (item = opal_list_remove_first(&mca_spml_ikrit.active_peers))) {
        peer = (mxm_peer_t *) item;
        peer->n_active_puts = 0;
        peer->need_fence = 0;
        mca_spml_ikrit_mxm_fence(peer->pe);
    }

    while (0 < mca_spml_ikrit.n_mxm_fences) {
        oshmem_request_wait_any_completion();
    }

    SPML_VERBOSE(20, "fence completed");
    return OSHMEM_SUCCESS;
}
Example #4
static inline int mca_spml_ikrit_get_shm(void *src_addr,
                                         size_t size,
                                         void *dst_addr,
                                         int src)
{
    int ptl_id;
    void *rva;
    sshmem_mkey_t *r_mkey;

    ptl_id = get_ptl_id(src);
    /**
     * Get the address of the remote rkey.
     **/
    if (ptl_id != MXM_PTL_SHM)
        return OSHMEM_ERROR;

    r_mkey = mca_memheap_base_get_cached_mkey(src, src_addr, ptl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   src, src_addr);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    if (!mca_memheap_base_can_local_copy(r_mkey, src_addr))
        return OSHMEM_ERROR;

    SPML_VERBOSE(100,
                 "shm get: pe:%d src=%p -> dst: %p sz=%d. src_rva=%p, %s",
                 src, src_addr, dst_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
    memcpy(dst_addr, (void *) (unsigned long) rva, size);
    opal_progress();
    return OSHMEM_SUCCESS;
}
Example #5
int mca_spml_ikrit_enable(bool enable)
{
    SPML_VERBOSE(50, "*** ikrit ENABLED ****");
    if (false == enable) {
        return OSHMEM_SUCCESS;
    }

    opal_free_list_init (&mca_spml_base_put_requests,
                         sizeof(mca_spml_ikrit_put_request_t),
                         opal_cache_line_size,
                         OBJ_CLASS(opal_free_list_item_t),
                         0,
                         opal_cache_line_size,
                         mca_spml_ikrit.free_list_num,
                         mca_spml_ikrit.free_list_max,
                         mca_spml_ikrit.free_list_inc,
                         NULL, 0, NULL, NULL, NULL);

    opal_free_list_init (&mca_spml_base_get_requests,
                         sizeof(mca_spml_ikrit_get_request_t),
                         opal_cache_line_size,
                         OBJ_CLASS(opal_free_list_item_t),
                         0,
                         opal_cache_line_size,
                         mca_spml_ikrit.free_list_num,
                         mca_spml_ikrit.free_list_max,
                         mca_spml_ikrit.free_list_inc,
                         NULL, 0, NULL, NULL, NULL);

    mca_spml_ikrit.enabled = true;

    return OSHMEM_SUCCESS;
}
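
The free_list_num / free_list_max / free_list_inc parameters passed to opal_free_list_init above describe a grow-on-demand pool: an initial allocation, a hard cap, and a growth increment. The plain-C sketch below illustrates that contract only; it is not the opal_free_list_t implementation, and the names are hypothetical.

#include <stdio.h>

struct pool { int allocated; int num; int max; int inc; };

/* grow by 'inc' elements (first time by 'num'), never exceeding 'max';
 * a negative 'max' means unlimited */
static int pool_grow(struct pool *p)
{
    int want = p->allocated ? p->inc : p->num;
    if (p->max >= 0 && p->allocated + want > p->max)
        want = p->max - p->allocated;
    if (want <= 0)
        return -1;              /* pool exhausted */
    p->allocated += want;
    return want;
}

int main(void)
{
    struct pool p = { 0, 1024, 2048, 16 };      /* num, max, inc */
    printf("first grow: %d\n", pool_grow(&p));  /* 1024 */
    printf("next grow:  %d\n", pool_grow(&p));  /* 16 */
    return 0;
}
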
Example #6
static inline void put_completion_cb(void *ctx)
{
    mca_spml_ikrit_put_request_t *put_req = (mca_spml_ikrit_put_request_t *) ctx;
    mxm_peer_t *peer;

    OPAL_THREAD_ADD_FETCH32(&mca_spml_ikrit.n_active_puts, -1);
    /* TODO: keep pointer to peer in the request */
    peer = &mca_spml_ikrit.mxm_peers[put_req->pe];

    /* if this was the last put in progress, remove the peer from the list so that we do not need an explicit fence */
#if SPML_IKRIT_PUT_DEBUG == 1
    if (peer) {
        if (peer->n_active_puts <= 0) {
            /* this can actually happen because fence forces the ref count to 0 while puts may still be in flight */
            SPML_VERBOSE(1, "pe %d n_active_puts %d", put_req->pe, peer->n_active_puts);
        }
    }

    if (put_req->mxm_req.base.state != MXM_REQ_COMPLETED)
        SPML_ERROR("oops: pe %d uncompleted request state %d", put_req->pe, put_req->mxm_req.base.state);
#endif

    if (0 < peer->n_active_puts) {
        peer->n_active_puts--;
        if (0 == peer->n_active_puts &&
                (put_req->mxm_req.opcode == MXM_REQ_OP_PUT_SYNC)) {
            opal_list_remove_item(&mca_spml_ikrit.active_peers, &peer->link);
            peer->need_fence = 0;
        }
    }

    free_put_req(put_req);
}
Example #7
/* for now only do blocking copy send */
int mca_spml_ikrit_send(void* buf,
                        size_t size,
                        int dst,
                        mca_spml_base_put_mode_t mode)
{
    mxm_send_req_t req;
    char dummy_buf[1];

    SPML_VERBOSE(100,
                 "sending %p size %d to %d, mode %d",
                 buf, (int)size, dst, (int)mode);
    req.opcode = MXM_REQ_OP_SEND;

    req.op.send.tag = oshmem_my_proc_id();

    req.base.state = MXM_REQ_NEW;
    req.base.mq = mca_spml_ikrit.mxm_mq;
    req.base.conn = mca_spml_ikrit.mxm_peers[dst].mxm_conn;
    req.flags             = MXM_REQ_SEND_FLAG_BLOCKING;
    req.base.completed_cb = NULL;

    req.base.data_type = MXM_REQ_DATA_BUFFER;
    req.base.data.buffer.ptr = buf == NULL ? dummy_buf : buf;
    req.base.data.buffer.length = size == 0 ? sizeof(dummy_buf) : size;
    req.base.data.buffer.memh = NULL;

    SPML_IKRIT_MXM_POST_SEND(req);

    mca_spml_irkit_req_wait(&req.base);
    if (req.base.error != MXM_OK) {
        return OSHMEM_ERROR;
    }

    return OSHMEM_SUCCESS;
}
Example #8
/* make the global btl list & map */
static int create_btl_list(void)
{
    int btl_type;
    char *btl_name;
    int size;
    opal_list_item_t *item;
    mca_btl_base_selected_module_t *btl_sm;
    int i;

    size = opal_list_get_size(&mca_btl_base_modules_initialized);
    if (0 >= size) {
        SPML_ERROR("no btl(s) available");
        return OSHMEM_ERROR;
    }
    SPML_VERBOSE(50, "found %d capable btls", size);

    mca_spml_yoda.btl_type_map =
            (struct yoda_btl *) calloc(size, sizeof(struct yoda_btl));
    if (!mca_spml_yoda.btl_type_map)
        return OSHMEM_ERROR;

    mca_spml_yoda.n_btls = 0;
    for (i = 0, item = opal_list_get_first(&mca_btl_base_modules_initialized);
         item != opal_list_get_end(&mca_btl_base_modules_initialized);
         item = opal_list_get_next(item), i++) {

        btl_sm = (mca_btl_base_selected_module_t *) item;
        btl_name = btl_sm->btl_component->btl_version.mca_component_name;
        btl_type = btl_name_to_id(btl_name);

        SPML_VERBOSE(50, "found btl (%s) btl_type=%s", btl_name, btl_type2str(btl_type));

        /* Note: we setup bml_btl in create_btl_idx() */
        mca_spml_yoda.btl_type_map[mca_spml_yoda.n_btls].bml_btl = NULL;
        mca_spml_yoda.btl_type_map[mca_spml_yoda.n_btls].btl =
                btl_sm->btl_module;
        mca_spml_yoda.btl_type_map[mca_spml_yoda.n_btls].btl_type = btl_type;
        mca_spml_yoda.n_btls++;
    }

    if (0 == mca_spml_yoda.n_btls) {
        SPML_ERROR("can not find any suitable btl");
        return OSHMEM_ERROR;
    }

    return OSHMEM_SUCCESS;
}
Example #9
int mca_spml_ucx_del_procs(ompi_proc_t** procs, size_t nprocs)
{
    int my_rank = oshmem_my_proc_id();
    size_t num_reqs, max_reqs;
    void *dreq, **dreqs;
    ucp_ep_h ep;
    size_t i, n;

    oshmem_shmem_barrier();

    if (!mca_spml_ucx.ucp_peers) {
        return OSHMEM_SUCCESS;
    }

    max_reqs = mca_spml_ucx.num_disconnect;
    if (max_reqs > nprocs) {
        max_reqs = nprocs;
    }

    dreqs = malloc(sizeof(*dreqs) * max_reqs);
    if (dreqs == NULL) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    num_reqs = 0;

    for (i = 0; i < nprocs; ++i) {
        n  = (i + my_rank) % nprocs;
        ep = mca_spml_ucx.ucp_peers[n].ucp_conn;
        if (ep == NULL) {
            continue;
        }

        SPML_VERBOSE(10, "disconnecting from peer %d", n);
        dreq = ucp_disconnect_nb(ep);
        if (dreq != NULL) {
            if (UCS_PTR_IS_ERR(dreq)) {
                SPML_ERROR("ucp_disconnect_nb(%d) failed: %s", n,
                           ucs_status_string(UCS_PTR_STATUS(dreq)));
            } else {
                dreqs[num_reqs++] = dreq;
            }
        }

        mca_spml_ucx.ucp_peers[n].ucp_conn = NULL;

        if ((int)num_reqs >= mca_spml_ucx.num_disconnect) {
            mca_spml_ucx_waitall(dreqs, &num_reqs);
        }
    }

    mca_spml_ucx_waitall(dreqs, &num_reqs);
    free(dreqs);

    opal_pmix.fence(NULL, 0);
    free(mca_spml_ucx.ucp_peers);
    return OSHMEM_SUCCESS;
}
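
Note the (i + my_rank) % nprocs indexing above: each rank walks the peer list starting at its own rank, so the initial burst of disconnects is spread across peers instead of every rank hitting peer 0 first. A minimal standalone demo of that staggering:

#include <stdio.h>

int main(void)
{
    int nprocs = 4;
    for (int my_rank = 0; my_rank < nprocs; my_rank++) {
        printf("rank %d order:", my_rank);
        for (int i = 0; i < nprocs; i++)
            printf(" %d", (i + my_rank) % nprocs);  /* staggered start */
        printf("\n");
    }
    return 0;
}
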
Example #10
static int mca_spml_ikrit_component_register(void)
{
    char *v;

    mca_spml_ikrit_param_register_int("free_list_num", 1024,
                                      0,
                                      &mca_spml_ikrit.free_list_num);
    mca_spml_ikrit_param_register_int("free_list_max", 1024,
                                      0,
                                      &mca_spml_ikrit.free_list_max);
    mca_spml_ikrit_param_register_int("free_list_inc", 16,
                                      0,
                                      &mca_spml_ikrit.free_list_inc);
    mca_spml_ikrit_param_register_int("bulk_connect", 1,
                                      0,
                                      &mca_spml_ikrit.bulk_connect);
    mca_spml_ikrit_param_register_int("bulk_disconnect", 1,
                                      0,
                                      &mca_spml_ikrit.bulk_disconnect);
    mca_spml_ikrit_param_register_int("priority", 20,
                                      "[integer] ikrit priority",
                                      &mca_spml_ikrit.priority);
    mca_spml_ikrit_param_register_int("hw_rdma_channel", 0,
                                       "create separate reliable connection channel",
                                       &mca_spml_ikrit.hw_rdma_channel);

    if (!mca_spml_ikrit.hw_rdma_channel)
        v = "ud,self";
    else
        v = "rc,ud,self";
    mca_spml_ikrit_param_register_string("mxm_tls",
                                         v,
                                         "[string] TL channels for MXM",
                                         &mca_spml_ikrit.mxm_tls);

     mca_spml_ikrit_param_register_int("np",
#if MXM_API <= MXM_VERSION(2,0)
                                           128,
#else
                                           0,
#endif
                                           "[integer] Minimal allowed job's NP to activate ikrit", &mca_spml_ikrit.np);
#if MXM_API >= MXM_VERSION(2,0)
    mca_spml_ikrit_param_register_int("unsync_conn_max", 8,
                                      "[integer] Max number of connections that do not require notification of PUT operation remote completion. Increasing this number improves efficiency of p2p communication but increases overhead of shmem_fence/shmem_quiet/shmem_barrier",
                                      &mca_spml_ikrit.unsync_conn_max);
#endif

    if (oshmem_num_procs() < mca_spml_ikrit.np) {
        SPML_VERBOSE(1,
                     "Not enough ranks (%d<%d), disqualifying spml/ikrit",
                     oshmem_num_procs(), mca_spml_ikrit.np);
        return OSHMEM_ERR_NOT_AVAILABLE;
    }

    return OSHMEM_SUCCESS;
}
Example #11
static inline mca_bml_base_btl_t *get_next_btl(int dst, int *btl_id)
{
    mca_bml_base_endpoint_t* endpoint;
    mca_bml_base_btl_t* bml_btl;
    oshmem_proc_t *proc;
    mca_bml_base_btl_array_t *btl_array = 0;
    int size = 0;
    int shmem_index = 0;

    /* get endpoint and btl */
    proc = oshmem_proc_group_all(dst);
    if (!proc) {
        SPML_ERROR("Can not find destination proc for pe=%d", dst);
        return NULL ;
    }

    endpoint = (mca_bml_base_endpoint_t*) proc->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_BML];
    if (!endpoint) {
        SPML_ERROR("pe=%d proc has no endpoint", dst);
        return NULL ;
    }

    /* At the moment always return first transport */
    size = mca_bml_base_btl_array_get_size(btl_array = &endpoint->btl_rdma);

    if (0 >= size) {
        /* Possibly this is the SM BTL with KNEM disabled? Then we should use
         * send-based get/put.
         *
         * This hack is necessary when KNEM is not available: we still want
         * to use send/recv of the SM BTL for put and get, but the SM BTL is
         * no longer in the rdma list.
         */
        size = mca_bml_base_btl_array_get_size(btl_array =
                &endpoint->btl_eager);
        if (0 < size) {
            /* Choose a SHMEM-capable btl from the eager array. No filtering
             * for now: take the first (filtering could be added on demand). */
            for (shmem_index = 0; shmem_index < size; shmem_index++) {
                bml_btl = mca_bml_base_btl_array_get_index(btl_array, shmem_index);
                _find_btl_id(bml_btl);
                size = 1;
                break;
            }
        }
    }

    bml_btl = mca_bml_base_btl_array_get_index(btl_array, shmem_index);
    *btl_id = proc->transport_ids[0];

#if SPML_YODA_DEBUG == 1
    assert(*btl_id >= 0 && *btl_id < YODA_BTL_MAX);
    SPML_VERBOSE(100, "pe=%d reachable via btl %s %d", dst,
                 bml_btl->btl->btl_component->btl_version.mca_component_name, *btl_id);
#endif
    return bml_btl;
}
Example #12
static mca_spml_base_module_t*
mca_spml_ucx_component_init(int* priority,
                              bool enable_progress_threads,
                              bool enable_mpi_threads)
{
    SPML_VERBOSE( 10, "in ucx, my priority is %d\n", mca_spml_ucx.priority);

    if ((*priority) > mca_spml_ucx.priority) {
        *priority = mca_spml_ucx.priority;
        return NULL ;
    }
    *priority = mca_spml_ucx.priority;

    if (OSHMEM_SUCCESS != spml_ucx_init())
        return NULL ;

    SPML_VERBOSE(50, "*** ucx initialized ****");
    return &mca_spml_ucx.super;
}
Example #13
int mca_spml_ucx_enable(bool enable)
{
    SPML_VERBOSE(50, "*** ucx ENABLED ****");
    if (false == enable) {
        return OSHMEM_SUCCESS;
    }

    mca_spml_ucx.enabled = true;

    return OSHMEM_SUCCESS;
}
Example #14
int mca_spml_yoda_enable(bool enable)
{
    SPML_VERBOSE(50, "*** yoda ENABLED ****");
    if (false == enable) {
        return OSHMEM_SUCCESS;
    }

    OBJ_CONSTRUCT(&mca_spml_yoda.lock, opal_mutex_t);

    /**
     * If we get here, this is the SPML that was selected for the run. We
     * should take ownership of the put and get request lists and
     * initialize them with the size of our own requests.
     */

    opal_free_list_init (&mca_spml_base_put_requests,
                         sizeof(mca_spml_yoda_put_request_t),
                         opal_cache_line_size,
                         OBJ_CLASS(mca_spml_yoda_put_request_t),
                         0,
                         opal_cache_line_size,
                         mca_spml_yoda.free_list_num,
                         mca_spml_yoda.free_list_max,
                         mca_spml_yoda.free_list_inc,
                         NULL, 0, NULL, NULL, NULL);

    opal_free_list_init (&mca_spml_base_get_requests,
                         sizeof(mca_spml_yoda_get_request_t),
                         opal_cache_line_size,
                         OBJ_CLASS(mca_spml_yoda_get_request_t),
                         0,
                         opal_cache_line_size,
                         mca_spml_yoda.free_list_num,
                         mca_spml_yoda.free_list_max,
                         mca_spml_yoda.free_list_inc,
                         NULL, 0, NULL, NULL, NULL);

    mca_spml_yoda.enabled = true;

    /* The following line resolves an issue with BTL tcp and SPML yoda. In this case the
     * atomic_basic_lock(root_rank) function may behave like a DoS attack on root_rank, since
     * all the processes will do shmem_int_get from root_rank. These calls go through
     * bml active messaging and trigger replies in libevent on the root rank. If the flag
     * OPAL_EVLOOP_ONCE is not set, then libevent will continuously progress the constantly
     * incoming events, causing root_rank to get stuck in the libevent loop.
     */
    opal_progress_set_event_flag(OPAL_EVLOOP_NONBLOCK | OPAL_EVLOOP_ONCE);

#if OSHMEM_WAIT_COMPLETION_DEBUG == 1
    condition_dbg_init();
#endif

    return OSHMEM_SUCCESS;
}
Example #15
static int mca_spml_ikrit_get_helper(mxm_send_req_t *sreq,
                                     void *src_addr,
                                     size_t size,
                                     void *dst_addr,
                                     int src)
{
    /* The shmem spec states that get() operations are blocking, so it is enough
     * to have a single mxm request. We also count on mxm doing the copy */
    void *rva;
    sshmem_mkey_t *r_mkey;
    int ptl_id;

    ptl_id = get_ptl_id(src);
    /* already tried to send via shm and failed. go via rdma */
    if (ptl_id == MXM_PTL_SHM)
        ptl_id = MXM_PTL_RDMA;

    /**
     * Get the address of the remote rkey.
     **/
    r_mkey = mca_memheap.memheap_get_cached_mkey(src,
                                                 src_addr,
                                                 ptl_id,
                                                 &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   src, src_addr);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

    SPML_VERBOSE(100,
                 "get: pe:%d ptl=%d src=%p -> dst: %p sz=%d. src_rva=%p, %s",
                 src, ptl_id, src_addr, dst_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));

    /* mxm does not really care about the get lkey */
    sreq->base.mq = mca_spml_ikrit.mxm_mq;
    sreq->base.conn = mca_spml_ikrit.mxm_peers[src]->mxm_conn;
    sreq->base.data_type = MXM_REQ_DATA_BUFFER;
    sreq->base.data.buffer.ptr = dst_addr;
    sreq->base.data.buffer.length = size;
#if MXM_API < MXM_VERSION(2,0)
    sreq->base.data.buffer.memh = NULL;
    sreq->op.mem.remote_memh = NULL;
#else
    sreq->op.mem.remote_mkey = to_mxm_mkey(r_mkey);
#endif
    sreq->opcode = MXM_REQ_OP_GET;
    sreq->op.mem.remote_vaddr = (intptr_t) rva;
    sreq->base.state = MXM_REQ_NEW;

    return OSHMEM_SUCCESS;
}
Example #16
/* blocking receive */
int mca_spml_ikrit_recv(void* buf, size_t size, int src)
{
    mxm_error_t ret = MXM_OK;
    mxm_recv_req_t req;
    char dummy_buf[1];

    /* tag mask 0 matches any tag */
    SPML_VERBOSE(100,
                 "want to recv from src %d, size %d buf %p",
                 src, (int)size, buf);
    req.tag = src == SHMEM_ANY_SOURCE ? 0 : src;
    req.tag_mask = src == SHMEM_ANY_SOURCE ? 0 : 0xFFFFFFFF;

    req.base.state = MXM_REQ_NEW;
    req.base.mq = mca_spml_ikrit.mxm_mq;
    req.base.conn = NULL;
#if MXM_API < MXM_VERSION(2,0)
    req.base.flags           = MXM_REQ_FLAG_BLOCKING;
#endif
    req.base.completed_cb = NULL;

    req.base.data_type = MXM_REQ_DATA_BUFFER;
    req.base.data.buffer.ptr = buf == NULL ? dummy_buf : buf;
    req.base.data.buffer.length = size == 0 ? sizeof(dummy_buf) : size;
    req.base.data.buffer.memh = NULL;

    ret = mxm_req_recv(&req);
    if (MXM_OK != ret) {
        return OSHMEM_ERROR;
    }
    mca_spml_irkit_req_wait(&req.base);
    if (MXM_OK != req.base.error) {
        return OSHMEM_ERROR;
    }
    SPML_VERBOSE(100,
                 "recvd from tag %d len %d",
                 req.completion.sender_tag, (int)req.completion.actual_len);

    return OSHMEM_SUCCESS;
}
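
The receive above uses tag_mask 0 to match any source and 0xFFFFFFFF for an exact match. The sketch below shows the tag/mask matching rule this assumes; it is an illustration in plain C, not the MXM matching code.

#include <stdint.h>
#include <stdio.h>

/* a message matches when the masked tags agree */
static int tag_matches(uint32_t msg_tag, uint32_t want_tag, uint32_t mask)
{
    return (msg_tag & mask) == (want_tag & mask);
}

int main(void)
{
    printf("%d\n", tag_matches(7, 0, 0x00000000));   /* 1: wildcard */
    printf("%d\n", tag_matches(7, 7, 0xFFFFFFFF));   /* 1: exact hit */
    printf("%d\n", tag_matches(7, 5, 0xFFFFFFFF));   /* 0: exact miss */
    return 0;
}
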
Example #17
static inline int set_mxm_hw_rdma_tls()
{
    if (!mca_spml_ikrit.hw_rdma_channel) {
        return check_mxm_hw_tls("MXM_OSHMEM_TLS", getenv("MXM_OSHMEM_TLS"));
    }
    opal_setenv("MXM_OSHMEM_HW_RDMA_RC_QP_LIMIT", "-1", 0, &environ);
    opal_setenv("MXM_OSHMEM_HW_RDMA_TLS", "rc", 0, &environ);
    SPML_VERBOSE(5, "Additional communication channel is enabled. Transports are: %s",
                 getenv("MXM_OSHMEM_HW_RDMA_TLS"));

    return check_mxm_hw_tls("MXM_OSHMEM_HW_RDMA_TLS",
            getenv("MXM_OSHMEM_HW_RDMA_TLS"));
}
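
opal_setenv is called with overwrite=0 above, so a value the user already exported wins. The tiny demo below shows the same "set only if unset" semantics using the standard POSIX setenv() as a stand-in for opal_setenv:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    setenv("MY_TLS", "user-set", 1);          /* pretend the user exported it */
    setenv("MY_TLS", "rc", 0);                /* overwrite=0: keeps the user value */
    printf("MY_TLS=%s\n", getenv("MY_TLS"));  /* prints "user-set" */
    return 0;
}
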
Example #18
static mca_spml_base_module_t*
mca_spml_yoda_component_init(int* priority,
                             bool enable_progress_threads,
                             bool enable_mpi_threads)
{
    SPML_VERBOSE( 10, "in yoda, my priority is %d\n", mca_spml_yoda.priority);

    /* check the incoming best priority before reporting our own */
    if ((*priority) > mca_spml_yoda.priority) {
        *priority = mca_spml_yoda.priority;
        return NULL;
    }
    *priority = mca_spml_yoda.priority;

    /* We use BML/BTL and need to start it */
    if (!mca_bml_base_inited()) {
        SPML_VERBOSE(10, "can not select yoda because ompi has no bml component");
        return NULL;
    }

    mca_spml_yoda.n_active_puts = 0;
    mca_spml_yoda.n_active_gets = 0;

    return &mca_spml_yoda.super;
}
Example #19
int mca_spml_yoda_enable(bool enable)
{
    SPML_VERBOSE(50, "*** yoda ENABLED ****");
    if (false == enable) {
        return OSHMEM_SUCCESS;
    }

    OBJ_CONSTRUCT(&mca_spml_yoda.lock, opal_mutex_t);

    /**
     * If we get here, this is the SPML that was selected for the run. We
     * should take ownership of the put and get request lists and
     * initialize them with the size of our own requests.
     */

    ompi_free_list_init_new(&mca_spml_base_put_requests,
                            sizeof(mca_spml_yoda_put_request_t),
                            opal_cache_line_size,
                            OBJ_CLASS(mca_spml_yoda_put_request_t),
                            0,
                            opal_cache_line_size,
                            mca_spml_yoda.free_list_num,
                            mca_spml_yoda.free_list_max,
                            mca_spml_yoda.free_list_inc,
                            NULL );

    ompi_free_list_init_new(&mca_spml_base_get_requests,
                            sizeof(mca_spml_yoda_get_request_t),
                            opal_cache_line_size,
                            OBJ_CLASS(mca_spml_yoda_get_request_t),
                            0,
                            opal_cache_line_size,
                            mca_spml_yoda.free_list_num,
                            mca_spml_yoda.free_list_max,
                            mca_spml_yoda.free_list_inc,
                            NULL );

    mca_spml_yoda.enabled = true;

#if OSHMEM_WAIT_COMPLETION_DEBUG == 1
    condition_dbg_init();
#endif

    return OSHMEM_SUCCESS;
}
Example #20
static inline void put_completion_cb(void *ctx)
{
    mca_spml_ikrit_put_request_t *put_req = (mca_spml_ikrit_put_request_t *) ctx;
    mxm_peer_t *peer;

    OPAL_THREAD_ADD32(&mca_spml_ikrit.n_active_puts, -1);
    peer = mca_spml_ikrit.mxm_peers[put_req->pe];

    /* if this was the last put in progress, remove the peer from the list so that we do not need an explicit fence */
#if SPML_IKRIT_PUT_DEBUG == 1
    if (peer) {
        if (peer->n_active_puts <= 0) {
            /* this can actually happen because fence forces the ref count to 0 while puts may still be in flight */
            SPML_VERBOSE(1, "pe %d n_active_puts %d", put_req->pe, peer->n_active_puts);
        }
    }

    if (put_req->mxm_req.base.state != MXM_REQ_COMPLETED)
        SPML_ERROR("oops: pe %d uncompleted request state %d", put_req->pe, put_req->mxm_req.base.state);
#endif

    if (0 < peer->n_active_puts) {
        peer->n_active_puts--;
#if MXM_API < MXM_VERSION(2,0)
        if (0 == peer->n_active_puts &&
                (put_req->mxm_req.base.flags & MXM_REQ_FLAG_SEND_SYNC)) {
            opal_list_remove_item(&mca_spml_ikrit.active_peers, &peer->super);
            peer->need_fence = 0;
        }
#else
        if (0 == peer->n_active_puts &&
                (put_req->mxm_req.opcode == MXM_REQ_OP_PUT_SYNC)) {
            opal_list_remove_item(&mca_spml_ikrit.active_peers, &peer->super);
            peer->need_fence = 0;
        }
#endif
    }

    put_req->req_put.req_base.req_spml_complete = true;
    put_req->req_put.req_base.req_oshmem.req_status.SHMEM_ERROR =
            OSHMEM_SUCCESS;
    oshmem_request_complete(&put_req->req_put.req_base.req_oshmem, 1);
    oshmem_request_free((oshmem_request_t**) &put_req);
}
Example #21
static void mca_spml_ucx_waitall(void **reqs, size_t *count_p)
{
    ucs_status_t status;
    size_t i;

    SPML_VERBOSE(10, "waiting for %d disconnect requests", *count_p);
    for (i = 0; i < *count_p; ++i) {
        do {
            opal_progress();
            status = ucp_request_test(reqs[i], NULL);
        } while (status == UCS_INPROGRESS);
        if (status != UCS_OK) {
            SPML_ERROR("disconnect request failed: %s",
                       ucs_status_string(status));
        }
        ucp_request_release(reqs[i]);
        reqs[i] = NULL;
    }

    *count_p = 0;
}
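
The wait loop above drives progress, polls each request until it leaves the in-progress state, then releases it. Below is a generic, self-contained sketch of that pattern; progress(), request_test() and the integer "requests" are hypothetical stand-ins for opal_progress()/ucp_request_test() and real request handles.

#include <stddef.h>
#include <stdio.h>

enum status { INPROGRESS, OK, ERR };

static void progress(void) { /* would advance the communication engine */ }

/* a fake request: completes after its counter reaches zero */
static enum status request_test(int *req) { return --(*req) > 0 ? INPROGRESS : OK; }

static void waitall(int *reqs[], size_t *count_p)
{
    for (size_t i = 0; i < *count_p; ++i) {
        enum status st;
        do {
            progress();                 /* keep the engine moving while polling */
            st = request_test(reqs[i]);
        } while (st == INPROGRESS);
        if (st != OK)
            fprintf(stderr, "request %zu failed\n", i);
        reqs[i] = NULL;                 /* "release" the request */
    }
    *count_p = 0;                       /* caller can reuse the array */
}

int main(void)
{
    int a = 3, b = 1;
    int *reqs[] = { &a, &b };
    size_t n = 2;
    waitall(reqs, &n);
    printf("remaining: %zu\n", n);
    return 0;
}
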
Example #22
/* simple buffered put implementation. NOT IN USE
 * Problems:
 * - slightly worse performance than the implementation based on non-buffered put
 * - fence complexity is O(n_active_connections) instead of O(n_connections_with_outstanding_puts).
 *   The latter is bounded by the network RTT & mxm ack timer.
 */
int mca_spml_ikrit_put_simple(void* dst_addr,
                              size_t size,
                              void* src_addr,
                              int dst)
{
    void *rva;
    mxm_send_req_t mxm_req;
    mxm_wait_t wait;
    int ptl_id;
    sshmem_mkey_t *r_mkey;
    static int count;

    ptl_id = get_ptl_id(dst);
    /* Get rkey of remote PE (dst proc) which must be on memheap  */
    r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, ptl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   dst, dst_addr);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

#if SPML_IKRIT_PUT_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d ptl=%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
            dst, ptl_id, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif
    if (ptl_id == MXM_PTL_SHM) {

        if (mca_memheap_base_can_local_copy(r_mkey, dst_addr)) {
            memcpy((void *) (unsigned long) rva, src_addr, size);
            /* call progress as often as we would have with regular put */
            if (++count % SPML_IKRIT_PACKETS_PER_SYNC == 0)
                mxm_progress(mca_spml_ikrit.mxm_context);
            return OSHMEM_SUCCESS;
        }
        /* segment not mapped - fall back to rdma */
        ptl_id = MXM_PTL_RDMA;
        r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, ptl_id, &rva);
        if (!r_mkey) {
            SPML_ERROR("pe=%d: %p is not address of shared variable",
                       dst, dst_addr);
            oshmem_shmem_abort(-1);
            return OSHMEM_ERROR;
        }
    }

#if SPML_IKRIT_PUT_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d ptl=%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
            dst, ptl_id, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    /* fill out request */
    mxm_req.base.mq = mca_spml_ikrit.mxm_mq;
#if MXM_API <  MXM_VERSION(2,0)
    mxm_req.base.flags = MXM_REQ_FLAG_BLOCKING;
#else
    mxm_req.flags = MXM_REQ_SEND_FLAG_BLOCKING;
#endif
    mxm_req.base.conn = mca_spml_ikrit.mxm_peers[dst]->mxm_conn;
    mxm_req.base.data_type = MXM_REQ_DATA_BUFFER;
    mxm_req.base.data.buffer.ptr = src_addr;
    mxm_req.base.data.buffer.length = size;
    mxm_req.base.completed_cb = 0;
    mxm_req.base.context = 0;
    mxm_req.opcode = MXM_REQ_OP_PUT;
    mxm_req.op.mem.remote_vaddr = (intptr_t) rva;
    mxm_req.base.state = MXM_REQ_NEW;
    mxm_req.base.error = MXM_OK;

#if MXM_API < MXM_VERSION(2, 0)
    mxm_req.base.data.buffer.memh = NULL;
    mxm_req.op.mem.remote_memh = NULL;
#else
    mxm_req.op.mem.remote_mkey = to_mxm_mkey(r_mkey);
#endif

    if (mca_spml_ikrit.mxm_peers[dst]->need_fence == 0) {
        opal_list_append(&mca_spml_ikrit.active_peers,
                         &mca_spml_ikrit.mxm_peers[dst]->super);
        mca_spml_ikrit.mxm_peers[dst]->need_fence = 1;
    }

    SPML_IKRIT_MXM_POST_SEND(mxm_req);

    wait.req = &mxm_req.base;
    wait.state = (mxm_req_state_t)(MXM_REQ_SENT | MXM_REQ_COMPLETED);
    wait.progress_cb = NULL;
    wait.progress_arg = NULL;
    mxm_wait(&wait);

    return OSHMEM_SUCCESS;
}
Example #23
static inline int mca_spml_yoda_put_internal(void *dst_addr,
                                             size_t size,
                                             void *src_addr,
                                             int dst,
                                             int is_nb)
{
    int rc = OSHMEM_SUCCESS;
    mca_spml_yoda_put_request_t *putreq = NULL;
    mca_bml_base_btl_t* bml_btl;
    mca_btl_base_descriptor_t* des = NULL;
    mca_btl_base_segment_t* segment;
    mca_spml_yoda_rdma_frag_t* frag;
    int nfrags;
    int i;
    unsigned ncopied = 0;
    unsigned int frag_size = 0;
    char *p_src, *p_dst;
    void* rva;
    sshmem_mkey_t *r_mkey;
    int btl_id = 0;
    struct yoda_btl *ybtl;
    int put_via_send;
    mca_btl_base_registration_handle_t *local_handle = NULL, *remote_handle = NULL;

    /* If there is nothing to put, it's OK. */
    if (0 >= size) {
        return OSHMEM_SUCCESS;
    }

    /* Find bml_btl and its global btl_id */
    bml_btl = get_next_btl(dst, &btl_id);
    if (!bml_btl) {
        SPML_ERROR("cannot reach %d pe: no appropriate btl found", oshmem_my_proc_id());
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }
    /* Check if btl has PUT method. If it doesn't - use SEND*/
    put_via_send = !(bml_btl->btl->btl_flags & MCA_BTL_FLAGS_PUT);

    /* Get rkey of remote PE (dst proc) which must be on memheap*/
    r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, btl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   dst, dst_addr);
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }

#if SPML_YODA_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
                 dst, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    ybtl = &mca_spml_yoda.btl_type_map[btl_id];

    if (ybtl->btl->btl_register_mem) {
        assert (r_mkey->len == ybtl->btl->btl_registration_handle_size);
        remote_handle = (mca_btl_base_registration_handle_t *) r_mkey->u.data;
    }

    /* check if we are doing a put into an shm-attached segment and, if so,
     * just do a memcpy
     */
    if ((YODA_BTL_SM == ybtl->btl_type || YODA_BTL_VADER == ybtl->btl_type)
            && mca_memheap_base_can_local_copy(r_mkey, dst_addr)) {
        memcpy((void *) (unsigned long) rva, src_addr, size);
        return OSHMEM_SUCCESS;
    }

    /* We support only blocking PUT for now => we always need a copy of the src buffer */
    calc_nfrags_put (bml_btl, size, &frag_size, &nfrags, put_via_send);

    p_src = (char*) src_addr;
    p_dst = (char*) (unsigned long) rva;
    for (i = 0; i < nfrags; i++) {
        /* Allocating send request from free list */
        putreq = mca_spml_yoda_putreq_alloc(dst);
        frag = &putreq->put_frag;
        ncopied = i < nfrags - 1 ? frag_size :(unsigned) ((char *) src_addr + size - p_src);

        /* Preparing source buffer */

        /* allocate buffer */
        mca_spml_yoda_bml_alloc(bml_btl,
                                &des,
                                MCA_BTL_NO_ORDER,
                                ncopied,
                                MCA_BTL_DES_SEND_ALWAYS_CALLBACK,
                                put_via_send);

        if (OPAL_UNLIKELY(!des || !des->des_segments )) {
            SPML_ERROR("src=%p nfrags = %d frag_size=%d",
                       src_addr, nfrags, frag_size);
            SPML_ERROR("shmem OOM error need %d bytes", ncopied);
            opal_show_help("help-oshmem-spml-yoda.txt",
                           "internal_oom_error",
                           true,
                           "Put", ncopied, mca_spml_yoda.bml_alloc_threshold);
            rc = OSHMEM_ERR_FATAL;
            goto exit_fatal;
        }

        /* copy data to allocated buffer*/
        segment = des->des_segments;
        spml_yoda_prepare_for_put((void*)segment->seg_addr.pval, ncopied,
                                  (void*)p_src, (void*)p_dst, put_via_send);

        if (!put_via_send && ybtl->btl->btl_register_mem) {
            local_handle = ybtl->btl->btl_register_mem (ybtl->btl, bml_btl->btl_endpoint,
                                                        segment->seg_addr.pval, ncopied, 0);
            if (NULL == local_handle) {
                /* No free resources, Block on completion here */
                SPML_ERROR("shmem error: OSHMEM_ERR_OUT_OF_RESOURCE");
                oshmem_request_wait_completion(&putreq->req_put.req_base.req_oshmem);
            }
        }

        frag->rdma_segs[0].base_seg.seg_addr.lval = (uintptr_t) p_dst;
        frag->rdma_segs[0].base_seg.seg_len = (put_via_send ?
                                                   ncopied + SPML_YODA_SEND_CONTEXT_SIZE :
                                                   ncopied);
        frag->rdma_req = putreq;

        /* initialize callback data for put*/
        des->des_cbdata = frag;
        des->des_cbfunc = mca_spml_yoda_put_completion;

        OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_puts, 1);
        /* put the data to remote side */
        if (!put_via_send) {
            rc = mca_bml_base_put (bml_btl, segment->seg_addr.pval, (uint64_t) (intptr_t) p_dst,
                                   local_handle, remote_handle, ncopied, 0, 0, mca_spml_yoda_put_completion_rdma,
                                   des);
        } else {
            rc = mca_bml_base_send(bml_btl, des, MCA_SPML_YODA_PUT);
            if (1 == rc)
                rc = OSHMEM_SUCCESS;
        }

        if (OPAL_UNLIKELY(OSHMEM_SUCCESS != rc)) {
            if (OSHMEM_ERR_OUT_OF_RESOURCE == rc) {
                /* No free resources, Block on completion here */
                SPML_ERROR("shmem error: OSHMEM_ERR_OUT_OF_RESOURCE");
                oshmem_request_wait_completion(&putreq->req_put.req_base.req_oshmem);
            } else {
                SPML_ERROR("shmem error");
            }
            /* exit with error */
            SPML_ERROR("shmem error: ret = %i, send_pe = %i, dest_pe = %i",
                       rc, oshmem_my_proc_id(), dst);
            rc = OSHMEM_ERR_FATAL;
            goto exit_fatal;
        }
        p_src += ncopied;
        p_dst += ncopied;
    }

    return rc;

exit_fatal:
    if (OSHMEM_SUCCESS != rc) {
        oshmem_shmem_abort(rc);
    }
    return rc;
}
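
The put loop above splits size bytes into nfrags fragments of frag_size bytes each, with the last fragment taking the remainder ((char *)src_addr + size - p_src). A standalone sketch of just that arithmetic; the names are illustrative, not the yoda internals:

#include <stdio.h>

static void put_fragments(const char *src, size_t size, size_t frag_size)
{
    size_t nfrags = (size + frag_size - 1) / frag_size;  /* round up */
    const char *p_src = src;

    for (size_t i = 0; i < nfrags; i++) {
        /* every fragment is frag_size bytes except the last, which
         * carries the remainder */
        size_t ncopied = (i < nfrags - 1) ? frag_size
                                          : (size_t)(src + size - p_src);
        printf("frag %zu: %zu bytes\n", i, ncopied);
        p_src += ncopied;
    }
}

int main(void)
{
    char buf[10] = "012345678";
    put_fragments(buf, sizeof(buf), 4);  /* 4 + 4 + 2 bytes */
    return 0;
}
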
Example #24
/* for each proc create transport ids which are indexes into the global
 * btl list & map
 */
static int create_btl_idx(int dst_pe)
{
    oshmem_proc_t *proc;
    int btl_id;
    mca_bml_base_endpoint_t* endpoint;
    mca_bml_base_btl_t* bml_btl = 0;
    int i, size;
    mca_bml_base_btl_array_t *btl_array;
    int shmem_index = -1;

    proc = oshmem_proc_group_find(oshmem_group_all, dst_pe);
    endpoint = (mca_bml_base_endpoint_t*) proc->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_BML];
    assert(endpoint);
    size = mca_bml_base_btl_array_get_size(btl_array = &endpoint->btl_rdma);

    if (0 >= size) {
        /* Possibly this is the SM BTL with KNEM disabled? Then we should use
         * send-based get/put.
         *
         * This hack is necessary when KNEM is not available: we still want
         * to use send/recv of the SM BTL for put and get, but the SM BTL is
         * no longer in the rdma list.
         */
        size = mca_bml_base_btl_array_get_size(btl_array =
                &endpoint->btl_eager);
        if (0 < size) {
            /* Choose a SHMEM-capable btl from the eager array. No filtering
             * for now: take the first (filtering could be added on demand). */
            shmem_index = 0;
            size = 1;
        }
        else {
            SPML_ERROR("no SHMEM capable transport for dest pe=%d", dst_pe);
            return OSHMEM_ERROR;
        }
    }

    proc->transport_ids = (char *) malloc(size * sizeof(char));
    if (!proc->transport_ids)
        return OSHMEM_ERROR;

    proc->num_transports = size;

    for (i = 0; i < size; i++) {
        bml_btl = mca_bml_base_btl_array_get_index(btl_array,
                                                   (shmem_index >= 0) ?
                                                       (shmem_index) : (i));
        btl_id = _find_btl_id(bml_btl);
        SPML_VERBOSE(50,
                     "dst_pe(%d) use btl (%s) btl_id=%d",
                     dst_pe, bml_btl->btl->btl_component->btl_version.mca_component_name, btl_id);
        if (0 > btl_id) {
            SPML_ERROR("unknown btl: dst_pe(%d) use btl (%s) btl_id=%d",
                       dst_pe, bml_btl->btl->btl_component->btl_version.mca_component_name, btl_id);
            return OSHMEM_ERROR;
        }
        proc->transport_ids[i] = btl_id;
        mca_spml_yoda.btl_type_map[btl_id].bml_btl = bml_btl;
        mca_spml_yoda.btl_type_map[btl_id].use_cnt++;
    }
    return OSHMEM_SUCCESS;
}
Example #25
sshmem_mkey_t *mca_spml_yoda_register(void* addr,
                                        size_t size,
                                        uint64_t shmid,
                                        int *count)
{
    int i;
    sshmem_mkey_t *mkeys;
    struct yoda_btl *ybtl;
    mca_spml_yoda_context_t* yoda_context;

    SPML_VERBOSE(10, "address %p len %llu", addr, (unsigned long long)size);
    *count = 0;
    /* make sure everything is initialized to 0 */
    mkeys = (sshmem_mkey_t *) calloc(1,
                                       mca_spml_yoda.n_btls * sizeof(*mkeys));
    if (!mkeys) {
        return NULL ;
    }

    mca_bml.bml_register( MCA_SPML_YODA_PUT,
                          mca_yoda_put_callback,
                          NULL );
    mca_bml.bml_register( MCA_SPML_YODA_GET,
                          mca_yoda_get_callback,
                          NULL );
    mca_bml.bml_register( MCA_SPML_YODA_GET_RESPONSE,
                          mca_yoda_get_response_callback,
                          NULL );
    /* Register proc memory in every rdma BTL. */
    for (i = 0; i < mca_spml_yoda.n_btls; i++) {

        ybtl = &mca_spml_yoda.btl_type_map[i];
        mkeys[i].va_base = addr;
        mkeys[i].u.key = MAP_SEGMENT_SHM_INVALID;

        if (!ybtl->use_cnt) {
            SPML_VERBOSE(10,
                         "%s: present but not in use. SKIP registration",
                         btl_type2str(ybtl->btl_type));
            continue;
        }

        /* If we have shared memory just save its id */
        if ((YODA_BTL_SM == ybtl->btl_type || YODA_BTL_VADER == ybtl->btl_type)
                && MAP_SEGMENT_SHM_INVALID != (int)shmid) {
            mkeys[i].u.key = shmid;
            mkeys[i].va_base = 0;
            continue;
        }

        yoda_context = calloc(1, sizeof(*yoda_context));
        mkeys[i].spml_context = yoda_context;

        yoda_context->registration = NULL;
        if (ybtl->btl->btl_flags & MCA_BTL_FLAGS_RDMA) {
            if (NULL != ybtl->btl->btl_register_mem) {
                yoda_context->registration = ybtl->btl->btl_register_mem (ybtl->btl, MCA_BTL_ENDPOINT_ANY,
                                                                          addr, size, MCA_BTL_REG_FLAG_ACCESS_ANY);
                if (NULL == yoda_context->registration) {
                    SPML_ERROR("%s: failed to register source memory: addr: %p, size: %u",
                               btl_type2str(ybtl->btl_type), addr, size);
                    /* FIXME some cleanup might be needed here
                     * yoda_context->btl_src_descriptor = NULL;
                     * *count = ???;
                     * free(spml_context);
                     */
                    free(mkeys);
                    return NULL;
                }
            }

            yoda_context->btl_src_descriptor = NULL;
            mkeys[i].u.data = yoda_context->registration;
            mkeys[i].len    = yoda_context->registration ? ybtl->btl->btl_registration_handle_size : 0;
        }

        SPML_VERBOSE(5,
                     "rank %d btl %s va_base: 0x%p len: %d key %llx size %llu",
                     oshmem_proc_pe(oshmem_proc_local()), btl_type2str(ybtl->btl_type),
                     mkeys[i].va_base, mkeys[i].len, (unsigned long long)mkeys[i].u.key, (unsigned long long)size);
    }
    *count = mca_spml_yoda.n_btls;
    return mkeys;
}
Example #26
/**
 * shmem_get reads data from a remote address
 * in the symmetric heap via RDMA READ.
 * Get operation:
 * 1. Get the rkey to the remote address.
 * 2. Allocate a get request.
 * 3. Allocate a temporary pre-registered buffer
 *    to copy the data to.
 * 4. Init the request descriptor with remote side
 *    data and local side data.
 * 5. Read the remote buffer to a pre-registered
 *    buffer on the local PE using RDMA READ.
 * 6. Copy the received data to dst_addr if an
 *    intermediate pre-register buffer was used.
 * 7. Clear the request and return.
 *
 * src_addr - address on remote pe.
 * size - the number of bytes to be read.
 * dst_addr - address on the local pe.
 * src - the pe of remote process.
 */
int mca_spml_yoda_get(void* src_addr, size_t size, void* dst_addr, int src)
{
    int rc = OSHMEM_SUCCESS;
    sshmem_mkey_t *r_mkey, *l_mkey;
    void* rva;
    unsigned ncopied = 0;
    unsigned int frag_size = 0;
    char *p_src, *p_dst;
    int i;
    int nfrags;
    mca_bml_base_btl_t* bml_btl = NULL;
    mca_btl_base_segment_t* segment;
    mca_btl_base_descriptor_t* des = NULL;
    mca_spml_yoda_rdma_frag_t* frag = NULL;
    struct mca_spml_yoda_getreq_parent get_holder;
    struct yoda_btl *ybtl;
    int btl_id = 0;
    int get_via_send;
    mca_btl_base_registration_handle_t *local_handle, *remote_handle = NULL;
    mca_spml_yoda_get_request_t* getreq = NULL;

    /* If there is nothing to get, it's OK. */
    if (0 >= size) {
        return rc;
    }

    /* Find bml_btl and its global btl_id */
    bml_btl = get_next_btl(src, &btl_id);
    if (!bml_btl) {
        SPML_ERROR("cannot reach %d pe: no appropriate btl found", oshmem_my_proc_id());
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }
    /* Check if btl has GET method. If it doesn't - use SEND*/
    get_via_send = ! ( (bml_btl->btl->btl_flags & (MCA_BTL_FLAGS_GET)) &&
                       (bml_btl->btl->btl_flags & (MCA_BTL_FLAGS_PUT)) );

    /* Get rkey of remote PE (src proc) which must be on memheap*/
    r_mkey = mca_memheap_base_get_cached_mkey(src, src_addr, btl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   src, src_addr);
        rc = OSHMEM_ERR_FATAL;
        goto exit_fatal;
    }

#if SPML_YODA_DEBUG == 1
    SPML_VERBOSE(100, "get: pe:%d src=%p -> dst: %p sz=%d. src_rva=%p, %s",
                 src, src_addr, dst_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    ybtl = &mca_spml_yoda.btl_type_map[btl_id];

    if (ybtl->btl->btl_register_mem) {
        assert(ybtl->btl->btl_registration_handle_size == r_mkey->len);
        remote_handle = (mca_btl_base_registration_handle_t *) r_mkey->u.data;
    }

    nfrags = 1;

    /* check if we are doing a get from an shm-attached segment and, if so,
     * just do a memcpy
     */
    if ((YODA_BTL_SM == ybtl->btl_type || YODA_BTL_VADER == ybtl->btl_type)
            && mca_memheap_base_can_local_copy(r_mkey, src_addr)) {
        memcpy(dst_addr, (void *) rva, size);
        /* must call progress here to avoid deadlock. Scenario:
         * pe1 polls pe2 via shm get; pe2 tries to get a static variable from node one, which goes to the sm btl.
         * In this case pe2 is stuck forever because pe1 never calls opal_progress.
         * Maybe we do not need to call progress on every get() here, but rather once in a while.
         */
        opal_progress();
        return OSHMEM_SUCCESS;
    }

    l_mkey = mca_memheap.memheap_get_local_mkey(dst_addr,
                                                btl_id);
    /*
     * Need a copy if the local memory has not been registered or
     * we do GET via SEND
     */
    frag_size = ncopied;
    if ((NULL == l_mkey) || get_via_send) {
        calc_nfrags_get (bml_btl, size, &frag_size, &nfrags, get_via_send);
    }

    p_src = (char*) (unsigned long) rva;
    p_dst = (char*) dst_addr;
    get_holder.active_count = 0;

    for (i = 0; i < nfrags; i++) {
        /**
         * Allocating a get request from a pre-allocated
         * and pre-registered free list.
         */
        getreq = mca_spml_yoda_getreq_alloc(src);
        assert(getreq);
        getreq->p_dst = NULL;
        frag = &getreq->get_frag;
        getreq->parent = &get_holder;

        ncopied = i < nfrags - 1 ? frag_size :(unsigned) ((char *) dst_addr + size - p_dst);
        frag->allocated = 0;
        /* Prepare destination descriptor*/
        memcpy(&frag->rdma_segs[0].base_seg,
                r_mkey->u.data,
                r_mkey->len);

        frag->rdma_segs[0].base_seg.seg_len = (get_via_send ? ncopied + SPML_YODA_SEND_CONTEXT_SIZE : ncopied);
        if (get_via_send) {
            frag->use_send = 1;
            frag->allocated = 1;
            /**
             * Allocate a temporary buffer on the local PE.
             * The local buffer will store the data read
             * from the remote address.
             */
            mca_spml_yoda_bml_alloc(bml_btl,
                                    &des,
                                    MCA_BTL_NO_ORDER,
                                    (int)frag_size,
                                    MCA_BTL_DES_SEND_ALWAYS_CALLBACK,
                                    get_via_send);
            if (OPAL_UNLIKELY(!des || !des->des_segments)) {
                SPML_ERROR("shmem OOM error need %d bytes", ncopied);
                SPML_ERROR("src=%p nfrags = %d frag_size=%d",
                           src_addr, nfrags, frag_size);
                rc = OSHMEM_ERR_FATAL;
                goto exit_fatal;
            }

            segment = des->des_segments;
            spml_yoda_prepare_for_get((void*)segment->seg_addr.pval, ncopied, (void*)p_src, oshmem_my_proc_id(), (void*)p_dst, (void*) getreq);
            des->des_cbfunc = mca_spml_yoda_get_response_completion;
            des->des_cbdata = frag;

            OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_gets, 1);
        }
        else {
            /*
             * Register src memory if we do GET via GET
             */
            if (NULL == l_mkey && ybtl->btl->btl_register_mem) {
                local_handle = ybtl->btl->btl_register_mem (ybtl->btl, bml_btl->btl_endpoint, p_dst, ncopied,
                                                            MCA_BTL_REG_FLAG_LOCAL_WRITE);

                if (NULL == local_handle) {
                    SPML_ERROR("%s: failed to register destination memory %p.",
                               btl_type2str(ybtl->btl_type), p_dst);
                }

                frag->local_handle = local_handle;
            } else {
                local_handle = ((mca_spml_yoda_context_t*)l_mkey->spml_context)->registration;
                frag->local_handle = NULL;
            }

            frag->rdma_segs[0].base_seg.seg_addr.lval = (uintptr_t) p_src;
            getreq->p_dst = (uint64_t*) p_dst;
            frag->size = ncopied;

            OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_gets, 1);
        }

        /**
         * Initialize the remote data fragment
         * with remote address data required for
         * executing RDMA READ from a remote buffer.
         */

        frag->rdma_req = getreq;

        /**
         *  Do GET operation
         */
        if (get_via_send) {
            rc = mca_bml_base_send(bml_btl, des, MCA_SPML_YODA_GET);
            if (1 == rc)
                rc = OSHMEM_SUCCESS;
        } else {
            rc = mca_bml_base_get(bml_btl, p_dst, (uint64_t) (intptr_t) p_src, local_handle,
                                  remote_handle, ncopied, 0, 0, mca_spml_yoda_get_completion, frag);
        }

        if (OPAL_UNLIKELY(OSHMEM_SUCCESS != rc)) {
            if (OSHMEM_ERR_OUT_OF_RESOURCE == rc) {
                /* No free resources, Block on completion here */
                oshmem_request_wait_completion(&getreq->req_get.req_base.req_oshmem);
                return OSHMEM_SUCCESS;
            } else {
                SPML_ERROR("oshmem_get: error %d", rc);
                goto exit_fatal;
            }
        }
        p_dst += ncopied;
        p_src += ncopied;
        OPAL_THREAD_ADD32(&get_holder.active_count, 1);
    }

    /* revisit if we really need this for self and sm */
    /* if (YODA_BTL_SELF == ybtl->btl_type) */
    opal_progress();

    /* Wait for completion on request */
    while (get_holder.active_count > 0)
        oshmem_request_wait_completion(&getreq->req_get.req_base.req_oshmem);

    return rc;

exit_fatal:
    if (OSHMEM_SUCCESS != rc) {
        oshmem_shmem_abort(rc);
    }
    return rc;
}
Example #27
int mca_spml_ikrit_add_procs(ompi_proc_t** procs, size_t nprocs)
{
    spml_ikrit_mxm_ep_conn_info_t *ep_info = NULL;
    spml_ikrit_mxm_ep_conn_info_t *ep_hw_rdma_info = NULL;
    spml_ikrit_mxm_ep_conn_info_t my_ep_info = {{0}};
#if MXM_API < MXM_VERSION(2,0)
    mxm_conn_req_t *conn_reqs;
    int timeout;
#else
    size_t mxm_addr_len = MXM_MAX_ADDR_LEN;
#endif
    mxm_error_t err;
    size_t i, n;
    int rc = OSHMEM_ERROR;
    ompi_proc_t *proc_self;
    int my_rank = oshmem_my_proc_id();

    OBJ_CONSTRUCT(&mca_spml_ikrit.active_peers, opal_list_t);
    /* Allocate connection requests */
#if MXM_API < MXM_VERSION(2,0)
    conn_reqs = malloc(nprocs * sizeof(mxm_conn_req_t));
    if (NULL == conn_reqs) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }
    memset(conn_reqs, 0x0, nprocs * sizeof(mxm_conn_req_t)); /* zero the whole array, not just one element */
#endif
    ep_info = calloc(sizeof(spml_ikrit_mxm_ep_conn_info_t), nprocs);
    if (NULL == ep_info) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }

    if (mca_spml_ikrit.hw_rdma_channel) {
        ep_hw_rdma_info = calloc(sizeof(spml_ikrit_mxm_ep_conn_info_t), nprocs);
        if (NULL == ep_hw_rdma_info) {
            rc = OSHMEM_ERR_OUT_OF_RESOURCE;
            goto bail;
        }
    }

    mca_spml_ikrit.mxm_peers = (mxm_peer_t **) malloc(nprocs
            * sizeof(*(mca_spml_ikrit.mxm_peers)));
    if (NULL == mca_spml_ikrit.mxm_peers) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }

#if MXM_API < MXM_VERSION(2,0)
    if (OSHMEM_SUCCESS
            != spml_ikrit_get_ep_address(&my_ep_info, MXM_PTL_SELF)) {
        rc = OSHMEM_ERROR;
        goto bail;
    }
    if (OSHMEM_SUCCESS
            != spml_ikrit_get_ep_address(&my_ep_info, MXM_PTL_RDMA)) {
        rc = OSHMEM_ERROR;
        goto bail;
    }
#else
    if (mca_spml_ikrit.hw_rdma_channel) {
        err = mxm_ep_get_address(mca_spml_ikrit.mxm_hw_rdma_ep, &my_ep_info.addr.ep_addr, &mxm_addr_len);
        if (MXM_OK != err) {
            orte_show_help("help-oshmem-spml-ikrit.txt", "unable to get endpoint address", true,
                    mxm_error_string(err));
            rc = OSHMEM_ERROR;
            goto bail;
        }
        oshmem_shmem_allgather(&my_ep_info, ep_hw_rdma_info,
                sizeof(spml_ikrit_mxm_ep_conn_info_t));
    }
    err = mxm_ep_get_address(mca_spml_ikrit.mxm_ep, &my_ep_info.addr.ep_addr, &mxm_addr_len);
    if (MXM_OK != err) {
        orte_show_help("help-oshmem-spml-ikrit.txt", "unable to get endpoint address", true,
                mxm_error_string(err));
        rc = OSHMEM_ERROR;
        goto bail;
    }
#endif
    oshmem_shmem_allgather(&my_ep_info, ep_info,
                           sizeof(spml_ikrit_mxm_ep_conn_info_t));

    opal_progress_register(spml_ikrit_progress);

    /* Get the EP connection requests for all the processes from modex */
    for (n = 0; n < nprocs; ++n) {

        /* mxm 2.0 keeps its connections on a list. Make sure
         * that the list has a different order on every rank */
        i = (my_rank + n) % nprocs;
        mca_spml_ikrit.mxm_peers[i] = OBJ_NEW(mxm_peer_t);
        if (NULL == mca_spml_ikrit.mxm_peers[i]) {
            rc = OSHMEM_ERR_OUT_OF_RESOURCE;
            goto bail;
        }
        mca_spml_ikrit.mxm_peers[i]->pe = i;

#if MXM_API < MXM_VERSION(2,0)
        conn_reqs[i].ptl_addr[MXM_PTL_SELF] =
                (struct sockaddr *) &ep_info[i].addr.ptl_addr[MXM_PTL_SELF];
        conn_reqs[i].ptl_addr[MXM_PTL_SHM] = NULL;
        conn_reqs[i].ptl_addr[MXM_PTL_RDMA] =
                (struct sockaddr *) &ep_info[i].addr.ptl_addr[MXM_PTL_RDMA];
#else
        err = mxm_ep_connect(mca_spml_ikrit.mxm_ep, ep_info[i].addr.ep_addr, &mca_spml_ikrit.mxm_peers[i]->mxm_conn);
        if (MXM_OK != err) {
            SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
            goto bail;
        }
        if (OSHMEM_SUCCESS != create_ptl_idx(i))
                goto bail;
        mxm_conn_ctx_set(mca_spml_ikrit.mxm_peers[i]->mxm_conn, mca_spml_ikrit.mxm_peers[i]);
        if (mca_spml_ikrit.hw_rdma_channel) {
            err = mxm_ep_connect(mca_spml_ikrit.mxm_hw_rdma_ep, ep_hw_rdma_info[i].addr.ep_addr, &mca_spml_ikrit.mxm_peers[i]->mxm_hw_rdma_conn);
            if (MXM_OK != err) {
                SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
                goto bail;
            }
        } else {
            mca_spml_ikrit.mxm_peers[i]->mxm_hw_rdma_conn = mca_spml_ikrit.mxm_peers[i]->mxm_conn;
        }
#endif
    }

#if MXM_API < MXM_VERSION(2,0)
    /* Connect to remote peers */
    if (mxm_get_version() < MXM_VERSION(1,5)) {
        timeout = 1000;
    } else {
        timeout = -1;
    }
    err = mxm_ep_connect(mca_spml_ikrit.mxm_ep, conn_reqs, nprocs, timeout);
    if (MXM_OK != err) {
        SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
        for (i = 0; i < nprocs; ++i) {
            if (MXM_OK != conn_reqs[i].error) {
                SPML_ERROR("MXM EP connect to %s error: %s\n",
                           procs[i]->proc_hostname, mxm_error_string(conn_reqs[i].error));
            }
        }
        rc = OSHMEM_ERR_CONNECTION_FAILED;
        goto bail;
    }

    /* Save returned connections */
    for (i = 0; i < nprocs; ++i) {
        mca_spml_ikrit.mxm_peers[i]->mxm_conn = conn_reqs[i].conn;
        if (OSHMEM_SUCCESS != create_ptl_idx(i)) {
            rc = OSHMEM_ERR_CONNECTION_FAILED;
            goto bail;
        }

        mxm_conn_ctx_set(conn_reqs[i].conn, mca_spml_ikrit.mxm_peers[i]);
    }

    if (conn_reqs)
        free(conn_reqs);
#endif
    if (ep_info)
        free(ep_info);
    if (ep_hw_rdma_info)
        free(ep_hw_rdma_info);

#if MXM_API >= MXM_VERSION(2,0)
    if (mca_spml_ikrit.bulk_connect) {
        /* Need a barrier to ensure remote peers have already created their connections */
        oshmem_shmem_barrier();
        mxm_ep_wireup(mca_spml_ikrit.mxm_ep);
    }
#endif

    proc_self = oshmem_proc_group_find(oshmem_group_all, my_rank);
    /* identify local processes and change transport to SHM */
    for (i = 0; i < nprocs; i++) {
        if (procs[i]->super.proc_name.jobid != proc_self->super.proc_name.jobid ||
            !OPAL_PROC_ON_LOCAL_NODE(procs[i]->super.proc_flags)) {
            continue;
        }
        if (procs[i] == proc_self)
            continue;

        /* use zcopy for put/get via sysv shared memory */
        OSHMEM_PROC_DATA(procs[i])->transport_ids[0] = MXM_PTL_SHM;
        OSHMEM_PROC_DATA(procs[i])->transport_ids[1] = MXM_PTL_RDMA;
        OSHMEM_PROC_DATA(procs[i])->num_transports = 2;
    }

    SPML_VERBOSE(50, "*** ADDED PROCS ***");
    return OSHMEM_SUCCESS;

bail:
#if MXM_API < MXM_VERSION(2,0)
    if (conn_reqs)
        free(conn_reqs);
#endif
    if (ep_info)
        free(ep_info);
    if (ep_hw_rdma_info)
        free(ep_hw_rdma_info);
    SPML_ERROR("add procs FAILED rc=%d", rc);

    return rc;

}
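Note how the loop above staggers connection establishment with i = (my_rank + n) % nprocs. A minimal standalone illustration of the orderings this produces (an assumed demo program, not part of the SPML):

/* Demo: print the connect order each rank would use. */
#include <stdio.h>

int main(void)
{
    const int nprocs = 4;

    for (int my_rank = 0; my_rank < nprocs; my_rank++) {
        printf("rank %d connects in order:", my_rank);
        for (int n = 0; n < nprocs; n++)
            printf(" %d", (my_rank + n) % nprocs);
        printf("\n");
    }
    /* Output: rank 0 -> 0 1 2 3, rank 1 -> 1 2 3 0, rank 2 -> 2 3 0 1,
     * rank 3 -> 3 0 1 2. Each rank starts the walk at itself, so no
     * single peer is contacted first by every other rank at once. */
    return 0;
}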
Example No. 28
/**
 * TODO: using the put request as the handle is not ideal.
 */
static inline int mca_spml_ikrit_put_internal(void* dst_addr,
                                              size_t size,
                                              void* src_addr,
                                              int dst,
                                              void **handle,
                                              int zcopy)
{
    void *rva;
    mca_spml_ikrit_put_request_t *put_req;
    int ptl_id;
    sshmem_mkey_t *r_mkey;
    static int count;
    int need_progress = 0;

    if (0 == size) {
        return OSHMEM_SUCCESS;
    }

    ptl_id = get_ptl_id(dst);
    /* Get rkey of remote PE (dst proc) which must be on memheap  */
    r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, ptl_id, &rva);
    if (!r_mkey) {
        SPML_ERROR("pe=%d: %p is not address of shared variable",
                   dst, dst_addr);
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }

#if SPML_IKRIT_PUT_DEBUG == 1

    SPML_VERBOSE(100, "put: pe:%d ptl=%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
            dst, ptl_id, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif
    if (ptl_id == MXM_PTL_SHM) {

        if (mca_memheap_base_can_local_copy(r_mkey, dst_addr)) {
            memcpy((void *) (unsigned long) rva, src_addr, size);
            /* call progress as often as we would for a regular put */
            if (++count % SPML_IKRIT_PACKETS_PER_SYNC == 0)
                mxm_progress(mca_spml_ikrit.mxm_context);
            return OSHMEM_SUCCESS;
        }
        /* segment not mapped - fall back to RDMA */
        ptl_id = MXM_PTL_RDMA;
        r_mkey = mca_memheap_base_get_cached_mkey(dst, dst_addr, ptl_id, &rva);
        if (!r_mkey) {
            SPML_ERROR("pe=%d: %p is not address of shared variable",
                       dst, dst_addr);
            oshmem_shmem_abort(-1);
            return OSHMEM_ERROR;
        }
    }

#if SPML_IKRIT_PUT_DEBUG == 1
    SPML_VERBOSE(100, "put: pe:%d ptl=%d dst=%p <- src: %p sz=%d. dst_rva=%p, %s",
            dst, ptl_id, dst_addr, src_addr, (int)size, (void *)rva, mca_spml_base_mkey2str(r_mkey));
#endif

    put_req = alloc_put_req();
    if (NULL == put_req) {
        SPML_ERROR("out of put requests - aborting");
        oshmem_shmem_abort(-1);
        return OSHMEM_ERROR;
    }
    if (handle)
        *handle = put_req;

    /* fill out request */
    put_req->mxm_req.base.mq = mca_spml_ikrit.mxm_mq;
    /* Request an immediate response if we are getting low on send buffers; otherwise the remote
     * side only responds on ack timeout. Also request an explicit ack once in a while. */
#if MXM_API < MXM_VERSION(2,0)
    put_req->mxm_req.opcode = MXM_REQ_OP_PUT;
    if (mca_spml_ikrit.free_list_max - mca_spml_ikrit.n_active_puts <= SPML_IKRIT_PUT_LOW_WATER ||
            (mca_spml_ikrit.mxm_peers[dst]->n_active_puts + 1) % SPML_IKRIT_PACKETS_PER_SYNC == 0) {
        put_req->mxm_req.base.flags = MXM_REQ_FLAG_SEND_SYNC;
        need_progress = 1;
    } else  {
        put_req->mxm_req.base.flags = MXM_REQ_FLAG_SEND_LAZY|MXM_REQ_FLAG_SEND_SYNC;
    }
#else
    put_req->mxm_req.flags = 0;
    if (mca_spml_ikrit.free_list_max - mca_spml_ikrit.n_active_puts <= SPML_IKRIT_PUT_LOW_WATER ||
            (int)opal_list_get_size(&mca_spml_ikrit.active_peers) > mca_spml_ikrit.unsync_conn_max ||
            (mca_spml_ikrit.mxm_peers[dst]->n_active_puts + 1) % SPML_IKRIT_PACKETS_PER_SYNC == 0) {
        need_progress = 1;
        put_req->mxm_req.opcode = MXM_REQ_OP_PUT_SYNC;
    } else  {
        put_req->mxm_req.opcode = MXM_REQ_OP_PUT;
    }
    if (!zcopy) {
        if (size < mca_spml_ikrit.put_zcopy_threshold) {
            put_req->mxm_req.flags |= MXM_REQ_SEND_FLAG_BLOCKING;
        } else {
            put_req->mxm_req.opcode = MXM_REQ_OP_PUT_SYNC;
        }
    }
#endif

    put_req->mxm_req.base.conn = mca_spml_ikrit.mxm_peers[dst]->mxm_conn;
    put_req->mxm_req.base.data_type = MXM_REQ_DATA_BUFFER;
    put_req->mxm_req.base.data.buffer.ptr = src_addr;
    put_req->mxm_req.base.data.buffer.length = size;
    put_req->mxm_req.base.completed_cb = put_completion_cb;
    put_req->mxm_req.base.context = put_req;
    put_req->mxm_req.op.mem.remote_vaddr = (intptr_t) rva;
    put_req->mxm_req.base.state = MXM_REQ_NEW;
    put_req->pe = dst;

#if MXM_API < MXM_VERSION(2,0)
    put_req->mxm_req.base.data.buffer.memh = NULL;
    put_req->mxm_req.op.mem.remote_memh = NULL;
#else
    put_req->mxm_req.op.mem.remote_mkey = to_mxm_mkey(r_mkey);
#endif

    OPAL_THREAD_ADD32(&mca_spml_ikrit.n_active_puts, 1);
    if (mca_spml_ikrit.mxm_peers[dst]->need_fence == 0) {
        opal_list_append(&mca_spml_ikrit.active_peers,
                         &mca_spml_ikrit.mxm_peers[dst]->super);
        mca_spml_ikrit.mxm_peers[dst]->need_fence = 1;
    }

    mca_spml_ikrit.mxm_peers[dst]->n_active_puts++;

    SPML_IKRIT_MXM_POST_SEND(put_req->mxm_req);

    if (need_progress)
        mxm_progress(mca_spml_ikrit.mxm_context);

    return OSHMEM_SUCCESS;
}
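The MXM 2.0 branch above folds three throttling conditions into a single test. Restated as a standalone predicate, a sketch that uses only the fields and tunables already shown (the helper name put_needs_sync is hypothetical):

/* Sketch: nonzero when the next put to `dst` should be posted as
 * MXM_REQ_OP_PUT_SYNC and followed by an mxm_progress() call. */
static inline int put_needs_sync(int dst)
{
    /* running low on free put requests */
    if (mca_spml_ikrit.free_list_max - mca_spml_ikrit.n_active_puts <=
            SPML_IKRIT_PUT_LOW_WATER)
        return 1;

    /* too many peers with unacknowledged puts outstanding */
    if ((int)opal_list_get_size(&mca_spml_ikrit.active_peers) >
            mca_spml_ikrit.unsync_conn_max)
        return 1;

    /* force an explicit ack every SPML_IKRIT_PACKETS_PER_SYNC puts */
    if ((mca_spml_ikrit.mxm_peers[dst]->n_active_puts + 1) %
            SPML_IKRIT_PACKETS_PER_SYNC == 0)
        return 1;

    return 0;
}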
Example No. 29
sshmem_mkey_t *mca_spml_ikrit_register(void* addr,
                                         size_t size,
                                         uint64_t shmid,
                                         int *count)
{
    int i;
    sshmem_mkey_t *mkeys;
    mxm_error_t err;
    mxm_mem_key_t *m_key;
    int my_rank = oshmem_my_proc_id();

    *count = 0;
    mkeys = (sshmem_mkey_t *) calloc(MXM_PTL_LAST, sizeof(*mkeys));
    if (!mkeys) {
        return NULL;
    }

    for (i = 0; i < MXM_PTL_LAST; i++) {
        mkeys[i].u.key = MAP_SEGMENT_SHM_INVALID;
        switch (i) {
        case MXM_PTL_SHM:
            if ((int)shmid != MAP_SEGMENT_SHM_INVALID) {
                mkeys[i].u.key = shmid;
                mkeys[i].va_base = 0;
            } else {
                mkeys[i].len = 0;
                mkeys[i].va_base = addr;
            }
            mkeys[i].spml_context = 0;
            break;
        case MXM_PTL_RDMA:
            mkeys[i].va_base = addr;
            mkeys[i].spml_context = 0;

            if (mca_spml_ikrit.ud_only) {
                mkeys[i].len = 0;
                break;
            }

            err = mxm_mem_map(mca_spml_ikrit.mxm_context, &addr, &size, 0, 0, 0);
            if (MXM_OK != err) {
                SPML_ERROR("Failed to register memory: %s", mxm_error_string(err));
                goto error_out;
            }
            mkeys[i].spml_context = (void *)(unsigned long)size;

            m_key = malloc(sizeof(*m_key));
            if (NULL == m_key) {
                SPML_ERROR("Failed to allocate m_key memory");
                goto error_out;
            }
            mkeys[i].len = sizeof(*m_key);
            mkeys[i].u.data = m_key;

            err = mxm_mem_get_key(mca_spml_ikrit.mxm_context, addr, m_key);
            if (MXM_OK != err) {
                SPML_ERROR("Failed to get memory key: %s", mxm_error_string(err));
                goto error_out;
            }
            break;

        default:
            SPML_ERROR("unsupported PTL: %d", i);
            goto error_out;
        }
        SPML_VERBOSE(5,
                     "rank %d ptl %d addr %p size %llu %s",
                     my_rank, i, addr, (unsigned long long)size,
                     mca_spml_base_mkey2str(&mkeys[i]));

        mca_spml_ikrit_cache_mkeys(&mkeys[i], memheap_find_segnum(addr), my_rank, i);
    }
    *count = MXM_PTL_LAST;

    return mkeys;

error_out:
    mca_spml_ikrit_deregister(mkeys);

    return NULL;
}
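A minimal usage sketch for the function above (assumed caller code; seg_base, seg_len and seg_shmid are hypothetical names for an existing memheap segment):

/* Sketch: register a segment and log the number of keys returned. */
int nkeys = 0;
sshmem_mkey_t *mkeys;

mkeys = mca_spml_ikrit_register(seg_base, seg_len, seg_shmid, &nkeys);
if (NULL == mkeys) {
    SPML_ERROR("failed to register segment %p len %zu", seg_base, seg_len);
} else {
    /* on success nkeys == MXM_PTL_LAST: one mkey per transport,
     * covering MXM_PTL_SHM and MXM_PTL_RDMA */
    SPML_VERBOSE(5, "registered %p with %d mkeys", seg_base, nkeys);
}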
Example No. 30
int mca_spml_ikrit_add_procs(ompi_proc_t** procs, size_t nprocs)
{
    spml_ikrit_mxm_ep_conn_info_t *ep_info = NULL;
    spml_ikrit_mxm_ep_conn_info_t *ep_hw_rdma_info = NULL;
    spml_ikrit_mxm_ep_conn_info_t my_ep_info;
    size_t mxm_addr_len = MXM_MAX_ADDR_LEN;
    mxm_error_t err;
    size_t i, n;
    int rc = OSHMEM_ERROR;
    ompi_proc_t *proc_self;
    int my_rank = oshmem_my_proc_id();

    OBJ_CONSTRUCT(&mca_spml_ikrit.active_peers, opal_list_t);
    /* Allocate endpoint connection info */
    ep_info = calloc(nprocs, sizeof(spml_ikrit_mxm_ep_conn_info_t));
    if (NULL == ep_info) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }

    if (mca_spml_ikrit.hw_rdma_channel) {
        ep_hw_rdma_info = calloc(nprocs, sizeof(spml_ikrit_mxm_ep_conn_info_t));
        if (NULL == ep_hw_rdma_info) {
            rc = OSHMEM_ERR_OUT_OF_RESOURCE;
            goto bail;
        }
    }

    mca_spml_ikrit.mxm_peers = (mxm_peer_t *) calloc(nprocs, sizeof(mxm_peer_t));
    if (NULL == mca_spml_ikrit.mxm_peers) {
        rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        goto bail;
    }

    memset(&my_ep_info, 0, sizeof(my_ep_info));

    if (mca_spml_ikrit.hw_rdma_channel) {
        err = mxm_ep_get_address(mca_spml_ikrit.mxm_hw_rdma_ep, &my_ep_info.addr.ep_addr, &mxm_addr_len);
        if (MXM_OK != err) {
            orte_show_help("help-oshmem-spml-ikrit.txt", "unable to get endpoint address", true,
                    mxm_error_string(err));
            rc = OSHMEM_ERROR;
            goto bail;
        }
        oshmem_shmem_allgather(&my_ep_info, ep_hw_rdma_info,
                sizeof(spml_ikrit_mxm_ep_conn_info_t));
    }
    err = mxm_ep_get_address(mca_spml_ikrit.mxm_ep, &my_ep_info.addr.ep_addr, &mxm_addr_len);
    if (MXM_OK != err) {
        orte_show_help("help-oshmem-spml-ikrit.txt", "unable to get endpoint address", true,
                mxm_error_string(err));
        rc = OSHMEM_ERROR;
        goto bail;
    }

    oshmem_shmem_allgather(&my_ep_info, ep_info,
                           sizeof(spml_ikrit_mxm_ep_conn_info_t));

    opal_progress_register(spml_ikrit_progress);

    /* Get the EP connection requests for all the processes from modex */
    for (n = 0; n < nprocs; ++n) {

        /* mxm 2.0 keeps its connections on a list. Make sure
         * that the list has a different order on every rank */
        i = (my_rank + n) % nprocs;
        mxm_peer_construct(&mca_spml_ikrit.mxm_peers[i]);

        err = mxm_ep_connect(mca_spml_ikrit.mxm_ep, ep_info[i].addr.ep_addr, &mca_spml_ikrit.mxm_peers[i].mxm_conn);
        if (MXM_OK != err) {
            SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
            goto bail;
        }
        mxm_conn_ctx_set(mca_spml_ikrit.mxm_peers[i].mxm_conn, &mca_spml_ikrit.mxm_peers[i]);
        if (mca_spml_ikrit.hw_rdma_channel) {
            err = mxm_ep_connect(mca_spml_ikrit.mxm_hw_rdma_ep, ep_hw_rdma_info[i].addr.ep_addr, &mca_spml_ikrit.mxm_peers[i].mxm_hw_rdma_conn);
            if (MXM_OK != err) {
                SPML_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
                goto bail;
            }
        } else {
            mca_spml_ikrit.mxm_peers[i].mxm_hw_rdma_conn = mca_spml_ikrit.mxm_peers[i].mxm_conn;
        }
    }

    if (ep_info)
        free(ep_info);
    if (ep_hw_rdma_info)
        free(ep_hw_rdma_info);

    if (mca_spml_ikrit.bulk_connect) {
        /* Need a barrier to ensure remote peers have already created their connections */
        oshmem_shmem_barrier();
        mxm_ep_wireup(mca_spml_ikrit.mxm_ep);
    }

    proc_self = oshmem_proc_group_find(oshmem_group_all, my_rank);
    /* identify local processes and change transport to SHM */
    for (i = 0; i < nprocs; i++) {
        if (procs[i]->super.proc_name.jobid != proc_self->super.proc_name.jobid ||
            !OPAL_PROC_ON_LOCAL_NODE(procs[i]->super.proc_flags)) {
            continue;
        }
        if (procs[i] == proc_self)
            continue;

        /* use zcopy for put/get via sysv shared memory with fallback to RDMA */
        mca_spml_ikrit.mxm_peers[i].ptl_id = MXM_PTL_SHM;
    }

    SPML_VERBOSE(50, "*** ADDED PROCS ***");
    return OSHMEM_SUCCESS;

bail:
    if (ep_info)
        free(ep_info);
    if (ep_hw_rdma_info)
        free(ep_hw_rdma_info);
    SPML_ERROR("add procs FAILED rc=%d", rc);

    return rc;

}
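get_ptl_id() is referenced throughout these examples but never shown. Given that the MXM 2.0 variant above records the chosen transport in mxm_peers[i].ptl_id, a plausible sketch (an assumption, not the actual implementation) is simply:

/* Sketch: look up the transport selected for a peer during add_procs
 * (MXM_PTL_SHM for on-node peers, MXM_PTL_RDMA otherwise). */
static inline int get_ptl_id(int dst)
{
    return mca_spml_ikrit.mxm_peers[dst].ptl_id;
}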