/* Completion callback for a non-blocking MXM get: drop the in-flight get
 * count and retire the SPML request. */
static inline void get_completion_cb(void *ctx)
{
    mca_spml_ikrit_get_request_t *get_req = (mca_spml_ikrit_get_request_t *) ctx;

    OPAL_THREAD_ADD32(&mca_spml_ikrit.n_active_gets, -1);

    get_req->req_get.req_base.req_spml_complete = true;
    get_req->req_get.req_base.req_oshmem.req_status.SHMEM_ERROR = OSHMEM_SUCCESS;
    oshmem_request_complete(&get_req->req_get.req_base.req_oshmem, 1);
    oshmem_request_free((oshmem_request_t**) &get_req);
}
/* Completion callback for a non-blocking MXM put: drop the in-flight put
 * count, update the peer's bookkeeping and retire the SPML request. */
static inline void put_completion_cb(void *ctx)
{
    mca_spml_ikrit_put_request_t *put_req = (mca_spml_ikrit_put_request_t *) ctx;
    mxm_peer_t *peer;

    OPAL_THREAD_ADD32(&mca_spml_ikrit.n_active_puts, -1);
    peer = mca_spml_ikrit.mxm_peers[put_req->pe];

    /* this was the last put in progress: remove the peer from the list so
     * that we do not need an explicit fence */
#if SPML_IKRIT_PUT_DEBUG == 1
    if (peer) {
        if (peer->n_active_puts <= 0) {
            /* this can actually happen because fence forces the ref count to 0
             * while puts may still be in flight */
            SPML_VERBOSE(1, "pe %d n_active_puts %d", put_req->pe, peer->n_active_puts);
        }
    }

    if (put_req->mxm_req.base.state != MXM_REQ_COMPLETED) {
        SPML_ERROR("oops: pe %d uncompleted request state %d",
                   put_req->pe, put_req->mxm_req.base.state);
    }
#endif

    if (0 < peer->n_active_puts) {
        peer->n_active_puts--;
#if MXM_API < MXM_VERSION(2,0)
        if (0 == peer->n_active_puts &&
            (put_req->mxm_req.base.flags & MXM_REQ_FLAG_SEND_SYNC)) {
            opal_list_remove_item(&mca_spml_ikrit.active_peers, &peer->super);
            peer->need_fence = 0;
        }
#else
        if (0 == peer->n_active_puts &&
            (put_req->mxm_req.opcode == MXM_REQ_OP_PUT_SYNC)) {
            opal_list_remove_item(&mca_spml_ikrit.active_peers, &peer->super);
            peer->need_fence = 0;
        }
#endif
    }

    put_req->req_put.req_base.req_spml_complete = true;
    put_req->req_put.req_base.req_oshmem.req_status.SHMEM_ERROR = OSHMEM_SUCCESS;
    oshmem_request_complete(&put_req->req_put.req_base.req_oshmem, 1);
    oshmem_request_free((oshmem_request_t**) &put_req);
}
static void mca_yoda_get_response_callback(mca_btl_base_module_t* btl,
                                           mca_btl_base_tag_t tag,
                                           mca_btl_base_descriptor_t* des,
                                           void* cbdata)
{
    size_t* size;
    void** l_addr;
    mca_spml_yoda_get_request_t* getreq;

    /* unpack the response payload:
     * [size_t size][void* local dest addr][size bytes of data][get request pointer] */
    size = (size_t *) ((char*) des->des_segments->seg_addr.pval);
    l_addr = (void**) (((char*) size) + sizeof(*size));
    getreq = (mca_spml_yoda_get_request_t*) *(void**) ((char*) l_addr + sizeof(*l_addr) + *size);

    /* copy the fetched data into the local destination before signaling
     * completion, so a waiter never sees the request complete early */
    memcpy(*l_addr, (char*) l_addr + sizeof(*l_addr), *size);

    /* complete the get request */
    OPAL_THREAD_ADD32(&getreq->parent->active_count, -1);
    getreq->req_get.req_base.req_spml_complete = true;
    oshmem_request_complete(&getreq->req_get.req_base.req_oshmem, 1);
    oshmem_request_free((oshmem_request_t**) &getreq);
}
void mca_spml_yoda_put_completion(mca_btl_base_module_t* btl,
                                  struct mca_btl_base_endpoint_t* ep,
                                  struct mca_btl_base_descriptor_t* des,
                                  int status)
{
    mca_spml_yoda_rdma_frag_t* frag = (mca_spml_yoda_rdma_frag_t*) des->des_cbdata;
    mca_spml_yoda_put_request_t* putreq = (mca_spml_yoda_put_request_t*) frag->rdma_req;
    mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*) des->des_context;

    OPAL_THREAD_ADD32(&mca_spml_yoda.n_active_puts, -1);

    /* check completion status */
    if (OPAL_UNLIKELY(OSHMEM_SUCCESS != status)) {
        /* no way to propagate the error, so abort */
        SPML_ERROR("FATAL put completion error");
        oshmem_shmem_abort(-1);
    }

    putreq->req_put.req_base.req_spml_complete = true;
    oshmem_request_complete(&putreq->req_put.req_base.req_oshmem, 1);
    oshmem_request_free((oshmem_request_t**) &putreq);
    mca_bml_base_free(bml_btl, des);
}