/* Packet handler for an eager-sync acknowledgement: look up the send request
 * named in the packet and decrement its completion counter.  No payload
 * follows this packet, so *buflen is set to the packet size and no receive
 * request is returned. */
int MPIDI_CH3_PktHandler_EagerSyncAck( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
                                       intptr_t *buflen, MPIR_Request **rreqp )
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_CH3_Pkt_eager_sync_ack_t *ack = &pkt->eager_sync_ack;
    MPIR_Request *send_req;

    MPL_DBG_MSG_P(MPIDI_CH3_DBG_OTHER, VERBOSE,
                  "received eager sync ack pkt, sreq=0x%08x", ack->sender_req_id);

    MPIR_Request_get_ptr(ack->sender_req_id, send_req);

    /* Only the completion counter is decremented here; the data transfer
     * may still be in progress, so the request is not marked as having
     * finished its transfer. */
    /* FIXME: This sometimes segfaults */
    mpi_errno = MPID_Request_complete(send_req);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    *buflen = sizeof(MPIDI_CH3_Pkt_t);
    *rreqp = NULL;

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Bookkeeping run when the send request of an RMA operation completes
 * locally: decrement the target's pending-packet count, decrement the global
 * active-RMA-request count, and complete the attached user request (if any). */
int MPIDI_CH3_Req_handler_rma_op_complete(MPIR_Request * sreq)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Win *win = NULL;
    MPIR_Request *user_req = NULL;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3_REQ_HANDLER_RMA_OP_COMPLETE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3_REQ_HANDLER_RMA_OP_COMPLETE);

    /* One fewer packet is awaiting local completion at this target. */
    if (sreq->dev.rma_target_ptr != NULL)
        sreq->dev.rma_target_ptr->num_pkts_wait_for_local_completion--;

    /* Resolve the window and decrement the active request count on it. */
    MPIR_Win_get_ptr(sreq->dev.source_win_handle, win);
    MPIR_Assert(win != NULL);
    MPIDI_CH3I_RMA_Active_req_cnt--;
    MPIR_Assert(MPIDI_CH3I_RMA_Active_req_cnt >= 0);

    if (sreq->dev.request_handle != MPI_REQUEST_NULL) {
        /* A user-visible request (e.g. from an R-variant RMA call) is
         * attached; complete it now. */
        MPIR_Request_get_ptr(sreq->dev.request_handle, user_req);
        mpi_errno = MPID_Request_complete(user_req);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3_REQ_HANDLER_RMA_OP_COMPLETE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Packet handler for rendezvous data: deliver whatever payload arrived with
 * the packet into the posted receive request.  On return *buflen reports how
 * many bytes of the incoming buffer were consumed, and *rreqp is the receive
 * request if more data is still expected (NULL once the receive completed). */
int MPIDI_CH3_PktHandler_RndvSend( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
                                   intptr_t *buflen, MPIR_Request **rreqp )
{
    int mpi_errno = MPI_SUCCESS;
    int complete;
    MPIDI_CH3_Pkt_rndv_send_t *rs_pkt = &pkt->rndv_send;
    MPIR_Request *req;
    char *payload;
    intptr_t payload_len;

    MPL_DBG_MSG(MPIDI_CH3_DBG_OTHER,VERBOSE,"received rndv send (data) pkt");

    MPIR_Request_get_ptr(rs_pkt->receiver_req_id, req);

    /* Payload starts right after the packet header; clamp its length to
     * what the receive request still expects. */
    payload = (char *) pkt + sizeof(MPIDI_CH3_Pkt_t);
    if (*buflen - sizeof(MPIDI_CH3_Pkt_t) >= req->dev.recv_data_sz)
        payload_len = req->dev.recv_data_sz;
    else
        payload_len = *buflen - sizeof(MPIDI_CH3_Pkt_t);

    if (req->dev.recv_data_sz == 0) {
        /* Zero-byte transfer: nothing to copy, complete immediately. */
        *buflen = sizeof(MPIDI_CH3_Pkt_t);
        mpi_errno = MPID_Request_complete(req);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
        *rreqp = NULL;
    }
    else {
        mpi_errno = MPIDI_CH3U_Receive_data_found(req, payload, &payload_len,
                                                  &complete);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_SETANDJUMP1(mpi_errno, MPI_ERR_OTHER, "**ch3|postrecv",
                                 "**ch3|postrecv %s", "MPIDI_CH3_PKT_RNDV_SEND");
        }

        *buflen = sizeof(MPIDI_CH3_Pkt_t) + payload_len;

        if (!complete) {
            /* More data follows in subsequent packets. */
            *rreqp = req;
        }
        else {
            mpi_errno = MPID_Request_complete(req);
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            *rreqp = NULL;
        }
    }

  fn_fail:
    return mpi_errno;
}
/* Internal implementation of MPI_Test: test the request for completion and,
 * if complete, run completion processing and (for non-persistent requests)
 * release the request and null the caller's handle. */
int MPIR_Test(MPI_Request * request, int *flag, MPI_Status * status)
{
    int mpi_errno = MPI_SUCCESS;
    int active;
    MPIR_Request *req = NULL;

    /* A null handle tests as complete with an empty status. */
    if (*request == MPI_REQUEST_NULL) {
        *flag = TRUE;
        MPIR_Status_set_empty(status);
        goto fn_exit;
    }

    MPIR_Request_get_ptr(*request, req);
    MPIR_Assert(req != NULL);

    mpi_errno = MPID_Test(req, flag, status);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    if (*flag) {
        mpi_errno = MPIR_Request_completion_processing(req, status, &active);
        /* Persistent requests survive completion; everything else is
         * released and the user's handle nulled. */
        if (!MPIR_Request_is_persistent(req)) {
            MPIR_Request_free(req);
            *request = MPI_REQUEST_NULL;
        }
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
        /* Fall through to the exit */
    }
    else if (unlikely(MPIR_Request_is_anysrc_mismatched(req))) {
        /* A wildcard receive whose matched source has failed: surface the
         * pending failure through the status and error return. */
        MPIR_ERR_SET(mpi_errno, MPIX_ERR_PROC_FAILED_PENDING, "**failure_pending");
        if (status != MPI_STATUS_IGNORE)
            status->MPI_ERROR = mpi_errno;
        goto fn_fail;
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Buffered send for the PSP device, implemented on top of the nonblocking
 * buffered send.  On success *request receives the resulting request object;
 * on failure *request is left untouched.
 * See src/mpid/ch3/src/mpid_startall.c:105
 * "MPID_Startall(): case MPIDI_REQUEST_TYPE_BSEND:" */
static
int MPID_PSP_Bsend(const void * buf, MPI_Aint count, MPI_Datatype datatype,
                   int rank, int tag, MPIR_Comm * comm, int context_offset,
                   MPIR_Request ** request)
{
    MPI_Request sreq_handle;
    int rc;

    /* TODO: check THREADPRIV API! */
    rc = MPIR_Ibsend_impl((void *) buf, count, datatype, rank, tag, comm,
                          &sreq_handle);
    if (rc == MPI_SUCCESS) {
        MPIR_Request *sreq;
        /* Hand the underlying request object back to the caller. */
        MPIR_Request_get_ptr(sreq_handle, sreq);
        *request = sreq;
    }
    return rc;
}
/*@
    MPI_Start - Initiates a communication with a persistent request handle

Input Parameters:
. request - communication request (handle)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_REQUEST
@*/
int MPI_Start(MPI_Request * request)
{
    MPIR_Request *request_ptr = NULL;
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_START);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    /* Request activation is serialized under the global per-process mutex;
       released at fn_exit (fn_fail also routes through fn_exit). */
    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_TERSE_REQUEST_ENTER(MPID_STATE_MPI_START);

    /* Validate handle parameters needing to be converted */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_ARGNULL(request, "request", mpi_errno);
            MPIR_ERRTEST_REQUEST(*request, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI request handle to a request object pointer */
    MPIR_Request_get_ptr(*request, request_ptr);

    /* Validate object pointers if error checking is enabled */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_Request_valid_ptr(request_ptr, mpi_errno);
            if (mpi_errno)
                goto fn_fail;
            /* MPI_Start is only defined on inactive persistent requests. */
            MPIR_ERRTEST_PERSISTENT(request_ptr, mpi_errno);
            MPIR_ERRTEST_PERSISTENT_ACTIVE(request_ptr, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* Delegate to the device's batch-start entry point with a count of 1. */
    mpi_errno = MPID_Startall(1, &request_ptr);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPIR_FUNC_TERSE_REQUEST_EXIT(MPID_STATE_MPI_START);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno =
            MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
                                 MPI_ERR_OTHER, "**mpi_start", "**mpi_start %p", request);
    }
#endif
    mpi_errno = MPIR_Err_return_comm(NULL, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
    MPI_Test  - Tests for the completion of a request

Input Parameters:
. request - MPI request (handle)

Output Parameters:
+ flag - true if operation completed (logical)
- status - status object (Status).  May be 'MPI_STATUS_IGNORE'.

.N ThreadSafe

.N waitstatus

.N Fortran

.N FortranStatus

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_REQUEST
.N MPI_ERR_ARG
@*/
int MPI_Test(MPI_Request * request, int *flag, MPI_Status * status)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Request *request_ptr = NULL;
    MPIR_Comm *comm_ptr = NULL;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_TEST);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_THREAD_CS_ENTER(VNI_GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_TERSE_REQUEST_ENTER(MPID_STATE_MPI_TEST);

    /* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_ARGNULL(request, "request", mpi_errno);
            /* A null request handle is legal for MPI_Test. */
            MPIR_ERRTEST_REQUEST_OR_NULL(*request, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    MPIR_Request_get_ptr(*request, request_ptr);

    /* Validate parameters and objects (post conversion) */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            if (*request != MPI_REQUEST_NULL) {
                /* Validate request_ptr */
                MPIR_Request_valid_ptr(request_ptr, mpi_errno);
                if (mpi_errno)
                    goto fn_fail;
            }
            MPIR_ERRTEST_ARGNULL(flag, "flag", mpi_errno);
            /* NOTE: MPI_STATUS_IGNORE != NULL */
            MPIR_ERRTEST_ARGNULL(status, "status", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* MPIR_Test may free request_ptr, so make a copy of comm_ptr before calling
     * MPIR_Test.  comm_ptr will be used later at fn_fail when MPIR_Test returns
     * an error. */
    if (request_ptr)
        comm_ptr = request_ptr->comm;
    mpi_errno = MPIR_Test(request, flag, status);
    if (mpi_errno)
        goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPIR_FUNC_TERSE_REQUEST_EXIT(MPID_STATE_MPI_TEST);
    MPID_THREAD_CS_EXIT(VNI_GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno =
            MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
                                 MPI_ERR_OTHER, "**mpi_test", "**mpi_test %p %p %p",
                                 request, flag, status);
    }
#endif
    mpi_errno = MPIR_Err_return_comm(comm_ptr, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Imrecv - Nonblocking receive of message matched by MPI_Mprobe or
             MPI_Improbe.

Input/Output Parameters:
. message - message (handle)

Input Parameters:
+ count - number of elements in the receive buffer (non-negative integer)
- datatype - datatype of each receive buffer element (handle)

Output Parameters:
+ buf - initial address of the receive buffer (choice)
- request - communication request (handle)

.N ThreadSafe

.N Fortran

.N Errors
@*/
int MPI_Imrecv(void *buf, int count, MPI_Datatype datatype,
               MPI_Message *message, MPI_Request *request)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Request *rreq = NULL;
    MPIR_Request *msgp = NULL;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_IMRECV);

    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_IMRECV);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS
        {
            MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno);

            /* TODO more checks may be appropriate */
        }
        MPID_END_ERROR_CHECKS
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    /* NOTE(review): the message handle itself is not null/validity checked
       before this conversion when error checking is disabled — presumably
       callers guarantee a handle from MPI_Mprobe/MPI_Improbe. */
    MPIR_Request_get_ptr(*message, msgp);

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS
        {
            /* Derived datatypes must be valid and committed before use. */
            if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
                MPIR_Datatype *datatype_ptr = NULL;
                MPID_Datatype_get_ptr(datatype, datatype_ptr);
                MPIR_Datatype_valid_ptr(datatype_ptr, mpi_errno);
                if (mpi_errno != MPI_SUCCESS)
                    goto fn_fail;
                MPID_Datatype_committed_ptr(datatype_ptr, mpi_errno);
                if (mpi_errno != MPI_SUCCESS)
                    goto fn_fail;
            }

            /* MPI_MESSAGE_NO_PROC should yield a "proc null" status */
            if (*message != MPI_MESSAGE_NO_PROC) {
                /* The underlying request object must come from a matched
                   probe, not some other request kind. */
                MPIR_Request_valid_ptr(msgp, mpi_errno);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                MPIR_ERR_CHKANDJUMP((msgp->kind != MPIR_REQUEST_KIND__MPROBE),
                                    mpi_errno, MPI_ERR_ARG, "**reqnotmsg");
            }

            MPIR_ERRTEST_ARGNULL(request, "request", mpi_errno);

            /* TODO more checks may be appropriate (counts, in_place, buffer aliasing, etc) */
        }
        MPID_END_ERROR_CHECKS
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    mpi_errno = MPID_Imrecv(buf, count, datatype, msgp, &rreq);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    MPIR_Assert(rreq != NULL);
    *request = rreq->handle;
    /* The message handle is consumed by a matched receive. */
    *message = MPI_MESSAGE_NULL;

    /* ... end of body of routine ... */

  fn_exit:
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_IMRECV);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno =
            MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
                                 MPI_ERR_OTHER, "**mpi_imrecv",
                                 "**mpi_imrecv %p %d %D %p %p",
                                 buf, count, datatype, message, request);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm(NULL, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
    MPI_Request_free - Frees a communication request object

Input Parameters:
. request - communication request (handle)

Notes:

This routine is normally used to free inactive persistent requests created
with either 'MPI_Recv_init' or 'MPI_Send_init' and friends.  It `is` also
permissible to free an active request.  However, once freed, the request can
no longer be used in a wait or test routine (e.g., 'MPI_Wait') to determine
completion.

This routine may also be used to free a non-persistent requests such as those
created with 'MPI_Irecv' or 'MPI_Isend' and friends.  Like active persistent
requests, once freed, the request can no longer be used with test/wait
routines to determine completion.

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_REQUEST
.N MPI_ERR_ARG

.see also: MPI_Isend, MPI_Irecv, MPI_Issend, MPI_Ibsend, MPI_Irsend,
MPI_Recv_init, MPI_Send_init, MPI_Ssend_init, MPI_Rsend_init, MPI_Wait,
MPI_Test, MPI_Waitall, MPI_Waitany, MPI_Waitsome, MPI_Testall, MPI_Testany,
MPI_Testsome
@*/
int MPI_Request_free(MPI_Request * request)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Request *request_ptr = NULL;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_REQUEST_FREE);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_REQUEST_FREE);

    /* Validate handle parameters needing to be converted */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_ARGNULL(request, "request", mpi_errno);
            MPIR_ERRTEST_REQUEST(*request, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPIR_Request_get_ptr(*request, request_ptr);

    /* Validate object pointers if error checking is enabled */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate request_ptr */
            MPIR_Request_valid_ptr(request_ptr, mpi_errno);
            if (mpi_errno)
                goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* Give the device a chance to advance outstanding communication before
       the request object goes away. */
    MPID_Progress_poke();

    /* Per-kind cleanup before releasing the request's reference. */
    switch (request_ptr->kind) {
        case MPIR_REQUEST_KIND__SEND:
            {
                /* Remove the request from the debugger send queue. */
                MPII_SENDQ_FORGET(request_ptr);
                break;
            }

        case MPIR_REQUEST_KIND__RECV:
            {
                break;
            }

        case MPIR_REQUEST_KIND__PREQUEST_SEND:
            {
                /* If this is an active persistent request, we must also
                 * release the partner request. */
                if (request_ptr->u.persist.real_request != NULL) {
                    if (request_ptr->u.persist.real_request->kind ==
                        MPIR_REQUEST_KIND__GREQUEST) {
                        /* This is needed for persistent Bsend requests */
                        mpi_errno =
                            MPIR_Grequest_free(request_ptr->u.persist.real_request);
                    }
                    MPIR_Request_free(request_ptr->u.persist.real_request);
                }
                break;
            }

        case MPIR_REQUEST_KIND__PREQUEST_RECV:
            {
                /* If this is an active persistent request, we must also
                 * release the partner request. */
                if (request_ptr->u.persist.real_request != NULL) {
                    MPIR_Request_free(request_ptr->u.persist.real_request);
                }
                break;
            }

        case MPIR_REQUEST_KIND__GREQUEST:
            {
                /* Run the user's free callback for generalized requests. */
                mpi_errno = MPIR_Grequest_free(request_ptr);
                break;
            }

            /* --BEGIN ERROR HANDLING-- */
        default:
            {
                mpi_errno =
                    MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, FCNAME,
                                         __LINE__, MPI_ERR_OTHER,
                                         "**request_invalid_kind",
                                         "**request_invalid_kind %d",
                                         request_ptr->kind);
                break;
            }
            /* --END ERROR HANDLING-- */
    }

    /* Drop the caller's reference and null the user's handle even if the
       per-kind cleanup produced an error; the error is reported below. */
    MPIR_Request_free(request_ptr);
    *request = MPI_REQUEST_NULL;

    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_REQUEST_FREE);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno =
            MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
                                 MPI_ERR_OTHER, "**mpi_request_free",
                                 "**mpi_request_free %p", request);
    }
#endif
    mpi_errno = MPIR_Err_return_comm(0, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/* Packet handler for a rendezvous clear-to-send: the receiver has posted a
 * matching receive, so release the saved RTS request and push the actual data
 * to the receiver — via iSendv for contiguous buffers, or the VC's
 * noncontiguous send path (with a freshly initialized segment) otherwise. */
int MPIDI_CH3_PktHandler_RndvClrToSend( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
                                        intptr_t *buflen, MPIR_Request **rreqp )
{
    MPIDI_CH3_Pkt_rndv_clr_to_send_t * cts_pkt = &pkt->rndv_clr_to_send;
    MPIR_Request * sreq;
    MPIR_Request * rts_sreq;
    /* Stack-built packet used as the header for the outgoing data message. */
    MPIDI_CH3_Pkt_t upkt;
    MPIDI_CH3_Pkt_rndv_send_t * rs_pkt = &upkt.rndv_send;
    int dt_contig;
    MPI_Aint dt_true_lb;
    intptr_t data_sz;
    MPIDU_Datatype* dt_ptr;
    int mpi_errno = MPI_SUCCESS;

    MPL_DBG_MSG(MPIDI_CH3_DBG_OTHER,VERBOSE,"received rndv CTS pkt");

    MPIR_Request_get_ptr(cts_pkt->sender_req_id, sreq);
    MPL_DBG_MSG_FMT(MPIDI_CH3_DBG_OTHER,TERSE,(MPL_DBG_FDEST,
                    "received cts, count=" MPI_AINT_FMT_DEC_SPEC "\n",
                    sreq->dev.user_count));

    sreq->dev.OnDataAvail = 0;
    sreq->dev.OnFinal = 0;

    /* Release the RTS request if one exists.
       MPIDI_Request_fetch_and_clear_rts_sreq() needs to be atomic to
       prevent cancel send from cancelling the wrong (future) request.
       If MPIDI_Request_fetch_and_clear_rts_sreq() returns a NULL rts_sreq,
       then MPID_Cancel_send() is responsible for releasing the RTS
       request object. */
    MPIDI_Request_fetch_and_clear_rts_sreq(sreq, &rts_sreq);
    if (rts_sreq != NULL)
    {
        MPIR_Request_free(rts_sreq);
    }

    /* The CTS packet carries no payload; only the header was consumed. */
    *buflen = sizeof(MPIDI_CH3_Pkt_t);

    MPIDI_Pkt_init(rs_pkt, MPIDI_CH3_PKT_RNDV_SEND);
    /* Echo the receiver's request id so the data can be matched on arrival. */
    rs_pkt->receiver_req_id = cts_pkt->receiver_req_id;

    MPIDI_Datatype_get_info(sreq->dev.user_count, sreq->dev.datatype,
                            dt_contig, data_sz, dt_ptr, dt_true_lb);

    if (dt_contig)
    {
        /* Contiguous data: send header + user buffer as a two-entry iov. */
        MPL_IOV iov[MPL_IOV_LIMIT];

        MPL_DBG_MSG_FMT(MPIDI_CH3_DBG_OTHER,VERBOSE,(MPL_DBG_FDEST,
                        "sending contiguous rndv data, data_sz=%" PRIdPTR,
                        data_sz));

        iov[0].MPL_IOV_BUF = (MPL_IOV_BUF_CAST)rs_pkt;
        iov[0].MPL_IOV_LEN = sizeof(*rs_pkt);

        iov[1].MPL_IOV_BUF =
            (MPL_IOV_BUF_CAST)((char *)sreq->dev.user_buf + dt_true_lb);
        iov[1].MPL_IOV_LEN = data_sz;

        /* Channel sends are serialized per-VC. */
        MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex);
        mpi_errno = MPIDI_CH3_iSendv(vc, sreq, iov, 2);
        MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex);
        MPIR_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|senddata");
    }
    else
    {
        /* Noncontiguous data: attach a segment describing the user buffer
           and let the VC's noncontiguous send routine pack/stream it. */
        sreq->dev.segment_ptr = MPIDU_Segment_alloc( );
        MPIR_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno,
                             MPI_ERR_OTHER, "**nomem", "**nomem %s",
                             "MPIDU_Segment_alloc");
        MPIDU_Segment_init(sreq->dev.user_buf, sreq->dev.user_count,
                           sreq->dev.datatype, sreq->dev.segment_ptr, 0);
        sreq->dev.segment_first = 0;
        sreq->dev.segment_size = data_sz;
        MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex);
        mpi_errno = vc->sendNoncontig_fn(vc, sreq, rs_pkt, sizeof(*rs_pkt));
        MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex);
        MPIR_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|senddata");
    }

    /* No receive request results from handling a CTS packet. */
    *rreqp = NULL;

 fn_fail:
    return mpi_errno;
}