int MPIC_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) { int mpi_errno, context_id; MPID_Request *request_ptr=NULL; MPID_Comm *comm_ptr=NULL; MPIDI_STATE_DECL(MPID_STATE_MPIC_SEND); MPIDI_PT2PT_FUNC_ENTER_FRONT(MPID_STATE_MPIC_SEND); MPID_Comm_get_ptr( comm, comm_ptr ); context_id = (comm_ptr->comm_kind == MPID_INTRACOMM) ? MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL; mpi_errno = MPID_Send(buf, count, datatype, dest, tag, comm_ptr, context_id, &request_ptr); if (mpi_errno) { MPIU_ERR_POP(mpi_errno); } if (request_ptr) { mpi_errno = MPIC_Wait(request_ptr); if (mpi_errno) { MPIU_ERR_POP(mpi_errno); } MPID_Request_release(request_ptr); } fn_exit: MPIDI_PT2PT_FUNC_EXIT(MPID_STATE_MPIC_SEND); return mpi_errno; fn_fail: /* --BEGIN ERROR HANDLING-- */ if (request_ptr) { MPID_Request_release(request_ptr); } goto fn_exit; /* --END ERROR HANDLING-- */ }
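/* Context sketch (not from the MPICH sources): collective algorithms call the
   MPIC_* helpers with a plain MPI_Comm handle and rely on them to wait out any
   returned request. A toy linear broadcast built on MPIC_Send above and the
   matching MPIC_Recv (defined later in this section) might look like this;
   bcast_sketch is a hypothetical name. */
static int bcast_sketch(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
    int rank, size, i, mpi_errno = MPI_SUCCESS;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    if (rank == root) {
        /* root sends to every other rank, one blocking helper call each */
        for (i = 0; i < size; i++) {
            if (i == root) continue;
            mpi_errno = MPIC_Send(buf, count, datatype, i, 0, comm);
            if (mpi_errno) return mpi_errno;
        }
    } else {
        mpi_errno = MPIC_Recv(buf, count, datatype, root, 0, comm, MPI_STATUS_IGNORE);
    }
    return mpi_errno;
}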
int MPID_nem_tcp_ckpt_restart_vc(MPIDI_VC_t *vc) { int mpi_errno = MPI_SUCCESS; MPIDI_CH3_Pkt_t upkt; MPIDI_nem_tcp_pkt_unpause_t * const pkt = (MPIDI_nem_tcp_pkt_unpause_t *)&upkt; MPID_Request *sreq; MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_TCP_CKPT_RESTART_VC); MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_TCP_CKPT_RESTART_VC); pkt->type = MPIDI_NEM_PKT_NETMOD; pkt->subtype = MPIDI_NEM_TCP_PKT_UNPAUSE; mpi_errno = MPID_nem_tcp_iStartContigMsg_paused(vc, pkt, sizeof(*pkt) /* size of the packet, not of the pointer */, NULL, 0, &sreq); if (mpi_errno) MPIU_ERR_POP(mpi_errno); if (sreq != NULL) { if (sreq->status.MPI_ERROR != MPI_SUCCESS) { mpi_errno = sreq->status.MPI_ERROR; MPID_Request_release(sreq); MPIU_ERR_INTERNALANDJUMP(mpi_errno, "Failed to send checkpoint unpause pkt."); } MPID_Request_release(sreq); } fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_TCP_CKPT_RESTART_VC); return mpi_errno; fn_fail: goto fn_exit; }
/* MPIDI_CH3_SendNoncontig_iov - Sends a message by loading an IOV and calling iSendv. The caller must initialize sreq->dev.segment as well as segment_first and segment_size. */ int MPIDI_CH3_SendNoncontig_iov( MPIDI_VC_t *vc, MPID_Request *sreq, void *header, MPIDI_msg_sz_t hdr_sz ) { int mpi_errno = MPI_SUCCESS; int iov_n; MPL_IOV iov[MPL_IOV_LIMIT]; MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_SENDNONCONTIG_IOV); MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_SENDNONCONTIG_IOV); iov[0].MPL_IOV_BUF = header; iov[0].MPL_IOV_LEN = hdr_sz; iov_n = MPL_IOV_LIMIT - 1; if (sreq->dev.ext_hdr_sz > 0) { /* When an extended packet header exists, leave one IOV slot free before loading the data, so that there are enough IOVs for hdr/ext_hdr/data. */ iov_n--; } mpi_errno = MPIDI_CH3U_Request_load_send_iov(sreq, &iov[1], &iov_n); if (mpi_errno == MPI_SUCCESS) { iov_n += 1; /* Note this routine is invoked within a CH3 critical section */ /* MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex); */ mpi_errno = MPIDI_CH3_iSendv(vc, sreq, iov, iov_n); /* MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex); */ /* --BEGIN ERROR HANDLING-- */ if (mpi_errno != MPI_SUCCESS) { MPID_Request_release(sreq); MPIR_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**ch3|eagermsg"); } /* --END ERROR HANDLING-- */ /* Note that in the non-blocking case, we need to add a ref to the datatypes */ } else { /* --BEGIN ERROR HANDLING-- */ MPID_Request_release(sreq); MPIR_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**ch3|loadsendiov"); /* --END ERROR HANDLING-- */ } fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_SENDNONCONTIG_IOV); return mpi_errno; fn_fail: goto fn_exit; }
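/* Caller-side sketch (an assumption based on the comment above, not code from
   this file): before invoking MPIDI_CH3_SendNoncontig_iov the caller builds
   the segment and sets segment_first/segment_size. send_noncontig_sketch is a
   hypothetical helper name; the segment_ptr field and MPID_Segment_* helpers
   follow the usual CH3 pattern but should be treated as assumptions. */
static int send_noncontig_sketch(MPIDI_VC_t *vc, MPID_Request *sreq, const void *buf,
                                 MPI_Aint count, MPI_Datatype datatype,
                                 MPIDI_msg_sz_t data_sz, void *hdr, MPIDI_msg_sz_t hdr_sz)
{
    sreq->dev.segment_ptr = MPID_Segment_alloc();
    if (sreq->dev.segment_ptr == NULL)
        return MPI_ERR_OTHER;                 /* allocation failed */
    MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
    sreq->dev.segment_first = 0;              /* offset of the first byte to pack */
    sreq->dev.segment_size = data_sz;         /* total bytes in the message */
    return MPIDI_CH3_SendNoncontig_iov(vc, sreq, hdr, hdr_sz);
}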
int MPIC_Ssend(const void *buf, MPI_Aint count, MPI_Datatype datatype, int dest, int tag, MPID_Comm *comm_ptr, mpir_errflag_t *errflag) { int mpi_errno = MPI_SUCCESS; int context_id; MPID_Request *request_ptr = NULL; MPIDI_STATE_DECL(MPID_STATE_MPIC_SSEND); MPIDI_FUNC_ENTER(MPID_STATE_MPIC_SSEND); MPIU_DBG_MSG_D(PT2PT, TYPICAL, "IN: errflag = %d", *errflag); MPIU_ERR_CHKANDJUMP1((count < 0), mpi_errno, MPI_ERR_COUNT, "**countneg", "**countneg %d", count); context_id = (comm_ptr->comm_kind == MPID_INTRACOMM) ? MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL; switch(*errflag) { case MPIR_ERR_NONE: break; case MPIR_ERR_PROC_FAILED: MPIR_TAG_SET_PROC_FAILURE_BIT(tag); default: MPIR_TAG_SET_ERROR_BIT(tag); } mpi_errno = MPID_Ssend(buf, count, datatype, dest, tag, comm_ptr, context_id, &request_ptr); if (mpi_errno) MPIU_ERR_POP(mpi_errno); if (request_ptr) { mpi_errno = MPIC_Wait(request_ptr, errflag); if (mpi_errno) MPIU_ERR_POP(mpi_errno); MPID_Request_release(request_ptr); } fn_exit: MPIU_DBG_MSG_D(PT2PT, TYPICAL, "OUT: errflag = %d", *errflag); MPIDI_FUNC_EXIT(MPID_STATE_MPIC_SSEND); return mpi_errno; fn_fail: /* --BEGIN ERROR HANDLING-- */ if (request_ptr) MPID_Request_release(request_ptr); if (mpi_errno && !*errflag) { if (MPIX_ERR_PROC_FAILED == MPIR_ERR_GET_CLASS(mpi_errno)) { *errflag = MPIR_ERR_PROC_FAILED; } else { *errflag = MPIR_ERR_OTHER; } } goto fn_exit; /* --END ERROR HANDLING-- */ }
int MPIC_Recv(void *buf, MPI_Aint count, MPI_Datatype datatype, int source, int tag, MPID_Comm *comm_ptr, MPI_Status *status, mpir_errflag_t *errflag) { int mpi_errno = MPI_SUCCESS; int context_id; MPI_Status mystatus; MPID_Request *request_ptr = NULL; MPIDI_STATE_DECL(MPID_STATE_MPIC_RECV); MPIDI_FUNC_ENTER(MPID_STATE_MPIC_RECV); MPIU_DBG_MSG_D(PT2PT, TYPICAL, "IN: errflag = %d", *errflag); MPIU_ERR_CHKANDJUMP1((count < 0), mpi_errno, MPI_ERR_COUNT, "**countneg", "**countneg %d", count); context_id = (comm_ptr->comm_kind == MPID_INTRACOMM) ? MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL; if (status == MPI_STATUS_IGNORE) status = &mystatus; mpi_errno = MPID_Recv(buf, count, datatype, source, tag, comm_ptr, context_id, status, &request_ptr); if (mpi_errno) MPIU_ERR_POP(mpi_errno); if (request_ptr) { mpi_errno = MPIC_Wait(request_ptr, errflag); if (mpi_errno != MPI_SUCCESS) MPIU_ERR_POP(mpi_errno); *status = request_ptr->status; mpi_errno = status->MPI_ERROR; MPID_Request_release(request_ptr); } else { MPIR_Process_status(status, errflag); MPIR_TAG_CLEAR_ERROR_BITS(status->MPI_TAG); } if (MPI_SUCCESS == MPIR_ERR_GET_CLASS(status->MPI_ERROR)) { MPIU_Assert(status->MPI_TAG == tag); } fn_exit: MPIU_DBG_MSG_D(PT2PT, TYPICAL, "OUT: errflag = %d", *errflag); MPIDI_FUNC_EXIT(MPID_STATE_MPIC_RECV); return mpi_errno; fn_fail: /* --BEGIN ERROR HANDLING-- */ if (request_ptr) MPID_Request_release(request_ptr); goto fn_exit; /* --END ERROR HANDLING-- */ }
/* MSGQUEUE lock is not held */ void MPIDI_Callback_process_trunc(pami_context_t context, MPID_Request *rreq, pami_recv_t *recv, const void *sndbuf) { rreq->status.MPI_ERROR = MPI_ERR_TRUNCATE; /* -------------------------------------------------------------- */ /* The data is already available, so we can just unpack it now. */ /* -------------------------------------------------------------- */ if (recv) { MPIDI_Request_setCA(rreq, MPIDI_CA_UNPACK_UEBUF_AND_COMPLETE); rreq->mpid.uebuflen = MPIR_STATUS_GET_COUNT(rreq->status); rreq->mpid.uebuf = MPIU_Malloc(MPIR_STATUS_GET_COUNT(rreq->status)); MPID_assert(rreq->mpid.uebuf != NULL); rreq->mpid.uebuf_malloc = mpiuMalloc; recv->addr = rreq->mpid.uebuf; } else { MPIDI_Request_setCA(rreq, MPIDI_CA_UNPACK_UEBUF_AND_COMPLETE); rreq->mpid.uebuflen = MPIR_STATUS_GET_COUNT(rreq->status); rreq->mpid.uebuf = (void*)sndbuf; MPIDI_RecvDoneCB(context, rreq, PAMI_SUCCESS); MPID_Request_release(rreq); } }
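/* For orientation: the MPI_ERR_TRUNCATE set above is what a user ultimately
   observes when the posted buffer is smaller than the incoming message. A
   minimal reproducer using only the public MPI API (trunc_demo is a
   hypothetical name; run with two ranks): */
#include <mpi.h>
#include <stdio.h>
int trunc_demo(void)
{
    int rank, data[4] = {0, 1, 2, 3}, small[2], err, err_class;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    if (rank == 0) {
        MPI_Send(data, 4, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Status status;
        /* receive buffer holds 2 ints, message carries 4: truncation */
        err = MPI_Recv(small, 2, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Error_class(err, &err_class);
        if (err_class == MPI_ERR_TRUNCATE)
            printf("message was truncated\n");
    }
    return 0;
}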
int MPID_nem_tcp_ckpt_continue_vc(MPIDI_VC_t *vc) { int mpi_errno = MPI_SUCCESS; MPID_PKT_DECL_CAST(upkt, MPIDI_nem_tcp_pkt_unpause_t, unpause_pkt); MPID_Request *unpause_req; MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_TCP_CKPT_CONTINUE_VC); MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_TCP_CKPT_CONTINUE_VC); unpause_pkt->type = MPIDI_NEM_PKT_NETMOD; unpause_pkt->subtype = MPIDI_NEM_TCP_PKT_UNPAUSE; mpi_errno = MPID_nem_tcp_iStartContigMsg_paused(vc, &upkt, sizeof(MPIDI_nem_tcp_pkt_unpause_t), NULL, 0, &unpause_req); if (mpi_errno) MPIU_ERR_POP(mpi_errno); if (unpause_req) { if (unpause_req->status.MPI_ERROR) MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail"); MPID_Request_release(unpause_req); if (mpi_errno) goto fn_fail; } fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_TCP_CKPT_CONTINUE_VC); return mpi_errno; fn_fail: goto fn_exit; }
int MPIDI_CH3_RecvRndv( MPIDI_VC_t * vc, MPID_Request *rreq ) { int mpi_errno = MPI_SUCCESS; /* A rendezvous request-to-send (RTS) message has arrived. We need to send a CTS message to the remote process. */ MPID_Request * cts_req; MPIDI_CH3_Pkt_t upkt; MPIDI_CH3_Pkt_rndv_clr_to_send_t * cts_pkt = &upkt.rndv_clr_to_send; MPIU_DBG_MSG(CH3_OTHER,VERBOSE, "rndv RTS in the request, sending rndv CTS"); MPIDI_Pkt_init(cts_pkt, MPIDI_CH3_PKT_RNDV_CLR_TO_SEND); cts_pkt->sender_req_id = rreq->dev.sender_req_id; cts_pkt->receiver_req_id = rreq->handle; MPIU_THREAD_CS_ENTER(CH3COMM,vc); mpi_errno = MPIU_CALL(MPIDI_CH3,iStartMsg(vc, cts_pkt, sizeof(*cts_pkt), &cts_req)); MPIU_THREAD_CS_EXIT(CH3COMM,vc); if (mpi_errno != MPI_SUCCESS) { MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**ch3|ctspkt"); } if (cts_req != NULL) { /* FIXME: Ideally we could specify that a req not be returned. This would avoid our having to decrement the reference count on a req we don't want/need. */ MPID_Request_release(cts_req); } fn_fail: return mpi_errno; }
int MPIDI_nem_ckpt_start(void) { int mpi_errno = MPI_SUCCESS; int i; MPIDI_STATE_DECL(MPID_STATE_MPIDI_NEM_CKPT_START); MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_NEM_CKPT_START); if (checkpointing) goto fn_exit; checkpointing = TRUE; marker_count = MPIDI_Process.my_pg->size - 1; /* We won't receive a marker from ourselves. */ ++current_wave; /* send markers to all other processes */ /* FIXME: we're only handling processes in our pg, so no dynamic connections */ for (i = 0; i < MPIDI_Process.my_pg->size; ++i) { MPID_Request *req; MPIDI_VC_t *vc; MPIDI_CH3I_VC *vc_ch; MPID_PKT_DECL_CAST(upkt, MPID_nem_pkt_ckpt_marker_t, ckpt_pkt); /* Don't send a marker to ourselves. */ if (i == MPIDI_Process.my_pg_rank) continue; MPIDI_PG_Get_vc_set_active(MPIDI_Process.my_pg, i, &vc); vc_ch = &vc->ch; MPIDI_Pkt_init(ckpt_pkt, MPIDI_NEM_PKT_CKPT_MARKER); ckpt_pkt->wave = current_wave; mpi_errno = MPIDI_CH3_iStartMsg(vc, ckpt_pkt, sizeof(*ckpt_pkt) /* size of the packet, not of the pointer */, &req); MPIU_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ckptpkt"); if (req != NULL) { MPIU_ERR_CHKANDJUMP(req->status.MPI_ERROR, mpi_errno, MPI_ERR_OTHER, "**ckptpkt"); MPID_Request_release(req); } if (!vc_ch->is_local) { mpi_errno = vc_ch->ckpt_pause_send_vc(vc); if (mpi_errno) MPIU_ERR_POP(mpi_errno); } } fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_NEM_CKPT_START); return mpi_errno; fn_fail: goto fn_exit; }
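/* Receive-side counterpart sketch (an assumption: this mirrors the
   marker-counting scheme implied by marker_count/current_wave above). Each
   marker arriving for the current wave decrements the count; at zero every
   peer has quiesced and the checkpoint itself can proceed.
   ckpt_marker_recv_sketch is a hypothetical name. */
static void ckpt_marker_recv_sketch(int wave)
{
    if (wave != current_wave)
        return;             /* stale marker from a previous wave */
    --marker_count;
    if (marker_count == 0) {
        /* all markers received: channels are drained, take the checkpoint */
    }
}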
/*@ MPIDI_CH3U_VC_SendClose - Initiate a close on a virtual connection Input Parameters: + vc - Virtual connection to close - i - rank of virtual connection within a process group (used for debugging) Notes: The current state of this connection must be either 'MPIDI_VC_STATE_ACTIVE' or 'MPIDI_VC_STATE_REMOTE_CLOSE'. @*/ int MPIDI_CH3U_VC_SendClose( MPIDI_VC_t *vc, int rank ) { MPIDI_CH3_Pkt_t upkt; MPIDI_CH3_Pkt_close_t * close_pkt = &upkt.close; MPID_Request * sreq; int mpi_errno = MPI_SUCCESS; MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_VC_SENDCLOSE); MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_VC_SENDCLOSE); MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex); MPIU_Assert( vc->state == MPIDI_VC_STATE_ACTIVE || vc->state == MPIDI_VC_STATE_REMOTE_CLOSE ); MPIDI_Pkt_init(close_pkt, MPIDI_CH3_PKT_CLOSE); close_pkt->ack = (vc->state == MPIDI_VC_STATE_ACTIVE) ? FALSE : TRUE; /* MT: this is not thread safe, the POBJ CS is scoped to the vc and * doesn't protect this global correctly */ MPIDI_Outstanding_close_ops += 1; MPIU_DBG_MSG_FMT(CH3_DISCONNECT,TYPICAL,(MPIU_DBG_FDEST, "sending close(%s) on vc (pg=%p) %p to rank %d, ops = %d", close_pkt->ack ? "TRUE" : "FALSE", vc->pg, vc, rank, MPIDI_Outstanding_close_ops)); /* * A close packet acknowledging this close request could be * received during iStartMsg, therefore the state must * be changed before the close packet is sent. */ if (vc->state == MPIDI_VC_STATE_ACTIVE) { MPIDI_CHANGE_VC_STATE(vc, LOCAL_CLOSE); } else { MPIU_Assert( vc->state == MPIDI_VC_STATE_REMOTE_CLOSE ); MPIDI_CHANGE_VC_STATE(vc, CLOSE_ACKED); } mpi_errno = MPIDI_CH3_iStartMsg(vc, close_pkt, sizeof(*close_pkt), &sreq); MPIR_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER, "**ch3|send_close_ack"); if (sreq != NULL) { /* There is still another reference being held by the channel. It will not be released until the pkt is actually sent. */ MPID_Request_release(sreq); } fn_exit: MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex); MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3U_VC_SENDCLOSE); return mpi_errno; fn_fail: goto fn_exit; }
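/* Illustrative caller sketch (an assumption, not verbatim MPICH code): during
   teardown, each connection that is still active, or that the remote side has
   already begun closing, gets a SendClose; the rank argument is only used for
   the debug message. close_all_vcs_sketch and the vct field access follow the
   usual process-group layout but should be treated as hypothetical. */
static int close_all_vcs_sketch(MPIDI_PG_t *pg)
{
    int i, mpi_errno = MPI_SUCCESS;
    for (i = 0; i < pg->size; i++) {
        MPIDI_VC_t *vc = &pg->vct[i];
        if (vc->state == MPIDI_VC_STATE_ACTIVE ||
            vc->state == MPIDI_VC_STATE_REMOTE_CLOSE) {
            mpi_errno = MPIDI_CH3U_VC_SendClose(vc, i);
            if (mpi_errno) break;
        }
    }
    return mpi_errno;
}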
int MPIDI_CH3_iStartRndvTransfer(MPIDI_VC_t * vc, MPID_Request * rreq) { MPIDI_CH3_Pkt_t upkt; MPIDI_CH3_Pkt_rndv_clr_to_send_t *cts_pkt = &upkt.rndv_clr_to_send; MPID_Request *cts_req; MPID_Seqnum_t seqnum; int mpi_errno = MPI_SUCCESS; MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_ISTARTRNDVTRANSFER); MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_ISTARTRNDVTRANSFER); #ifdef CKPT MPIDI_CH3I_CR_lock(); #endif MPIDI_Pkt_init(cts_pkt, MPIDI_CH3_PKT_RNDV_CLR_TO_SEND); if (rreq->dev.iov_count == 1 && rreq->dev.OnDataAvail == NULL) cts_pkt->recv_sz = rreq->dev.iov[0].MPID_IOV_LEN; else cts_pkt->recv_sz = rreq->dev.segment_size; cts_pkt->sender_req_id = rreq->dev.sender_req_id; cts_pkt->receiver_req_id = rreq->handle; MPIDI_VC_FAI_send_seqnum(vc, seqnum); MPIDI_Pkt_set_seqnum(cts_pkt, seqnum); mpi_errno = MPIDI_CH3_Prepare_rndv_cts(vc, cts_pkt, rreq); if (mpi_errno != MPI_SUCCESS) { mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**ch3|ctspkt", 0); goto fn_exit; } mpi_errno = MPIDI_CH3_iStartMsg(vc, cts_pkt, sizeof(*cts_pkt), &cts_req); /* --BEGIN ERROR HANDLING-- */ if (mpi_errno != MPI_SUCCESS) { mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**ch3|ctspkt", 0); goto fn_exit; } /* --END ERROR HANDLING-- */ if (cts_req != NULL) { MPID_Request_release(cts_req); } fn_exit: #ifdef CKPT MPIDI_CH3I_CR_unlock(); #endif MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_ISTARTRNDVTRANSFER); return mpi_errno; }
int MPIC_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag, void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm, MPI_Status *status) { MPID_Request *recv_req_ptr=NULL, *send_req_ptr=NULL; int mpi_errno, context_id; MPID_Comm *comm_ptr = NULL; MPIDI_STATE_DECL(MPID_STATE_MPIC_SENDRECV); MPIDI_PT2PT_FUNC_ENTER_BOTH(MPID_STATE_MPIC_SENDRECV); MPID_Comm_get_ptr( comm, comm_ptr ); context_id = (comm_ptr->comm_kind == MPID_INTRACOMM) ? MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL; mpi_errno = MPID_Irecv(recvbuf, recvcount, recvtype, source, recvtag, comm_ptr, context_id, &recv_req_ptr); if (mpi_errno) { MPIU_ERR_POP(mpi_errno); } mpi_errno = MPID_Isend(sendbuf, sendcount, sendtype, dest, sendtag, comm_ptr, context_id, &send_req_ptr); if (mpi_errno) { MPIU_ERR_POP(mpi_errno); } mpi_errno = MPIC_Wait(send_req_ptr); if (mpi_errno) { MPIU_ERR_POP(mpi_errno); } mpi_errno = MPIC_Wait(recv_req_ptr); if (mpi_errno) { MPIU_ERR_POPFATAL(mpi_errno); } if (status != MPI_STATUS_IGNORE) *status = recv_req_ptr->status; mpi_errno = recv_req_ptr->status.MPI_ERROR; MPID_Request_release(send_req_ptr); MPID_Request_release(recv_req_ptr); fn_fail: /* --BEGIN ERROR HANDLING-- */ MPIDI_PT2PT_FUNC_EXIT_BOTH(MPID_STATE_MPIC_SENDRECV); return mpi_errno; /* --END ERROR HANDLING-- */ }
int MPIC_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status) { int mpi_errno, context_id; MPID_Request *request_ptr=NULL; MPID_Comm *comm_ptr = NULL; MPIDI_STATE_DECL(MPID_STATE_MPIC_RECV); MPIDI_PT2PT_FUNC_ENTER_BACK(MPID_STATE_MPIC_RECV); MPID_Comm_get_ptr( comm, comm_ptr ); context_id = (comm_ptr->comm_kind == MPID_INTRACOMM) ? MPID_CONTEXT_INTRA_COLL : MPID_CONTEXT_INTER_COLL; mpi_errno = MPID_Recv(buf, count, datatype, source, tag, comm_ptr, context_id, status, &request_ptr); if (mpi_errno) { MPIU_ERR_POP(mpi_errno); } if (request_ptr) { mpi_errno = MPIC_Wait(request_ptr); if (mpi_errno == MPI_SUCCESS) { if (status != MPI_STATUS_IGNORE) { *status = request_ptr->status; } mpi_errno = request_ptr->status.MPI_ERROR; } else { MPIU_ERR_POP(mpi_errno); } MPID_Request_release(request_ptr); } fn_exit: MPIDI_PT2PT_FUNC_EXIT_BACK(MPID_STATE_MPIC_RECV); return mpi_errno; fn_fail: /* --BEGIN ERROR HANDLING-- */ if (request_ptr) { MPID_Request_release(request_ptr); } goto fn_exit; /* --END ERROR HANDLING-- */ }
/* MSGQUEUE lock is not held */ void MPIDI_Callback_process_userdefined_dt(pami_context_t context, const void * sndbuf, size_t sndlen, MPID_Request * rreq) { unsigned dt_contig, dt_size; MPID_Datatype *dt_ptr; MPI_Aint dt_true_lb; MPIDI_Datatype_get_info(rreq->mpid.userbufcount, rreq->mpid.datatype, dt_contig, dt_size, dt_ptr, dt_true_lb); /* ----------------------------- */ /* Test for truncated message. */ /* ----------------------------- */ if (unlikely(sndlen > dt_size)) { #if ASSERT_LEVEL > 0 MPIDI_Callback_process_trunc(context, rreq, NULL, sndbuf); return; #else sndlen = dt_size; #endif } /* These assertions verify that the fields do not need to be initialized. Remove them once they have not failed for a while. */ if (likely (dt_contig)) { MPID_assert(rreq->mpid.uebuf == NULL); MPID_assert(rreq->mpid.uebuflen == 0); void* rcvbuf = rreq->mpid.userbuf + dt_true_lb; memcpy(rcvbuf, sndbuf, sndlen); MPIDI_Request_complete(rreq); return; } MPIDI_Request_setCA(rreq, MPIDI_CA_UNPACK_UEBUF_AND_COMPLETE); rreq->mpid.uebuflen = sndlen; rreq->mpid.uebuf = (void*)sndbuf; MPIDI_RecvDoneCB(context, rreq, PAMI_SUCCESS); MPID_Request_release(rreq); }
/* Send a zero-sized message with eager synchronous. This is a temporary routine, as we may want to replace this with a counterpart to the Eager Short message */ int MPIDI_CH3_EagerSyncZero(MPID_Request **sreq_p, int rank, int tag, MPID_Comm * comm, int context_offset ) { int mpi_errno = MPI_SUCCESS; MPIDI_CH3_Pkt_t upkt; MPIDI_CH3_Pkt_eager_sync_send_t * const es_pkt = &upkt.eager_sync_send; MPIDI_VC_t * vc; MPID_Request *sreq = *sreq_p; #if defined(MPID_USE_SEQUENCE_NUMBERS) MPID_Seqnum_t seqnum; #endif MPIU_DBG_MSG(CH3_OTHER,VERBOSE,"sending zero length message"); /* MT FIXME what are the two operations we are waiting for? the send and * the sync response? */ MPID_cc_set(&sreq->cc, 2); MPIDI_Request_set_msg_type(sreq, MPIDI_REQUEST_EAGER_MSG); sreq->dev.OnDataAvail = 0; MPIDI_Pkt_init(es_pkt, MPIDI_CH3_PKT_EAGER_SYNC_SEND); es_pkt->match.parts.rank = comm->rank; es_pkt->match.parts.tag = tag; es_pkt->match.parts.context_id = comm->context_id + context_offset; es_pkt->sender_req_id = sreq->handle; es_pkt->data_sz = 0; MPIDI_Comm_get_vc_set_active(comm, rank, &vc); MPIDI_VC_FAI_send_seqnum(vc, seqnum); MPIDI_Pkt_set_seqnum(es_pkt, seqnum); MPIDI_Request_set_seqnum(sreq, seqnum); MPIU_DBG_MSGPKT(vc,tag,es_pkt->match.parts.context_id,rank,(MPIDI_msg_sz_t)0,"EagerSync0"); MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex); mpi_errno = MPIDI_CH3_iSend(vc, sreq, es_pkt, sizeof(*es_pkt)); MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex); /* --BEGIN ERROR HANDLING-- */ if (mpi_errno != MPI_SUCCESS) { MPID_Request_release(sreq); *sreq_p = NULL; MPIR_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**ch3|eagermsg"); } /* --END ERROR HANDLING-- */ fn_exit: return mpi_errno; fn_fail: goto fn_exit; }
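/* The MPID_cc_set(&sreq->cc, 2) above arms the request's completion counter
   for two events: local completion of the eager send and arrival of the
   remote sync ack. A stripped-down sketch of that counter discipline
   (demo_req_t/demo_event_done are illustrative, not the real MPID_cc API): */
typedef struct { int cc; } demo_req_t;
static void demo_event_done(demo_req_t *req)
{
    /* each event that finishes decrements the counter; the request only
       becomes complete when both the send and the ack have happened */
    if (--req->cc == 0) {
        /* wake waiters, release the request's reference, etc. */
    }
}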
static inline int MPID_Cancel_send_rsm(MPID_Request * sreq) { int flag; MPID_assert(sreq != NULL); /* ------------------------------------------------- */ /* Check if we already have a cancel request pending */ /* ------------------------------------------------- */ MPIDI_DCMF_Request_cancel_pending(sreq, &flag); if (flag) return MPI_SUCCESS; /* ------------------------------------ */ /* Try to cancel a send request to self */ /* ------------------------------------ */ if (MPID_Request_isSelf(sreq)) { int source = MPID_Request_getMatchRank(sreq); int tag = MPID_Request_getMatchTag (sreq); int context_id = MPID_Request_getMatchCtxt(sreq); MPID_Request * rreq = MPIDI_Recvq_FDUR(sreq, source, tag, context_id); if (rreq) { MPID_assert(rreq->partner_request == sreq); MPID_Request_release(rreq); sreq->status.cancelled = TRUE; sreq->cc = 0; } return MPI_SUCCESS; } else { if(!sreq->comm) return MPI_SUCCESS; MPID_Request_increment_cc(sreq); MPIDI_DCMF_postCancelReq(sreq); return MPI_SUCCESS; } }
static inline int request_complete_fastpath(MPI_Request *request, MPID_Request *request_ptr) { int mpi_errno = MPI_SUCCESS; MPIU_Assert(request_ptr->kind == MPID_REQUEST_SEND || request_ptr->kind == MPID_REQUEST_RECV); if (request_ptr->kind == MPID_REQUEST_SEND) { /* FIXME: are Ibsend requests added to the send queue? */ MPIR_SENDQ_FORGET(request_ptr); } /* the completion path for SEND and RECV is the same at this time, modulo * the SENDQ hook above */ mpi_errno = request_ptr->status.MPI_ERROR; MPID_Request_release(request_ptr); *request = MPI_REQUEST_NULL; /* avoid normal fn_exit/fn_fail jump pattern to reduce jumps and compiler confusion */ return mpi_errno; }
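/* Hypothetical caller sketch: an MPI_Wait-style routine can use the fast path
   above when the request is already complete, skipping the progress engine
   entirely. wait_sketch is an illustrative name. */
static int wait_sketch(MPI_Request *request, MPID_Request *request_ptr)
{
    if (MPID_Request_is_complete(request_ptr))
        return request_complete_fastpath(request, request_ptr);
    /* otherwise fall back to the blocking progress loop (not shown) */
    return MPI_SUCCESS;
}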
int MPID_Cancel_recv(MPID_Request * rreq) { MPIDI_STATE_DECL(MPID_STATE_MPID_CANCEL_RECV); MPIDI_FUNC_ENTER(MPID_STATE_MPID_CANCEL_RECV); MPIU_Assert(rreq->kind == MPID_REQUEST_RECV); #if defined (_OSU_PSM_) rreq->psm_flags |= PSM_RECV_CANCEL; if(psm_do_cancel(rreq) == MPI_SUCCESS) { *(rreq->cc_ptr) = 0; // MPID_Request_release(rreq); } goto fn_exit; #endif #if defined(_OSU_MVAPICH_) /* OSU-MPI2 requires extra step to finish rndv request */ MPIDI_CH3I_MRAILI_RREQ_RNDV_FINISH(rreq); #endif /* defined(_OSU_MVAPICH_) */ if (MPIDI_CH3U_Recvq_DP(rreq)) { MPIU_DBG_MSG_P(CH3_OTHER,VERBOSE, "request 0x%08x cancelled", rreq->handle); rreq->status.cancelled = TRUE; rreq->status.count = 0; MPID_REQUEST_SET_COMPLETED(rreq); MPID_Request_release(rreq); } else { MPIU_DBG_MSG_P(CH3_OTHER,VERBOSE, "request 0x%08x already matched, unable to cancel", rreq->handle); } fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_MPID_CANCEL_RECV); return MPI_SUCCESS; }
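/* User-visible counterpart, using only the public MPI API: cancelling a
   receive that has not yet matched completes the request with the cancelled
   bit set. cancel_demo is a hypothetical name. */
#include <mpi.h>
int cancel_demo(void)
{
    int buf, flag;
    MPI_Request req;
    MPI_Status status;
    MPI_Irecv(&buf, 1, MPI_INT, MPI_ANY_SOURCE, 99, MPI_COMM_WORLD, &req);
    MPI_Cancel(&req);
    MPI_Wait(&req, &status);            /* completes the cancelled request */
    MPI_Test_cancelled(&status, &flag); /* flag != 0 if the cancel took effect */
    return flag;
}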
int MPID_nem_mx_cancel_recv(MPIDI_VC_t *vc, MPID_Request *rreq) { mx_request_t *mx_request = NULL; mx_return_t ret; uint32_t result; int mpi_errno = MPI_SUCCESS; int handled = FALSE; mx_request = &(REQ_FIELD(rreq,mx_request)); /* FIXME this test is probably not correct with multiple netmods */ /* We need to know to which netmod a recv request actually "belongs" to */ if(mx_request != NULL) { ret = mx_cancel(MPID_nem_mx_local_endpoint,mx_request,&result); MPIU_ERR_CHKANDJUMP1(ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_cancel", "**mx_cancel %s", mx_strerror(ret)); if (result) { int found; rreq->status.cancelled = TRUE; found = MPIDI_CH3U_Recvq_DP(rreq); MPIU_Assert(found); rreq->status.count = 0; MPID_REQUEST_SET_COMPLETED(rreq); MPID_Request_release(rreq); } else { rreq->status.cancelled = FALSE; MPIU_DBG_MSG_P(CH3_OTHER,VERBOSE, "request 0x%08x already matched, unable to cancel", rreq->handle); } handled = TRUE; } fn_exit: return mpi_errno; fn_fail: goto fn_exit; }
static int check_terminating_vcs(void) { int mpi_errno = MPI_SUCCESS; MPIDI_STATE_DECL(MPID_STATE_CHECK_TERMINATING_VCS); MPIDI_FUNC_ENTER(MPID_STATE_CHECK_TERMINATING_VCS); while (!TERMQ_EMPTY() && MPID_Request_is_complete(TERMQ_HEAD()->req)) { vc_term_element_t *ep; TERMQ_DEQUEUE(&ep); MPID_Request_release(ep->req); mpi_errno = shm_connection_terminated(ep->vc); if (mpi_errno) MPIU_ERR_POP(mpi_errno); MPIU_Free(ep); } fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_CHECK_TERMINATING_VCS); return mpi_errno; fn_fail: goto fn_exit; }
/* * These routines are called when a receive matches an eager sync send */ int MPIDI_CH3_EagerSyncAck( MPIDI_VC_t *vc, MPID_Request *rreq ) { int mpi_errno = MPI_SUCCESS; MPIDI_CH3_Pkt_t upkt; MPIDI_CH3_Pkt_eager_sync_ack_t * const esa_pkt = &upkt.eager_sync_ack; MPID_Request * esa_req; MPIU_DBG_MSG(CH3_OTHER,VERBOSE,"sending eager sync ack"); MPIDI_Pkt_init(esa_pkt, MPIDI_CH3_PKT_EAGER_SYNC_ACK); esa_pkt->sender_req_id = rreq->dev.sender_req_id; MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex); mpi_errno = MPIDI_CH3_iStartMsg(vc, esa_pkt, sizeof(*esa_pkt), &esa_req); MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex); if (mpi_errno != MPI_SUCCESS) { MPIR_ERR_POP(mpi_errno); } if (esa_req != NULL) { MPID_Request_release(esa_req); } fn_fail: return mpi_errno; }
static inline void MPIDI_RecvShortCB(pami_context_t context, const void * _msginfo, const void * sndbuf, size_t sndlen, pami_endpoint_t sender, unsigned isSync) { MPID_assert(_msginfo != NULL); const MPIDI_MsgInfo *msginfo = (const MPIDI_MsgInfo *)_msginfo; MPID_Request * rreq = NULL; pami_task_t source; #if TOKEN_FLOW_CONTROL int rettoks=0; #endif /* -------------------- */ /* Match the request. */ /* -------------------- */ unsigned rank = msginfo->MPIrank; unsigned tag = msginfo->MPItag; unsigned context_id = msginfo->MPIctxt; MPIU_THREAD_CS_ENTER(MSGQUEUE,0); source = PAMIX_Endpoint_query(sender); MPIDI_Receive_tokens(msginfo,source); #ifndef OUT_OF_ORDER_HANDLING rreq = MPIDI_Recvq_FDP(rank, tag, context_id); #else rreq = MPIDI_Recvq_FDP(rank, source, tag, context_id, msginfo->MPIseqno); #endif /* Match not found */ if (unlikely(rreq == NULL)) { #if (MPIDI_STATISTICS) MPID_NSTAT(mpid_statp->earlyArrivals); #endif MPIU_THREAD_CS_EXIT(MSGQUEUE,0); MPID_Request *newreq = MPIDI_Request_create2(); MPID_assert(newreq != NULL); if (sndlen) { newreq->mpid.uebuflen = sndlen; if (!TOKEN_FLOW_CONTROL_ON) { newreq->mpid.uebuf = MPL_malloc(sndlen); newreq->mpid.uebuf_malloc = mpiuMalloc; } else { #if TOKEN_FLOW_CONTROL MPIU_THREAD_CS_ENTER(MSGQUEUE,0); newreq->mpid.uebuf = MPIDI_mm_alloc(sndlen); newreq->mpid.uebuf_malloc = mpidiBufMM; MPIU_THREAD_CS_EXIT(MSGQUEUE,0); #else MPID_assert_always(0); #endif } MPID_assert(newreq->mpid.uebuf != NULL); } MPIU_THREAD_CS_ENTER(MSGQUEUE,0); #ifndef OUT_OF_ORDER_HANDLING rreq = MPIDI_Recvq_FDP(rank, tag, context_id); #else rreq = MPIDI_Recvq_FDP(rank, PAMIX_Endpoint_query(sender), tag, context_id, msginfo->MPIseqno); #endif if (unlikely(rreq == NULL)) { MPIDI_Callback_process_unexp(newreq, context, msginfo, sndlen, sender, sndbuf, NULL, isSync); /* request is always complete now */ if (TOKEN_FLOW_CONTROL_ON && sndlen) { #if TOKEN_FLOW_CONTROL MPIDI_Token_cntr[source].unmatched++; #else MPID_assert_always(0); #endif } MPIU_THREAD_CS_EXIT(MSGQUEUE,0); MPID_Request_release(newreq); goto fn_exit_short; } else { MPIU_THREAD_CS_EXIT(MSGQUEUE,0); MPID_Request_discard(newreq); } } else { #if (MPIDI_STATISTICS) MPID_NSTAT(mpid_statp->earlyArrivalsMatched); #endif if (TOKEN_FLOW_CONTROL_ON && sndlen) { #if TOKEN_FLOW_CONTROL MPIDI_Update_rettoks(source); MPIDI_Must_return_tokens(context,source); #else MPID_assert_always(0); #endif } MPIU_THREAD_CS_EXIT(MSGQUEUE,0); } /* the receive queue processing has been completed and we found match*/ /* ---------------------- */ /* Copy in information. */ /* ---------------------- */ rreq->status.MPI_SOURCE = rank; rreq->status.MPI_TAG = tag; MPIR_STATUS_SET_COUNT(rreq->status, sndlen); MPIDI_Request_setCA (rreq, MPIDI_CA_COMPLETE); MPIDI_Request_cpyPeerRequestH(rreq, msginfo); MPIDI_Request_setSync (rreq, isSync); MPIDI_Request_setRzv (rreq, 0); /* ----------------------------- */ /* Request was already posted. */ /* ----------------------------- */ if (unlikely(isSync)) MPIDI_SyncAck_post(context, rreq, PAMIX_Endpoint_query(sender)); if (unlikely(HANDLE_GET_KIND(rreq->mpid.datatype) != HANDLE_KIND_BUILTIN)) { MPIDI_Callback_process_userdefined_dt(context, sndbuf, sndlen, rreq); goto fn_exit_short; } size_t dt_size = rreq->mpid.userbufcount * MPID_Datatype_get_basic_size(rreq->mpid.datatype); /* ----------------------------- */ /* Test for truncated message. 
*/ /* ----------------------------- */ if (unlikely(sndlen > dt_size)) { #if ASSERT_LEVEL > 0 MPIDI_Callback_process_trunc(context, rreq, NULL, sndbuf); goto fn_exit_short; #else sndlen = dt_size; #endif } MPID_assert(rreq->mpid.uebuf == NULL); MPID_assert(rreq->mpid.uebuflen == 0); void* rcvbuf = rreq->mpid.userbuf; if (sndlen > 0) { #if CUDA_AWARE_SUPPORT if(MPIDI_Process.cuda_aware_support_on && MPIDI_cuda_is_device_buf(rcvbuf)) { cudaError_t cudaerr = cudaMemcpy(rcvbuf, sndbuf, (size_t)sndlen, cudaMemcpyHostToDevice); } else #endif memcpy(rcvbuf, sndbuf, sndlen); } TRACE_SET_R_VAL(source,(rreq->mpid.idx),rlen,sndlen); TRACE_SET_R_BIT(source,(rreq->mpid.idx),fl.f.comp_in_HH); TRACE_SET_R_VAL(source,(rreq->mpid.idx),bufadd,rreq->mpid.userbuf); MPIDI_Request_complete(rreq); fn_exit_short: #ifdef OUT_OF_ORDER_HANDLING MPIU_THREAD_CS_ENTER(MSGQUEUE,0); if (MPIDI_In_cntr[source].n_OutOfOrderMsgs>0) { MPIDI_Recvq_process_out_of_order_msgs(source, context); } MPIU_THREAD_CS_EXIT(MSGQUEUE,0); #endif /* ---------------------------------------- */ /* Signal that the recv has been started. */ /* ---------------------------------------- */ MPIDI_Progress_signal(); }
/*@ MPI_Recv - Blocking receive for a message Output Parameters: + buf - initial address of receive buffer (choice) - status - status object (Status) Input Parameters: + count - maximum number of elements in receive buffer (integer) . datatype - datatype of each receive buffer element (handle) . source - rank of source (integer) . tag - message tag (integer) - comm - communicator (handle) Notes: The 'count' argument indicates the maximum length of a message; the actual length of the message can be determined with 'MPI_Get_count'. .N ThreadSafe .N Fortran .N FortranStatus .N Errors .N MPI_SUCCESS .N MPI_ERR_COMM .N MPI_ERR_TYPE .N MPI_ERR_COUNT .N MPI_ERR_TAG .N MPI_ERR_RANK @*/ int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status) { static const char FCNAME[] = "MPI_Recv"; int mpi_errno = MPI_SUCCESS; MPID_Comm *comm_ptr = NULL; MPID_Request * request_ptr = NULL; MPID_MPI_STATE_DECL(MPID_STATE_MPI_RECV); MPIR_ERRTEST_INITIALIZED_ORDIE(); MPIU_THREAD_CS_ENTER(ALLFUNC,); MPID_MPI_PT2PT_FUNC_ENTER_BACK(MPID_STATE_MPI_RECV); /* Validate handle parameters needing to be converted */ # ifdef HAVE_ERROR_CHECKING { MPID_BEGIN_ERROR_CHECKS; { MPIR_ERRTEST_COMM(comm, mpi_errno); /* NOTE: MPI_STATUS_IGNORE != NULL */ MPIR_ERRTEST_ARGNULL(status, "status", mpi_errno); } MPID_END_ERROR_CHECKS; } # endif /* HAVE_ERROR_CHECKING */ /* Convert MPI object handles to object pointers */ MPID_Comm_get_ptr( comm, comm_ptr ); /* Validate parameters if error checking is enabled */ # ifdef HAVE_ERROR_CHECKING { MPID_BEGIN_ERROR_CHECKS; { MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE ); if (mpi_errno) goto fn_fail; MPIR_ERRTEST_COUNT(count, mpi_errno); MPIR_ERRTEST_RECV_RANK(comm_ptr, source, mpi_errno); MPIR_ERRTEST_RECV_TAG(tag, mpi_errno); /* Validate datatype handle */ MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno); /* Validate datatype object */ if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) { MPID_Datatype *datatype_ptr = NULL; MPID_Datatype_get_ptr(datatype, datatype_ptr); MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno); if (mpi_errno) goto fn_fail; MPID_Datatype_committed_ptr(datatype_ptr, mpi_errno); if (mpi_errno) goto fn_fail; } /* Validate buffer */ MPIR_ERRTEST_USERBUFFER(buf,count,datatype,mpi_errno); } MPID_END_ERROR_CHECKS; } # endif /* HAVE_ERROR_CHECKING */ /* ... body of routine ... */ /* MT: Note that MPID_Recv may release the SINGLE_CS if it decides to block internally. 
MPID_Recv in that case will re-acquire the SINGLE_CS before returning */ mpi_errno = MPID_Recv(buf, count, datatype, source, tag, comm_ptr, MPID_CONTEXT_INTRA_PT2PT, status, &request_ptr); if (mpi_errno != MPI_SUCCESS) goto fn_fail; if (request_ptr == NULL) { goto fn_exit; } /* If a request was returned, then we need to block until the request is complete */ if (!MPID_Request_is_complete(request_ptr)) { MPID_Progress_state progress_state; MPID_Progress_start(&progress_state); while (!MPID_Request_is_complete(request_ptr)) { /* MT: Progress_wait may release the SINGLE_CS while it waits */ mpi_errno = MPID_Progress_wait(&progress_state); if (mpi_errno != MPI_SUCCESS) { /* --BEGIN ERROR HANDLING-- */ MPID_Progress_end(&progress_state); goto fn_fail; /* --END ERROR HANDLING-- */ } if (unlikely(MPIR_CVAR_ENABLE_FT && !MPID_Request_is_complete(request_ptr) && MPID_Request_is_anysource(request_ptr) && !MPID_Comm_AS_enabled(request_ptr->comm))) { /* --BEGIN ERROR HANDLING-- */ MPID_Cancel_recv(request_ptr); MPIR_STATUS_SET_CANCEL_BIT(request_ptr->status, FALSE); MPIU_ERR_SET(request_ptr->status.MPI_ERROR, MPIX_ERR_PROC_FAILED, "**proc_failed"); mpi_errno = request_ptr->status.MPI_ERROR; goto fn_fail; /* --END ERROR HANDLING-- */ } } MPID_Progress_end(&progress_state); } mpi_errno = request_ptr->status.MPI_ERROR; MPIR_Request_extract_status(request_ptr, status); MPID_Request_release(request_ptr); if (mpi_errno != MPI_SUCCESS) goto fn_fail; /* ... end of body of routine ... */ fn_exit: MPID_MPI_PT2PT_FUNC_EXIT_BACK(MPID_STATE_MPI_RECV); MPIU_THREAD_CS_EXIT(ALLFUNC,); return mpi_errno; fn_fail: /* --BEGIN ERROR HANDLING-- */ # ifdef HAVE_ERROR_CHECKING { mpi_errno = MPIR_Err_create_code( mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_recv", "**mpi_recv %p %d %D %i %t %C %p", buf, count, datatype, source, tag, comm, status); } # endif mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno ); goto fn_exit; /* --END ERROR HANDLING-- */ }
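/* Usage note for the man-page text above: 'count' is only an upper bound, and
   the actual number of received elements is recovered from the status. For
   example (recv_count_demo is a hypothetical name): */
#include <mpi.h>
int recv_count_demo(int source)
{
    double buf[100];
    MPI_Status status;
    int n;
    MPI_Recv(buf, 100, MPI_DOUBLE, source, 0, MPI_COMM_WORLD, &status);
    MPI_Get_count(&status, MPI_DOUBLE, &n); /* n = elements actually received */
    return n;
}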
int MPIDI_CH3I_RMA_Free_ops_before_completion(MPID_Win * win_ptr) { MPIDI_RMA_Op_t *curr_op = NULL; MPIDI_RMA_Target_t *curr_target = NULL; MPIDI_RMA_Op_t **op_list = NULL, **op_list_tail = NULL; int read_flag = 0; int i, made_progress = 0; int mpi_errno = MPI_SUCCESS; /* If we are in a free_ops_before_completion, the window must be holding * up resources. If it isn't, we are in the wrong window and * incorrectly entered this function. */ MPIU_ERR_CHKANDJUMP(win_ptr->non_empty_slots == 0, mpi_errno, MPI_ERR_OTHER, "**rmanoop"); /* make nonblocking progress once */ mpi_errno = MPIDI_CH3I_RMA_Make_progress_win(win_ptr, &made_progress); if (mpi_errno != MPI_SUCCESS) MPIU_ERR_POP(mpi_errno); if (win_ptr->states.access_state == MPIDI_RMA_FENCE_ISSUED || win_ptr->states.access_state == MPIDI_RMA_PSCW_ISSUED || win_ptr->states.access_state == MPIDI_RMA_LOCK_ALL_ISSUED) goto fn_exit; /* find targets that have operations */ for (i = 0; i < win_ptr->num_slots; i++) { if (win_ptr->slots[i].target_list != NULL) { curr_target = win_ptr->slots[i].target_list; while (curr_target != NULL) { if (curr_target->read_op_list != NULL || curr_target->write_op_list != NULL) { if (win_ptr->states.access_state == MPIDI_RMA_PER_TARGET || win_ptr->states.access_state == MPIDI_RMA_LOCK_ALL_CALLED) { if (curr_target->access_state == MPIDI_RMA_LOCK_GRANTED) break; } else { break; } } curr_target = curr_target->next; } if (curr_target != NULL) break; } } if (curr_target == NULL) goto fn_exit; /* After we do this, all following Win_flush_local * must do a Win_flush instead. */ curr_target->disable_flush_local = 1; if (curr_target->read_op_list != NULL) { op_list = &curr_target->read_op_list; op_list_tail = &curr_target->read_op_list_tail; read_flag = 1; } else { op_list = &curr_target->write_op_list; op_list_tail = &curr_target->write_op_list_tail; } /* free all ops in the list since we do not need to maintain them anymore */ for (curr_op = *op_list; curr_op != NULL;) { if (curr_op->reqs_size > 0) { MPIU_Assert(curr_op->reqs != NULL); for (i = 0; i < curr_op->reqs_size; i++) { if (curr_op->reqs[i] != NULL) { MPID_Request_release(curr_op->reqs[i]); curr_op->reqs[i] = NULL; win_ptr->active_req_cnt--; } } /* free req array in this op */ MPIU_Free(curr_op->reqs); curr_op->reqs = NULL; curr_op->reqs_size = 0; } MPL_LL_DELETE(*op_list, *op_list_tail, curr_op); MPIDI_CH3I_Win_op_free(win_ptr, curr_op); if (*op_list == NULL) { if (read_flag == 1) { op_list = &curr_target->write_op_list; op_list_tail = &curr_target->write_op_list_tail; /* advance the tail pointer too, not op_list twice */ read_flag = 0; } } curr_op = *op_list; } fn_exit: return mpi_errno; fn_fail: goto fn_exit; }
static inline int check_window_state(MPID_Win * win_ptr, int *made_progress) { MPID_Request *fence_req_ptr = NULL; int i, mpi_errno = MPI_SUCCESS; MPIDI_STATE_DECL(MPID_STATE_CHECK_WINDOW_STATE); MPIDI_RMA_FUNC_ENTER(MPID_STATE_CHECK_WINDOW_STATE); (*made_progress) = 0; switch (win_ptr->states.access_state) { case MPIDI_RMA_FENCE_ISSUED: MPID_Request_get_ptr(win_ptr->fence_sync_req, fence_req_ptr); if (MPID_Request_is_complete(fence_req_ptr)) { win_ptr->states.access_state = MPIDI_RMA_FENCE_GRANTED; MPID_Request_release(fence_req_ptr); win_ptr->fence_sync_req = MPI_REQUEST_NULL; num_active_issued_win--; MPIU_Assert(num_active_issued_win >= 0); (*made_progress) = 1; } break; case MPIDI_RMA_PSCW_ISSUED: if (win_ptr->start_req == NULL) { /* for MPI_MODE_NOCHECK and all targets on SHM, * we do not create PSCW requests on window. */ win_ptr->states.access_state = MPIDI_RMA_PSCW_GRANTED; num_active_issued_win--; MPIU_Assert(num_active_issued_win >= 0); (*made_progress) = 1; } else { for (i = 0; i < win_ptr->start_grp_size; i++) { MPID_Request *start_req_ptr = NULL; if (win_ptr->start_req[i] == MPI_REQUEST_NULL) continue; MPID_Request_get_ptr(win_ptr->start_req[i], start_req_ptr); if (MPID_Request_is_complete(start_req_ptr)) { MPID_Request_release(start_req_ptr); win_ptr->start_req[i] = MPI_REQUEST_NULL; } else { break; } } if (i == win_ptr->start_grp_size) { win_ptr->states.access_state = MPIDI_RMA_PSCW_GRANTED; num_active_issued_win--; MPIU_Assert(num_active_issued_win >= 0); (*made_progress) = 1; MPIU_Free(win_ptr->start_req); win_ptr->start_req = NULL; } } break; case MPIDI_RMA_LOCK_ALL_ISSUED: if (win_ptr->outstanding_locks == 0) { win_ptr->states.access_state = MPIDI_RMA_LOCK_ALL_GRANTED; (*made_progress) = 1; } break; default: break; } /* end of switch */ fn_exit: MPIDI_RMA_FUNC_EXIT(MPID_STATE_CHECK_WINDOW_STATE); return mpi_errno; /* --BEGIN ERROR HANDLING-- */ fn_fail: goto fn_exit; /* --END ERROR HANDLING-- */ }
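/* For reference, the MPIDI_RMA_PSCW_ISSUED state handled above is driven by a
   user-level post-start-complete-wait epoch; a minimal sketch of the access
   side (pscw_demo is a hypothetical name): */
#include <mpi.h>
void pscw_demo(MPI_Win win, MPI_Group targets, int target_rank, int *buf)
{
    MPI_Win_start(targets, 0, win);  /* may leave the window in PSCW_ISSUED */
    MPI_Put(buf, 1, MPI_INT, target_rank, 0, 1, MPI_INT, win);
    MPI_Win_complete(win);           /* waits until the epoch can be closed */
    /* the exposure side calls MPI_Win_post(...) and MPI_Win_wait(win) */
}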
int MPIDI_CH3_Connect_to_root(const char* port_name, MPIDI_VC_t** new_vc) { int mpi_errno = MPI_SUCCESS; int str_errno; char ifname[MAX_HOST_DESCRIPTION_LEN]; MPIDI_VC_t *vc; MPIDI_CH3_Pkt_cm_establish_t pkt; MPID_Request * sreq; int seqnum; MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_CONNECT_TO_ROOT); MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_CONNECT_TO_ROOT); *new_vc = NULL; if (!MPIDI_CH3I_Process.has_dpm) return MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**notimpl", 0); str_errno = MPIU_Str_get_string_arg(port_name, MPIDI_CH3I_HOST_DESCRIPTION_KEY, ifname, MAX_HOST_DESCRIPTION_LEN); if (str_errno != MPIU_STR_SUCCESS) { /* --BEGIN ERROR HANDLING-- */ if (str_errno == MPIU_STR_FAIL) { MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**argstr_missinghost"); } else { /* MPIU_STR_TRUNCATED or MPIU_STR_NONEM */ MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd"); } /* --END ERROR HANDLING-- */ } vc = MPIU_Malloc(sizeof(MPIDI_VC_t)); if (!vc) { MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**nomem"); } MPIDI_VC_Init(vc, NULL, 0); mpi_errno = MPIDI_CH3I_CM_Connect_raw_vc(vc, ifname); if (mpi_errno) { MPIU_ERR_POP(mpi_errno); } while (vc->ch.state != MPIDI_CH3I_VC_STATE_IDLE) { mpi_errno = MPID_Progress_test(); /* --BEGIN ERROR HANDLING-- */ if (mpi_errno != MPI_SUCCESS) { MPIU_ERR_POP(mpi_errno); } /* --END ERROR HANDLING-- */ } /* fprintf(stderr, "[###] vc state to idle, now send cm_establish msg\n") */ /* Now that a connection is established, send a cm_establish message */ /* FIXME: vc->mrail.remote_vc_addr is used to find the remote vc. A more elegant way is needed */ MPIDI_Pkt_init(&pkt, MPIDI_CH3_PKT_CM_ESTABLISH); MPIDI_VC_FAI_send_seqnum(vc, seqnum); MPIDI_Pkt_set_seqnum(&pkt, seqnum); pkt.vc_addr = vc->mrail.remote_vc_addr; mpi_errno = MPIDI_GetTagFromPort(port_name, &pkt.port_name_tag); if (mpi_errno != MPI_SUCCESS) { MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_port_name_tag"); } mpi_errno = MPIDI_CH3_iStartMsg(vc, &pkt, sizeof(pkt), &sreq); if (mpi_errno != MPI_SUCCESS) { MPIU_ERR_SETANDJUMP1(mpi_errno,MPI_ERR_OTHER,"**fail", "**fail %s", "Failed to send cm establish message"); } if (sreq != NULL) { if (sreq->status.MPI_ERROR != MPI_SUCCESS) { mpi_errno = MPIR_Err_create_code(sreq->status.MPI_ERROR, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**fail", 0); MPID_Request_release(sreq); goto fn_fail; } MPID_Request_release(sreq); } *new_vc = vc; fn_fail: MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_CONNECT_TO_ROOT); return mpi_errno; }
int MPID_nem_ptl_improbe(MPIDI_VC_t *vc, int source, int tag, MPID_Comm *comm, int context_offset, int *flag, MPID_Request **message, MPI_Status *status) { int mpi_errno = MPI_SUCCESS; MPID_nem_ptl_vc_area *const vc_ptl = VC_PTL(vc); int ret; ptl_process_t id_any; ptl_me_t me; MPID_Request *req; MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_PTL_IMPROBE); MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_PTL_IMPROBE); id_any.phys.nid = PTL_NID_ANY; id_any.phys.pid = PTL_PID_ANY; /* create a request; check the allocation before touching it */ req = MPID_Request_create(); MPIR_ERR_CHKANDJUMP1(!req, mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Request_create"); MPID_nem_ptl_init_req(req); MPIU_Object_set_ref(req, 2); /* 1 ref for progress engine and 1 ref for us */ REQ_PTL(req)->event_handler = handle_mprobe; req->kind = MPID_REQUEST_MPROBE; /* create a dummy ME to use for searching the list */ me.start = NULL; me.length = 0; me.ct_handle = PTL_CT_NONE; me.uid = PTL_UID_ANY; me.options = ( PTL_ME_OP_PUT | PTL_ME_USE_ONCE ); me.min_free = 0; me.match_bits = NPTL_MATCH(tag, comm->context_id + context_offset, source); if (source == MPI_ANY_SOURCE) me.match_id = id_any; else { if (!vc_ptl->id_initialized) { mpi_errno = MPID_nem_ptl_init_id(vc); if (mpi_errno) MPIR_ERR_POP(mpi_errno); } me.match_id = vc_ptl->id; } if (tag == MPI_ANY_TAG) me.ignore_bits = NPTL_MATCH_IGNORE_ANY_TAG; else me.ignore_bits = NPTL_MATCH_IGNORE; /* submit a search request */ ret = PtlMESearch(MPIDI_nem_ptl_ni, MPIDI_nem_ptl_pt, &me, PTL_SEARCH_DELETE, req); MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlmesearch", "**ptlmesearch %s", MPID_nem_ptl_strerror(ret)); DBG_MSG_MESearch("REG", vc ? vc->pg_rank : 0, me, req); /* wait for search request to complete */ do { mpi_errno = MPID_nem_ptl_poll(FALSE); if (mpi_errno) MPIR_ERR_POP(mpi_errno); } while (!MPID_Request_is_complete(req)); *flag = REQ_PTL(req)->found; if (*flag) { req->comm = comm; MPIR_Comm_add_ref(comm); MPIR_Request_extract_status(req, status); *message = req; } else { MPID_Request_release(req); } fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_PTL_IMPROBE); return mpi_errno; fn_fail: goto fn_exit; }
int MPID_Irsend(const void * buf, int count, MPI_Datatype datatype, int rank, int tag, MPID_Comm * comm, int context_offset, MPID_Request ** request) { MPIDI_CH3_Pkt_t upkt; MPIDI_CH3_Pkt_ready_send_t * const ready_pkt = &upkt.ready_send; MPIDI_msg_sz_t data_sz; int dt_contig; MPI_Aint dt_true_lb; MPID_Datatype * dt_ptr; MPID_Request * sreq; MPIDI_VC_t * vc; #if defined(MPID_USE_SEQUENCE_NUMBERS) MPID_Seqnum_t seqnum; #endif int mpi_errno = MPI_SUCCESS; MPIDI_STATE_DECL(MPID_STATE_MPID_IRSEND); MPIDI_FUNC_ENTER(MPID_STATE_MPID_IRSEND); MPIU_DBG_MSG_FMT(CH3_OTHER,VERBOSE,(MPIU_DBG_FDEST, "rank=%d, tag=%d, context=%d", rank, tag, comm->context_id + context_offset)); /* Check to make sure the communicator hasn't already been revoked */ if (comm->revoked && MPIR_AGREE_TAG != MPIR_TAG_MASK_ERROR_BITS(tag & ~MPIR_Process.tagged_coll_mask) && MPIR_SHRINK_TAG != MPIR_TAG_MASK_ERROR_BITS(tag & ~MPIR_Process.tagged_coll_mask)) { MPIR_ERR_SETANDJUMP(mpi_errno,MPIX_ERR_REVOKED,"**revoked"); } if (rank == comm->rank && comm->comm_kind != MPID_INTERCOMM) { mpi_errno = MPIDI_Isend_self(buf, count, datatype, rank, tag, comm, context_offset, MPIDI_REQUEST_TYPE_RSEND, &sreq); goto fn_exit; } if (rank != MPI_PROC_NULL) { MPIDI_Comm_get_vc_set_active(comm, rank, &vc); #ifdef ENABLE_COMM_OVERRIDES /* this needs to come before the sreq is created, since the override * function is responsible for creating its own request */ if (vc->comm_ops && vc->comm_ops->irsend) { mpi_errno = vc->comm_ops->irsend( vc, buf, count, datatype, rank, tag, comm, context_offset, &sreq); goto fn_exit; } #endif } MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit); MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_RSEND); MPIDI_Request_set_msg_type(sreq, MPIDI_REQUEST_EAGER_MSG); if (rank == MPI_PROC_NULL) { MPIU_Object_set_ref(sreq, 1); MPID_cc_set(&sreq->cc, 0); goto fn_exit; } MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb); MPIDI_Pkt_init(ready_pkt, MPIDI_CH3_PKT_READY_SEND); ready_pkt->match.parts.rank = comm->rank; ready_pkt->match.parts.tag = tag; ready_pkt->match.parts.context_id = comm->context_id + context_offset; ready_pkt->sender_req_id = MPI_REQUEST_NULL; ready_pkt->data_sz = data_sz; if (data_sz == 0) { MPIU_DBG_MSG(CH3_OTHER,VERBOSE,"sending zero length message"); sreq->dev.OnDataAvail = 0; MPIDI_VC_FAI_send_seqnum(vc, seqnum); MPIDI_Pkt_set_seqnum(ready_pkt, seqnum); MPIDI_Request_set_seqnum(sreq, seqnum); MPID_THREAD_CS_ENTER(POBJ, vc->pobj_mutex); mpi_errno = MPIDI_CH3_iSend(vc, sreq, ready_pkt, sizeof(*ready_pkt)); MPID_THREAD_CS_EXIT(POBJ, vc->pobj_mutex); /* --BEGIN ERROR HANDLING-- */ if (mpi_errno != MPI_SUCCESS) { MPID_Request_release(sreq); sreq = NULL; MPIR_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**ch3|eagermsg"); goto fn_exit; } /* --END ERROR HANDLING-- */ goto fn_exit; } if (vc->ready_eager_max_msg_sz < 0 || data_sz + sizeof(MPIDI_CH3_Pkt_ready_send_t) <= vc->ready_eager_max_msg_sz) { if (dt_contig) { mpi_errno = MPIDI_CH3_EagerContigIsend( &sreq, MPIDI_CH3_PKT_READY_SEND, (char*)buf + dt_true_lb, data_sz, rank, tag, comm, context_offset ); } else { mpi_errno = MPIDI_CH3_EagerNoncontigSend( &sreq, MPIDI_CH3_PKT_READY_SEND, buf, count, datatype, data_sz, rank, tag, comm, context_offset ); /* If we're not complete, then add a reference to the datatype */ if (sreq && sreq->dev.OnDataAvail) { sreq->dev.datatype_ptr = dt_ptr; MPID_Datatype_add_ref(dt_ptr); } } } else { /* Do rendezvous. 
This will be sent as a regular send not as a ready send, so the receiver won't know to send an error if the receive has not been posted */ MPIDI_Request_set_msg_type( sreq, MPIDI_REQUEST_RNDV_MSG ); mpi_errno = vc->rndvSend_fn( &sreq, buf, count, datatype, dt_contig, data_sz, dt_true_lb, rank, tag, comm, context_offset ); if (sreq && dt_ptr != NULL) { sreq->dev.datatype_ptr = dt_ptr; MPID_Datatype_add_ref(dt_ptr); } } fn_exit: *request = sreq; MPIU_DBG_STMT(CH3_OTHER,VERBOSE,{ if (sreq != NULL) { MPIU_DBG_MSG_P(CH3_OTHER,VERBOSE,"request allocated, handle=0x%08x", sreq->handle); } } ); return mpi_errno; fn_fail: goto fn_exit; }
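/* The ready-send contract referenced above, at the user level: the matching
   receive must already be posted when the ready send starts (here enforced
   with a barrier), otherwise the program is erroneous. irsend_demo is a
   hypothetical name; run with two ranks. */
#include <mpi.h>
void irsend_demo(int rank, int *buf, int n)
{
    MPI_Request req = MPI_REQUEST_NULL;
    if (rank == 1)
        MPI_Irecv(buf, n, MPI_INT, 0, 7, MPI_COMM_WORLD, &req);
    MPI_Barrier(MPI_COMM_WORLD);     /* guarantees the receive is posted */
    if (rank == 0)
        MPI_Irsend(buf, n, MPI_INT, 1, 7, MPI_COMM_WORLD, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}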
static int MPIDI_CH3_SMP_Rendezvous_push(MPIDI_VC_t * vc, MPID_Request * sreq) { int nb; int complete = 0; int seqnum; int mpi_errno; MPIDI_CH3_Pkt_rndv_r3_data_t pkt_head; MPID_Request * send_req; MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_SMP_RNDV_PUSH); MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_SMP_RNDV_PUSH); MPIDI_Pkt_init(&pkt_head, MPIDI_CH3_PKT_RNDV_R3_DATA); pkt_head.receiver_req_id = sreq->mrail.partner_id; MPIDI_VC_FAI_send_seqnum(vc, seqnum); MPIDI_Pkt_set_seqnum(&pkt_head, seqnum); MPIDI_Request_set_seqnum(sreq, seqnum); #if defined(_SMP_LIMIC_) /* Use limic2 for contiguous data * Use shared memory for non-contiguous data */ if (!g_smp_use_limic2 || sreq->dev.OnDataAvail == MPIDI_CH3_ReqHandler_SendReloadIOV || sreq->dev.iov_count > 1) { pkt_head.send_req_id = NULL; } else { pkt_head.send_req_id = sreq; } #endif mpi_errno = MPIDI_CH3_iStartMsg(vc, &pkt_head, sizeof(MPIDI_CH3_Pkt_rndv_r3_data_t), &send_req); /* --BEGIN ERROR HANDLING-- */ if (mpi_errno != MPI_SUCCESS) { MPIU_Object_set_ref(sreq, 0); MPIDI_CH3_Request_destroy(sreq); sreq = NULL; mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**ch3|rtspkt", 0); return mpi_errno; } /* --END ERROR HANDLING-- */ if (send_req != NULL) { DEBUG_PRINT("r3 packet not sent \n"); MPID_Request_release(send_req); } #if defined(_SMP_LIMIC_) if (pkt_head.send_req_id) { sreq->mrail.nearly_complete = 1; return MPI_SUCCESS; } #endif vc->smp.send_current_pkt_type = SMP_RNDV_MSG; DEBUG_PRINT("r3 sent req is %p\n", sreq); if (MPIDI_CH3I_SMP_SendQ_empty(vc)) { for (;;) { DEBUG_PRINT("iov count (sreq): %d, offset %d, len[0] %d\n", sreq->dev.iov_count, sreq->dev.iov_offset, sreq->dev.iov[0].MPID_IOV_LEN); if (vc->smp.send_current_pkt_type == SMP_RNDV_MSG) { mpi_errno = MPIDI_CH3I_SMP_writev_rndv_data(vc, &sreq->dev.iov[sreq->dev.iov_offset], sreq->dev.iov_count - sreq->dev.iov_offset, &nb); } else { MPIU_Assert(vc->smp.send_current_pkt_type == SMP_RNDV_MSG_CONT); MPIDI_CH3I_SMP_writev_rndv_data_cont(vc, &sreq->dev.iov[sreq->dev.iov_offset], sreq->dev.iov_count - sreq->dev.iov_offset, &nb); } if (MPI_SUCCESS != mpi_errno) { vc->ch.state = MPIDI_CH3I_VC_STATE_FAILED; sreq->status.MPI_ERROR = MPI_ERR_INTERN; MPIDI_CH3U_Request_complete(sreq); return mpi_errno; } if (nb >= 0) { if (MPIDI_CH3I_Request_adjust_iov(sreq, nb)) { MPIDI_CH3U_Handle_send_req(vc, sreq, &complete); if (complete) { sreq->mrail.nearly_complete = 1; break; } else { vc->smp.send_current_pkt_type = SMP_RNDV_MSG_CONT; } } else { sreq->ch.reqtype = REQUEST_RNDV_R3_DATA; MPIDI_CH3I_SMP_SendQ_enqueue_head(vc, sreq); vc->smp.send_active = sreq; sreq->mrail.nearly_complete = 1; vc->smp.send_current_pkt_type = SMP_RNDV_MSG_CONT; break; } } else { MPIDI_CH3I_SMP_SendQ_enqueue_head(vc, sreq); vc->smp.send_active = sreq; sreq->mrail.nearly_complete = 1; break; } } } else { sreq->ch.reqtype = REQUEST_RNDV_R3_DATA; MPIDI_CH3I_SMP_SendQ_enqueue(vc, sreq); sreq->mrail.nearly_complete = 1; vc->smp.send_current_pkt_type = SMP_RNDV_MSG; DEBUG_PRINT("Enqueue sreq %p", sreq); } MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_SMP_RNDV_PUSH); return MPI_SUCCESS; }
int MPID_nem_ptl_pkt_cancel_send_req_handler(MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt, MPIDI_msg_sz_t *buflen, MPID_Request **rreqp) { int ret, mpi_errno = MPI_SUCCESS; MPIDI_nem_ptl_pkt_cancel_send_req_t *req_pkt = (MPIDI_nem_ptl_pkt_cancel_send_req_t *)pkt; MPID_PKT_DECL_CAST(upkt, MPIDI_nem_ptl_pkt_cancel_send_resp_t, resp_pkt); MPID_Request *search_req, *resp_req; ptl_me_t me; MPID_nem_ptl_vc_area *const vc_ptl = VC_PTL(vc); MPIU_DBG_MSG_FMT(CH3_OTHER,VERBOSE,(MPIU_DBG_FDEST, "received cancel send req pkt, sreq=0x%08x, rank=%d, tag=%d, context=%d", req_pkt->sender_req_id, req_pkt->match.parts.rank, req_pkt->match.parts.tag, req_pkt->match.parts.context_id)); /* create a dummy request and search for the message; check the allocation before initializing the request */ search_req = MPID_Request_create(); MPIR_ERR_CHKANDJUMP1(!search_req, mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Request_create"); MPID_nem_ptl_init_req(search_req); MPIU_Object_set_ref(search_req, 2); /* 1 ref for progress engine and 1 ref for us */ search_req->kind = MPID_REQUEST_MPROBE; /* create a dummy ME to use for searching the list */ me.start = NULL; me.length = 0; me.ct_handle = PTL_CT_NONE; me.uid = PTL_UID_ANY; me.options = ( PTL_ME_OP_PUT | PTL_ME_USE_ONCE ); me.min_free = 0; me.match_bits = NPTL_MATCH(req_pkt->match.parts.tag, req_pkt->match.parts.context_id, req_pkt->match.parts.rank); me.match_id = vc_ptl->id; me.ignore_bits = NPTL_MATCH_IGNORE; /* FIXME: this should use a custom handler that throws the data away inline */ REQ_PTL(search_req)->event_handler = handle_mprobe; /* submit a search request */ ret = PtlMESearch(MPIDI_nem_ptl_ni, MPIDI_nem_ptl_pt, &me, PTL_SEARCH_DELETE, search_req); MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlmesearch", "**ptlmesearch %s", MPID_nem_ptl_strerror(ret)); DBG_MSG_MESearch("REG", vc ? vc->pg_rank : 0, me, search_req); /* wait for search request to complete */ do { mpi_errno = MPID_nem_ptl_poll(FALSE); if (mpi_errno) MPIR_ERR_POP(mpi_errno); } while (!MPID_Request_is_complete(search_req)); /* send response; check the send so resp_req is not used uninitialized on failure */ resp_pkt->type = MPIDI_NEM_PKT_NETMOD; resp_pkt->subtype = MPIDI_NEM_PTL_PKT_CANCEL_SEND_RESP; resp_pkt->ack = REQ_PTL(search_req)->found; resp_pkt->sender_req_id = req_pkt->sender_req_id; mpi_errno = MPID_nem_ptl_iStartContigMsg(vc, resp_pkt, sizeof(*resp_pkt), NULL, 0, &resp_req); if (mpi_errno) MPIR_ERR_POP(mpi_errno); /* if the message was found, free the temporary buffer used to copy the data */ if (REQ_PTL(search_req)->found) MPIU_Free(search_req->dev.tmpbuf); MPID_Request_release(search_req); if (resp_req != NULL) MPID_Request_release(resp_req); fn_exit: return mpi_errno; fn_fail: goto fn_exit; }