/* MPIDI_CH3_PktPrint_RndvClrToSend - dump the fields of a rendezvous
 * clear-to-send packet for debugging.  The fp argument is part of the
 * common packet-print signature but is not used; all output goes
 * through the MPIU_DBG_PRINTF debug macros. */
int MPIDI_CH3_PktPrint_RndvClrToSend( FILE *fp, MPIDI_CH3_Pkt_t *pkt )
{
    (void) fp;  /* unused; kept for the common packet-print signature */

    MPIU_DBG_PRINTF((" type ......... CLR_TO_SEND\n"));
    MPIU_DBG_PRINTF((" sender_reqid . 0x%08X\n",
                     pkt->rndv_clr_to_send.sender_req_id));
    MPIU_DBG_PRINTF((" recvr_reqid .. 0x%08X\n",
                     pkt->rndv_clr_to_send.receiver_req_id));
    return MPI_SUCCESS;
}
/* MPIDI_CH3_PktPrint_RndvReqToSend - dump the fields of a rendezvous
 * request-to-send packet for debugging.
 *
 * fp is part of the common packet-print signature but unused here;
 * output goes through the MPIU_DBG_PRINTF debug macros.
 * Always returns MPI_SUCCESS.
 */
int MPIDI_CH3_PktPrint_RndvReqToSend( FILE *fp, MPIDI_CH3_Pkt_t *pkt )
{
    MPIU_DBG_PRINTF((" type ......... REQ_TO_SEND\n"));
    MPIU_DBG_PRINTF((" sender_reqid . 0x%08X\n", pkt->rndv_req_to_send.sender_req_id));
    MPIU_DBG_PRINTF((" context_id ... %d\n", pkt->rndv_req_to_send.match.parts.context_id));
    MPIU_DBG_PRINTF((" tag .......... %d\n", pkt->rndv_req_to_send.match.parts.tag));
    MPIU_DBG_PRINTF((" rank ......... %d\n", pkt->rndv_req_to_send.match.parts.rank));
    /* data_sz is an MPIDI_msg_sz_t, which may be wider than int on some
       builds; cast explicitly so the argument matches the %d conversion
       (a mismatched printf specifier is undefined behavior).  The same
       cast idiom is used for the dbg printf in MPID_Type_commit. */
    MPIU_DBG_PRINTF((" data_sz ...... %d\n", (int) pkt->rndv_req_to_send.data_sz));
#ifdef MPID_USE_SEQUENCE_NUMBERS
    MPIU_DBG_PRINTF((" seqnum ....... %d\n", pkt->rndv_req_to_send.seqnum));
#endif
    return MPI_SUCCESS;
}
/* MPIDI_CH3_PktPrint_EagerSyncSend - dump the fields of an eager
 * synchronous-send packet for debugging.
 *
 * fp is part of the common packet-print signature but unused here;
 * output goes through the MPIU_DBG_PRINTF debug macros.
 * Always returns MPI_SUCCESS.
 */
int MPIDI_CH3_PktPrint_EagerSyncSend( FILE *fp, MPIDI_CH3_Pkt_t *pkt )
{
    MPIU_DBG_PRINTF((" type ......... EAGER_SYNC_SEND\n"));
    MPIU_DBG_PRINTF((" sender_reqid . 0x%08X\n", pkt->eager_sync_send.sender_req_id));
    MPIU_DBG_PRINTF((" context_id ... %d\n", pkt->eager_sync_send.match.parts.context_id));
    MPIU_DBG_PRINTF((" tag .......... %d\n", pkt->eager_sync_send.match.parts.tag));
    MPIU_DBG_PRINTF((" rank ......... %d\n", pkt->eager_sync_send.match.parts.rank));
    /* data_sz is an MPIDI_msg_sz_t, which may be wider than int on some
       builds; cast explicitly so the argument matches the %d conversion
       (a mismatched printf specifier is undefined behavior). */
    MPIU_DBG_PRINTF((" data_sz ...... %d\n", (int) pkt->eager_sync_send.data_sz));
#ifdef MPID_USE_SEQUENCE_NUMBERS
    MPIU_DBG_PRINTF((" seqnum ....... %d\n", pkt->eager_sync_send.seqnum));
#endif
    return MPI_SUCCESS;
}
/*@
  MPID_Type_commit - mark a non-builtin MPI datatype as committed,
  building the dataloop representations used to process it.

Input Parameters:
. datatype_p - pointer to MPI datatype

Output Parameters:

Return Value: 0 on success, -1 on failure.
@*/
int MPID_Type_commit(MPI_Datatype *datatype_p)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Datatype *datatype_ptr;

    /* Built-in types are permanently committed; reaching here with one
       is a caller error. */
    MPIU_Assert(HANDLE_GET_KIND(*datatype_p) != HANDLE_KIND_BUILTIN);

    MPID_Datatype_get_ptr(*datatype_p, datatype_ptr);

    /* Commit is idempotent: only the first commit builds dataloops. */
    if (datatype_ptr->is_committed) {
        return mpi_errno;
    }
    datatype_ptr->is_committed = 1;

#ifdef MPID_NEEDS_DLOOP_ALL_BYTES
    /* If MPID implementation needs use to reduce everything to a byte
       stream, do that. */
    MPID_Dataloop_create(*datatype_p,
                         &datatype_ptr->dataloop,
                         &datatype_ptr->dataloop_size,
                         &datatype_ptr->dataloop_depth,
                         MPID_DATALOOP_ALL_BYTES);
#else
    MPID_Dataloop_create(*datatype_p,
                         &datatype_ptr->dataloop,
                         &datatype_ptr->dataloop_size,
                         &datatype_ptr->dataloop_depth,
                         MPID_DATALOOP_HOMOGENEOUS);
#endif

    /* create heterogeneous dataloop */
    MPID_Dataloop_create(*datatype_p,
                         &datatype_ptr->hetero_dloop,
                         &datatype_ptr->hetero_dloop_size,
                         &datatype_ptr->hetero_dloop_depth,
                         MPID_DATALOOP_HETEROGENEOUS);

    MPIU_DBG_PRINTF(("# contig blocks = %d\n",
                     (int) datatype_ptr->max_contig_blocks));

#if 0
    MPIDI_Dataloop_dot_printf(datatype_ptr->dataloop, 0, 1);
#endif

    return mpi_errno;
}
/* mpi_to_pmi_keyvals - convert an MPI_Info object into a malloc'ed array
 * of PMI_keyval_t key/value pairs.
 *
 * info_ptr  - MPI info object; NULL or a MPI_INFO_NULL handle yields an
 *             empty result
 * kv_ptr    - on return, the allocated keyval array (NULL when there are
 *             no keys).  Each key and val string is individually
 *             allocated; ownership passes to the caller.
 * nkeys_ptr - on return, the number of entries behind *kv_ptr
 *
 * Returns MPI_SUCCESS or an MPI error code.  On failure the partially
 * built array is still stored through kv_ptr so the caller can free it.
 */
static int mpi_to_pmi_keyvals( MPID_Info *info_ptr, PMI_keyval_t **kv_ptr,
                               int *nkeys_ptr )
{
    char key[MPI_MAX_INFO_KEY];
    PMI_keyval_t *kv = 0;
    int i, nkeys = 0, vallen, flag, mpi_errno=MPI_SUCCESS;

    if (!info_ptr || info_ptr->handle == MPI_INFO_NULL) {
        goto fn_exit;
    }

    MPIR_Info_get_nkeys_impl( info_ptr, &nkeys );
    if (nkeys == 0) {
        goto fn_exit;
    }
    kv = (PMI_keyval_t *)MPIU_Malloc( nkeys * sizeof(PMI_keyval_t) );
    /* BUG FIX: the original called MPIU_ERR_POP here while mpi_errno was
       still MPI_SUCCESS, so an allocation failure was reported to the
       caller as success.  Set a real out-of-memory error instead,
       matching the per-entry allocation check below. */
    if (!kv) {
        MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**nomem" );
    }

    for (i=0; i<nkeys; i++) {
        mpi_errno = MPIR_Info_get_nthkey_impl( info_ptr, i, key );
        if (mpi_errno) { MPIU_ERR_POP(mpi_errno); }
        MPIR_Info_get_valuelen_impl( info_ptr, key, &vallen, &flag );
        MPIU_ERR_CHKANDJUMP1(!flag, mpi_errno, MPI_ERR_OTHER,"**infonokey",
                             "**infonokey %s", key);

        kv[i].key = MPIU_Strdup(key);
        kv[i].val = MPIU_Malloc( vallen + 1 );
        if (!kv[i].key || !kv[i].val) {
            MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER,"**nomem" );
        }
        MPIR_Info_get_impl( info_ptr, key, vallen+1, kv[i].val, &flag );
        MPIU_ERR_CHKANDJUMP1(!flag, mpi_errno, MPI_ERR_OTHER,"**infonokey",
                             "**infonokey %s", key);
        MPIU_DBG_PRINTF(("key: <%s>, value: <%s>\n", kv[i].key, kv[i].val));
    }

    /* fn_fail falls through to fn_exit intentionally: even on error the
       partial array and count are handed back to the caller. */
 fn_fail:
 fn_exit:
    *kv_ptr = kv;
    *nkeys_ptr = nkeys;
    return mpi_errno;
}
/* MPIDI_CH3_Packetized_send - send a request's data as a packetized
 * stream over the MRAIL eager path: one PACKETIZED_SEND_START packet
 * carrying the request's original header, then PACKETIZED_SEND_DATA
 * packets until the request's iov is fully consumed (reloading the iov
 * via MPIDI_CH3_ReqHandler_SendReloadIOV when the device asks for it).
 *
 * vc   - virtual connection to send on
 * sreq - send request whose dev.iov holds header + data
 *
 * Returns MPI_SUCCESS, MPI_MRAIL_MSG_QUEUED when the last packet was
 * buffered in a vbuf (the vbuf takes ownership of sreq), or an error
 * code.  On a hard send failure the VC is marked failed and the request
 * is completed with MPI_ERR_INTERN.
 */
int MPIDI_CH3_Packetized_send(MPIDI_VC_t * vc, MPID_Request * sreq)
{
    MPIDI_CH3_Pkt_packetized_send_start_t send_start;
    MPIDI_CH3_Pkt_packetized_send_data_t pkt_head;
    /* BUG FIX: iov was used throughout this function without ever being
     * declared.  One slot beyond the request's iov limit is needed for
     * the packet header prepended in iov[0]. */
    MPID_IOV iov[MPID_IOV_LIMIT + 1];
    vbuf *buf;
    int mpi_errno = MPI_SUCCESS;
    int n_iov;
    int msg_buffered = 0;
    int nb;
    int complete;
    int pkt_len;
    int seqnum;

    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_SENDV);
    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_SENDV);
    MPIU_DBG_PRINTF(("ch3_isendv\n"));
    MPIDI_DBG_PRINTF((50, FCNAME, "entering"));

    /* First packet: SEND_START header in iov[0], followed by the
     * request's entire iov (original header in dev.iov[0] included). */
    MPIDI_Pkt_init(&send_start, MPIDI_CH3_PKT_PACKETIZED_SEND_START);
    iov[0].MPID_IOV_LEN = sizeof(MPIDI_CH3_Pkt_packetized_send_start_t);
    iov[0].MPID_IOV_BUF = (void*) &send_start;
    MPIU_Memcpy(&iov[1], sreq->dev.iov,
                sreq->dev.iov_count * sizeof(MPID_IOV));
    n_iov = 1 + sreq->dev.iov_count;

    /* Reuse a sequence number already stamped on the original header if
     * present; otherwise draw a fresh one from the VC.  (GET_SEQ_NUM
     * yields -1 when none is available — presumed; verify against its
     * definition.) */
    GET_SEQ_NUM(sreq->dev.iov[0].MPID_IOV_BUF, seqnum);
    if (-1 == seqnum) {
        MPIDI_VC_FAI_send_seqnum(vc, seqnum);
    }
    MPIDI_Pkt_set_seqnum(&send_start, seqnum);
    MPIDI_Request_set_seqnum(sreq, seqnum);

    send_start.origin_head_size = sreq->dev.iov[0].MPID_IOV_LEN;

    Calculate_IOV_len(iov, n_iov, pkt_len);
    mpi_errno = MPIDI_CH3I_MRAILI_Eager_send(vc, iov, n_iov, pkt_len,
                                             &nb, &buf);
    DEBUG_PRINT("[pkt send] mpierr %d, nb %d\n", mpi_errno, nb);

    if (MPI_SUCCESS != mpi_errno && MPI_MRAIL_MSG_QUEUED != mpi_errno) {
        /* Hard failure: mark the VC failed and complete the request
         * with an internal error. */
        vc->ch.state = MPIDI_CH3I_VC_STATE_FAILED;
        sreq->status.MPI_ERROR = MPI_ERR_INTERN;
        MPIDI_CH3U_Request_complete(sreq);
        goto fn_exit;
    } else if (MPI_MRAIL_MSG_QUEUED == mpi_errno) {
        msg_buffered = 1;
    }
    /* nb counted the packet header too; reduce to payload bytes so the
     * iov-adjust below sees only data progress. */
    nb -= sizeof(MPIDI_CH3_Pkt_packetized_send_start_t);

    /* Subsequent packets carry a SEND_DATA header in iov[0]. */
    MPIDI_Pkt_init(&pkt_head, MPIDI_CH3_PKT_PACKETIZED_SEND_DATA);
    iov[0].MPID_IOV_LEN = sizeof(MPIDI_CH3_Pkt_packetized_send_data_t);
    iov[0].MPID_IOV_BUF = (void*) &pkt_head;

    do {
        /* Keep sending until the current iov set is fully consumed. */
        while (!MPIDI_CH3I_Request_adjust_iov(sreq, nb)) {
            MPIDI_VC_FAI_send_seqnum(vc, seqnum);
            MPIDI_Pkt_set_seqnum(&pkt_head, seqnum);
            MPIDI_Request_set_seqnum(sreq, seqnum);

            MPIU_Memcpy((void *) &iov[1],
                        &sreq->dev.iov[sreq->dev.iov_offset],
                        (sreq->dev.iov_count - sreq->dev.iov_offset)
                            * sizeof(MPID_IOV));
            n_iov = sreq->dev.iov_count - sreq->dev.iov_offset + 1;

            Calculate_IOV_len(iov, n_iov, pkt_len);
            mpi_errno = MPIDI_CH3I_MRAILI_Eager_send(vc, iov, n_iov,
                                                     pkt_len, &nb, &buf);
            DEBUG_PRINT("[istartmsgv] mpierr %d, nb %d\n", mpi_errno, nb);
            MPIU_Assert(NULL == buf->sreq);

            if (MPI_SUCCESS != mpi_errno &&
                MPI_MRAIL_MSG_QUEUED != mpi_errno) {
                vc->ch.state = MPIDI_CH3I_VC_STATE_FAILED;
                sreq->status.MPI_ERROR = MPI_ERR_INTERN;
                MPIDI_CH3U_Request_complete(sreq);
                goto fn_exit;
            } else if (MPI_MRAIL_MSG_QUEUED == mpi_errno) {
                msg_buffered = 1;
            }
            nb -= sizeof(MPIDI_CH3_Pkt_packetized_send_data_t);
        }

        /* iov exhausted: reload more data if the device requests it,
         * otherwise we are done. */
        if (sreq->dev.OnDataAvail == MPIDI_CH3_ReqHandler_SendReloadIOV) {
            MPIDI_CH3U_Handle_send_req(vc, sreq, &complete);
            nb = 0;
            complete = 0;
        } else {
            complete = 1;
        }
    } while (!complete);

    if (msg_buffered) {
        /* Last packet is sitting in a vbuf; its completion will finish
         * the request, so hand the request to the vbuf. */
        mpi_errno = MPI_MRAIL_MSG_QUEUED;
        buf->sreq = (void *) sreq;
    } else {
        MPIDI_CH3U_Handle_send_req(vc, sreq, &complete);
    }

  fn_exit:
    MPIDI_DBG_PRINTF((50, FCNAME, "exiting"));
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_SENDV);
    return mpi_errno;
}
/* MPIDI_CH3_PktPrint_RndvSend - dump the fields of a rendezvous send
 * (data) packet for debugging.  The fp argument is part of the common
 * packet-print signature but is not used; all output goes through the
 * MPIU_DBG_PRINTF debug macros. */
int MPIDI_CH3_PktPrint_RndvSend( FILE *fp, MPIDI_CH3_Pkt_t *pkt )
{
    (void) fp;  /* unused; kept for the common packet-print signature */

    MPIU_DBG_PRINTF((" type ......... RNDV_SEND\n"));
    MPIU_DBG_PRINTF((" recvr_reqid .. 0x%08X\n",
                     pkt->rndv_send.receiver_req_id));
    return MPI_SUCCESS;
}
/* MPIDI_CH3_PktHandler_RndvClrToSend - handle a rendezvous clear-to-send
 * packet: the receiver is ready, so start sending the data for the
 * matching send request, prefixed with a RNDV_SEND packet.
 *
 * vc     - connection the CTS arrived on
 * pkt    - the received packet (rndv_clr_to_send member is valid)
 * buflen - out: number of bytes of the receive buffer consumed by this
 *          handler (always sizeof(MPIDI_CH3_Pkt_t) here)
 * rreqp  - out: set to NULL; this handler leaves no receive request
 *          pending
 *
 * Returns MPI_SUCCESS or an MPI error code from the send path.
 */
int MPIDI_CH3_PktHandler_RndvClrToSend( MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
                                        MPIDI_msg_sz_t *buflen,
                                        MPID_Request **rreqp )
{
    MPIDI_CH3_Pkt_rndv_clr_to_send_t * cts_pkt = &pkt->rndv_clr_to_send;
    MPID_Request * sreq;
    MPID_Request * rts_sreq;
    MPIDI_CH3_Pkt_t upkt;              /* holds the outgoing RNDV_SEND pkt */
    MPIDI_CH3_Pkt_rndv_send_t * rs_pkt = &upkt.rndv_send;
    int dt_contig;
    MPI_Aint dt_true_lb;
    MPIDI_msg_sz_t data_sz;
    MPID_Datatype * dt_ptr;
    int mpi_errno = MPI_SUCCESS;

    MPIU_DBG_MSG(CH3_OTHER,VERBOSE,"received rndv CTS pkt");

    /* The CTS carries our own send-request handle back to us. */
    MPID_Request_get_ptr(cts_pkt->sender_req_id, sreq);
    MPIU_DBG_PRINTF(("received cts, count=%d\n", sreq->dev.user_count));

    sreq->dev.OnDataAvail = 0;
    sreq->dev.OnFinal = 0;

    /* Release the RTS request if one exists.
       MPID_Request_fetch_and_clear_rts_sreq() needs to be atomic to
       prevent cancel send from cancelling the wrong (future) request.
       If MPID_Request_fetch_and_clear_rts_sreq() returns a NULL
       rts_sreq, then MPID_Cancel_send() is responsible for releasing
       the RTS request object. */
    MPIDI_Request_fetch_and_clear_rts_sreq(sreq, &rts_sreq);
    if (rts_sreq != NULL) {
        MPID_Request_release(rts_sreq);
    }

    /* This handler consumes exactly one packet's worth of the buffer. */
    *buflen = sizeof(MPIDI_CH3_Pkt_t);

    /* Build the RNDV_SEND header; the receiver matches it to its posted
       receive via the request id it sent in the CTS. */
    MPIDI_Pkt_init(rs_pkt, MPIDI_CH3_PKT_RNDV_SEND);
    rs_pkt->receiver_req_id = cts_pkt->receiver_req_id;

    MPIDI_Datatype_get_info(sreq->dev.user_count, sreq->dev.datatype,
                            dt_contig, data_sz, dt_ptr, dt_true_lb);

    if (dt_contig)
    {
        /* Contiguous data: send header + user buffer in a single iSendv. */
        MPID_IOV iov[MPID_IOV_LIMIT];

        MPIU_DBG_MSG_FMT(CH3_OTHER,VERBOSE,(MPIU_DBG_FDEST,
                    "sending contiguous rndv data, data_sz="
                                            MPIDI_MSG_SZ_FMT, data_sz));

        iov[0].MPID_IOV_BUF = (MPID_IOV_BUF_CAST)rs_pkt;
        iov[0].MPID_IOV_LEN = sizeof(*rs_pkt);

        iov[1].MPID_IOV_BUF =
            (MPID_IOV_BUF_CAST)((char *)sreq->dev.user_buf + dt_true_lb);
        iov[1].MPID_IOV_LEN = data_sz;

        MPIU_THREAD_CS_ENTER(CH3COMM,vc);
        mpi_errno = MPIU_CALL(MPIDI_CH3,iSendv(vc, sreq, iov, 2));
        MPIU_THREAD_CS_EXIT(CH3COMM,vc);
        MPIU_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER,
                            "**ch3|senddata");
    }
    else
    {
        /* Non-contiguous data: set up a segment over the user buffer and
           let the VC's noncontig send routine pack and send it. */
        sreq->dev.segment_ptr = MPID_Segment_alloc( );
        MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno,
                             MPI_ERR_OTHER, "**nomem",
                             "**nomem %s", "MPID_Segment_alloc");
        MPID_Segment_init(sreq->dev.user_buf, sreq->dev.user_count,
                          sreq->dev.datatype, sreq->dev.segment_ptr, 0);
        sreq->dev.segment_first = 0;
        sreq->dev.segment_size = data_sz;

        MPIU_THREAD_CS_ENTER(CH3COMM,vc);
        mpi_errno = vc->sendNoncontig_fn(vc, sreq, rs_pkt, sizeof(*rs_pkt));
        MPIU_THREAD_CS_EXIT(CH3COMM,vc);
        MPIU_ERR_CHKANDJUMP(mpi_errno, mpi_errno, MPI_ERR_OTHER,
                            "**ch3|senddata");
    }
    *rreqp = NULL;

 fn_fail:
    return mpi_errno;
}
/* MPIDI_CH3_PktPrint_Close - dump the fields of a connection-close
 * packet for debugging.  The fp argument is part of the common
 * packet-print signature but is not used; all output goes through the
 * MPIU_DBG_PRINTF debug macros. */
int MPIDI_CH3_PktPrint_Close( FILE *fp, MPIDI_CH3_Pkt_t *pkt )
{
    const char *ack_text = pkt->close.ack ? "TRUE" : "FALSE";

    (void) fp;  /* unused; kept for the common packet-print signature */

    MPIU_DBG_PRINTF((" type ......... MPIDI_CH3_PKT_CLOSE\n"));
    MPIU_DBG_PRINTF((" ack ......... %s\n", ack_text));
    return MPI_SUCCESS;
}
/* MPIDI_CH3_PktPrint_EagerSyncAck - dump the fields of an eager
 * synchronous-send acknowledgement packet for debugging.  The fp
 * argument is part of the common packet-print signature but is not
 * used; all output goes through the MPIU_DBG_PRINTF debug macros. */
int MPIDI_CH3_PktPrint_EagerSyncAck( FILE *fp, MPIDI_CH3_Pkt_t *pkt )
{
    (void) fp;  /* unused; kept for the common packet-print signature */

    MPIU_DBG_PRINTF((" type ......... EAGER_SYNC_ACK\n"));
    MPIU_DBG_PRINTF((" sender_reqid . 0x%08X\n",
                     pkt->eager_sync_ack.sender_req_id));
    return MPI_SUCCESS;
}