int MPIDI_CH3_Req_handler_rma_op_complete(MPIR_Request * sreq)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Request *ureq = NULL;
    MPIR_Win *win_ptr = NULL;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3_REQ_HANDLER_RMA_OP_COMPLETE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3_REQ_HANDLER_RMA_OP_COMPLETE);

    if (sreq->dev.rma_target_ptr != NULL) {
        (sreq->dev.rma_target_ptr)->num_pkts_wait_for_local_completion--;
    }

    /* get window, decrement active request cnt on window */
    MPIR_Win_get_ptr(sreq->dev.source_win_handle, win_ptr);
    MPIR_Assert(win_ptr != NULL);
    MPIDI_CH3I_RMA_Active_req_cnt--;
    MPIR_Assert(MPIDI_CH3I_RMA_Active_req_cnt >= 0);

    if (sreq->dev.request_handle != MPI_REQUEST_NULL) {
        /* get user request */
        MPIR_Request_get_ptr(sreq->dev.request_handle, ureq);
        mpi_errno = MPID_Request_complete(ureq);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3_REQ_HANDLER_RMA_OP_COMPLETE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* FIXME: this is not a scalable algorithm because everyone is polling on
 * the same cacheline */
int MPIDU_shm_barrier(MPIDU_shm_barrier_t * barrier, int num_local)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDU_SHM_BARRIER);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDU_SHM_BARRIER);

    if (num_local == 1)
        goto fn_exit;

    MPIR_ERR_CHKINTERNAL(!barrier_init, mpi_errno, "barrier not initialized");

    if (OPA_fetch_and_incr_int(&barrier->val) == num_local - 1) {
        /* the last arriving process resets the counter and releases the
         * waiters by flipping the shared wait flag */
        OPA_store_int(&barrier->val, 0);
        OPA_store_int(&barrier->wait, 1 - sense);
        OPA_write_barrier();
    }
    else {
        /* wait until the last process flips the flag away from our
         * current sense */
        while (OPA_load_int(&barrier->wait) == sense)
            MPL_sched_yield();  /* skip */
    }

    sense = 1 - sense;

  fn_fail:
  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDU_SHM_BARRIER);
    return mpi_errno;
}
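/*
 * Hedged illustration (not part of the MPICH sources): the function above
 * is a sense-reversing counter barrier.  The standalone sketch below shows
 * the same idea with C11 atomics so the algorithm can be read and compiled
 * in isolation; all names (toy_barrier_t, toy_barrier, toy_sense) are
 * invented for this example.
 */
#include <stdatomic.h>
#include <sched.h>

typedef struct {
    atomic_int val;     /* number of processes that have arrived */
    atomic_int wait;    /* release flag, flipped by the last arriver */
} toy_barrier_t;

/* Each caller keeps its own sense; MPIDU_shm_barrier can use a static
 * because it is entered by one thread per process. */
static _Thread_local int toy_sense = 0;

static void toy_barrier(toy_barrier_t *b, int nprocs)
{
    if (atomic_fetch_add(&b->val, 1) == nprocs - 1) {
        atomic_store(&b->val, 0);
        atomic_store(&b->wait, 1 - toy_sense);  /* release everyone */
    }
    else {
        while (atomic_load(&b->wait) == toy_sense)
            sched_yield();                      /* spin until released */
    }
    toy_sense = 1 - toy_sense;  /* reverse sense for the next barrier */
}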
int MPIDI_CH3I_Progress_register_hook(int (*progress_fn) (int *), int *id)
{
    int mpi_errno = MPI_SUCCESS;
    int i;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3I_PROGRESS_REGISTER_HOOK);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3I_PROGRESS_REGISTER_HOOK);
    MPID_THREAD_CS_ENTER(POBJ, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);

    for (i = 0; i < MAX_PROGRESS_HOOKS; i++) {
        if (progress_hooks[i].func_ptr == NULL) {
            progress_hooks[i].func_ptr = progress_fn;
            progress_hooks[i].active = FALSE;
            break;
        }
    }

    if (i >= MAX_PROGRESS_HOOKS) {
        /* take the error path rather than returning directly, so that the
         * mutex acquired above is released */
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                         "MPIDI_CH3I_Progress_register_hook", __LINE__,
                                         MPI_ERR_INTERN, "**progresshookstoomany", 0);
        goto fn_fail;
    }

    (*id) = i;

  fn_exit:
    MPID_THREAD_CS_EXIT(POBJ, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3I_PROGRESS_REGISTER_HOOK);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
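/*
 * Hedged usage sketch (assumption: this is how a caller would use the
 * registration routine above; my_progress and register_my_hook are
 * invented names).  A hook is a function of type int (*)(int *) that the
 * progress engine may invoke once the hook is activated, reporting via
 * the out parameter whether it made progress.
 */
static int my_progress(int *made_progress)
{
    *made_progress = 0;         /* set to 1 when the hook does real work */
    return MPI_SUCCESS;
}

static int register_my_hook(void)
{
    int my_hook_id;
    return MPIDI_CH3I_Progress_register_hook(my_progress, &my_hook_id);
}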
static MPIR_Request *create_request(MPL_IOV * iov, int iov_count, int iov_offset, size_t nb)
{
    MPIR_Request *sreq;
    int i;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_CREATE_REQUEST);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_CREATE_REQUEST);

    sreq = MPIR_Request_create(MPIR_REQUEST_KIND__SEND);
    /* --BEGIN ERROR HANDLING-- */
    if (sreq == NULL)
        return NULL;
    /* --END ERROR HANDLING-- */
    MPIR_Object_set_ref(sreq, 2);

    for (i = 0; i < iov_count; i++) {
        sreq->dev.iov[i] = iov[i];
    }
    if (iov_offset == 0) {
        /* copy the packet header into the request so the caller's buffer
         * may be reused */
        MPIR_Assert(iov[0].MPL_IOV_LEN == sizeof(MPIDI_CH3_Pkt_t));
        sreq->dev.pending_pkt = *(MPIDI_CH3_Pkt_t *) iov[0].MPL_IOV_BUF;
        sreq->dev.iov[0].MPL_IOV_BUF = (MPL_IOV_BUF_CAST) & sreq->dev.pending_pkt;
    }
    /* account for the nb bytes already written from the current iov entry */
    sreq->dev.iov[iov_offset].MPL_IOV_BUF =
        (MPL_IOV_BUF_CAST) ((char *) sreq->dev.iov[iov_offset].MPL_IOV_BUF + nb);
    sreq->dev.iov[iov_offset].MPL_IOV_LEN -= nb;
    sreq->dev.iov_count = iov_count;
    sreq->dev.OnDataAvail = 0;

    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_CREATE_REQUEST);
    return sreq;
}
static MPIR_Request *create_request(void *hdr, intptr_t hdr_sz, size_t nb)
{
    MPIR_Request *sreq;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_CREATE_REQUEST);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_CREATE_REQUEST);

    sreq = MPIR_Request_create(MPIR_REQUEST_KIND__UNDEFINED);
    /* --BEGIN ERROR HANDLING-- */
    if (sreq == NULL)
        return NULL;
    /* --END ERROR HANDLING-- */
    MPIR_Object_set_ref(sreq, 2);
    sreq->kind = MPIR_REQUEST_KIND__SEND;

    MPIR_Assert(hdr_sz == sizeof(MPIDI_CH3_Pkt_t));
    /* copy the header into the request so the caller's buffer may be
     * reused, then point the iov at the nb bytes not yet sent */
    sreq->dev.pending_pkt = *(MPIDI_CH3_Pkt_t *) hdr;
    sreq->dev.iov[0].MPL_IOV_BUF = (MPL_IOV_BUF_CAST) ((char *) &sreq->dev.pending_pkt + nb);
    sreq->dev.iov[0].MPL_IOV_LEN = hdr_sz - nb;
    sreq->dev.iov_count = 1;
    sreq->dev.OnDataAvail = 0;

    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_CREATE_REQUEST);
    return sreq;
}
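/*
 * Hedged illustration (not MPICH code): both create_request variants above
 * record "what is left to send" after a partial write by bumping the iov
 * base pointer and shrinking its length by nb.  The standalone helper below
 * shows that bookkeeping for a plain struct iovec array; advance_iov is an
 * invented name.
 */
#include <stddef.h>
#include <sys/uio.h>

/* Consume nb bytes from the front of an iovec array in place and return
 * the index of the first entry that still has data to send. */
static int advance_iov(struct iovec *iov, int iov_count, size_t nb)
{
    int i = 0;
    while (i < iov_count && nb >= iov[i].iov_len) {
        nb -= iov[i].iov_len;
        i++;                    /* this entry was fully written */
    }
    if (i < iov_count) {
        iov[i].iov_base = (char *) iov[i].iov_base + nb;
        iov[i].iov_len -= nb;   /* partially written entry */
    }
    return i;
}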
static inline int connection_pop_sendq_req(MPIDI_CH3I_Connection_t * conn)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_CH3I_VC *vcch = &conn->vc->ch;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_CONNECTION_POP_SENDQ_REQ);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_CONNECTION_POP_SENDQ_REQ);

    /* post send of next request on the send queue */

    /* FIXME: Is dequeue/get next the operation we really want? */
    MPIDI_CH3I_SendQ_dequeue(vcch);
    conn->send_active = MPIDI_CH3I_SendQ_head(vcch);    /* MT */
    if (conn->send_active != NULL) {
        MPL_DBG_MSG_P(MPIDI_CH3_DBG_CONNECT, TYPICAL,
                      "conn=%p: Posting message from connection send queue", conn);
        mpi_errno = MPIDI_CH3I_Sock_post_writev(conn->sock, conn->send_active->dev.iov,
                                                conn->send_active->dev.iov_count, NULL);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }

  fn_fail:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_CONNECTION_POP_SENDQ_REQ);
    return mpi_errno;
}
int MPIDI_VCRT_Create(int size, struct MPIDI_VCRT **vcrt_ptr)
{
    MPIDI_VCRT_t *vcrt;
    int mpi_errno = MPI_SUCCESS;
    MPIR_CHKPMEM_DECL(1);
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_VCRT_CREATE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_VCRT_CREATE);

    MPIR_CHKPMEM_MALLOC(vcrt, MPIDI_VCRT_t *,
                        sizeof(MPIDI_VCRT_t) + (size - 1) * sizeof(MPIDI_VC_t *),
                        mpi_errno, "**nomem");
    vcrt->handle = HANDLE_SET_KIND(0, HANDLE_KIND_INVALID);
    MPIR_Object_set_ref(vcrt, 1);
    vcrt->size = size;
    *vcrt_ptr = vcrt;

  fn_exit:
    MPIR_CHKPMEM_COMMIT();
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_VCRT_CREATE);
    return mpi_errno;
  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
    MPIR_CHKPMEM_REAP();
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
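/*
 * Hedged aside: the allocation above uses the classic pre-C99 "struct
 * hack" -- sizeof(MPIDI_VCRT_t) already includes one MPIDI_VC_t * slot,
 * so only (size - 1) extra slots are added.  With C99 a flexible array
 * member expresses the same layout; the struct below is an invented
 * illustration, not the real MPIDI_VCRT_t.
 */
#include <stdlib.h>

struct toy_vcrt {
    int handle;
    volatile int ref_count;
    int size;
    void *vcr_table[];          /* flexible array member, C99 */
};

static struct toy_vcrt *toy_vcrt_create(int size)
{
    /* with a flexible array member we allocate size slots, not size - 1 */
    return malloc(sizeof(struct toy_vcrt) + size * sizeof(void *));
}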
static int handle_probe(const ptl_event_t *e)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Request *const req = e->user_ptr;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_HANDLE_PROBE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_HANDLE_PROBE);

    if (e->ni_fail_type == PTL_NI_NO_MATCH) {
        REQ_PTL(req)->found = FALSE;
        goto finish_probe;
    }

    REQ_PTL(req)->found = TRUE;
    req->status.MPI_SOURCE = NPTL_MATCH_GET_RANK(e->match_bits);
    req->status.MPI_TAG = NPTL_MATCH_GET_TAG(e->match_bits);
    MPIR_STATUS_SET_COUNT(req->status, NPTL_HEADER_GET_LENGTH(e->hdr_data));

  finish_probe:
    mpi_errno = MPID_Request_complete(req);
    if (mpi_errno != MPI_SUCCESS) {
        MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_HANDLE_PROBE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
static int handler_recv_complete(const ptl_event_t *e)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Request *const rreq = e->user_ptr;
    int ret;
    int i;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_HANDLER_RECV_COMPLETE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_HANDLER_RECV_COMPLETE);

    MPIR_Assert(e->type == PTL_EVENT_REPLY || e->type == PTL_EVENT_PUT ||
                e->type == PTL_EVENT_PUT_OVERFLOW);

    if (REQ_PTL(rreq)->md != PTL_INVALID_HANDLE) {
        ret = PtlMDRelease(REQ_PTL(rreq)->md);
        MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlmdrelease",
                             "**ptlmdrelease %s", MPID_nem_ptl_strerror(ret));
    }

    for (i = 0; i < MPID_NEM_PTL_NUM_CHUNK_BUFFERS; ++i)
        if (REQ_PTL(rreq)->chunk_buffer[i])
            MPL_free(REQ_PTL(rreq)->chunk_buffer[i]);

    mpi_errno = MPID_Request_complete(rreq);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_HANDLER_RECV_COMPLETE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPID_Rget_accumulate(const void *origin_addr, int origin_count,
                         MPI_Datatype origin_datatype, void *result_addr, int result_count,
                         MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
                         int target_count, MPI_Datatype target_datatype, MPI_Op op,
                         MPIR_Win * win_ptr, MPIR_Request ** request)
{
    int mpi_errno = MPI_SUCCESS;
    int dt_contig ATTRIBUTE((unused));
    MPIR_Datatype *dtp;
    MPI_Aint dt_true_lb ATTRIBUTE((unused));
    intptr_t data_sz, trg_data_sz;
    MPIR_Request *ureq;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_RGET_ACCUMULATE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_RGET_ACCUMULATE);

    /* request-based RMA operations are only valid within a passive epoch */
    MPIR_ERR_CHKANDJUMP(win_ptr->states.access_state != MPIDI_RMA_PER_TARGET &&
                        win_ptr->states.access_state != MPIDI_RMA_LOCK_ALL_CALLED &&
                        win_ptr->states.access_state != MPIDI_RMA_LOCK_ALL_ISSUED &&
                        win_ptr->states.access_state != MPIDI_RMA_LOCK_ALL_GRANTED,
                        mpi_errno, MPI_ERR_RMA_SYNC, "**rmasync");

    /* Create user request, initially cc=1, ref=1 */
    ureq = MPIR_Request_create(MPIR_REQUEST_KIND__RMA);
    MPIR_ERR_CHKANDJUMP(ureq == NULL, mpi_errno, MPI_ERR_OTHER, "**nomemreq");

    /* This request is referenced by user and ch3 by default. */
    MPIR_Object_set_ref(ureq, 2);

    /* GACC is a no-op only if no data moves in either direction, so check
     * both the origin and the result sides (the result side must use the
     * result count and datatype) */
    MPIDI_Datatype_get_info(origin_count, origin_datatype, dt_contig, data_sz, dtp, dt_true_lb);
    MPIDI_Datatype_get_info(result_count, result_datatype, dt_contig, trg_data_sz, dtp,
                            dt_true_lb);

    /* Enqueue or perform the RMA operation */
    if (target_rank != MPI_PROC_NULL && (data_sz != 0 || trg_data_sz != 0)) {
        mpi_errno = MPIDI_CH3I_Get_accumulate(origin_addr, origin_count, origin_datatype,
                                              result_addr, result_count, result_datatype,
                                              target_rank, target_disp, target_count,
                                              target_datatype, op, win_ptr, ureq);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }
    else {
        mpi_errno = MPID_Request_complete(ureq);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }

    *request = ureq;

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_RGET_ACCUMULATE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
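/*
 * Hedged usage sketch at the MPI level (standard MPI-3 API, not MPICH
 * internals): request-based RMA such as MPI_Rget_accumulate is valid only
 * inside a passive-target epoch, which is what the access_state check
 * above enforces.  Buffer names, counts, and the displacement are
 * invented for this example.
 */
#include <mpi.h>

static long fetch_and_add_one(MPI_Win win, int target_rank)
{
    long origin = 1, result = 0;
    MPI_Request req;

    MPI_Win_lock(MPI_LOCK_SHARED, target_rank, 0, win); /* passive epoch */
    MPI_Rget_accumulate(&origin, 1, MPI_LONG, &result, 1, MPI_LONG,
                        target_rank, 0 /* disp */, 1, MPI_LONG,
                        MPI_SUM, win, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);  /* local completion of the op */
    MPI_Win_unlock(target_rank, win);
    return result;
}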
/*@
  MPIDI_CH3U_VC_WaitForClose - Wait for all virtual connections to close
@*/
int MPIDI_CH3U_VC_WaitForClose(void)
{
    MPID_Progress_state progress_state;
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3U_VC_WAITFORCLOSE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3U_VC_WAITFORCLOSE);

    MPID_Progress_start(&progress_state);
    while (MPIDI_Outstanding_close_ops > 0) {
        MPL_DBG_MSG_D(MPIDI_CH3_DBG_DISCONNECT, TYPICAL,
                      "Waiting for %d close operations", MPIDI_Outstanding_close_ops);
        mpi_errno = MPID_Progress_wait(&progress_state);
        /* --BEGIN ERROR HANDLING-- */
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**ch3|close_progress");
            break;
        }
        /* --END ERROR HANDLING-- */
    }
    MPID_Progress_end(&progress_state);

    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3U_VC_WAITFORCLOSE);
    return mpi_errno;
}
int MPID_nem_mxm_vc_terminate(MPIDI_VC_t * vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_nem_mxm_vc_area *vc_area = VC_BASE(vc);
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MXM_VC_TERMINATE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MXM_VC_TERMINATE);

    if (vc->state != MPIDI_VC_STATE_CLOSED) {
        /* VC is terminated as a result of a fault.  Complete
         * outstanding sends with an error and terminate connection
         * immediately. */
        MPIR_ERR_SET1(mpi_errno, MPI_ERR_OTHER, "**comm_fail", "**comm_fail %d", vc->pg_rank);
    }
    else {
        while (vc_area->pending_sends > 0)
            MPID_nem_mxm_poll(FALSE);
    }

    mpi_errno = MPIDI_CH3U_Handle_connection(vc, MPIDI_VC_EVENT_TERMINATED);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MXM_VC_TERMINATE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPIDI_PG_Find(void *id, MPIDI_PG_t ** pg_ptr)
{
    MPIDI_PG_t *pg;
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_PG_FIND);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_PG_FIND);

    pg = MPIDI_PG_list;
    while (pg != NULL) {
        if (MPIDI_PG_Compare_ids_fn(id, pg->id) != FALSE) {
            *pg_ptr = pg;
            goto fn_exit;
        }
        pg = pg->next;
    }

    *pg_ptr = NULL;

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_PG_FIND);
    return mpi_errno;
}
int MPID_nem_tcp_pkt_unpause_handler(MPIDI_VC_t *vc, MPIDI_CH3_Pkt_t *pkt,
                                     intptr_t *buflen, MPIR_Request **rreqp)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_nem_tcp_vc_area *vc_tcp = VC_TCP(vc);
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_NEM_TCP_CKPT_UNPAUSE_HANDLER);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_NEM_TCP_CKPT_UNPAUSE_HANDLER);

    vc_tcp->send_paused = FALSE;

    /* There may be an unpause message still in the send queue.  If the send
       queue is empty, send the paused messages now; otherwise fall through
       and splice them onto the send queue below. */
    if (MPIDI_CH3I_Sendq_empty(vc_tcp->send_queue))
        mpi_errno = MPID_nem_tcp_send_queued(vc, &vc_tcp->paused_send_queue);

    /* if anything is left on the paused queue, put it on the send queue
       and wait for the reconnect */
    if (!MPIDI_CH3I_Sendq_empty(vc_tcp->paused_send_queue)) {
        MPIDI_CH3I_Sendq_enqueue_multiple_no_refcount(&vc_tcp->send_queue,
                                                      vc_tcp->paused_send_queue.head,
                                                      vc_tcp->paused_send_queue.tail);
        vc_tcp->paused_send_queue.head = vc_tcp->paused_send_queue.tail = NULL;
    }

  fn_exit:
    *buflen = sizeof(MPIDI_CH3_Pkt_t);
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_NEM_TCP_CKPT_UNPAUSE_HANDLER);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPID_nem_tcp_ckpt_continue_vc(MPIDI_VC_t *vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_PKT_DECL_CAST(upkt, MPIDI_nem_tcp_pkt_unpause_t, unpause_pkt);
    MPIR_Request *unpause_req;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_NEM_TCP_CKPT_CONTINUE_VC);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_NEM_TCP_CKPT_CONTINUE_VC);

    unpause_pkt->type = MPIDI_NEM_PKT_NETMOD;
    unpause_pkt->subtype = MPIDI_NEM_TCP_PKT_UNPAUSE;

    mpi_errno = MPID_nem_tcp_iStartContigMsg_paused(vc, &upkt,
                                                    sizeof(MPIDI_nem_tcp_pkt_unpause_t),
                                                    NULL, 0, &unpause_req);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    if (unpause_req) {
        if (unpause_req->status.MPI_ERROR)
            MPIR_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**fail");
        MPIR_Request_free(unpause_req);
        if (mpi_errno)
            goto fn_fail;
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_NEM_TCP_CKPT_CONTINUE_VC);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/*@
   MPID_Close_port - Close port

   Input Parameters:
.  port_name - Name of MPI port to close

   Notes:

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_OTHER
@*/
int MPID_Close_port(const char *port_name)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_CLOSE_PORT);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_CLOSE_PORT);

    /* Check to see if we need to setup channel-specific functions
       for handling the port operations */
    if (setupPortFunctions) {
        MPIDI_CH3_PortFnsInit(&portFns);
        setupPortFunctions = 0;
    }

    /* The default for this function is 0 (no function).  A channel may
       define its own function and set it in the init check above; such a
       function may be named MPIDI_CH3_Close_port */
    if (portFns.ClosePort) {
        mpi_errno = portFns.ClosePort(port_name);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }
    else {
        MPIR_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**notimpl");
    }

  fn_fail:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_CLOSE_PORT);
    return mpi_errno;
}
int MPID_nem_tcp_ckpt_restart_vc(MPIDI_VC_t *vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_CH3_Pkt_t upkt;
    MPIDI_nem_tcp_pkt_unpause_t *const pkt = (MPIDI_nem_tcp_pkt_unpause_t *) &upkt;
    MPIR_Request *sreq;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_NEM_TCP_CKPT_RESTART_VC);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_NEM_TCP_CKPT_RESTART_VC);

    pkt->type = MPIDI_NEM_PKT_NETMOD;
    pkt->subtype = MPIDI_NEM_TCP_PKT_UNPAUSE;
    /* note: sizeof(*pkt) is the packet size; sizeof(pkt) would only be
     * the size of the pointer */
    mpi_errno = MPID_nem_tcp_iStartContigMsg_paused(vc, pkt, sizeof(*pkt), NULL, 0, &sreq);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    if (sreq != NULL) {
        if (sreq->status.MPI_ERROR != MPI_SUCCESS) {
            mpi_errno = sreq->status.MPI_ERROR;
            MPIR_Request_free(sreq);
            MPIR_ERR_INTERNALANDJUMP(mpi_errno, "Failed to send checkpoint unpause pkt.");
        }
        MPIR_Request_free(sreq);
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_NEM_TCP_CKPT_RESTART_VC);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int vc_terminate(MPIDI_VC_t *vc)
{
    int mpi_errno = MPI_SUCCESS;
    int req_errno = MPI_SUCCESS;
    MPID_nem_ptl_vc_area *const vc_ptl = VC_PTL(vc);
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_VC_TERMINATE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_VC_TERMINATE);

    if (vc->state != MPIDI_VC_STATE_CLOSED) {
        /* VC is terminated as a result of a fault.  Complete outstanding
           sends with an error and terminate the connection immediately. */
        MPIR_ERR_SET1(req_errno, MPIX_ERR_PROC_FAILED, "**comm_fail",
                      "**comm_fail %d", vc->pg_rank);
        mpi_errno = MPID_nem_ptl_vc_terminated(vc);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }
    else if (vc_ptl->num_queued_sends == 0) {
        mpi_errno = MPID_nem_ptl_vc_terminated(vc);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }
    else {
        /* the send_queued function will call vc_terminated once vc->state
           is CLOSED and the last queued send has been sent */
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_VC_TERMINATE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/*@
   MPID_Open_port - Open an MPI Port

   Input Arguments:
.  MPI_Info info - info

   Output Arguments:
.  char *port_name - port name

   Notes:

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_OTHER
@*/
int MPID_Open_port(MPIR_Info *info_ptr, char *port_name)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIR_OPEN_PORT);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIR_OPEN_PORT);

    /* Check to see if we need to setup channel-specific functions
       for handling the port operations */
    if (setupPortFunctions) {
        MPIDI_CH3_PortFnsInit(&portFns);
        setupPortFunctions = 0;
    }

    /* The default for this function is MPIDI_Open_port.  A channel may
       define its own function and set it in the init check above; such a
       function may be named MPIDI_CH3_Open_port.  In addition, not all
       channels can implement this operation, so those channels will set
       the function pointer to NULL */
    if (portFns.OpenPort) {
        mpi_errno = portFns.OpenPort(info_ptr, port_name);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }
    else {
        MPIR_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**notimpl");
    }

  fn_fail:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIR_OPEN_PORT);
    return mpi_errno;
}
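/*
 * Hedged usage sketch at the MPI level (standard API, not the channel
 * dispatch above): a server opens a port and accepts a connection; the
 * client side connects with the advertised name.  Error handling and the
 * out-of-band exchange of the port string are elided; server_side is an
 * invented name.
 */
#include <mpi.h>

static void server_side(void)
{
    char port[MPI_MAX_PORT_NAME];
    MPI_Comm client;

    MPI_Open_port(MPI_INFO_NULL, port); /* dispatches to portFns.OpenPort */
    /* ... publish `port` out of band, e.g. via MPI_Publish_name ... */
    MPI_Comm_accept(port, MPI_INFO_NULL, 0, MPI_COMM_SELF, &client);
    MPI_Close_port(port);               /* dispatches to portFns.ClosePort */
    MPI_Comm_disconnect(&client);
}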
int MPIDI_CH3U_Handle_send_req(MPIDI_VC_t * vc, MPIR_Request * sreq, int *complete)
{
    int mpi_errno = MPI_SUCCESS;
    int (*reqFn) (MPIDI_VC_t *, MPIR_Request *, int *);
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3U_HANDLE_SEND_REQ);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3U_HANDLE_SEND_REQ);

    /* Use the associated function rather than switching on the old ca field */
    /* Routines can call the attached function directly */
    reqFn = sreq->dev.OnDataAvail;
    if (!reqFn) {
        MPIR_Assert(MPIDI_Request_get_type(sreq) != MPIDI_REQUEST_TYPE_GET_RESP);
        mpi_errno = MPID_Request_complete(sreq);
        *complete = 1;
    }
    else {
        mpi_errno = reqFn(vc, sreq, complete);
    }
    if (mpi_errno != MPI_SUCCESS) {
        MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3U_HANDLE_SEND_REQ);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
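/*
 * Hedged illustration (invented names): OnDataAvail acts as a continuation
 * attached to the request -- either NULL, meaning "all data has moved,
 * complete the request now", or a function to run the next processing
 * step.  The toy dispatcher below shows the same "callback or finish"
 * shape in isolation.
 */
typedef struct toy_req {
    int (*on_data_avail)(struct toy_req *req, int *complete);
    int done;
} toy_req_t;

static int toy_handle_send(toy_req_t *req, int *complete)
{
    if (req->on_data_avail == NULL) {
        req->done = 1;          /* no continuation: complete in place */
        *complete = 1;
        return 0;
    }
    /* otherwise let the continuation decide whether we are finished */
    return req->on_data_avail(req, complete);
}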
int MPID_Comm_connect(const char *port_name, MPIR_Info * info, int root,
                      MPIR_Comm * comm, MPIR_Comm ** newcomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    int timeout = MPIR_CVAR_CH4_COMM_CONNECT_TIMEOUT;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_COMM_CONNECT);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_COMM_CONNECT);

    if (info != NULL) {
        int info_flag = 0;
        char info_value[MPI_MAX_INFO_VAL + 1];
        MPIR_Info_get_impl(info, "timeout", MPI_MAX_INFO_VAL, info_value, &info_flag);
        if (info_flag) {
            timeout = atoi(info_value);
        }
    }

    mpi_errno = MPIDI_NM_mpi_comm_connect(port_name, info, root, timeout, comm, newcomm_ptr);
    if (mpi_errno != MPI_SUCCESS) {
        MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_COMM_CONNECT);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
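/*
 * Hedged usage sketch: the connect path above reads an optional "timeout"
 * info hint (seconds, parsed with atoi) and otherwise falls back to
 * MPIR_CVAR_CH4_COMM_CONNECT_TIMEOUT.  A client can set the hint through
 * the standard info interface; the value and function name here are
 * invented.
 */
#include <mpi.h>

static void connect_with_timeout(const char *port, MPI_Comm *newcomm)
{
    MPI_Info info;
    MPI_Info_create(&info);
    MPI_Info_set(info, "timeout", "30");        /* give up after ~30 seconds */
    MPI_Comm_connect(port, info, 0, MPI_COMM_SELF, newcomm);
    MPI_Info_free(&info);
}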
int MPIDI_CH3U_Post_data_receive_unexpected(MPIR_Request * rreq)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);

    /* FIXME: to improve performance, allocate temporary buffer from a
       specialized buffer pool. */
    /* FIXME: to avoid memory exhaustion, integrate buffer pool management
       with flow control */
    MPL_DBG_MSG(MPIDI_CH3_DBG_OTHER, VERBOSE, "unexpected request allocated");

    rreq->dev.tmpbuf = MPL_malloc(rreq->dev.recv_data_sz, MPL_MEM_BUFFER);
    if (!rreq->dev.tmpbuf) {
        MPIR_ERR_SETANDJUMP1(mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %d",
                             rreq->dev.recv_data_sz);
    }
    rreq->dev.tmpbuf_sz = rreq->dev.recv_data_sz;

    rreq->dev.iov[0].MPL_IOV_BUF = (MPL_IOV_BUF_CAST) rreq->dev.tmpbuf;
    rreq->dev.iov[0].MPL_IOV_LEN = rreq->dev.recv_data_sz;
    rreq->dev.iov_count = 1;
    rreq->dev.OnDataAvail = MPIDI_CH3_ReqHandler_UnpackUEBufComplete;
    rreq->dev.recv_pending_count = 2;

  fn_fail:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3U_POST_DATA_RECEIVE_UNEXPECTED);
    return mpi_errno;
}
int MPIDI_CH3U_Handle_ordered_recv_pkt(MPIDI_VC_t * vc, MPIDI_CH3_Pkt_t * pkt, void *data,
                                       intptr_t *buflen, MPIR_Request ** rreqp)
{
    int mpi_errno = MPI_SUCCESS;
    static MPIDI_CH3_PktHandler_Fcn *pktArray[MPIDI_CH3_PKT_END_CH3 + 1];
    static int needsInit = 1;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_CH3U_HANDLE_ORDERED_RECV_PKT);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDI_CH3U_HANDLE_ORDERED_RECV_PKT);

    MPL_DBG_STMT(MPIDI_CH3_DBG_OTHER, VERBOSE, MPIDI_DBG_Print_packet(pkt));

    /* FIXME: We can turn this into something like

       MPIR_Assert(pkt->type <= MAX_PACKET_TYPE);
       mpi_errno = MPIDI_CH3_ProgressFunctions[pkt->type](vc,pkt,rreqp);

       in the progress engine itself.  Then this routine is not necessary. */

    if (needsInit) {
        MPIDI_CH3_PktHandler_Init(pktArray, MPIDI_CH3_PKT_END_CH3);
        needsInit = 0;
    }
    /* Packet type is an enum and hence >= 0 */
    MPIR_Assert(pkt->type <= MPIDI_CH3_PKT_END_CH3);
    mpi_errno = pktArray[pkt->type](vc, pkt, data, buflen, rreqp);

    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_CH3U_HANDLE_ORDERED_RECV_PKT);
    return mpi_errno;
}
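/*
 * Hedged illustration (invented names): the routine above is a lazily
 * initialized dispatch table -- the packet type indexes an array of
 * handler function pointers, filled once on first use.  In miniature:
 */
#include <assert.h>

typedef int (*toy_pkt_handler_t)(int pkt_type);

enum { TOY_PKT_EAGER, TOY_PKT_RNDV, TOY_PKT_END };

static int toy_eager(int t) { (void) t; return 0; }
static int toy_rndv(int t)  { (void) t; return 0; }

static int toy_dispatch(int pkt_type)
{
    static toy_pkt_handler_t table[TOY_PKT_END];
    static int needs_init = 1;
    if (needs_init) {           /* one-time lazy initialization */
        table[TOY_PKT_EAGER] = toy_eager;
        table[TOY_PKT_RNDV] = toy_rndv;
        needs_init = 0;
    }
    assert(pkt_type >= 0 && pkt_type < TOY_PKT_END);
    return table[pkt_type](pkt_type);
}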
int MPIDU_Ftb_init(void)
{
    int mpi_errno = MPI_SUCCESS;
    int ret;
    FTB_client_t ci;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDU_FTB_INIT);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPIDU_FTB_INIT);

    MPL_strncpy(ci.event_space, "ftb.mpi.mpich", sizeof(ci.event_space));
    MPL_strncpy(ci.client_name, "mpich " MPICH_VERSION, sizeof(ci.client_name));
    MPL_strncpy(ci.client_subscription_style, "FTB_SUBSCRIPTION_NONE",
                sizeof(ci.client_subscription_style));
    ci.client_polling_queue_len = -1;

#ifdef USE_PMI2_API
    ret = PMI2_Job_GetId(ci.client_jobid, sizeof(ci.client_jobid));
    MPIR_ERR_CHKANDJUMP(ret, mpi_errno, MPI_ERR_OTHER, "**pmi_jobgetid");
#else
    ret = PMI_KVS_Get_my_name(ci.client_jobid, sizeof(ci.client_jobid));
    MPIR_ERR_CHKANDJUMP(ret, mpi_errno, MPI_ERR_OTHER, "**pmi_get_id");
#endif

    ret = FTB_Connect(&ci, &client_handle);
    MPIR_ERR_CHKANDJUMP(ret, mpi_errno, MPI_ERR_OTHER, "**ftb_connect");

    ret = FTB_Declare_publishable_events(client_handle, NULL, event_info,
                                         sizeof(event_info) / sizeof(event_info[0]));
    MPIR_ERR_CHKANDJUMP(ret, mpi_errno, MPI_ERR_OTHER, "**ftb_declare_publishable_events");

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDU_FTB_INIT);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
static int handler_recv_unpack_complete(const ptl_event_t *e)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Request *const rreq = e->user_ptr;
    void *buf;
    MPI_Aint last;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_HANDLER_RECV_UNPACK_COMPLETE);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_HANDLER_RECV_UNPACK_COMPLETE);

    MPIR_Assert(e->type == PTL_EVENT_REPLY || e->type == PTL_EVENT_PUT ||
                e->type == PTL_EVENT_PUT_OVERFLOW);

    if (e->type == PTL_EVENT_PUT_OVERFLOW)
        buf = e->start;
    else
        buf = REQ_PTL(rreq)->chunk_buffer[0];

    last = rreq->dev.segment_first + e->mlength;
    MPIDU_Segment_unpack(rreq->dev.segment_ptr, rreq->dev.segment_first, &last, buf);
    MPIR_Assert(last == rreq->dev.segment_first + e->mlength);

    mpi_errno = handler_recv_complete(e);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_HANDLER_RECV_UNPACK_COMPLETE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPID_nem_ptl_init_id(MPIDI_VC_t *vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_nem_ptl_vc_area *const vc_ptl = VC_PTL(vc);
    char *bc;
    int pmi_errno;
    int val_max_sz;
    MPIR_CHKLMEM_DECL(1);
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_NEM_PTL_INIT_ID);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_NEM_PTL_INIT_ID);

    pmi_errno = PMI_KVS_Get_value_length_max(&val_max_sz);
    MPIR_ERR_CHKANDJUMP1(pmi_errno, mpi_errno, MPI_ERR_OTHER, "**fail",
                         "**fail %d", pmi_errno);
    MPIR_CHKLMEM_MALLOC(bc, char *, val_max_sz, mpi_errno, "bc");

    mpi_errno = vc->pg->getConnInfo(vc->pg_rank, bc, val_max_sz, vc->pg);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    mpi_errno = MPID_nem_ptl_get_id_from_bc(bc, &vc_ptl->id, &vc_ptl->pt, &vc_ptl->ptg,
                                            &vc_ptl->ptc, &vc_ptl->ptr, &vc_ptl->ptrg,
                                            &vc_ptl->ptrc);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    vc_ptl->id_initialized = TRUE;

    MPIDI_CHANGE_VC_STATE(vc, ACTIVE);

  fn_exit:
    MPIR_CHKLMEM_FREEALL();
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_NEM_PTL_INIT_ID);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPID_nem_mxm_vc_destroy(MPIDI_VC_t * vc)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MXM_VC_DESTROY);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MXM_VC_DESTROY);

    /* Do nothing: finalize is called before vc destroy, so it is not
     * possible to destroy the endpoint here. */
#if 0
    MPID_nem_mxm_vc_area *vc_area = VC_BASE(vc);
    if (vc_area->ctx == vc) {
        mpi_errno = _mxm_disconnect(vc_area->mxm_ep);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }
#endif

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MXM_VC_DESTROY);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPID_Comm_connect(const char *port_name, MPIR_Info * info, int root,
                      MPIR_Comm * comm, MPIR_Comm ** newcomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_COMM_CONNECT);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_COMM_CONNECT);

    /* Check to see if we need to setup channel-specific functions
       for handling the port operations */
    if (setupPortFunctions) {
        MPIDI_CH3_PortFnsInit(&portFns);
        setupPortFunctions = 0;
    }

    /* A channel may define its own function and set it in the init check
       above; such a function may be named MPIDI_CH3_Comm_connect.  If the
       function is null, we signal a not-implemented error */
    if (portFns.CommConnect) {
        mpi_errno = portFns.CommConnect(port_name, info, root, comm, newcomm_ptr);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }
    else {
        MPIR_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**notimpl");
    }

  fn_fail:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_COMM_CONNECT);
    return mpi_errno;
}
static int handler_recv_big_get(const ptl_event_t *e)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Request *const rreq = e->user_ptr;
    MPI_Aint last;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_HANDLER_RECV_BIG_GET);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_HANDLER_RECV_BIG_GET);

    MPIR_Assert(e->type == PTL_EVENT_REPLY);

    /* decrement the number of remaining gets */
    REQ_PTL(rreq)->num_gets--;
    if (REQ_PTL(rreq)->num_gets == 0) {
        /* if we used a temporary buffer, unpack the data */
        if (REQ_PTL(rreq)->chunk_buffer[0]) {
            last = rreq->dev.segment_size;
            MPIDU_Segment_unpack(rreq->dev.segment_ptr, rreq->dev.segment_first, &last,
                                 REQ_PTL(rreq)->chunk_buffer[0]);
            MPIR_Assert(last == rreq->dev.segment_size);
        }
        mpi_errno = handler_recv_complete(e);
    }

    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_HANDLER_RECV_BIG_GET);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
static int MPIDU_Segment_contig_unpack_external32_to_buf(DLOOP_Offset * blocks_p,
                                                         DLOOP_Type el_type,
                                                         DLOOP_Offset rel_off,
                                                         void *bufp, void *v_paramp)
{
    int src_el_size, dest_el_size;
    struct MPIDU_Segment_piece_params *paramp = v_paramp;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_SEGMENT_CONTIG_UNPACK_EXTERNAL32_TO_BUF);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_MPID_SEGMENT_CONTIG_UNPACK_EXTERNAL32_TO_BUF);

    src_el_size = MPIDU_Datatype_get_basic_size(el_type);
    dest_el_size = MPIDI_Datatype_get_basic_size_external32(el_type);
    MPIR_Assert(dest_el_size);

    /*
     * h  = handle value
     * do = datatype buffer offset
     * dp = datatype buffer pointer
     * up = unpack buffer pointer (current location, incremented as we go)
     * sz = size of datatype (guess we could get this from handle value if
     *      we wanted...)
     */
#ifdef MPID_SP_VERBOSE
    dbg_printf("\t[contig unpack [external32]: do=%d, dp=%x, up=%x, "
               "src_el_sz=%d, dest_el_sz=%d, blksz=%d]\n",
               rel_off, (unsigned) bufp, (unsigned) paramp->u.unpack.unpack_buffer,
               src_el_size, dest_el_size, (int) *blocks_p);
#endif

    /* TODO: DEAL WITH CASE WHERE ALL DATA DOESN'T FIT! */
    if ((src_el_size == dest_el_size) && (src_el_size == 1)) {
        MPIR_Memcpy(((char *) bufp) + rel_off, paramp->u.unpack.unpack_buffer, *blocks_p);
    }
    else if (is_float_type(el_type)) {
        external32_float_convert(((char *) bufp) + rel_off, paramp->u.unpack.unpack_buffer,
                                 dest_el_size, src_el_size, *blocks_p);
    }
    else {
        external32_basic_convert(((char *) bufp) + rel_off, paramp->u.unpack.unpack_buffer,
                                 dest_el_size, src_el_size, *blocks_p);
    }
    paramp->u.unpack.unpack_buffer += (dest_el_size * (*blocks_p));

    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPID_SEGMENT_CONTIG_UNPACK_EXTERNAL32_TO_BUF);
    return 0;
}
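/*
 * Hedged illustration (invented name): "external32" is MPI's portable
 * external data representation, which stores basic types big-endian with
 * fixed sizes.  When the source and destination element sizes match, the
 * per-element conversion above amounts to reassembling each value from
 * big-endian byte order, as in this standalone 4-byte example:
 */
#include <stdint.h>

static uint32_t toy_from_external32_u32(const unsigned char *src)
{
    /* assemble from big-endian byte order regardless of host endianness */
    return ((uint32_t) src[0] << 24) | ((uint32_t) src[1] << 16) |
           ((uint32_t) src[2] << 8) | (uint32_t) src[3];
}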