/* MPIR_Graph_neighbors_impl - return the neighbors of 'rank' in a graph
 * topology, writing at most 'maxneighbors' entries into 'neighbors[]'.
 *
 * Returns MPI_SUCCESS, MPI_ERR_TOPOLOGY (communicator has no graph
 * topology) or MPI_ERR_RANK (rank out of range).
 */
int MPIR_Graph_neighbors_impl(MPID_Comm *comm_ptr, int rank, int maxneighbors, int neighbors[])
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Topology *graph_ptr;
    int i, is, ie;

    graph_ptr = MPIR_Topology_get(comm_ptr);
    MPIU_ERR_CHKANDJUMP((!graph_ptr || graph_ptr->kind != MPI_GRAPH), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notgraphtopo");
    MPIU_ERR_CHKANDJUMP2((rank < 0 || rank >= graph_ptr->topo.graph.nnodes), mpi_errno,
                         MPI_ERR_RANK, "**rank", "**rank %d %d",
                         rank, graph_ptr->topo.graph.nnodes);

    /* Get location in edges array of the neighbors of the specified rank:
       index[rank-1] (or 0 for rank 0) .. index[rank] is the edge range. */
    if (rank == 0)
        is = 0;
    else
        is = graph_ptr->topo.graph.index[rank - 1];
    ie = graph_ptr->topo.graph.index[rank];

    /* FIX: previously 'maxneighbors' was ignored, so a caller-supplied
       array smaller than the neighbor count was overrun.  Clamp the copy
       to the caller's buffer size. */
    if (ie - is > maxneighbors)
        ie = is + maxneighbors;

    /* Copy the neighbors */
    for (i = is; i < ie; i++)
        *neighbors++ = graph_ptr->topo.graph.edges[i];

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPIDI_SHM_Win_free - release the shared-memory resources attached to a
 * window being freed.
 *
 * For a shared (multi-task) window: decrement the shared use count, spin
 * until all tasks on the node have done the same, then detach/destroy the
 * segment (SysV shm or mmap, chosen at compile time; rank 0 removes the
 * segment).  For a single-task window the buffer was plain malloc'ed and
 * is simply freed.
 */
int MPIDI_SHM_Win_free(MPID_Win **win_ptr)
{
    static char FCNAME[] = "MPID_SHM_Win_free";
    int rc;
    int mpi_errno = MPI_SUCCESS;

    /* Free shared memory region */
    /* free shm_base_addrs that's only used for shared memory windows */
    if ((*win_ptr)->mpid.shm->allocated) {
        /* Announce our departure and wait until every task on the node has
           left; the last one out may then tear the segment down. */
        OPA_fetch_and_add_int((OPA_int_t *) &((*win_ptr)->mpid.shm->ctrl->shm_count), -1);
        while ((*win_ptr)->mpid.shm->ctrl->shm_count != 0)
            MPIDI_QUICKSLEEP;
        if ((*win_ptr)->comm_ptr->rank == 0) {
            MPIDI_SHM_MUTEX_DESTROY(*win_ptr);
        }
#ifdef USE_SYSV_SHM
        /* FIX: shmdt()'s raw return value (0/-1) was previously assigned
           directly to mpi_errno; check it through the error macro instead. */
        rc = shmdt((*win_ptr)->mpid.shm->base_addr);
        MPIU_ERR_CHKANDJUMP((rc == -1), mpi_errno, MPI_ERR_RMA_SHARED, "**shmctl");
        if ((*win_ptr)->comm_ptr->rank == 0) {
            rc = shmctl((*win_ptr)->mpid.shm->shm_id, IPC_RMID, NULL);
            /* FIX: 'errno' was previously passed where the macro expects the
               mpi_errno lvalue, clobbering errno with an MPI error code. */
            MPIU_ERR_CHKANDJUMP((rc == -1), mpi_errno, MPI_ERR_RMA_SHARED, "**shmctl");
        }
#elif USE_MMAP_SHM
        munmap((*win_ptr)->mpid.shm->base_addr, (*win_ptr)->mpid.shm->segment_len);
        /* Only one task needs to unlink the name. */
        if (0 == (*win_ptr)->comm_ptr->rank)
            shm_unlink((*win_ptr)->mpid.shm->shm_key);
#else
        MPID_Abort(NULL, MPI_ERR_RMA_SHARED, -1, "MPI_Win_free error");
#endif
    }
    else {  /* one task on a node: buffer came from MPIU_Malloc */
        MPIU_Free((*win_ptr)->mpid.shm->base_addr);
    }
    MPIU_Free((*win_ptr)->mpid.shm);
    (*win_ptr)->mpid.shm = NULL;

  fn_fail:
    return mpi_errno;
}
static int create_r_cookie (char *hostname, int port, int data_sz, char **cookie, int *len) { int mpi_errno = MPI_SUCCESS; int hostname_len; int cookie_len; r_cookie_t *c; hostname_len = strnlen (hostname, MAX_HOSTNAME_LEN) + 1; cookie_len = sizeof (r_cookie_t) - 1 + hostname_len; c = MPIU_Malloc (cookie_len); MPIU_ERR_CHKANDJUMP (c == NULL, mpi_errno, MPI_ERR_OTHER, "**nomem"); c->port = port; c->data_sz = data_sz; MPIU_Strncpy (c->hostname, hostname, hostname_len); *cookie = (char *)c; *len = sizeof (r_cookie_t) - 1 + hostname_len; fn_exit: return mpi_errno; fn_fail: goto fn_exit; }
/* MPID_nem_scif_get_business_card - append this process's SCIF connection
 * information (host description, listen port, SCIF node id) to the
 * business-card string.
 *
 * Distinguishes "buffer too small" (**buscard_len) from any other
 * string-append failure (**buscard).
 */
int MPID_nem_scif_get_business_card(int my_rank, char **bc_val_p, int *val_max_sz_p)
{
    int mpi_errno = MPI_SUCCESS;
    int str_errno = MPIU_STR_SUCCESS;
    int ret;
    char hostname[512];
    uint16_t self;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_SCIF_GET_BUSINESS_CARD);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_SCIF_GET_BUSINESS_CARD);

    /* Pre-terminate: gethostname() does not guarantee NUL termination on
       truncation, so we write to sizeof-1 and keep the final byte 0. */
    hostname[sizeof(hostname) - 1] = 0;
    gethostname(hostname, sizeof(hostname) - 1);

    str_errno = MPIU_Str_add_string_arg(bc_val_p, val_max_sz_p, MPIDI_CH3I_HOST_DESCRIPTION_KEY, hostname);
    if (str_errno) {
        MPIU_ERR_CHKANDJUMP(str_errno == MPIU_STR_NOMEM, mpi_errno, MPI_ERR_OTHER, "**buscard_len");
        MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**buscard");
    }
    str_errno = MPIU_Str_add_int_arg(bc_val_p, val_max_sz_p, MPIDI_CH3I_PORT_KEY, listen_port);
    if (str_errno) {
        MPIU_ERR_CHKANDJUMP(str_errno == MPIU_STR_NOMEM, mpi_errno, MPI_ERR_OTHER, "**buscard_len");
        MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**buscard");
    }

    /* Query only our own SCIF node id (NULL/0 => don't fill the node list). */
    ret = scif_get_nodeIDs(NULL, 0, &self);
    MPIU_ERR_CHKANDJUMP2(ret == -1, mpi_errno, MPI_ERR_OTHER, "**scif_get_nodeIDs",
                         "**scif_get_nodeIDs %s %d", MPIU_Strerror(errno), errno);
    str_errno = MPIU_Str_add_int_arg(bc_val_p, val_max_sz_p, MPIDI_CH3I_NODE_KEY, self);
    if (str_errno) {
        MPIU_ERR_CHKANDJUMP(str_errno == MPIU_STR_NOMEM, mpi_errno, MPI_ERR_OTHER, "**buscard_len");
        MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**buscard");
    }

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_SCIF_GET_BUSINESS_CARD);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/*@
   MPI_File_create_errhandler - Create a file error handler

Input Parameters:
. file_errhandler_fn - user defined error handling procedure (function)

Output Parameters:
. errhandler - MPI error handler (handle)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
@*/
int MPI_File_create_errhandler(MPI_File_errhandler_function *file_errhandler_fn,
                               MPI_Errhandler *errhandler)
{
    static const char FCNAME[] = "MPI_File_create_errhandler";
    int mpi_errno = MPI_SUCCESS;
    MPID_Errhandler *errhan_ptr;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_FILE_CREATE_ERRHANDLER);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_FILE_CREATE_ERRHANDLER);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_ARGNULL(file_errhandler_fn, "file_errhandler_fn", mpi_errno);
            MPIR_ERRTEST_ARGNULL(errhandler, "errhandler", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* Allocate the errhandler object and mark it as a C-language FILE
       handler with an initial reference count of 1. */
    errhan_ptr = (MPID_Errhandler *)MPIU_Handle_obj_alloc( &MPID_Errhandler_mem );
    MPIU_ERR_CHKANDJUMP(!errhan_ptr,mpi_errno,MPI_ERR_OTHER,"**nomem");
    errhan_ptr->language = MPID_LANG_C;
    errhan_ptr->kind	 = MPID_FILE;
    MPIU_Object_set_ref(errhan_ptr,1);
    errhan_ptr->errfn.C_File_Handler_function = file_errhandler_fn;

    /* Publish the handle only after the object is fully initialized. */
    MPIU_OBJ_PUBLISH_HANDLE(*errhandler, errhan_ptr->handle);

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_FILE_CREATE_ERRHANDLER);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_file_create_errhandler",
            "**mpi_file_create_errhandler %p %p", file_errhandler_fn, errhandler);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/* MPIDI_CH3I_RMA_Cleanup_ops_aggressive - forcibly push one RMA target's
 * pending operations out and wait for their local completion, in order to
 * release resources held by this window.
 *
 * Picks the first target (scanning the slots) that still has a pending op
 * list, upgrades its sync flag to at least FLUSH_LOCAL, issues its
 * operations, then spins on the progress engine until local completion.
 */
int MPIDI_CH3I_RMA_Cleanup_ops_aggressive(MPID_Win * win_ptr)
{
    int i, local_completed = 0, remote_completed = 0;
    int mpi_errno = MPI_SUCCESS;
    MPIDI_RMA_Target_t *curr_target = NULL;
    int made_progress = 0;

    /* If we are in an aggressive cleanup, the window must be holding
     * up resources.  If it isn't, we are in the wrong window and
     * incorrectly entered this function. */
    MPIU_ERR_CHKANDJUMP(win_ptr->non_empty_slots == 0, mpi_errno, MPI_ERR_OTHER,
                        "**rmanoop");

    /* find the first target that has something to issue */
    for (i = 0; i < win_ptr->num_slots; i++) {
        if (win_ptr->slots[i].target_list != NULL) {
            curr_target = win_ptr->slots[i].target_list;
            while (curr_target != NULL && curr_target->pending_op_list == NULL)
                curr_target = curr_target->next;
            if (curr_target != NULL)
                break;
        }
    }

    /* Nothing pending anywhere: nothing to clean up. */
    if (curr_target == NULL)
        goto fn_exit;

    /* Ensure at least a local flush so the ops below will complete locally. */
    if (curr_target->sync.sync_flag < MPIDI_RMA_SYNC_FLUSH_LOCAL)
        curr_target->sync.sync_flag = MPIDI_RMA_SYNC_FLUSH_LOCAL;

    /* Issue out all operations. */
    mpi_errno = MPIDI_CH3I_RMA_Make_progress_target(win_ptr, curr_target->target_rank,
                                                    &made_progress);
    if (mpi_errno != MPI_SUCCESS)
        MPIU_ERR_POP(mpi_errno);

    /* Wait for local completion. */
    do {
        mpi_errno = MPIDI_CH3I_RMA_Cleanup_ops_target(win_ptr, curr_target,
                                                      &local_completed,
                                                      &remote_completed);
        if (mpi_errno != MPI_SUCCESS)
            MPIU_ERR_POP(mpi_errno);
        if (!local_completed) {
            mpi_errno = wait_progress_engine();
            if (mpi_errno != MPI_SUCCESS)
                MPIU_ERR_POP(mpi_errno);
        }
    } while (!local_completed);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int MPID_nem_mx_get_from_bc(const char *business_card, uint32_t *remote_endpoint_id, uint64_t *remote_nic_id) { int mpi_errno = MPI_SUCCESS; int str_errno = MPIU_STR_SUCCESS; int len; int tmp_endpoint_id; mpi_errno = MPIU_Str_get_int_arg(business_card, MPIDI_CH3I_ENDPOINT_KEY, &tmp_endpoint_id); /* FIXME: create a real error string for this */ MPIU_ERR_CHKANDJUMP(str_errno, mpi_errno, MPI_ERR_OTHER, "**argstr_hostd"); *remote_endpoint_id = (uint32_t)tmp_endpoint_id; mpi_errno = MPIU_Str_get_binary_arg (business_card, MPIDI_CH3I_NIC_KEY, (char *)remote_nic_id, sizeof(uint64_t), &len); /* FIXME: create a real error string for this */ MPIU_ERR_CHKANDJUMP(str_errno || len != sizeof(uint64_t), mpi_errno, MPI_ERR_OTHER, "**argstr_hostd"); fn_exit: return mpi_errno; fn_fail: goto fn_exit; }
/* read_s_cookie - extract the int-sized payload written by create_s_cookie
 * from an IOV-wrapped cookie.
 *
 * Returns MPI_ERR_OTHER if the cookie length does not match.
 */
static int read_s_cookie (MPID_IOV cookie, int *data_sz)
{
    int mpi_errno = MPI_SUCCESS;

    /* FIX: the check previously used sizeof (data_sz) -- the size of the
       pointer (8 on LP64), not of the int payload that create_s_cookie
       actually stores -- so every valid 4-byte cookie was rejected on
       64-bit builds. */
    MPIU_ERR_CHKANDJUMP (cookie.MPID_IOV_LEN != sizeof (*data_sz), mpi_errno,
                         MPI_ERR_OTHER, "**fail");
    *data_sz = *(int *)cookie.MPID_IOV_BUF;

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
/**
 * \brief MPI-PAMI glue for MPI_Win_allocate function
 *
 * Create a window object.  Allocates an MPID_Win object and initializes it,
 * then allocates the collective info array, initializes our entry, and
 * performs an Allgather to distribute/collect the rest of the array entries.
 * On each process, it allocates memory of at least size bytes, returns a
 * pointer to it, and returns a window object that can be used by all
 * processes in comm to perform RMA operations.  The returned memory consists
 * of size bytes local to each process, starting at address base_ptr and is
 * associated with the window as if the user called 'MPI_Win_create' on
 * existing memory.  The size argument may be different at each process and
 * size = 0 is valid.
 *
 * \param[in]  size      size of window in bytes (nonnegative integer)
 * \param[in]  disp_unit local unit size for displacements, in bytes (positive integer)
 * \param[in]  info      info argument (handle)
 * \param[in]  comm_ptr  Communicator (handle)
 * \param[out] base_ptr  base address of the window in local memory
 * \param[out] win_ptr   window object returned by the call (handle)
 * \return MPI_SUCCESS, MPI_ERR_ARG, MPI_ERR_COMM, MPI_ERR_INFO, MPI_ERR_OTHER,
 *         MPI_ERR_SIZE
 */
int MPID_Win_allocate(MPI_Aint size, int disp_unit, MPID_Info * info,
                      MPID_Comm * comm_ptr, void *base_ptr, MPID_Win ** win_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    int rc = MPI_SUCCESS;
    mpir_errflag_t errflag = MPIR_ERR_NONE;
    void *baseP;
    static char FCNAME[] = "MPID_Win_allocate";
    MPIDI_Win_info *winfo;
    MPID_Win *win;
    int rank;

    rc = MPIDI_Win_init(size, disp_unit, win_ptr, info, comm_ptr,
                        MPI_WIN_FLAVOR_ALLOCATE, MPI_WIN_UNIFIED);
    /* FIX: the return code of MPIDI_Win_init was previously ignored. */
    if (rc != MPI_SUCCESS)
        return rc;

    win = *win_ptr;

    if (size > 0) {
        baseP = MPIU_Malloc(size);
#ifndef MPIDI_NO_ASSERT
        MPID_assert(baseP != NULL);
#else
        MPIU_ERR_CHKANDJUMP((baseP == NULL), mpi_errno, MPI_ERR_BUFFER, "**bufnull");
#endif
    }
    else if (size == 0) {
        baseP = NULL;
    }
    else {
        /* FIX: the condition previously read "size >= 0", which is always
           false in this branch (size < 0 here), so a negative size fell
           through with baseP uninitialized.  CHKANDSTMT raises the error
           when its condition is TRUE, so test size < 0. */
        MPIU_ERR_CHKANDSTMT(size < 0, mpi_errno, MPI_ERR_SIZE,
                            return mpi_errno, "**rmasize");
    }

    win->base = baseP;
    rank = comm_ptr->rank;
    winfo = &win->mpid.info[rank];
    winfo->base_addr = baseP;
    winfo->win = win;
    winfo->disp_unit = disp_unit;

    /* Exchange the per-rank window info with the rest of the communicator. */
    rc = MPIDI_Win_allgather(size, win_ptr);
    if (rc != MPI_SUCCESS)
        return rc;

    *(void **) base_ptr = (void *) win->base;
    mpi_errno = MPIR_Barrier_impl(comm_ptr, &errflag);

  fn_fail:
    return mpi_errno;
}
/*@
   MPI_Add_error_code - Add an MPI error code to an MPI error class

Input Parameters:
. errorclass - Error class to add an error code.

Output Parameters:
. errorcode - New error code for this error class.

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_OTHER
@*/
int MPI_Add_error_code(int errorclass, int *errorcode)
{
    static const char FCNAME[] = "MPI_Add_error_code";
    int mpi_errno = MPI_SUCCESS;
    int new_code;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_ADD_ERROR_CODE);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_ADD_ERROR_CODE);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* FIXME: verify that errorclass is a dynamic class */
            MPIR_ERRTEST_ARGNULL(errorcode, "errorcode", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* A negative return from MPIR_Err_add_code means the dynamic error
       code space is exhausted. */
    new_code = MPIR_Err_add_code( errorclass );
    MPIU_ERR_CHKANDJUMP(new_code<0,mpi_errno,MPI_ERR_OTHER,"**noerrcodes");
    *errorcode = new_code;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_ADD_ERROR_CODE);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_add_error_code",
            "**mpi_add_error_code %d %p", errorclass, errorcode);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
int MPID_nem_mx_get_business_card (int my_rank, char **bc_val_p, int *val_max_sz_p) { int mpi_errno = MPI_SUCCESS; int str_errno = MPIU_STR_SUCCESS; str_errno = MPIU_Str_add_int_arg (bc_val_p, val_max_sz_p, MPIDI_CH3I_ENDPOINT_KEY, MPID_nem_mx_local_endpoint_id); if (str_errno) { MPIU_ERR_CHKANDJUMP(str_errno == MPIU_STR_NOMEM, mpi_errno, MPI_ERR_OTHER, "**buscard_len"); MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**buscard"); } str_errno = MPIU_Str_add_binary_arg (bc_val_p, val_max_sz_p, MPIDI_CH3I_NIC_KEY, (char *)&MPID_nem_mx_local_nic_id, sizeof(uint64_t)); if (str_errno) { MPIU_ERR_CHKANDJUMP(str_errno == MPIU_STR_NOMEM, mpi_errno, MPI_ERR_OTHER, "**buscard_len"); MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**buscard"); } fn_exit: return mpi_errno; fn_fail: goto fn_exit; }
/* MPID_nem_newmad_get_business_card - publish the local NewMadeleine
 * session URL into the business-card string.
 */
int MPID_nem_newmad_get_business_card (int my_rank, char **bc_val_p, int *val_max_sz_p)
{
    int mpi_errno = MPI_SUCCESS;
    int str_errno;

    str_errno = MPIU_Str_add_binary_arg (bc_val_p, val_max_sz_p,
                                         MPIDI_CH3I_URL_KEY,
                                         local_session_url,
                                         strlen(local_session_url));
    if (str_errno != MPIU_STR_SUCCESS)
    {
        /* Out of room in the card is a distinct, recognizable failure. */
        MPIU_ERR_CHKANDJUMP (str_errno == MPIU_STR_NOMEM, mpi_errno,
                             MPI_ERR_OTHER, "**buscard_len");
        MPIU_ERR_SETANDJUMP (mpi_errno, MPI_ERR_OTHER, "**buscard");
    }

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
/* create_s_cookie - allocate a simple cookie holding one int (data_sz).
 * On success *cookie points to the heap-allocated payload (caller frees)
 * and *len is its byte length.
 */
static int create_s_cookie (int data_sz, char **cookie, int *len)
{
    int mpi_errno = MPI_SUCCESS;
    int *payload;

    payload = MPIU_Malloc (sizeof (data_sz));
    MPIU_ERR_CHKANDJUMP (payload == NULL, mpi_errno, MPI_ERR_OTHER, "**nomem");

    *payload = data_sz;
    *cookie  = (char *)payload;
    *len     = sizeof (data_sz);

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
/* MPIDI_Win_free - free an RMA window object and everything it owns.
 *
 * Refuses to free a window that is still inside a synchronization epoch
 * (MPI_ERR_RMA_SYNC), waits for outstanding passive-target operations to
 * finish, frees the window's private communicator and per-target arrays,
 * and finally releases the window object itself.
 */
int MPIDI_Win_free(MPID_Win **win_ptr)
{
    int mpi_errno=MPI_SUCCESS;
    int in_use;
    MPID_Comm *comm_ptr;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_WIN_FREE);

    MPIDI_RMA_FUNC_ENTER(MPID_STATE_MPIDI_WIN_FREE);

    /* It is erroneous to free a window while an access/exposure epoch is
       still open. */
    MPIU_ERR_CHKANDJUMP((*win_ptr)->epoch_state != MPIDI_EPOCH_NONE,
                        mpi_errno, MPI_ERR_RMA_SYNC, "**rmasync");

    mpi_errno = MPIDI_CH3I_Wait_for_pt_ops_finish(*win_ptr);
    if(mpi_errno) MPIU_ERR_POP(mpi_errno);

    comm_ptr = (*win_ptr)->comm_ptr;
    mpi_errno = MPIR_Comm_free_impl(comm_ptr);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    /* Per-process bookkeeping arrays allocated at window creation. */
    MPIU_Free((*win_ptr)->targets);
    MPIU_Free((*win_ptr)->base_addrs);
    MPIU_Free((*win_ptr)->sizes);
    MPIU_Free((*win_ptr)->disp_units);
    MPIU_Free((*win_ptr)->all_win_handles);
    MPIU_Free((*win_ptr)->pt_rma_puts_accs);

    /* Free the attached buffer for windows created with MPI_Win_allocate() */
    if ((*win_ptr)->create_flavor == MPI_WIN_FLAVOR_ALLOCATE ||
        (*win_ptr)->create_flavor == MPI_WIN_FLAVOR_SHARED) {
        /* Shared-memory-backed buffers are released elsewhere; only free
           the plain malloc'ed case here. */
        if ((*win_ptr)->shm_allocated == FALSE && (*win_ptr)->size > 0) {
            MPIU_Free((*win_ptr)->base);
        }
    }

    MPIU_Object_release_ref(*win_ptr, &in_use);
    /* MPI windows don't have reference count semantics, so this should
       always be true */
    MPIU_Assert(!in_use);
    MPIU_Handle_obj_free( &MPID_Win_mem, *win_ptr );

  fn_exit:
    MPIDI_RMA_FUNC_EXIT(MPID_STATE_MPIDI_WIN_FREE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* read_r_cookie - unpack hostname/port/data_sz from an IOV-wrapped
 * r_cookie_t created by create_r_cookie.  *hostname aliases storage inside
 * the cookie buffer; the caller must not free it separately.
 */
static int read_r_cookie (MPID_IOV cookie, char **hostname, int *port, int *data_sz)
{
    int mpi_errno = MPI_SUCCESS;
    r_cookie_t *rc;

    /* The cookie carries a variable-length hostname after the fixed
       header, so only a minimum-size check is possible. */
    MPIU_ERR_CHKANDJUMP (cookie.MPID_IOV_LEN < sizeof (r_cookie_t),
                         mpi_errno, MPI_ERR_OTHER, "**fail");

    rc = (r_cookie_t *)cookie.MPID_IOV_BUF;
    *hostname = rc->hostname;
    *port     = rc->port;
    *data_sz  = rc->data_sz;

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
/* MPID_nem_ib_lmt_start_recv_core - post the RDMA read that pulls the
 * large-message payload from the sender's exposed buffer (raddr/rkey) into
 * write_to_buf, and register the request for completion detection.
 */
int MPID_nem_ib_lmt_start_recv_core(struct MPID_Request *req, void *raddr, uint32_t rkey, void *write_to_buf)
{
    int mpi_errno = MPI_SUCCESS;
    int ibcom_errno;
    struct MPIDI_VC *vc = req->ch.vc;
    MPID_nem_ib_vc_area *vc_ib = VC_IB(vc);
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_IB_LMT_START_RECV_CORE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_IB_LMT_START_RECV_CORE);

    /* Post the RDMA read; 'req' rides along as the work-request cookie. */
    ibcom_errno =
        MPID_nem_ib_com_lrecv(vc_ib->sc->fd, (uint64_t) req, raddr,
                              req->ch.lmt_data_sz, rkey, write_to_buf);
    /* Account for the completion-queue entry the read will generate. */
    MPID_nem_ib_ncqe += 1;
    //dprintf("start_recv,ncqe=%d\n", MPID_nem_ib_ncqe);
    MPIU_ERR_CHKANDJUMP(ibcom_errno, mpi_errno, MPI_ERR_OTHER,
                        "**MPID_nem_ib_com_lrecv");
    dprintf("lmt_start_recv_core,MPID_nem_ib_ncqe=%d\n", MPID_nem_ib_ncqe);
    dprintf
        ("lmt_start_recv_core,req=%p,sz=%ld,write_to_buf=%p,lmt_pack_buf=%p,user_buf=%p,raddr=%p,rkey=%08x,tail=%p=%02x\n",
         req, req->ch.lmt_data_sz, write_to_buf, REQ_FIELD(req, lmt_pack_buf),
         req->dev.user_buf, raddr, rkey,
         write_to_buf + req->ch.lmt_data_sz - sizeof(uint8_t),
         *((uint8_t *) (write_to_buf + req->ch.lmt_data_sz - sizeof(uint8_t))));

#ifdef MPID_NEM_IB_LMT_GET_CQE
    MPID_nem_ib_ncqe_to_drain += 1;     /* use CQE instead of polling */
#else
    /* drain_scq and ib_poll is not ordered, so both can decrement ref_count */
    MPIR_Request_add_ref(req);

    /* register to poll list in ib_poll() */
    /* don't use req->dev.next because it causes unknown problem */
    MPID_nem_ib_lmtq_enqueue(&MPID_nem_ib_lmtq, req);
    dprintf("lmt_start_recv_core,lmtq enqueue\n");
    //volatile uint8_t* tailmagic = (uint8_t*)((void*)req->dev.user_buf + req->ch.lmt_data_sz - sizeof(uint8_t));
    //dprintf("start_recv_core,cur_tail=%02x,lmt_receiver_tail=%02x\n", *tailmagic, REQ_FIELD(req, lmt_receiver_tail));
#endif

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_IB_LMT_START_RECV_CORE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPID_nem_ib_lmt_done_send - sender-side completion of a large-message
 * transfer: free the cookie and any packing buffer, then complete the send
 * request.
 */
int MPID_nem_ib_lmt_done_send(struct MPIDI_VC *vc, struct MPID_Request *req)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_IB_LMT_DONE_SEND);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_IB_LMT_DONE_SEND);

    dprintf("lmt_done_send,enter,%d<-%d,req=%p,REQ_FIELD(req, lmt_pack_buf)=%p\n",
            MPID_nem_ib_myrank, vc->pg_rank, req, REQ_FIELD(req, lmt_pack_buf));

    /* free memory area for cookie.  (MPIU_Free of a NULL cookie is
       harmless; the dprintf just records the unexpected case.) */
    if (!req->ch.s_cookie) {
        dprintf("lmt_done_send,enter,req->ch.s_cookie is zero");
    }
    MPIU_Free(req->ch.s_cookie);
    //dprintf("lmt_done_send,free cookie,%p\n", req->ch.s_cookie);

    /* free temporal buffer for eager-send non-contiguous data.
     * MPIDI_CH3U_Recvq_FDU_or_AEP (in mpid_isend.c) sets req->dev.datatype */
    int is_contig;
    MPID_Datatype_is_contig(req->dev.datatype, &is_contig);
    if (!is_contig && REQ_FIELD(req, lmt_pack_buf)) {
        dprintf("lmt_done_send,lmt-get,non-contiguous,free lmt_pack_buf\n");
#if 1   /* debug, enable again later */
        MPIU_Free(REQ_FIELD(req, lmt_pack_buf));
#endif
    }

    /* mark completion on sreq.  A non-NULL OnDataAvail handler is not
       expected here and is treated as an error. */
    MPIU_ERR_CHKANDJUMP(req->dev.OnDataAvail, mpi_errno, MPI_ERR_OTHER,
                        "**MPID_nem_ib_lmt_done_send");
    dprintf("lmt_done_send,1,req=%p,pcc=%d\n", req, MPIDI_CH3I_progress_completion_count.v);
    MPIDI_CH3U_Request_complete(req);
    dprintf("lmt_done_send,complete,req=%p\n", req);
    dprintf("lmt_done_send,2,req=%p,pcc=%d\n", req, MPIDI_CH3I_progress_completion_count.v);
    //dprintf("lmt_done_send, mark completion on sreq\n");

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_IB_LMT_DONE_SEND);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPID_nem_ptl_get_id_from_bc - parse the Portals process id (nid/pid) and
 * the seven portal-table indices out of a business-card string.
 *
 * Returns MPI_ERR_OTHER (**badbusinesscard) if any field is missing or has
 * the wrong length.
 */
int MPID_nem_ptl_get_id_from_bc(const char *business_card, ptl_process_t *id,
                                ptl_pt_index_t *pt, ptl_pt_index_t *ptg, ptl_pt_index_t *ptc,
                                ptl_pt_index_t *ptr, ptl_pt_index_t *ptrg, ptl_pt_index_t *ptrc)
{
    int mpi_errno = MPI_SUCCESS;
    int ret;
    int len;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_PTL_GET_ID_FROM_BC);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_PTL_GET_ID_FROM_BC);

    ret = MPIU_Str_get_binary_arg(business_card, NID_KEY, (char *)&id->phys.nid,
                                  sizeof(id->phys.nid), &len);
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS || len != sizeof(id->phys.nid),
                        mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");

    ret = MPIU_Str_get_binary_arg(business_card, PID_KEY, (char *)&id->phys.pid,
                                  sizeof(id->phys.pid), &len);
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS || len != sizeof(id->phys.pid),
                        mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");

    /* FIX: the buffer sizes below previously used sizeof(pt), sizeof(ptg),
       etc. -- the size of the POINTER, not of the ptl_pt_index_t it points
       to -- allowing writes past the caller's variables on LP64; the
       PTIRG/PTIRC entries also reused sizeof(ptr) and PTIRG's length check
       mistakenly tested sizeof(*ptrc).  All now use sizeof(*<dest>). */
    ret = MPIU_Str_get_binary_arg(business_card, PTI_KEY, (char *)pt, sizeof(*pt), &len);
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS || len != sizeof(*pt),
                        mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");

    ret = MPIU_Str_get_binary_arg(business_card, PTIG_KEY, (char *)ptg, sizeof(*ptg), &len);
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS || len != sizeof(*ptg),
                        mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");

    ret = MPIU_Str_get_binary_arg(business_card, PTIC_KEY, (char *)ptc, sizeof(*ptc), &len);
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS || len != sizeof(*ptc),
                        mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");

    ret = MPIU_Str_get_binary_arg(business_card, PTIR_KEY, (char *)ptr, sizeof(*ptr), &len);
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS || len != sizeof(*ptr),
                        mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");

    ret = MPIU_Str_get_binary_arg(business_card, PTIRG_KEY, (char *)ptrg, sizeof(*ptrg), &len);
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS || len != sizeof(*ptrg),
                        mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");

    ret = MPIU_Str_get_binary_arg(business_card, PTIRC_KEY, (char *)ptrc, sizeof(*ptrc), &len);
    MPIU_ERR_CHKANDJUMP(ret != MPIU_STR_SUCCESS || len != sizeof(*ptrc),
                        mpi_errno, MPI_ERR_OTHER, "**badbusinesscard");

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_PTL_GET_ID_FROM_BC);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPID_nem_newmad_get_from_bc - extract the NewMadeleine session URL from
 * a business-card string into 'url' (capacity MPID_NEM_NMAD_MAX_SIZE).
 */
int MPID_nem_newmad_get_from_bc (const char *business_card, char *url)
{
    int mpi_errno = MPI_SUCCESS;
    int str_errno;
    int len;

    str_errno = MPIU_Str_get_binary_arg (business_card, MPIDI_CH3I_URL_KEY,
                                         url, MPID_NEM_NMAD_MAX_SIZE, &len);
    if (str_errno != MPIU_STR_SUCCESS)
    {
        /* FIXME: create a real error string for this */
        MPIU_ERR_CHKANDJUMP (str_errno, mpi_errno, MPI_ERR_OTHER, "**argstr_hostd");
    }

 fn_exit:
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
int MPID_nem_mxm_get_business_card(int my_rank, char **bc_val_p, int *val_max_sz_p) { int mpi_errno = MPI_SUCCESS; int str_errno = MPIU_STR_SUCCESS; MPIDI_STATE_DECL(MPID_STATE_MXM_GET_BUSINESS_CARD); MPIDI_FUNC_ENTER(MPID_STATE_MXM_GET_BUSINESS_CARD); str_errno = MPIU_Str_add_binary_arg(bc_val_p, val_max_sz_p, MXM_MPICH_ENDPOINT_KEY, _mxm_obj.mxm_ep_addr, _mxm_obj.mxm_ep_addr_size); if (str_errno) { MPIU_ERR_CHKANDJUMP(str_errno == MPIU_STR_NOMEM, mpi_errno, MPI_ERR_OTHER, "**buscard_len"); MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**buscard"); } fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_MXM_GET_BUSINESS_CARD); return mpi_errno; fn_fail: goto fn_exit; }
/* MPIR_Comm_create_errhandler_impl - allocate and initialize a
 * communicator error handler object and publish its handle.
 */
int MPIR_Comm_create_errhandler_impl(MPI_Comm_errhandler_function *comm_errhandler_fn,
                                     MPI_Errhandler *errhandler)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Errhandler *eh;

    eh = (MPID_Errhandler *)MPIU_Handle_obj_alloc( &MPID_Errhandler_mem );
    MPIU_ERR_CHKANDJUMP(!eh, mpi_errno, MPI_ERR_OTHER, "**nomem");

    /* Mark as a C-language COMM handler with one initial reference. */
    eh->language = MPID_LANG_C;
    eh->kind     = MPID_COMM;
    MPIU_Object_set_ref(eh, 1);
    eh->errfn.C_Comm_Handler_function = comm_errhandler_fn;

    /* Publish only after the object is fully initialized. */
    MPIU_OBJ_PUBLISH_HANDLE(*errhandler, eh->handle);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPIR_Graph_neighbors_count_impl - number of neighbors of 'rank' in the
 * communicator's graph topology, computed from adjacent entries of the
 * cumulative index array.
 */
int MPIR_Graph_neighbors_count_impl(MPID_Comm *comm_ptr, int rank, int *nneighbors)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Topology *topo;

    topo = MPIR_Topology_get( comm_ptr );
    MPIU_ERR_CHKANDJUMP((!topo || topo->kind != MPI_GRAPH), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notgraphtopo");
    MPIU_ERR_CHKANDJUMP2((rank < 0 || rank >= topo->topo.graph.nnodes), mpi_errno,
                         MPI_ERR_RANK, "**rank", "**rank %d %d",
                         rank, topo->topo.graph.nnodes );

    /* index[] is cumulative: neighbors of rank r occupy
       edges[index[r-1] .. index[r]-1] (with index[-1] taken as 0). */
    {
        int hi = topo->topo.graph.index[rank];
        int lo = (rank == 0) ? 0 : topo->topo.graph.index[rank - 1];
        *nneighbors = hi - lo;
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPIR_Comm_create_keyval_impl - allocate a communicator attribute keyval,
 * wire up the copy/delete callbacks, and publish the handle.
 */
int MPIR_Comm_create_keyval_impl(MPI_Comm_copy_attr_function *comm_copy_attr_fn,
                                 MPI_Comm_delete_attr_function *comm_delete_attr_fn,
                                 int *comm_keyval, void *extra_state)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Keyval *keyval_ptr;

    keyval_ptr = (MPID_Keyval *)MPIU_Handle_obj_alloc( &MPID_Keyval_mem );
    MPIU_ERR_CHKANDJUMP(!keyval_ptr, mpi_errno, MPI_ERR_OTHER,"**nomem");

    /* Initialize the attribute dup function */
    if (!MPIR_Process.attr_dup) {
        MPIR_Process.attr_dup  = MPIR_Attr_dup_list;
        MPIR_Process.attr_free = MPIR_Attr_delete_list;
    }

    /* The handle encodes the keyval kind.  Modify it to have the correct
       field: clear the 4-bit kind field (mask 0x03c00000, bits 22-25) and
       insert MPID_COMM there. */
    keyval_ptr->handle = (keyval_ptr->handle & ~(0x03c00000)) | (MPID_COMM << 22);
    MPIU_Object_set_ref(keyval_ptr,1);
    keyval_ptr->was_freed          = 0;
    keyval_ptr->kind               = MPID_COMM;
    keyval_ptr->extra_state        = extra_state;
    keyval_ptr->copyfn.user_function = comm_copy_attr_fn;
    keyval_ptr->copyfn.proxy = MPIR_Attr_copy_c_proxy;
    keyval_ptr->delfn.user_function = comm_delete_attr_fn;
    keyval_ptr->delfn.proxy = MPIR_Attr_delete_c_proxy;

    /* Publish the handle only after the object is fully set up. */
    MPIU_OBJ_PUBLISH_HANDLE(*comm_keyval, keyval_ptr->handle);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/*@
MPI_Cart_shift - Returns the shifted source and destination ranks, given a
                 shift direction and amount

Input Parameters:
+ comm - communicator with cartesian structure (handle)
. direction - coordinate dimension of shift (integer)
- displ - displacement (> 0: upwards shift, < 0: downwards shift) (integer)

Output Parameters:
+ source - rank of source process (integer)
- dest - rank of destination process (integer)

Notes:
The 'direction' argument is in the range '[0,n-1]' for an n-dimensional
Cartesian mesh.

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Cart_shift(MPI_Comm comm, int direction, int displ, int *source, int *dest)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPIR_Topology *cart_ptr;
    int i;
    int pos[MAX_CART_DIM];
    int rank;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_CART_SHIFT);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_CART_SHIFT);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            /* If comm_ptr is not valid, it will be reset to null */
            MPIR_ERRTEST_ARGNULL( source, "source", mpi_errno );
            MPIR_ERRTEST_ARGNULL( dest, "dest", mpi_errno );
            MPIR_ERRTEST_ARGNEG( direction, "direction", mpi_errno );
            /* Nothing in the standard indicates that a zero displacement
               is not valid, so we don't check for a zero shift */
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    cart_ptr = MPIR_Topology_get( comm_ptr );

    MPIU_ERR_CHKANDJUMP((!cart_ptr || cart_ptr->kind != MPI_CART), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notcarttopo");
    MPIU_ERR_CHKANDJUMP((cart_ptr->topo.cart.ndims == 0), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**dimszero");
    MPIU_ERR_CHKANDJUMP2((direction >= cart_ptr->topo.cart.ndims), mpi_errno,
                         MPI_ERR_ARG, "**dimsmany", "**dimsmany %d %d",
                         cart_ptr->topo.cart.ndims, direction);

    /* Check for the case of a 0 displacement */
    rank = comm_ptr->rank;
    if (displ == 0) {
        *source = *dest = rank;
    }
    else {
        /* To support advanced implementations that support MPI_Cart_create,
           we compute the new position and call PMPI_Cart_rank to get the
           source and destination.  We could bypass that step if we know
           that the mapping is trivial.  Copy the current position. */
        for (i=0; i<cart_ptr->topo.cart.ndims; i++) {
            pos[i] = cart_ptr->topo.cart.position[i];
        }
        /* We must return MPI_PROC_NULL if shifted over the edge of a
           non-periodic mesh */
        pos[direction] += displ;
        if (!cart_ptr->topo.cart.periodic[direction] &&
            (pos[direction] >= cart_ptr->topo.cart.dims[direction] ||
             pos[direction] < 0)) {
            *dest = MPI_PROC_NULL;
        }
        else {
            MPIR_Cart_rank_impl( cart_ptr, pos, dest );
        }

        /* The source is the shift in the opposite direction from the
           original position. */
        pos[direction] = cart_ptr->topo.cart.position[direction] - displ;
        if (!cart_ptr->topo.cart.periodic[direction] &&
            (pos[direction] >= cart_ptr->topo.cart.dims[direction] ||
             pos[direction] < 0)) {
            *source = MPI_PROC_NULL;
        }
        else {
            MPIR_Cart_rank_impl( cart_ptr, pos, source );
        }
    }

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_CART_SHIFT);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_cart_shift",
            "**mpi_cart_shift %C %d %d %p %p", comm, direction, displ, source, dest);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/* MPI_Info_get_nthkey - Returns the nth defined key in 'info' by copying it
   into the caller-supplied buffer 'key'.  This routine is a thin wrapper:
   it validates the info handle and the key pointer, then delegates the
   actual lookup (including any range check on 'n' — presumably done in the
   impl routine; verify there) to MPIR_Info_get_nthkey_impl. */
int MPI_Info_get_nthkey( MPI_Info info, int n, char *key )
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Info *info_ptr=0;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_INFO_GET_NTHKEY);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    /* Serialize with other MPI calls; released on every exit path below. */
    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_INFO_GET_NTHKEY);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_INFO(info, mpi_errno);
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Info_get_ptr( info, info_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate info_ptr */
            MPID_Info_valid_ptr( info_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;
            /* The output buffer must be non-NULL. */
            MPIU_ERR_CHKANDJUMP((!key), mpi_errno, MPI_ERR_INFO_KEY, "**infokeynull");
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    mpi_errno = MPIR_Info_get_nthkey_impl(info_ptr, n, key);
    if (mpi_errno) goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_INFO_GET_NTHKEY);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_info_get_nthkey",
            "**mpi_info_get_nthkey %I %d %p", info, n, key);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
   MPI_Comm_join - Create a communicator by joining two processes connected by
     a socket.

Input Parameter:
. fd - socket file descriptor

Output Parameter:
. intercomm - new intercommunicator (handle)

Notes:
  The socket must be quiescent before 'MPI_COMM_JOIN' is called and after
  'MPI_COMM_JOIN' returns.  More specifically, on entry to 'MPI_COMM_JOIN', a
  read on the socket will not read any data that was written to the socket
  before the remote process called 'MPI_COMM_JOIN'.

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
@*/
int MPI_Comm_join(int fd, MPI_Comm *intercomm)
{
    static const char FCNAME[] = "MPI_Comm_join";
    int mpi_errno = MPI_SUCCESS, err;
    MPID_Comm *intercomm_ptr;
    char *local_port, *remote_port;
    MPIU_CHKLMEM_DECL(2);
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_COMM_JOIN);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_COMM_JOIN);

    /* ... body of routine ...  */

    /* Scratch buffers for the two port names; freed via
       MPIU_CHKLMEM_FREEALL at fn_exit. */
    MPIU_CHKLMEM_MALLOC(local_port, char *, MPI_MAX_PORT_NAME, mpi_errno, "local port name");
    MPIU_CHKLMEM_MALLOC(remote_port, char *, MPI_MAX_PORT_NAME, mpi_errno, "remote port name");

    /* Open a local port and exchange port names over the socket: send ours,
       then receive the peer's. */
    mpi_errno = MPIR_Open_port_impl(NULL, local_port);
    MPIU_ERR_CHKANDJUMP((mpi_errno != MPI_SUCCESS), mpi_errno, MPI_ERR_OTHER, "**openportfailed");

    err = MPIR_fd_send(fd, local_port, MPI_MAX_PORT_NAME);
    MPIU_ERR_CHKANDJUMP1((err != 0), mpi_errno, MPI_ERR_INTERN, "**join_send", "**join_send %d", err);

    err = MPIR_fd_recv(fd, remote_port, MPI_MAX_PORT_NAME);
    MPIU_ERR_CHKANDJUMP1((err != 0), mpi_errno, MPI_ERR_INTERN, "**join_recv", "**join_recv %d", err);

    /* Identical port names would mean both sides take the same branch of the
       accept/connect tie-break below, so treat it as an internal error. */
    MPIU_ERR_CHKANDJUMP2((strcmp(local_port, remote_port) == 0), mpi_errno, MPI_ERR_INTERN,
                         "**join_portname", "**join_portname %s %s", local_port, remote_port);

    /* Tie-break on the lexicographic order of the port names: the process
       with the smaller local port name accepts, the other connects.  Both
       use MPI_COMM_SELF, producing a 1-on-1 intercommunicator. */
    if (strcmp(local_port, remote_port) < 0) {
        MPID_Comm *comm_self_ptr;
        MPID_Comm_get_ptr( MPI_COMM_SELF, comm_self_ptr );
        mpi_errno = MPIR_Comm_accept_impl(local_port, NULL, 0, comm_self_ptr, &intercomm_ptr);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }
    else {
        MPID_Comm *comm_self_ptr;
        MPID_Comm_get_ptr( MPI_COMM_SELF, comm_self_ptr );
        mpi_errno = MPIR_Comm_connect_impl(remote_port, NULL, 0, comm_self_ptr, &intercomm_ptr);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }

    /* The port is no longer needed once the intercommunicator exists. */
    mpi_errno = MPIR_Close_port_impl(local_port);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);

    MPIU_OBJ_PUBLISH_HANDLE(*intercomm, intercomm_ptr->handle);

    /* ... end of body of routine ... */

  fn_exit:
    MPIU_CHKLMEM_FREEALL();
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_COMM_JOIN);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_comm_join", "**mpi_comm_join %d %p", fd, intercomm);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
   MPI_Type_create_keyval - Create an attribute keyval for MPI datatypes

Input Parameters:
+ type_copy_attr_fn - copy callback function for type_keyval (function)
. type_delete_attr_fn - delete callback function for type_keyval (function)
- extra_state - extra state for callback functions

Output Parameters:
. type_keyval - key value for future access (integer)

Notes:

   Default copy and delete functions are available.  These are
+ MPI_TYPE_NULL_COPY_FN   - empty copy function
. MPI_TYPE_NULL_DELETE_FN - empty delete function
- MPI_TYPE_DUP_FN         - simple dup function

.N AttrErrReturn

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_OTHER
@*/
int MPI_Type_create_keyval(MPI_Type_copy_attr_function *type_copy_attr_fn,
                           MPI_Type_delete_attr_function *type_delete_attr_fn,
                           int *type_keyval, void *extra_state)
{
    static const char FCNAME[] = "MPI_Type_create_keyval";
    int mpi_errno = MPI_SUCCESS;
    MPID_Keyval *keyval_ptr;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_TYPE_CREATE_KEYVAL);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_TYPE_CREATE_KEYVAL);

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_ARGNULL(type_keyval,"type_keyval",mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    keyval_ptr = (MPID_Keyval *)MPIU_Handle_obj_alloc( &MPID_Keyval_mem );
    MPIU_ERR_CHKANDJUMP(!keyval_ptr,mpi_errno,MPI_ERR_OTHER,"**nomem");

    /* Initialize the attribute dup function */
    /* Lazily install the process-wide attribute copy/free hooks on first
       keyval creation. */
    if (!MPIR_Process.attr_dup) {
        MPIR_Process.attr_dup  = MPIR_Attr_dup_list;
        MPIR_Process.attr_free = MPIR_Attr_delete_list;
    }

    /* The handle encodes the keyval kind.  Modify it to have the correct
       field: clear the kind bits (mask 0x03c00000, i.e. bits 22-25) and set
       them to MPID_DATATYPE. */
    keyval_ptr->handle = (keyval_ptr->handle & ~(0x03c00000)) | (MPID_DATATYPE << 22);
    MPIU_Object_set_ref(keyval_ptr,1);
    keyval_ptr->was_freed             = 0;
    keyval_ptr->kind                  = MPID_DATATYPE;
    keyval_ptr->extra_state           = extra_state;
    /* Store the user callbacks behind C proxy shims. */
    keyval_ptr->copyfn.user_function  = type_copy_attr_fn;
    keyval_ptr->copyfn.proxy          = MPIR_Attr_copy_c_proxy;
    keyval_ptr->delfn.user_function   = type_delete_attr_fn;
    keyval_ptr->delfn.proxy           = MPIR_Attr_delete_c_proxy;

    /* Tell finalize to check for attributes on permenant types */
    MPIR_DatatypeAttrFinalize();

    MPIU_OBJ_PUBLISH_HANDLE(*type_keyval, keyval_ptr->handle);

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_TYPE_CREATE_KEYVAL);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_type_create_keyval",
            "**mpi_type_create_keyval %p %p %p %p",
            type_copy_attr_fn, type_delete_attr_fn, type_keyval, extra_state);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( NULL, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/* Aggressively reclaim a target structure: pick one target that still has
   queued operations, force-flush it (issuing all of its operations and
   waiting for remote completion), free it, and retry until a target slot can
   be allocated.  On success *target points to a freshly allocated target
   structure; returns an MPI error code. */
int MPIDI_CH3I_RMA_Cleanup_target_aggressive(MPID_Win * win_ptr, MPIDI_RMA_Target_t ** target)
{
    int i, local_completed = 0, remote_completed = 0;
    int made_progress = 0;
    MPIDI_RMA_Target_t *curr_target = NULL;
    int mpi_errno = MPI_SUCCESS;

    (*target) = NULL;

    /* If we are in an aggressive cleanup, the window must be holding
     * up resources.  If it isn't, we are in the wrong window and
     * incorrectly entered this function. */
    MPIU_ERR_CHKANDJUMP(win_ptr->non_empty_slots == 0, mpi_errno, MPI_ERR_OTHER, "**rmanotarget");

    if (win_ptr->states.access_state == MPIDI_RMA_LOCK_ALL_CALLED) {
        /* switch to window-wide protocol */
        /* Issue a shared-lock request to every remote (off-node) rank that
           does not already have a target structure, then mark the lock-all
           epoch as fully issued. */
        MPIDI_VC_t *orig_vc = NULL, *target_vc = NULL;
        MPIDI_Comm_get_vc(win_ptr->comm_ptr, win_ptr->comm_ptr->rank, &orig_vc);
        for (i = 0; i < win_ptr->comm_ptr->local_size; i++) {
            if (i == win_ptr->comm_ptr->rank)
                continue;
            MPIDI_Comm_get_vc(win_ptr->comm_ptr, i, &target_vc);
            if (orig_vc->node_id != target_vc->node_id) {
                mpi_errno = MPIDI_CH3I_Win_find_target(win_ptr, i, &curr_target);
                if (mpi_errno)
                    MPIU_ERR_POP(mpi_errno);
                if (curr_target == NULL) {
                    win_ptr->outstanding_locks++;
                    mpi_errno = send_lock_msg(i, MPI_LOCK_SHARED, win_ptr);
                    if (mpi_errno != MPI_SUCCESS)
                        MPIU_ERR_POP(mpi_errno);
                }
            }
        }
        win_ptr->states.access_state = MPIDI_RMA_LOCK_ALL_ISSUED;
    }

    do {
        /* find a non-empty slot and set the FLUSH flag on the first
         * target */
        /* TODO: we should think about better strategies on selecting the target */
        /* NOTE(review): if every slot's target_list is empty this scan falls
           through with i == num_slots and the dereference below reads past
           slots[] — presumably the non_empty_slots check above rules that
           out; confirm that invariant holds across loop iterations. */
        for (i = 0; i < win_ptr->num_slots; i++)
            if (win_ptr->slots[i].target_list != NULL)
                break;
        curr_target = win_ptr->slots[i].target_list;
        if (curr_target->sync.sync_flag < MPIDI_RMA_SYNC_FLUSH) {
            curr_target->sync.sync_flag = MPIDI_RMA_SYNC_FLUSH;
            curr_target->sync.have_remote_incomplete_ops = 0;
            curr_target->sync.outstanding_acks++;
        }

        /* Issue out all operations. */
        mpi_errno = MPIDI_CH3I_RMA_Make_progress_target(win_ptr, curr_target->target_rank,
                                                        &made_progress);
        if (mpi_errno != MPI_SUCCESS)
            MPIU_ERR_POP(mpi_errno);

        /* Wait for remote completion.  */
        do {
            mpi_errno = MPIDI_CH3I_RMA_Cleanup_ops_target(win_ptr, curr_target,
                                                          &local_completed, &remote_completed);
            if (mpi_errno != MPI_SUCCESS)
                MPIU_ERR_POP(mpi_errno);
            if (!remote_completed) {
                mpi_errno = wait_progress_engine();
                if (mpi_errno != MPI_SUCCESS)
                    MPIU_ERR_POP(mpi_errno);
            }
        } while (!remote_completed);

        /* Cleanup the target. */
        mpi_errno = MPIDI_CH3I_RMA_Cleanup_single_target(win_ptr, curr_target);
        if (mpi_errno != MPI_SUCCESS)
            MPIU_ERR_POP(mpi_errno);

        /* check if we got a target */
        (*target) = MPIDI_CH3I_Win_target_alloc(win_ptr);

    } while ((*target) == NULL);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Free all queued RMA operations of one eligible target without waiting for
   their completion, releasing the requests those operations hold so the
   window stops tying up resources.  The read-op list is drained first, then
   the write-op list.  Only called when the window is known to hold
   resources; returns an MPI error code.

   Fix: when the read list drains and we switch to the write list, the tail
   pointer must be updated to the WRITE list's tail.  The original code
   assigned '&curr_target->write_op_list_tail' to 'op_list' (a typo), which
   both broke the iteration (op_list no longer pointed at a list head) and
   left 'op_list_tail' aimed at the read list's tail, so MPL_LL_DELETE would
   corrupt the list bookkeeping while deleting write-list entries. */
int MPIDI_CH3I_RMA_Free_ops_before_completion(MPID_Win * win_ptr)
{
    MPIDI_RMA_Op_t *curr_op = NULL;
    MPIDI_RMA_Target_t *curr_target = NULL;
    MPIDI_RMA_Op_t **op_list = NULL, **op_list_tail = NULL;
    int read_flag = 0;          /* 1 while draining the read-op list */
    int i, made_progress = 0;
    int mpi_errno = MPI_SUCCESS;

    /* If we are in an free_ops_before_completion, the window must be holding
     * up resources.  If it isn't, we are in the wrong window and
     * incorrectly entered this function. */
    MPIU_ERR_CHKANDJUMP(win_ptr->non_empty_slots == 0, mpi_errno, MPI_ERR_OTHER, "**rmanoop");

    /* make nonblocking progress once */
    mpi_errno = MPIDI_CH3I_RMA_Make_progress_win(win_ptr, &made_progress);
    if (mpi_errno != MPI_SUCCESS)
        MPIU_ERR_POP(mpi_errno);

    /* While a window-wide synchronization is still being issued we must not
       free operations out from under it. */
    if (win_ptr->states.access_state == MPIDI_RMA_FENCE_ISSUED ||
        win_ptr->states.access_state == MPIDI_RMA_PSCW_ISSUED ||
        win_ptr->states.access_state == MPIDI_RMA_LOCK_ALL_ISSUED)
        goto fn_exit;

    /* find targets that have operations */
    for (i = 0; i < win_ptr->num_slots; i++) {
        if (win_ptr->slots[i].target_list != NULL) {
            curr_target = win_ptr->slots[i].target_list;
            while (curr_target != NULL) {
                if (curr_target->read_op_list != NULL || curr_target->write_op_list != NULL) {
                    /* In passive-target epochs the target must already hold
                       the lock before its queued ops may be discarded. */
                    if (win_ptr->states.access_state == MPIDI_RMA_PER_TARGET ||
                        win_ptr->states.access_state == MPIDI_RMA_LOCK_ALL_CALLED) {
                        if (curr_target->access_state == MPIDI_RMA_LOCK_GRANTED)
                            break;
                    }
                    else {
                        break;
                    }
                }
                curr_target = curr_target->next;
            }
            if (curr_target != NULL)
                break;
        }
    }

    if (curr_target == NULL)
        goto fn_exit;

    /* After we do this, all following Win_flush_local
     * must do a Win_flush instead.  */
    curr_target->disable_flush_local = 1;

    /* Start with the read list if it is non-empty; otherwise go straight to
       the write list. */
    if (curr_target->read_op_list != NULL) {
        op_list = &curr_target->read_op_list;
        op_list_tail = &curr_target->read_op_list_tail;
        read_flag = 1;
    }
    else {
        op_list = &curr_target->write_op_list;
        op_list_tail = &curr_target->write_op_list_tail;
    }

    /* free all ops in the list since we do not need to maintain them anymore */
    for (curr_op = *op_list; curr_op != NULL;) {
        if (curr_op->reqs_size > 0) {
            MPIU_Assert(curr_op->reqs != NULL);
            /* Release every request this op still holds. */
            for (i = 0; i < curr_op->reqs_size; i++) {
                if (curr_op->reqs[i] != NULL) {
                    MPID_Request_release(curr_op->reqs[i]);
                    curr_op->reqs[i] = NULL;
                    win_ptr->active_req_cnt--;
                }
            }
            /* free req array in this op */
            MPIU_Free(curr_op->reqs);
            curr_op->reqs = NULL;
            curr_op->reqs_size = 0;
        }
        MPL_LL_DELETE(*op_list, *op_list_tail, curr_op);
        MPIDI_CH3I_Win_op_free(win_ptr, curr_op);

        if (*op_list == NULL) {
            if (read_flag == 1) {
                /* Read list exhausted: switch head AND tail to the write
                   list (the tail assignment is the bug fix). */
                op_list = &curr_target->write_op_list;
                op_list_tail = &curr_target->write_op_list_tail;
                read_flag = 0;
            }
        }
        curr_op = *op_list;
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/*@
MPI_Cart_coords - Determines process coords in cartesian topology given
                  rank in group

Input Parameters:
+ comm - communicator with cartesian structure (handle)
. rank - rank of a process within group of 'comm' (integer)
- maxdims - length of vector 'coords' in the calling program (integer)

Output Parameter:
. coords - integer array (of size 'ndims') containing the Cartesian
  coordinates of specified process (integer)

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_RANK
.N MPI_ERR_DIMS
.N MPI_ERR_ARG
@*/
int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int *coords)
{
    static const char FCNAME[] = "MPI_Cart_coords";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPIR_Topology *cart_ptr;
    int i, nnodes;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_CART_COORDS);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_CART_COORDS);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            /* If comm_ptr is not valid, it will be reset to null */
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
            MPIR_ERRTEST_RANK(comm_ptr, rank, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    cart_ptr = MPIR_Topology_get( comm_ptr );

#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* The communicator must carry a Cartesian topology, the caller's
               buffer must be large enough for all dimensions, and (for a
               non-degenerate topology) coords must be non-NULL. */
            MPIU_ERR_CHKANDJUMP((!cart_ptr || cart_ptr->kind != MPI_CART), mpi_errno,
                                MPI_ERR_TOPOLOGY, "**notcarttopo");
            MPIU_ERR_CHKANDJUMP2((cart_ptr->topo.cart.ndims > maxdims), mpi_errno, MPI_ERR_ARG,
                                 "**dimsmany", "**dimsmany %d %d",
                                 cart_ptr->topo.cart.ndims, maxdims);
            if (cart_ptr->topo.cart.ndims) {
                MPIR_ERRTEST_ARGNULL(coords,"coords",mpi_errno);
                if (mpi_errno) goto fn_fail;
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    /* Calculate coords */
    /* Peel off one dimension per iteration, row-major: the stride of
       dimension i is the product of all later dimensions' sizes, obtained by
       successively dividing nnodes. */
    nnodes = cart_ptr->topo.cart.nnodes;
    for ( i=0; i < cart_ptr->topo.cart.ndims; i++ ) {
        nnodes    = nnodes / cart_ptr->topo.cart.dims[i];
        coords[i] = rank / nnodes;
        rank      = rank % nnodes;
    }

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_CART_COORDS);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_cart_coords",
            "**mpi_cart_coords %C %d %d %p", comm, rank, maxdims, coords);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}