/* split_type - device (ch3) implementation backing MPI_Comm_split_type.
 *
 * For MPI_COMM_TYPE_SHARED, the split color is this process's node id
 * (per MPID_Get_node_id), so all ranks on the same node land in the same
 * new communicator.  For any other split type the color is MPI_UNDEFINED,
 * which makes MPIR_Comm_split_impl return MPI_COMM_NULL for this rank.
 *
 * Parameters:
 *   comm_ptr    - [in]  communicator to split
 *   stype       - [in]  split type (only MPI_COMM_TYPE_SHARED produces groups)
 *   key         - [in]  rank-ordering key, forwarded to MPIR_Comm_split_impl
 *   info_ptr    - [in]  unused here (accepted for interface uniformity)
 *   newcomm_ptr - [out] resulting communicator (or NULL)
 *
 * Returns MPI_SUCCESS or an MPI error code from the node-id query / split.
 */
static int split_type(MPID_Comm * comm_ptr, int stype, int key,
                      MPID_Info *info_ptr, MPID_Comm ** newcomm_ptr)
{
    MPID_Node_id_t id;
    MPIDI_Rank_t nid;
    int mpi_errno = MPI_SUCCESS;

    if (MPIDI_CH3I_Shm_supported()) {
        /* Shared memory is available: group by physical node. */
        mpi_errno = MPID_Get_node_id(comm_ptr, comm_ptr->rank, &id);
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);
    }
    else
        /* No shared-memory support: fall back to using the rank itself as
         * the "node id", so each process ends up alone in its group
         * (effectively a dup of MPI_COMM_SELF via the split below). */
        id = comm_ptr->rank;

    /* NOTE(review): nid is MPIDI_Rank_t while MPI_UNDEFINED is a negative
     * int — assumes MPIDI_Rank_t is a signed type wide enough to hold it;
     * TODO confirm against the typedef. */
    nid = (stype == MPI_COMM_TYPE_SHARED) ? id : MPI_UNDEFINED;
    mpi_errno = MPIR_Comm_split_impl(comm_ptr, nid, key, newcomm_ptr);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

  fn_exit:
    return mpi_errno;
    /* --BEGIN ERROR HANDLING-- */
  fn_fail:
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/* split_type - split user_comm_ptr according to stype.
 *
 * Strategy: first run a plain MPIR_Comm_split_impl to carve out only the
 * processes that participate (everyone passing a real split type gets
 * color 0; MPI_UNDEFINED callers drop out).  The specific split-type
 * handler then runs on that intermediate communicator, which is freed
 * before returning on every path.
 *
 * Parameters:
 *   user_comm_ptr - [in]  communicator supplied by the caller
 *   stype         - [in]  MPI_COMM_TYPE_SHARED, MPIX_COMM_TYPE_NEIGHBORHOOD,
 *                         MPI_UNDEFINED, or a type handled by the upper layer
 *   key           - [in]  rank-ordering key
 *   info_ptr      - [in]  info hints, forwarded to the type-specific handlers
 *   newcomm_ptr   - [out] resulting communicator (NULL for MPI_UNDEFINED)
 */
static int split_type(MPIR_Comm * user_comm_ptr, int stype, int key,
                      MPIR_Info *info_ptr, MPIR_Comm ** newcomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Comm *intracomm = NULL;

    /* Carve out the participating processes first. */
    mpi_errno = MPIR_Comm_split_impl(user_comm_ptr,
                                     (stype == MPI_UNDEFINED) ? MPI_UNDEFINED : 0,
                                     key, &intracomm);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    if (stype == MPI_UNDEFINED) {
        /* This process opted out; hand back MPI_COMM_NULL. */
        *newcomm_ptr = NULL;
        goto fn_exit;
    }

    switch (stype) {
        case MPI_COMM_TYPE_SHARED:
            if (MPIDI_CH3I_Shm_supported())
                mpi_errno = MPIR_Comm_split_type_node_topo(intracomm, stype, key,
                                                           info_ptr, newcomm_ptr);
            else
                /* no shm: each process gets a self-like communicator */
                mpi_errno = MPIR_Comm_split_type_self(intracomm, stype, key,
                                                      newcomm_ptr);
            break;

        case MPIX_COMM_TYPE_NEIGHBORHOOD:
            mpi_errno = MPIR_Comm_split_type_neighborhood(intracomm, stype, key,
                                                          info_ptr, newcomm_ptr);
            break;

        default:
            /* we don't know how to handle other split types; hand it back
             * to the upper layer */
            mpi_errno = MPIR_Comm_split_type(intracomm, stype, key,
                                             info_ptr, newcomm_ptr);
            break;
    }
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

  fn_exit:
    /* The intermediate communicator is ours alone; release it. */
    if (intracomm)
        MPIR_Comm_free_impl(intracomm);
    return mpi_errno;
    /* --BEGIN ERROR HANDLING-- */
  fn_fail:
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/* MPIR_Graph_create - create a communicator carrying a (non-distributed)
 * graph topology.
 *
 * Parameters:
 *   comm_ptr   - [in]  source communicator
 *   nnodes     - [in]  number of nodes in the graph
 *   indx       - [in]  cumulative neighbor counts (MPI_Graph_create "index")
 *   edges      - [in]  flattened adjacency list
 *   reorder    - [in]  nonzero allows rank remapping via MPIR_Graph_map_impl
 *   comm_graph - [out] handle of the new communicator (MPI_COMM_NULL if this
 *                      process is not part of the resulting topology)
 *
 * On error, allocated topology memory is reaped and an error code is
 * produced via the usual fn_fail path.
 */
int MPIR_Graph_create( MPIR_Comm *comm_ptr, int nnodes,
                       const int indx[], const int edges[], int reorder,
                       MPI_Comm *comm_graph )
{
    int mpi_errno = MPI_SUCCESS;
    int i, nedges;
    MPIR_Comm *newcomm_ptr = NULL;
    MPIR_Topology *graph_ptr = NULL;
    MPIR_CHKPMEM_DECL(3);   /* tracks the three MALLOCs below for REAP on failure */

    /* Set this to null in case there is an error */
    *comm_graph = MPI_COMM_NULL;

    /* Create a new communicator */
    if (reorder) {
        int nrank;
        /* Allow the cart map routine to remap the assignment of ranks to
           processes */
        mpi_errno = MPIR_Graph_map_impl(comm_ptr, nnodes, indx, edges, &nrank);
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);
        /* Create the new communicator with split, since we need to reorder
           the ranks (including the related internals, such as the connection
           tables */
        /* color 1 keeps all mapped ranks together; key=nrank orders them by
           the mapped rank; MPI_UNDEFINED drops unmapped processes. */
        mpi_errno = MPIR_Comm_split_impl( comm_ptr,
                                nrank == MPI_UNDEFINED ? MPI_UNDEFINED : 1,
                                nrank, &newcomm_ptr );
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);
    }
    else {
        /* Just use the first nnodes processes in the communicator */
        mpi_errno = MPII_Comm_copy( (MPIR_Comm *)comm_ptr, nnodes,
                                    &newcomm_ptr );
        if (mpi_errno) MPIR_ERR_POP(mpi_errno);
    }

    /* If this process is not in the resulting communicator, return a
       null communicator and exit */
    if (!newcomm_ptr) {
        *comm_graph = MPI_COMM_NULL;
        goto fn_exit;
    }

    /* Total edge count is the last cumulative index entry.
     * NOTE(review): assumes nnodes >= 1 here — processes excluded by the
     * split/copy above have already exited, but verify the nnodes==0 case
     * is rejected by the caller. */
    nedges = indx[nnodes-1];
    MPIR_CHKPMEM_MALLOC(graph_ptr,MPIR_Topology*,sizeof(MPIR_Topology),
                        mpi_errno,"graph_ptr");

    graph_ptr->kind = MPI_GRAPH;
    graph_ptr->topo.graph.nnodes = nnodes;
    graph_ptr->topo.graph.nedges = nedges;
    MPIR_CHKPMEM_MALLOC(graph_ptr->topo.graph.index,int*,
                        nnodes*sizeof(int),mpi_errno,"graph.index");
    MPIR_CHKPMEM_MALLOC(graph_ptr->topo.graph.edges,int*,
                        nedges*sizeof(int),mpi_errno,"graph.edges");
    /* Deep-copy the caller's arrays; the topology owns its own storage. */
    for (i=0; i<nnodes; i++)
        graph_ptr->topo.graph.index[i] = indx[i];
    for (i=0; i<nedges; i++)
        graph_ptr->topo.graph.edges[i] = edges[i];

    /* Finally, place the topology onto the new communicator and return the
       handle */
    mpi_errno = MPIR_Topology_put( newcomm_ptr, graph_ptr );
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* Publish the handle only after the topology is attached. */
    MPIR_OBJ_PUBLISH_HANDLE(*comm_graph, newcomm_ptr->handle);

    /* ... end of body of routine ... */
  fn_exit:
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
    MPIR_CHKPMEM_REAP();   /* free any topology storage allocated above */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code( mpi_errno, MPIR_ERR_RECOVERABLE,
                                          FCNAME, __LINE__, MPI_ERR_OTHER,
                                          "**mpi_graph_create",
                                          "**mpi_graph_create %C %d %p %p %d %p",
                                          comm_ptr->handle, nnodes, indx,
                                          edges, reorder, comm_graph);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( (MPIR_Comm*)comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Cart_sub - Partitions a communicator into subgroups which
               form lower-dimensional cartesian subgrids

Input Parameters:
+ comm - communicator with cartesian structure (handle)
- remain_dims - the 'i'th entry of remain_dims specifies whether the 'i'th
dimension is kept in the subgrid (true) or is dropped (false) (logical
vector)

Output Parameters:
. newcomm - communicator containing the subgrid that includes the calling
process (handle)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *newcomm)
{
    int mpi_errno = MPI_SUCCESS, all_false;
    int ndims, key, color, ndims_in_subcomm, nnodes_in_subcomm, i, j, rank;
    MPID_Comm *comm_ptr = NULL, *newcomm_ptr;
    MPIR_Topology *topo_ptr, *toponew_ptr;
    MPIU_CHKPMEM_DECL(4);   /* tracks the topology allocations for REAP on failure */
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_CART_SUB);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_CART_SUB);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE );
            /* If comm_ptr is not valid, it will be reset to null */
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...
     */

    /* Check that the communicator already has a Cartesian topology */
    topo_ptr = MPIR_Topology_get( comm_ptr );

    MPIU_ERR_CHKANDJUMP(!topo_ptr,mpi_errno,MPI_ERR_TOPOLOGY,"**notopology");
    MPIU_ERR_CHKANDJUMP(topo_ptr->kind != MPI_CART,mpi_errno,MPI_ERR_TOPOLOGY,
                        "**notcarttopo");

    ndims = topo_ptr->topo.cart.ndims;

    all_false = 1;  /* all entries in remain_dims are false */
    for (i=0; i<ndims; i++) {
        if (remain_dims[i]) {
            /* any 1 is true, set flag to 0 and break */
            all_false = 0;
            break;
        }
    }

    if (all_false) {
        /* ndims=0, or all entries in remain_dims are false.
           MPI 2.1 says return a 0D Cartesian topology. */
        mpi_errno = MPIR_Cart_create_impl(comm_ptr, 0, NULL, NULL, 0, newcomm);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    } else {
        /* Determine the number of remaining dimensions */
        ndims_in_subcomm = 0;
        nnodes_in_subcomm = 1;
        for (i=0; i<ndims; i++) {
            if (remain_dims[i]) {
                ndims_in_subcomm ++;
                nnodes_in_subcomm *= topo_ptr->topo.cart.dims[i];
            }
        }

        /* Split this communicator.  Do this even if there are no remaining
           dimensions so that the topology information is attached */
        /* color is built from the coordinates along dropped dimensions
           (processes sharing those coordinates land in the same subgrid);
           key is built from the kept dimensions, preserving row-major
           ordering inside each subgrid. */
        key   = 0;
        color = 0;
        for (i=0; i<ndims; i++) {
            if (remain_dims[i]) {
                key = (key * topo_ptr->topo.cart.dims[i]) +
                    topo_ptr->topo.cart.position[i];
            }
            else {
                color = (color * topo_ptr->topo.cart.dims[i]) +
                    topo_ptr->topo.cart.position[i];
            }
        }
        mpi_errno = MPIR_Comm_split_impl( comm_ptr, color, key, &newcomm_ptr );
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);

        *newcomm = newcomm_ptr->handle;

        /* Save the topology of this new communicator */
        MPIU_CHKPMEM_MALLOC(toponew_ptr,MPIR_Topology*,sizeof(MPIR_Topology),
                            mpi_errno,"toponew_ptr");

        toponew_ptr->kind = MPI_CART;
        toponew_ptr->topo.cart.ndims = ndims_in_subcomm;
        toponew_ptr->topo.cart.nnodes = nnodes_in_subcomm;
        if (ndims_in_subcomm) {
            MPIU_CHKPMEM_MALLOC(toponew_ptr->topo.cart.dims,int*,
                                ndims_in_subcomm*sizeof(int),mpi_errno,"cart.dims");
            MPIU_CHKPMEM_MALLOC(toponew_ptr->topo.cart.periodic,int*,
                                ndims_in_subcomm*sizeof(int),mpi_errno,"cart.periodic");
            MPIU_CHKPMEM_MALLOC(toponew_ptr->topo.cart.position,int*,
                                ndims_in_subcomm*sizeof(int),mpi_errno,"cart.position");
        }
        else {
            /* 0-dimensional subgrid: no per-dimension arrays needed */
            toponew_ptr->topo.cart.dims = 0;
            toponew_ptr->topo.cart.periodic = 0;
            toponew_ptr->topo.cart.position = 0;
        }

        /* Copy dims/periodicity of the kept dimensions, compacted to the
           front of the new arrays. */
        j = 0;
        for (i=0; i<ndims; i++) {
            if (remain_dims[i]) {
                toponew_ptr->topo.cart.dims[j] = topo_ptr->topo.cart.dims[i];
                toponew_ptr->topo.cart.periodic[j] = topo_ptr->topo.cart.periodic[i];
                j++;
            }
        }

        /* Compute the position of this process in the new communicator
           (row-major decode of the new rank). */
        rank = newcomm_ptr->rank;
        for (i=0; i<ndims_in_subcomm; i++) {
            nnodes_in_subcomm /= toponew_ptr->topo.cart.dims[i];
            toponew_ptr->topo.cart.position[i] = rank / nnodes_in_subcomm;
            rank = rank % nnodes_in_subcomm;
        }

        mpi_errno = MPIR_Topology_put( newcomm_ptr, toponew_ptr );
        if (mpi_errno) goto fn_fail;
    }
    /* NOTE(review): the function's epilogue (fn_exit/fn_fail labels,
       MPID_MPI_FUNC_EXIT, thread CS exit) continues beyond this chunk. */
/* MPIR_Comm_split_type_impl - implementation layer for MPI_Comm_split_type.
 *
 * Handles MPI_COMM_TYPE_SHARED, MPI_UNDEFINED, and
 * MPIX_COMM_TYPE_NEIGHBORHOOD (asserted below).  NEIGHBORHOOD is dispatched
 * by info hint; the only hint currently understood is
 * "nbhd_common_dirname", handled by ROMIO's filesystem split.  Anything
 * else degrades to MPI_UNDEFINED (caller gets MPI_COMM_NULL).
 *
 * Parameters:
 *   comm_ptr    - [in]  communicator to split
 *   split_type  - [in]  one of the three supported split types
 *   key         - [in]  rank-ordering key
 *   info_ptr    - [in]  info hints (may be NULL; only consulted for
 *                       NEIGHBORHOOD)
 *   newcomm_ptr - [out] resulting communicator (or NULL)
 */
int MPIR_Comm_split_type_impl(MPIR_Comm * comm_ptr, int split_type, int key,
                              MPIR_Info * info_ptr, MPIR_Comm ** newcomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;

    /* Only MPI_COMM_TYPE_SHARED, MPI_UNDEFINED, and
     * NEIGHBORHOOD are supported */
    MPIR_Assert(split_type == MPI_COMM_TYPE_SHARED ||
                split_type == MPI_UNDEFINED ||
                split_type == MPIX_COMM_TYPE_NEIGHBORHOOD);

    if (split_type == MPIX_COMM_TYPE_NEIGHBORHOOD) {
        int flag = 0;   /* initialized: stays 0 when no info / no hint */
        char hintval[MPI_MAX_INFO_VAL + 1];

        /* We plan on dispatching different NEIGHBORHOOD support to
         * different parts of MPICH, based on the key provided in the
         * info object.  Right now, the one NEIGHBORHOOD we support is
         * "nbhd_common_dirname", implementation of which lives in ROMIO */
        /* robustness: a NULL info carries no hint; previously this would
         * have dereferenced NULL inside MPIR_Info_get_impl */
        if (info_ptr != NULL)
            MPIR_Info_get_impl(info_ptr, "nbhd_common_dirname",
                               MPI_MAX_INFO_VAL, hintval, &flag);
        if (flag) {
#ifdef HAVE_ROMIO
            MPI_Comm dummycomm;
            MPIR_Comm *dummycomm_ptr;

            mpi_errno = MPIR_Comm_split_filesystem(comm_ptr->handle, key,
                                                   hintval, &dummycomm);
            /* bug fix: check the result before converting the handle --
             * on failure dummycomm is indeterminate */
            if (mpi_errno)
                MPIR_ERR_POP(mpi_errno);
            MPIR_Comm_get_ptr(dummycomm, dummycomm_ptr);
            *newcomm_ptr = dummycomm_ptr;
            goto fn_exit;
#endif
            /* fall through to the "not supported" case if ROMIO was not
             * enabled for some reason */
        }

        /* we don't work with other hints yet, but if we did (e.g.
         * nbhd_network, nbhd_partition), we'd do so here */

        /* In the mean time, the user passed in COMM_TYPE_NEIGHBORHOOD
         * but did not give us an info we know how to work with.
         * Throw up our hands and treat it like UNDEFINED.  This will
         * result in MPI_COMM_NULL being returned to the user. */
        split_type = MPI_UNDEFINED;
    }

    if (MPIR_Comm_fns == NULL || MPIR_Comm_fns->split_type == NULL) {
        /* The default implementation is to either pass MPI_UNDEFINED
         * or the local rank as the color (in which case a dup of
         * MPI_COMM_SELF is returned) */
        int color = (split_type == MPI_COMM_TYPE_SHARED) ?
            comm_ptr->rank : MPI_UNDEFINED;
        mpi_errno = MPIR_Comm_split_impl(comm_ptr, color, key, newcomm_ptr);
    }
    else {
        /* device-provided split_type override */
        mpi_errno = MPIR_Comm_fns->split_type(comm_ptr, split_type, key,
                                              info_ptr, newcomm_ptr);
    }
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}