/* MPIR_Dist_graph_neighbors_impl - return the adjacency lists (and optional
 * edge weights) stored with a distributed-graph topology.
 *
 * comm_ptr      - communicator carrying an MPI_DIST_GRAPH topology
 * maxindegree   - capacity of sources[]/sourceweights[]
 * maxoutdegree  - capacity of destinations[]/destweights[]
 *
 * Returns MPI_SUCCESS, or MPI_ERR_TOPOLOGY if the communicator has no
 * distributed-graph topology attached. */
int MPIR_Dist_graph_neighbors_impl(MPIR_Comm * comm_ptr, int maxindegree, int sources[],
                                   int sourceweights[], int maxoutdegree, int destinations[],
                                   int destweights[])
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Topology *topo_ptr = NULL;
    int indegree, outdegree;

    topo_ptr = MPIR_Topology_get(comm_ptr);
    MPIR_ERR_CHKANDJUMP(!topo_ptr || topo_ptr->kind != MPI_DIST_GRAPH, mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notdistgraphtopo");

    /* Copy no more entries than the topology actually stores: the in/out
     * (and weight) arrays hold exactly indegree/outdegree elements, so
     * copying maxindegree/maxoutdegree elements would read past their ends
     * whenever the caller passes larger values.  (The public binding already
     * rejects max values that are too SMALL.) */
    indegree = topo_ptr->topo.dist_graph.indegree;
    if (indegree > maxindegree)
        indegree = maxindegree;
    outdegree = topo_ptr->topo.dist_graph.outdegree;
    if (outdegree > maxoutdegree)
        outdegree = maxoutdegree;

    MPIR_Memcpy(sources, topo_ptr->topo.dist_graph.in, indegree * sizeof(int));
    MPIR_Memcpy(destinations, topo_ptr->topo.dist_graph.out, outdegree * sizeof(int));

    /* Weights are returned only when the graph was created with weights and
     * the caller did not pass MPI_UNWEIGHTED for the weight array. */
    if (sourceweights != MPI_UNWEIGHTED && topo_ptr->topo.dist_graph.is_weighted) {
        MPIR_Memcpy(sourceweights, topo_ptr->topo.dist_graph.in_weights,
                    indegree * sizeof(int));
    }
    if (destweights != MPI_UNWEIGHTED && topo_ptr->topo.dist_graph.is_weighted) {
        MPIR_Memcpy(destweights, topo_ptr->topo.dist_graph.out_weights,
                    outdegree * sizeof(int));
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPIR_Graph_neighbors_impl - return the neighbor ranks of 'rank' in a
 * (non-distributed) graph topology.
 *
 * comm_ptr     - communicator carrying an MPI_GRAPH topology
 * rank         - process whose neighbors are requested
 * maxneighbors - capacity of neighbors[]
 *
 * Returns MPI_SUCCESS, MPI_ERR_TOPOLOGY if no graph topology is attached,
 * or MPI_ERR_RANK if rank is out of range. */
int MPIR_Graph_neighbors_impl(MPID_Comm *comm_ptr, int rank, int maxneighbors, int neighbors[])
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Topology *graph_ptr;
    int i, is, ie;

    graph_ptr = MPIR_Topology_get(comm_ptr);
    MPIU_ERR_CHKANDJUMP((!graph_ptr || graph_ptr->kind != MPI_GRAPH), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notgraphtopo");
    MPIU_ERR_CHKANDJUMP2((rank < 0 || rank >= graph_ptr->topo.graph.nnodes), mpi_errno,
                         MPI_ERR_RANK, "**rank", "**rank %d %d", rank,
                         graph_ptr->topo.graph.nnodes);

    /* The neighbors of 'rank' occupy entries [index[rank-1], index[rank]) of
     * the flattened edges array, with index[-1] taken as 0. */
    if (rank == 0)
        is = 0;
    else
        is = graph_ptr->topo.graph.index[rank - 1];
    ie = graph_ptr->topo.graph.index[rank];

    /* Never write more than maxneighbors entries into the caller's buffer;
     * previously maxneighbors was ignored, overrunning neighbors[] when it
     * was smaller than the degree of 'rank'. */
    if (ie - is > maxneighbors)
        ie = is + maxneighbors;

    /* Copy the neighbor ranks out */
    for (i = is; i < ie; i++)
        *neighbors++ = graph_ptr->topo.graph.edges[i];

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPIR_Cart_shift_impl - compute the source and destination ranks for a
 * shift of 'disp' steps along Cartesian dimension 'direction'.
 *
 * On a non-periodic dimension, a shift that leaves the mesh yields
 * MPI_PROC_NULL for the corresponding rank. */
int MPIR_Cart_shift_impl(MPID_Comm *comm_ptr, int direction, int disp, int *rank_source, int *rank_dest)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Topology *cart_ptr;
    int d;
    int coord[MAX_CART_DIM];

    cart_ptr = MPIR_Topology_get( comm_ptr );

    /* The communicator must carry a Cartesian topology with at least one
       dimension, and 'direction' must name one of those dimensions. */
    MPIU_ERR_CHKANDJUMP((!cart_ptr || cart_ptr->kind != MPI_CART), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notcarttopo");
    MPIU_ERR_CHKANDJUMP((cart_ptr->topo.cart.ndims == 0), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**dimszero");
    MPIU_ERR_CHKANDJUMP2((direction >= cart_ptr->topo.cart.ndims), mpi_errno,
                         MPI_ERR_ARG, "**dimsmany", "**dimsmany %d %d",
                         cart_ptr->topo.cart.ndims, direction);

    if (disp == 0) {
        /* A zero-length shift maps every process onto itself. */
        *rank_source = *rank_dest = comm_ptr->rank;
        goto fn_exit;
    }

    /* Start from this process's own coordinates; MPIR_Cart_rank_impl then
       maps a shifted coordinate vector back to a rank, which also supports
       implementations with non-trivial rank mappings. */
    for (d = 0; d < cart_ptr->topo.cart.ndims; d++)
        coord[d] = cart_ptr->topo.cart.position[d];

    /* Destination: 'disp' steps upward along 'direction'.  Falling off the
       edge of a non-periodic dimension yields MPI_PROC_NULL. */
    coord[direction] = cart_ptr->topo.cart.position[direction] + disp;
    if (!cart_ptr->topo.cart.periodic[direction] &&
        (coord[direction] < 0 ||
         coord[direction] >= cart_ptr->topo.cart.dims[direction])) {
        *rank_dest = MPI_PROC_NULL;
    }
    else {
        MPIR_Cart_rank_impl( cart_ptr, coord, rank_dest );
    }

    /* Source: the symmetric shift, 'disp' steps downward. */
    coord[direction] = cart_ptr->topo.cart.position[direction] - disp;
    if (!cart_ptr->topo.cart.periodic[direction] &&
        (coord[direction] < 0 ||
         coord[direction] >= cart_ptr->topo.cart.dims[direction])) {
        *rank_source = MPI_PROC_NULL;
    }
    else {
        MPIR_Cart_rank_impl( cart_ptr, coord, rank_source );
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* MPIR_Graph_neighbors_count_impl - return in *nneighbors the degree of
 * 'rank' in the graph topology attached to comm_ptr.
 *
 * Fails with MPI_ERR_TOPOLOGY if no graph topology is attached, or
 * MPI_ERR_RANK if rank is out of [0, nnodes). */
int MPIR_Graph_neighbors_count_impl(MPID_Comm *comm_ptr, int rank, int *nneighbors)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Topology *graph_ptr;
    int *idx;

    graph_ptr = MPIR_Topology_get( comm_ptr );
    MPIR_ERR_CHKANDJUMP((!graph_ptr || graph_ptr->kind != MPI_GRAPH), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notgraphtopo");
    MPIR_ERR_CHKANDJUMP2((rank < 0 || rank >= graph_ptr->topo.graph.nnodes), mpi_errno,
                         MPI_ERR_RANK, "**rank", "**rank %d %d", rank,
                         graph_ptr->topo.graph.nnodes);

    /* index[] holds cumulative edge counts, so the degree of 'rank' is the
       difference between consecutive entries (index[-1] is implicitly 0). */
    idx = graph_ptr->topo.graph.index;
    *nneighbors = (rank == 0) ? idx[0] : idx[rank] - idx[rank - 1];

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/*@
MPI_Cart_shift - Returns the shifted source and destination ranks, given a
                 shift direction and amount

Input Parameters:
+ comm - communicator with cartesian structure (handle)
. direction - coordinate dimension of shift (integer)
- displ - displacement (> 0: upwards shift, < 0: downwards shift) (integer)

Output Parameters:
+ source - rank of source process (integer)
- dest - rank of destination process (integer)

Notes:
The 'direction' argument is in the range '[0,n-1]' for an n-dimensional
Cartesian mesh.

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Cart_shift(MPI_Comm comm, int direction, int displ, int *source, int *dest)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPIR_Topology *cart_ptr;
    int i;
    int pos[MAX_CART_DIM];
    int rank;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_CART_SHIFT);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_CART_SHIFT);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            /* If comm_ptr is not valid, it will be reset to null */
            MPIR_ERRTEST_ARGNULL( source, "source", mpi_errno );
            MPIR_ERRTEST_ARGNULL( dest, "dest", mpi_errno );
            MPIR_ERRTEST_ARGNEG( direction, "direction", mpi_errno );
            /* Nothing in the standard indicates that a zero displacement
               is not valid, so we don't check for a zero shift */
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* The communicator must carry a Cartesian topology with at least one
       dimension, and 'direction' must name one of those dimensions. */
    cart_ptr = MPIR_Topology_get( comm_ptr );
    MPIU_ERR_CHKANDJUMP((!cart_ptr || cart_ptr->kind != MPI_CART), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notcarttopo");
    MPIU_ERR_CHKANDJUMP((cart_ptr->topo.cart.ndims == 0), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**dimszero");
    MPIU_ERR_CHKANDJUMP2((direction >= cart_ptr->topo.cart.ndims), mpi_errno,
                         MPI_ERR_ARG, "**dimsmany", "**dimsmany %d %d",
                         cart_ptr->topo.cart.ndims, direction);

    /* Check for the case of a 0 displacement: every process maps onto
       itself for both source and dest. */
    rank = comm_ptr->rank;
    if (displ == 0) {
        *source = *dest = rank;
    }
    else {
        /* To support advanced implementations that support MPI_Cart_create,
           we compute the new position and call PMPI_Cart_rank to get the
           source and destination.  We could bypass that step if we know
           that the mapping is trivial.  Copy the current position. */
        for (i=0; i<cart_ptr->topo.cart.ndims; i++) {
            pos[i] = cart_ptr->topo.cart.position[i];
        }
        /* We must return MPI_PROC_NULL if shifted over the edge of a
           non-periodic mesh */
        pos[direction] += displ;
        if (!cart_ptr->topo.cart.periodic[direction] &&
            (pos[direction] >= cart_ptr->topo.cart.dims[direction] ||
             pos[direction] < 0)) {
            *dest = MPI_PROC_NULL;
        }
        else {
            MPIR_Cart_rank_impl( cart_ptr, pos, dest );
        }

        /* The source is the symmetric shift in the opposite direction. */
        pos[direction] = cart_ptr->topo.cart.position[direction] - displ;
        if (!cart_ptr->topo.cart.periodic[direction] &&
            (pos[direction] >= cart_ptr->topo.cart.dims[direction] ||
             pos[direction] < 0)) {
            *source = MPI_PROC_NULL;
        }
        else {
            MPIR_Cart_rank_impl( cart_ptr, pos, source );
        }
    }

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_CART_SHIFT);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_cart_shift", "**mpi_cart_shift %C %d %d %p %p",
            comm, direction, displ, source, dest);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Cart_rank - Determines process rank in communicator given Cartesian
                location

Input Parameters:
+ comm - communicator with cartesian structure (handle)
- coords - integer array (of size 'ndims', the number of dimensions of
    the Cartesian topology associated with 'comm') specifying the cartesian
    coordinates of a process

Output Parameters:
. rank - rank of specified process (integer)

Notes:
Out-of-range coordinates are erroneous for non-periodic dimensions.
Versions of MPICH before 1.2.2 returned 'MPI_PROC_NULL' for the rank in this
case.

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_RANK
.N MPI_ERR_ARG
@*/
int MPI_Cart_rank(MPI_Comm comm, const int coords[], int *rank)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Comm *comm_ptr = NULL;
    MPIR_Topology *cart_ptr;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_CART_RANK);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_CART_RANK);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPIR_Comm_get_ptr( comm, comm_ptr );

#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPIR_Comm_valid_ptr( comm_ptr, mpi_errno, TRUE );
            if (mpi_errno) goto fn_fail;
            /* If comm_ptr is not valid, it will be reset to null */
            MPIR_ERRTEST_ARGNULL(rank,"rank",mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* The topology lookup is done unconditionally (not only under
       HAVE_ERROR_CHECKING) because cart_ptr is also needed by the body of
       the routine below. */
    cart_ptr = MPIR_Topology_get( comm_ptr );
    MPIR_ERR_CHKANDJUMP((!cart_ptr || cart_ptr->kind != MPI_CART), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notcarttopo");

    /* Validate coordinates: each coordinate of a non-periodic dimension
       must lie inside [0, dims[i]).  Periodic dimensions accept any value. */
#   ifdef HAVE_ERROR_CHECKING
    {
        int i, ndims, coord;
        MPID_BEGIN_ERROR_CHECKS;
        {
            ndims = cart_ptr->topo.cart.ndims;
            if (ndims != 0) {
                MPIR_ERRTEST_ARGNULL(coords,"coords",mpi_errno);
            }
            for (i=0; i<ndims; i++) {
                if (!cart_ptr->topo.cart.periodic[i]) {
                    coord = coords[i];
                    MPIR_ERR_CHKANDJUMP3(
                        (coord < 0 || coord >= cart_ptr->topo.cart.dims[i] ),
                        mpi_errno, MPI_ERR_ARG, "**cartcoordinvalid",
                        "**cartcoordinvalid %d %d %d",
                        i, coords[i], cart_ptr->topo.cart.dims[i]-1 );
                }
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */
    MPIR_Cart_rank_impl(cart_ptr, coords, rank);
    /* ... end of body of routine ... */

  fn_exit:
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_CART_RANK);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_cart_rank", "**mpi_cart_rank %C %p %p",
            comm, coords, rank);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Dist_graph_neighbors - Provides adjacency information for a distributed
graph topology.

Input Parameters:
+ comm - communicator with distributed graph topology (handle)
. maxindegree - size of sources and sourceweights arrays (non-negative
  integer)
- maxoutdegree - size of destinations and destweights arrays (non-negative
  integer)

Output Parameters:
+ sources - processes for which the calling process is a destination (array
  of non-negative integers)
. sourceweights - weights of the edges into the calling process (array of
  non-negative integers)
. destinations - processes for which the calling process is a source (array
  of non-negative integers)
- destweights - weights of the edges out of the calling process (array of
  non-negative integers)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
@*/
int MPI_Dist_graph_neighbors(MPI_Comm comm, int maxindegree, int sources[], int sourceweights[],
                             int maxoutdegree, int destinations[], int destweights[])
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Comm *comm_ptr = NULL;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_DIST_GRAPH_NEIGHBORS);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    /* FIXME: Why does this routine need a CS */
    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_DIST_GRAPH_NEIGHBORS);

    /* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif

    /* Convert MPI object handles to object pointers */
    MPIR_Comm_get_ptr(comm, comm_ptr);

    /* Validate parameters */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_Topology *topo_ptr = NULL;
            topo_ptr = MPIR_Topology_get(comm_ptr);
            MPIR_ERR_CHKANDJUMP(!topo_ptr || topo_ptr->kind != MPI_DIST_GRAPH, mpi_errno,
                                MPI_ERR_TOPOLOGY, "**notdistgraphtopo");
            MPIR_ERRTEST_ARGNEG(maxindegree, "maxindegree", mpi_errno);
            MPIR_ERRTEST_ARGNEG(maxoutdegree, "maxoutdegree", mpi_errno);
            /* The caller's arrays must be big enough to hold the complete
               adjacency lists stored with the topology. */
            MPIR_ERR_CHKANDJUMP3((maxindegree < topo_ptr->topo.dist_graph.indegree),
                                 mpi_errno, MPI_ERR_ARG, "**argtoosmall",
                                 "**argtoosmall %s %d %d", "maxindegree", maxindegree,
                                 topo_ptr->topo.dist_graph.indegree);
            MPIR_ERR_CHKANDJUMP3((maxoutdegree < topo_ptr->topo.dist_graph.outdegree),
                                 mpi_errno, MPI_ERR_ARG, "**argtoosmall",
                                 "**argtoosmall %s %d %d", "maxoutdegree", maxoutdegree,
                                 topo_ptr->topo.dist_graph.outdegree);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */
    mpi_errno = MPIR_Dist_graph_neighbors_impl(comm_ptr, maxindegree, sources, sourceweights,
                                               maxoutdegree, destinations, destweights);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);
    /* ... end of body of routine ... */

  fn_exit:
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_DIST_GRAPH_NEIGHBORS);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
                                     MPI_ERR_OTHER, "**mpi_dist_graph_neighbors",
                                     "**mpi_dist_graph_neighbors %C %d %p %p %d %p %p",
                                     comm, maxindegree, sources, sourceweights,
                                     maxoutdegree, destinations, destweights);
#endif
    mpi_errno = MPIR_Err_return_comm(comm_ptr, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Topo_test - Determines the type of topology (if any) associated with a
                communicator

Input Parameters:
. comm - communicator (handle)

Output Parameters:
. status - topology type of communicator 'comm' (integer).  If the
  communicator has no associated topology, returns 'MPI_UNDEFINED'.

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_ARG

.seealso: MPI_Graph_create, MPI_Cart_create
@*/
int MPI_Topo_test(MPI_Comm comm, int *status)
{
#ifdef HAVE_ERROR_CHECKING
    /* FCNAME is only referenced from the error-handling path below, which
       is itself compiled only under HAVE_ERROR_CHECKING. */
    static const char FCNAME[] = "MPI_Topo_test";
#endif
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPIR_Topology *topo_ptr;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_TOPO_TEST);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_TOPO_TEST);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno, TRUE );
            if (mpi_errno) goto fn_fail;
            /* If comm_ptr is not valid, it will be reset to null */
            MPIR_ERRTEST_ARGNULL(status, "status", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */
    topo_ptr = MPIR_Topology_get( comm_ptr );
    if (topo_ptr) {
        /* kind identifies the attached topology (e.g. MPI_CART, MPI_GRAPH,
           MPI_DIST_GRAPH as used elsewhere in this file). */
        *status = (int)(topo_ptr->kind);
    }
    else {
        *status = MPI_UNDEFINED;
    }
    /* ... end of body of routine ... */

    /* The fn_exit label is only needed (and only reachable via goto) when
       error checking is compiled in; guard it to avoid an unused-label
       warning otherwise. */
#ifdef HAVE_ERROR_CHECKING
  fn_exit:
#endif
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_TOPO_TEST);
    return mpi_errno;

    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
  fn_fail:
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_topo_test", "**mpi_topo_test %C %p", comm, status);
    }
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
#   endif
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Cartdim_get - Retrieves Cartesian topology information associated with a
                  communicator

Input Parameters:
. comm - communicator with cartesian structure (handle)

Output Parameters:
. ndims - number of dimensions of the cartesian structure (integer)

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Cartdim_get(MPI_Comm comm, int *ndims)
{
    static const char FCNAME[] = "MPI_Cartdim_get";
    int mpi_errno = MPI_SUCCESS;
    MPIR_Comm *comm_ptr = NULL;
    MPIR_Topology *cart_ptr;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_CARTDIM_GET);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_CARTDIM_GET);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPIR_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_ARGNULL(ndims,"ndims",mpi_errno);
            /* Validate comm_ptr */
            MPIR_Comm_valid_ptr( comm_ptr, mpi_errno, TRUE );
            /* If comm_ptr is not valid, it will be reset to null */
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* The topology check happens unconditionally (outside
       HAVE_ERROR_CHECKING) because cart_ptr is dereferenced below. */
    cart_ptr = MPIR_Topology_get( comm_ptr );
    MPIR_ERR_CHKANDJUMP((!cart_ptr || cart_ptr->kind != MPI_CART), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notcarttopo");

    *ndims = cart_ptr->topo.cart.ndims;

    /* ... end of body of routine ... */

  fn_exit:
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_CARTDIM_GET);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_cartdim_get", "**mpi_cartdim_get %C %p", comm, ndims);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Cart_coords - Determines process coords in cartesian topology given rank
                  in group

Input Parameters:
+ comm - communicator with cartesian structure (handle)
. rank - rank of a process within group of 'comm' (integer)
- maxdims - length of vector 'coords' in the calling program (integer)

Output Parameter:
. coords - integer array (of size 'ndims') containing the Cartesian
  coordinates of specified process (integer)

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_RANK
.N MPI_ERR_DIMS
.N MPI_ERR_ARG
@*/
int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int *coords)
{
    static const char FCNAME[] = "MPI_Cart_coords";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPIR_Topology *cart_ptr;
    int i, nnodes;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_CART_COORDS);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_CART_COORDS);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            /* If comm_ptr is not valid, it will be reset to null */
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
            MPIR_ERRTEST_RANK(comm_ptr, rank, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    cart_ptr = MPIR_Topology_get( comm_ptr );

    /* NOTE(review): unlike some routines in this file, the cart-topology
       check below is compiled only under HAVE_ERROR_CHECKING, yet cart_ptr
       is dereferenced unconditionally in the body — with error checking
       disabled, a non-Cartesian communicator is not caught here. */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIU_ERR_CHKANDJUMP((!cart_ptr || cart_ptr->kind != MPI_CART), mpi_errno,
                                MPI_ERR_TOPOLOGY, "**notcarttopo");
            MPIU_ERR_CHKANDJUMP2((cart_ptr->topo.cart.ndims > maxdims), mpi_errno,
                                 MPI_ERR_ARG, "**dimsmany", "**dimsmany %d %d",
                                 cart_ptr->topo.cart.ndims, maxdims);
            if (cart_ptr->topo.cart.ndims) {
                MPIR_ERRTEST_ARGNULL(coords,"coords",mpi_errno);
                if (mpi_errno) goto fn_fail;
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* Calculate coords by peeling off one dimension at a time: nnodes is
       reduced to the number of processes spanned by the remaining
       dimensions, so rank/nnodes is the coordinate along dimension i and
       rank%nnodes is what remains to decompose. */
    nnodes = cart_ptr->topo.cart.nnodes;
    for ( i=0; i < cart_ptr->topo.cart.ndims; i++ ) {
        nnodes    = nnodes / cart_ptr->topo.cart.dims[i];
        coords[i] = rank / nnodes;
        rank      = rank % nnodes;
    }

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_CART_COORDS);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_cart_coords", "**mpi_cart_coords %C %d %d %p",
            comm, rank, maxdims, coords);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Graph_get - Retrieves graph topology information associated with a
                communicator

Input Parameters:
+ comm - communicator with graph structure (handle)
. maxindex - length of vector 'indx' in the calling program (integer)
- maxedges - length of vector 'edges' in the calling program (integer)

Output Parameters:
+ indx - array of integers containing the graph structure (for details see
  the definition of 'MPI_GRAPH_CREATE')
- edges - array of integers containing the graph structure

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, int indx[], int edges[])
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Comm *comm_ptr = NULL;
    MPIR_Topology *topo_ptr;
    int i, n, *vals;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_GRAPH_GET);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_GRAPH_GET);

    /* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif

    /* Convert MPI object handles to object pointers */
    MPIR_Comm_get_ptr(comm, comm_ptr);

    /* Validate parameters and objects (post conversion) */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPIR_Comm_valid_ptr(comm_ptr, mpi_errno, TRUE);
            if (mpi_errno) goto fn_fail;
            /* If comm_ptr is not valid, it will be reset to null */
            MPIR_ERRTEST_ARGNULL(edges, "edges", mpi_errno);
            MPIR_ERRTEST_ARGNULL(indx, "indx", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    topo_ptr = MPIR_Topology_get(comm_ptr);
    MPIR_ERR_CHKANDJUMP((!topo_ptr || topo_ptr->kind != MPI_GRAPH), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notgraphtopo");
    /* The caller's arrays must be big enough for the full index and edge
       lists stored with the topology. */
    MPIR_ERR_CHKANDJUMP3((topo_ptr->topo.graph.nnodes > maxindex), mpi_errno,
                         MPI_ERR_ARG, "**argtoosmall", "**argtoosmall %s %d %d",
                         "maxindex", maxindex, topo_ptr->topo.graph.nnodes);
    MPIR_ERR_CHKANDJUMP3((topo_ptr->topo.graph.nedges > maxedges), mpi_errno,
                         MPI_ERR_ARG, "**argtoosmall", "**argtoosmall %s %d %d",
                         "maxedges", maxedges, topo_ptr->topo.graph.nedges);

    /* Get index: one cumulative-degree entry per node */
    n = topo_ptr->topo.graph.nnodes;
    vals = topo_ptr->topo.graph.index;
    for (i = 0; i < n; i++)
        *indx++ = *vals++;

    /* Get edges: the flattened adjacency lists */
    n = topo_ptr->topo.graph.nedges;
    vals = topo_ptr->topo.graph.edges;
    for (i = 0; i < n; i++)
        *edges++ = *vals++;

    /* ... end of body of routine ... */

  fn_exit:
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_GRAPH_GET);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__,
                                         MPI_ERR_OTHER, "**mpi_graph_get",
                                         "**mpi_graph_get %C %d %d %p %p",
                                         comm, maxindex, maxedges, indx, edges);
    }
#endif
    mpi_errno = MPIR_Err_return_comm(comm_ptr, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@ MPI_Cart_sub - Partitions a communicator into subgroups which form lower-dimensional cartesian subgrids Input Parameters: + comm - communicator with cartesian structure (handle) - remain_dims - the 'i'th entry of remain_dims specifies whether the 'i'th dimension is kept in the subgrid (true) or is dropped (false) (logical vector) Output Parameters: . newcomm - communicator containing the subgrid that includes the calling process (handle) .N ThreadSafe .N Fortran .N Errors .N MPI_SUCCESS .N MPI_ERR_TOPOLOGY .N MPI_ERR_COMM .N MPI_ERR_ARG @*/ int MPI_Cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *newcomm) { int mpi_errno = MPI_SUCCESS, all_false; int ndims, key, color, ndims_in_subcomm, nnodes_in_subcomm, i, j, rank; MPID_Comm *comm_ptr = NULL, *newcomm_ptr; MPIR_Topology *topo_ptr, *toponew_ptr; MPIU_CHKPMEM_DECL(4); MPID_MPI_STATE_DECL(MPID_STATE_MPI_CART_SUB); MPIR_ERRTEST_INITIALIZED_ORDIE(); MPIU_THREAD_CS_ENTER(ALLFUNC,); MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_CART_SUB); /* Validate parameters, especially handles needing to be converted */ # ifdef HAVE_ERROR_CHECKING { MPID_BEGIN_ERROR_CHECKS; { MPIR_ERRTEST_COMM(comm, mpi_errno); } MPID_END_ERROR_CHECKS; } # endif /* Convert MPI object handles to object pointers */ MPID_Comm_get_ptr( comm, comm_ptr ); /* Validate parameters and objects (post conversion) */ # ifdef HAVE_ERROR_CHECKING { MPID_BEGIN_ERROR_CHECKS; { /* Validate comm_ptr */ MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE ); /* If comm_ptr is not valid, it will be reset to null */ if (mpi_errno) goto fn_fail; } MPID_END_ERROR_CHECKS; } # endif /* HAVE_ERROR_CHECKING */ /* ... body of routine ... 
*/ /* Check that the communicator already has a Cartesian topology */ topo_ptr = MPIR_Topology_get( comm_ptr ); MPIU_ERR_CHKANDJUMP(!topo_ptr,mpi_errno,MPI_ERR_TOPOLOGY,"**notopology"); MPIU_ERR_CHKANDJUMP(topo_ptr->kind != MPI_CART,mpi_errno,MPI_ERR_TOPOLOGY, "**notcarttopo"); ndims = topo_ptr->topo.cart.ndims; all_false = 1; /* all entries in remain_dims are false */ for (i=0; i<ndims; i++) { if (remain_dims[i]) { /* any 1 is true, set flag to 0 and break */ all_false = 0; break; } } if (all_false) { /* ndims=0, or all entries in remain_dims are false. MPI 2.1 says return a 0D Cartesian topology. */ mpi_errno = MPIR_Cart_create_impl(comm_ptr, 0, NULL, NULL, 0, newcomm); if (mpi_errno) MPIU_ERR_POP(mpi_errno); } else { /* Determine the number of remaining dimensions */ ndims_in_subcomm = 0; nnodes_in_subcomm = 1; for (i=0; i<ndims; i++) { if (remain_dims[i]) { ndims_in_subcomm ++; nnodes_in_subcomm *= topo_ptr->topo.cart.dims[i]; } } /* Split this communicator. Do this even if there are no remaining dimensions so that the topology information is attached */ key = 0; color = 0; for (i=0; i<ndims; i++) { if (remain_dims[i]) { key = (key * topo_ptr->topo.cart.dims[i]) + topo_ptr->topo.cart.position[i]; } else { color = (color * topo_ptr->topo.cart.dims[i]) + topo_ptr->topo.cart.position[i]; } } mpi_errno = MPIR_Comm_split_impl( comm_ptr, color, key, &newcomm_ptr ); if (mpi_errno) MPIU_ERR_POP(mpi_errno); *newcomm = newcomm_ptr->handle; /* Save the topology of this new communicator */ MPIU_CHKPMEM_MALLOC(toponew_ptr,MPIR_Topology*,sizeof(MPIR_Topology), mpi_errno,"toponew_ptr"); toponew_ptr->kind = MPI_CART; toponew_ptr->topo.cart.ndims = ndims_in_subcomm; toponew_ptr->topo.cart.nnodes = nnodes_in_subcomm; if (ndims_in_subcomm) { MPIU_CHKPMEM_MALLOC(toponew_ptr->topo.cart.dims,int*, ndims_in_subcomm*sizeof(int),mpi_errno,"cart.dims"); MPIU_CHKPMEM_MALLOC(toponew_ptr->topo.cart.periodic,int*, ndims_in_subcomm*sizeof(int),mpi_errno,"cart.periodic"); 
MPIU_CHKPMEM_MALLOC(toponew_ptr->topo.cart.position,int*, ndims_in_subcomm*sizeof(int),mpi_errno,"cart.position"); } else { toponew_ptr->topo.cart.dims = 0; toponew_ptr->topo.cart.periodic = 0; toponew_ptr->topo.cart.position = 0; } j = 0; for (i=0; i<ndims; i++) { if (remain_dims[i]) { toponew_ptr->topo.cart.dims[j] = topo_ptr->topo.cart.dims[i]; toponew_ptr->topo.cart.periodic[j] = topo_ptr->topo.cart.periodic[i]; j++; } } /* Compute the position of this process in the new communicator */ rank = newcomm_ptr->rank; for (i=0; i<ndims_in_subcomm; i++) { nnodes_in_subcomm /= toponew_ptr->topo.cart.dims[i]; toponew_ptr->topo.cart.position[i] = rank / nnodes_in_subcomm; rank = rank % nnodes_in_subcomm; } mpi_errno = MPIR_Topology_put( newcomm_ptr, toponew_ptr ); if (mpi_errno) goto fn_fail; }
/*@
MPI_Graphdims_get - Retrieves graph topology information associated with a
                    communicator

Input Parameters:
. comm - communicator for group with graph structure (handle)

Output Parameters:
+ nnodes - number of nodes in graph (integer)
- nedges - number of edges in graph (integer)

.N SignalSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Graphdims_get(MPI_Comm comm, int *nnodes, int *nedges)
{
    static const char FCNAME[] = "MPI_Graphdims_get";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPIR_Topology *topo_ptr;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_GRAPHDIMS_GET);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_GRAPHDIMS_GET);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate comm_ptr */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;
            /* If comm_ptr is not valid, it will be reset to null */
            MPIR_ERRTEST_ARGNULL(nnodes, "nnodes", mpi_errno );
            MPIR_ERRTEST_ARGNULL(nedges, "nedges", mpi_errno );
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* The topology check happens unconditionally because topo_ptr is
       dereferenced below. */
    topo_ptr = MPIR_Topology_get( comm_ptr );
    MPIU_ERR_CHKANDJUMP((!topo_ptr || topo_ptr->kind != MPI_GRAPH), mpi_errno,
                        MPI_ERR_TOPOLOGY, "**notgraphtopo");

    /* Set nnodes */
    *nnodes = topo_ptr->topo.graph.nnodes;
    /* Set nedges */
    *nedges = topo_ptr->topo.graph.nedges;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_GRAPHDIMS_GET);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_graphdims_get", "**mpi_graphdims_get %C %p %p",
            comm, nnodes, nedges);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}