/*@
  MPI_Op_free - Frees a user-defined combination function handle

Input Parameter:
. op - operation (handle)

Notes:
'op' is set to 'MPI_OP_NULL' on exit.

.N NULL

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
.N MPI_ERR_PERM_OP

.seealso: MPI_Op_create
@*/
EXPORT_MPI_API int MPI_Op_free( MPI_Op *op )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_OP *old;
    static char myname[] = "MPI_OP_FREE";

    /* Balance the TR_POP at the end of this routine.  The original code
       omitted this push, leaving the trace stack unbalanced relative to
       every other routine in this file. */
    TR_PUSH(myname);

#ifndef MPIR_NO_ERROR_CHECKING
    /* Freeing a NULL op should not return successfully */
    MPIR_TEST_ARG(op);
    if ( (*op) == MPI_OP_NULL ) {
	mpi_errno = MPIR_ERRCLASS_TO_CODE(MPI_ERR_OP,MPIR_ERR_OP_NULL);
    }
    if (mpi_errno)
	return MPIR_ERROR(MPIR_COMM_WORLD, mpi_errno, myname );
#endif

    /* Resolve and validate the handle */
    old = MPIR_GET_OP_PTR( *op );
    MPIR_TEST_MPI_OP(*op,old,MPIR_COMM_WORLD,myname);

    /* We can't free permanent objects unless finalize has been called */
    if ( ( old->permanent == 1 ) && (MPIR_Has_been_initialized == 1) )
	return MPIR_ERROR( MPIR_COMM_WORLD,
		MPIR_ERRCLASS_TO_CODE(MPI_ERR_ARG,MPIR_ERR_PERM_OP),
		myname );

    /* Invalidate the cookie, release the storage, remove the handle from
       the translation table, and null the user's handle as documented */
    MPIR_CLR_COOKIE(old);
    FREE( old );
    MPIR_RmPointer( *op );
    (*op) = MPI_OP_NULL;
    TR_POP;
    return (MPI_SUCCESS);
}
/*@
MPI_Graph_get - Retrieves graph topology information associated with a
                communicator

Input Parameters:
+ comm - communicator with graph structure (handle)
. maxindex - length of vector 'index' in the calling program  (integer)
- maxedges - length of vector 'edges' in the calling program  (integer)

Output Parameter:
+ index - array of integers containing the graph structure (for details
  see the definition of 'MPI_GRAPH_CREATE')
- edges - array of integers containing the graph structure

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Graph_get ( MPI_Comm comm, int maxindex, int maxedges,
		    int *index, int *edges )
{
    int i, limit, flag;
    int *src;
    int mpi_errno = MPI_SUCCESS;
    MPIR_TOPOLOGY *topo;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_GRAPH_GET";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_ARG(index);
    MPIR_TEST_ARG(edges);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Fetch the cached topology attribute from the communicator */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = MPI_Attr_get ( comm, MPIR_TOPOLOGY_KEYVAL, (void **)&topo,
			       &flag );
    MPIR_ERROR_POP(comm_ptr);

    /* The communicator must carry a graph topology.  Note the short
       circuit: topo is only dereferenced when the attribute was found. */
    if ( flag != 1 || topo->type != MPI_GRAPH ) {
	mpi_errno = MPI_ERR_TOPOLOGY;
	return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    }

    /* Copy out as much of the index array as the caller's buffer holds */
    limit = topo->graph.nnodes;
    src   = topo->graph.index;
    if ( index != (int *)0 ) {
	for ( i = 0; i < maxindex && i < limit; i++ )
	    index[i] = src[i];
    }

    /* Copy out as much of the edges array as the caller's buffer holds */
    limit = topo->graph.nedges;
    src   = topo->graph.edges;
    if ( edges != (int *)0 ) {
	for ( i = 0; i < maxedges && i < limit; i++ )
	    edges[i] = src[i];
    }

    TR_POP;
    return (mpi_errno);
}
/*@
MPI_Graphdims_get - Retrieves graph topology information associated with
                    a communicator

Input Parameters:
. comm - communicator for group with graph structure (handle)

Output Parameter:
+ nnodes - number of nodes in graph (integer)
- nedges - number of edges in graph (integer)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
EXPORT_MPI_API int MPI_Graphdims_get ( MPI_Comm comm, int *nnodes,
				       int *nedges )
{
    int mpi_errno = MPI_SUCCESS, flag, is_graph;
    MPIR_TOPOLOGY *topo;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_GRAPHDIMS_GET";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_ARG(nnodes);
    MPIR_TEST_ARG(nedges);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Look up the cached topology attribute on this communicator */
    MPIR_ERROR_PUSH( comm_ptr );
    mpi_errno = MPI_Attr_get ( comm, MPIR_TOPOLOGY_KEYVAL, (void **)&topo,
			       &flag );
    MPIR_ERROR_POP( comm_ptr );
    if (mpi_errno) {
	return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    }

    /* A graph topology is present only when the attribute was found and
       is of graph type; the short-circuit keeps topo from being
       dereferenced when no attribute exists */
    is_graph = (flag == 1) && (topo->type == MPI_GRAPH);

    /* Report MPI_UNDEFINED dimensions when there is no graph topology */
    if ( nnodes != (int *)0 )
	*nnodes = is_graph ? topo->graph.nnodes : MPI_UNDEFINED;

    if ( nedges != (int *)0 )
	*nedges = is_graph ? topo->graph.nedges : MPI_UNDEFINED;

    TR_POP;
    return (MPI_SUCCESS);
}
/*@
MPI_Graph_map - Maps process to graph topology information

Input Parameters:
+ comm - input communicator (handle)
. nnodes - number of graph nodes (integer)
. index - integer array specifying the graph structure, see
  'MPI_GRAPH_CREATE'
- edges - integer array specifying the graph structure

Output Parameter:
. newrank - reordered rank of the calling process; 'MPI_UNDEFINED' if the
  calling process does not belong to graph (integer)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
EXPORT_MPI_API int MPI_Graph_map ( MPI_Comm comm_old, int nnodes,
				   int *index, int *edges, int *newrank )
{
    int my_rank, comm_size;
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_old_ptr;
    static char myname[] = "MPI_GRAPH_MAP";

    TR_PUSH(myname);
    comm_old_ptr = MPIR_GET_COMM_PTR(comm_old);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm_old,comm_old_ptr,comm_old_ptr,myname);
    if (nnodes < 1)
	mpi_errno = MPI_ERR_ARG;
    MPIR_TEST_ARG(newrank);
    MPIR_TEST_ARG(index);
    MPIR_TEST_ARG(edges);
    if (mpi_errno)
	return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
#endif

    /* The communicator must hold at least nnodes processes */
    MPIR_Comm_size( comm_old_ptr, &comm_size );
    if (comm_size < nnodes) {
	return MPIR_ERROR( comm_old_ptr, MPI_ERR_ARG, myname );
    }

    /* No reordering is attempted: ranks below nnodes keep their rank,
       everyone else is outside the graph */
    MPIR_Comm_rank ( comm_old_ptr, &my_rank );
    *newrank = (my_rank < nnodes) ? my_rank : MPI_UNDEFINED;

    TR_POP;
    return (mpi_errno);
}
/*@ MPI_Keyval_free - Frees attribute key for communicator cache attribute Input Parameter: . keyval - Frees the integer key value (integer) Note: Key values are global (they can be used with any and all communicators) .N fortran .N Errors .N MPI_SUCCESS .N MPI_ERR_ARG .N MPI_ERR_PERM_KEY .seealso: MPI_Keyval_create @*/ EXPORT_MPI_API int MPI_Keyval_free ( int *keyval ) { int mpi_errno = MPI_SUCCESS; MPIR_Attr_key *attr_key; static char myname[] = "MPI_KEYVAL_FREE"; #ifndef MPIR_NO_ERROR_CHECKING MPIR_TEST_ARG(keyval); if (*keyval == MPI_KEYVAL_INVALID) { /* Can't free an invalid keyval */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_KEYVAL, myname, (char *)0, (char *)0 ); } if (mpi_errno) return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname ); #endif attr_key = MPIR_GET_KEYVAL_PTR( *keyval ); #ifndef MPIR_NO_ERROR_CHECKING MPIR_TEST_MPI_KEYVAL(*keyval,attr_key,MPIR_COMM_WORLD,myname); if ( (attr_key->permanent == 1) && (MPIR_Has_been_initialized == 1) ){ mpi_errno = MPIR_ERRCLASS_TO_CODE(MPI_ERR_ARG,MPIR_ERR_PERM_KEY); } if (mpi_errno) return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname ); #endif if (attr_key->ref_count <= 1) { MPIR_CLR_COOKIE(attr_key); FREE ( attr_key ); MPIR_RmPointer( *keyval ); } else { MPIR_REF_DECR(attr_key); #ifdef FOO /* Debugging only */ if (MPIR_Has_been_initialized != 1) PRINTF( "attr_key count is %d\n", attr_key->ref_count ); #endif } (*keyval) = MPI_KEYVAL_INVALID; return (MPI_SUCCESS); }
/*@
MPI_Type_size - Return the number of bytes occupied by entries in the
                datatype

Input Parameters:
. datatype - datatype (handle)

Output Parameter:
. size - datatype size (integer)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_size ( MPI_Datatype datatype, int *size )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_TYPE_SIZE";

    TR_PUSH(myname);

    /* Reject a null output pointer before touching the datatype */
    MPIR_TEST_ARG(size);
    if (mpi_errno)
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );

    /* Resolve the handle and validate it */
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,MPIR_COMM_WORLD,myname);

    /* Hand back the size, in bytes, recorded in the datatype object */
    *size = (int)(dtype_ptr->size);

    TR_POP;
    return (MPI_SUCCESS);
}
/*@
MPI_Type_ub - Returns the upper bound of a datatype

Input Parameters:
. datatype - datatype (handle)

Output Parameter:
. displacement - displacement of upper bound from origin, in bytes
  (integer)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_ub ( MPI_Datatype datatype, MPI_Aint *displacement )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_TYPE_UB";

    TR_PUSH(myname);

    /* Reject a null output pointer before touching the datatype */
    MPIR_TEST_ARG(displacement);
    if (mpi_errno)
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );

    /* Resolve the handle, validate it, and hand back the upper bound */
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,MPIR_COMM_WORLD, myname);
    *displacement = dtype_ptr->ub;

    TR_POP;
    return (MPI_SUCCESS);
}
/*@
MPI_Type_create_subarray - Creates a datatype describing a subarray of a
                           multidimensional array

Input Parameters:
+ ndims - number of array dimensions (positive integer)
. array_of_sizes - number of elements of type oldtype in each dimension
  of the full array (array of positive integers)
. array_of_subsizes - number of elements of type oldtype in each
  dimension of the subarray (array of positive integers)
. array_of_starts - starting coordinates of the subarray in each
  dimension (array of nonnegative integers)
. order - array storage order flag (state)
- oldtype - old datatype (handle)

Output Parameters:
. newtype - new datatype (handle)

.N fortran
@*/
EXPORT_MPI_API int MPI_Type_create_subarray( int ndims, int *array_of_sizes, int *array_of_subsizes, int *array_of_starts, int order, MPI_Datatype oldtype, MPI_Datatype *newtype)
{
    MPI_Aint extent, disps[3], size;
    int i, blklens[3];
    /* tmp1 accumulates the subarray type dimension by dimension;
       types[] holds the LB/body/UB triple for the final struct */
    MPI_Datatype tmp1, tmp2, types[3];
    int mpi_errno = 0;
    static char myname[] = "MPI_TYPE_CREATE_SUBARRAY";

    /* --- Argument checking --- */
    if (ndims <= 0) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_NAMED, myname, (char *)0, "Invalid %s argument = %d", "ndims", ndims );
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
    }
    MPIR_TEST_ARG(array_of_sizes);
    MPIR_TEST_ARG(array_of_subsizes);
    MPIR_TEST_ARG(array_of_starts);
    if (mpi_errno) return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );

    /* Each dimension needs a positive size and subsize and a nonnegative
       start.  NOTE(review): there is no check here that
       subsizes[i] + starts[i] <= sizes[i], as the MPI standard requires
       of the caller — out-of-range values silently build a bogus type;
       confirm whether validation is deliberately omitted. */
    for (i=0; i<ndims; i++) {
	if (array_of_sizes[i] <= 0) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_ARRAY_VAL, myname, (char *)0, (char *)0, "array_of_sizes", i, array_of_sizes[i] );
	    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
	}
	if (array_of_subsizes[i] <= 0) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_ARRAY_VAL, myname, (char *)0, (char *)0, "array_of_subsizes", i, array_of_subsizes[i] );
	    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
	}
	if (array_of_starts[i] < 0) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_ARRAY_VAL, myname, (char *)0, (char *)0, "array_of_starts", i, array_of_starts[i] );
	    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
	}
    }
    /* order argument checked below */
    if (oldtype == MPI_DATATYPE_NULL) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_TYPE, MPIR_ERR_TYPE_NULL, myname, (char *)0, (char *)0 );
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
    }

    MPI_Type_extent(oldtype, &extent);

    if (order == MPI_ORDER_FORTRAN) {
	/* dimension 0 changes fastest */
	if (ndims == 1)
	    /* One dimension: the subarray is simply a contiguous run */
	    MPI_Type_contiguous(array_of_subsizes[0], oldtype, &tmp1);
	else {
	    /* Build up the type one dimension at a time: a vector over
	       the two fastest dimensions, then an hvector per further
	       dimension whose stride is the full (byte) extent of all
	       faster dimensions */
	    MPI_Type_vector(array_of_subsizes[1], array_of_subsizes[0], array_of_sizes[0], oldtype, &tmp1);
	    size = array_of_sizes[0]*extent;
	    for (i=2; i<ndims; i++) {
		size *= array_of_sizes[i-1];
		MPI_Type_hvector(array_of_subsizes[i], 1, size, tmp1, &tmp2);
		/* Free the intermediate type before chaining onward */
		MPI_Type_free(&tmp1);
		tmp1 = tmp2;
	    }
	}

	/* add displacement and UB: disps[1] accumulates the element
	   offset of the subarray origin within the full array */
	disps[1] = array_of_starts[0];
	size = 1;
	for (i=1; i<ndims; i++) {
	    size *= array_of_sizes[i-1];
	    disps[1] += size*array_of_starts[i];
	}
	/* rest done below for both Fortran and C order */
    }

    else if (order == MPI_ORDER_C) {
	/* dimension ndims-1 changes fastest */
	if (ndims == 1)
	    MPI_Type_contiguous(array_of_subsizes[0], oldtype, &tmp1);
	else {
	    /* Same construction as the Fortran branch but iterating the
	       dimensions in the opposite direction */
	    MPI_Type_vector(array_of_subsizes[ndims-2], array_of_subsizes[ndims-1], array_of_sizes[ndims-1], oldtype, &tmp1);
	    size = array_of_sizes[ndims-1]*extent;
	    for (i=ndims-3; i>=0; i--) {
		size *= array_of_sizes[i+1];
		MPI_Type_hvector(array_of_subsizes[i], 1, size, tmp1, &tmp2);
		MPI_Type_free(&tmp1);
		tmp1 = tmp2;
	    }
	}

	/* add displacement and UB */
	disps[1] = array_of_starts[ndims-1];
	size = 1;
	for (i=ndims-2; i>=0; i--) {
	    size *= array_of_sizes[i+1];
	    disps[1] += size*array_of_starts[i];
	}
    }
    else {
	/* Neither MPI_ORDER_FORTRAN nor MPI_ORDER_C */
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ORDER, myname, (char *)0, (char *)0, order );
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
    }

    /* Convert the element offset to bytes, and set the UB marker to the
       byte extent of the full array so the new type tiles correctly */
    disps[1] *= extent;

    disps[2] = extent;
    for (i=0; i<ndims; i++) disps[2] *= array_of_sizes[i];

    disps[0] = 0;
    blklens[0] = blklens[1] = blklens[2] = 1;
    types[0] = MPI_LB;
    types[1] = tmp1;
    types[2] = MPI_UB;

    /* Sandwich the body between explicit LB and UB markers */
    MPI_Type_struct(3, blklens, disps, types, newtype);

    MPI_Type_free(&tmp1);

    return MPI_SUCCESS;
}
/*@
MPI_Cart_create - Makes a new communicator to which topology information
                  has been attached

Input Parameters:
+ comm_old - input communicator (handle)
. ndims - number of dimensions of cartesian grid (integer)
. dims - integer array of size ndims specifying the number of processes
  in each dimension
. periods - logical array of size ndims specifying whether the grid is
  periodic (true) or not (false) in each dimension
- reorder - ranking may be reordered (true) or not (false) (logical)

Output Parameter:
. comm_cart - communicator with new cartesian topology (handle)

Algorithm:
We ignore 'reorder' info currently.

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_DIMS
.N MPI_ERR_ARG
@*/
int MPI_Cart_create ( MPI_Comm comm_old, int ndims, int *dims, int *periods, int reorder, MPI_Comm *comm_cart )
{
    int range[1][3];
    MPI_Group group_old, group;
    int i, rank, num_ranks = 1;
    int mpi_errno = MPI_SUCCESS;
    int flag, size;
    MPIR_TOPOLOGY *topo;
    struct MPIR_COMMUNICATOR *comm_old_ptr;
    static char myname[] = "MPI_CART_CREATE";

    TR_PUSH(myname);
    comm_old_ptr = MPIR_GET_COMM_PTR(comm_old);

    /* Check validity of arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm_old,comm_old_ptr,comm_old_ptr,myname);
    MPIR_TEST_ARG(comm_cart);
    MPIR_TEST_ARG(periods);
    if (ndims < 1 || dims == (int *)0) mpi_errno = MPI_ERR_DIMS;
    if (mpi_errno)
	return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );

    /* Check for Intra-communicator: cartesian topologies may only be
       attached to intra-communicators */
    MPI_Comm_test_inter ( comm_old, &flag );
    if (flag)
	return MPIR_ERROR(comm_old_ptr,
			  MPIR_ERRCLASS_TO_CODE(MPI_ERR_COMM,MPIR_ERR_COMM_INTER), myname );
#endif

    /* Determine number of ranks in topology; the absolute value is used
       so negative dims entries do not produce a negative product */
    for ( i=0; i<ndims; i++ )
	num_ranks    *= (dims[i]>0)?dims[i]:-dims[i];
    if ( num_ranks < 1 ) {
	(*comm_cart)  = MPI_COMM_NULL;
	return MPIR_ERROR( comm_old_ptr, MPI_ERR_TOPOLOGY, myname );
    }

    /* Is the old communicator big enough?
     */
    MPIR_Comm_size (comm_old_ptr, &size);
    if (num_ranks > size) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_TOPOLOGY, MPIR_ERR_TOPO_TOO_LARGE, myname,
				     "Topology size is larger than size of communicator",
				     "Topology size %d is greater than communicator size %d", num_ranks, size );
	return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
    }

    /* Make new comm: take the first num_ranks processes of the old
       communicator (reorder is ignored) */
    range[0][0] = 0;
    range[0][1] = num_ranks - 1;
    range[0][2] = 1;
    MPI_Comm_group ( comm_old, &group_old );
    MPI_Group_range_incl ( group_old, 1, range, &group );
    MPI_Comm_create  ( comm_old, group, comm_cart );
    MPI_Group_free( &group );
    MPI_Group_free( &group_old );

    /* Store topology information in new communicator; processes outside
       the new communicator get MPI_COMM_NULL and no topology */
    if ( (*comm_cart) != MPI_COMM_NULL ) {
	MPIR_ALLOC(topo,(MPIR_TOPOLOGY *) MPIR_SBalloc ( MPIR_topo_els ),
		   comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
	MPIR_SET_COOKIE(&topo->cart,MPIR_CART_TOPOL_COOKIE)
	topo->cart.type       = MPI_CART;
	topo->cart.nnodes     = num_ranks;
	topo->cart.ndims      = ndims;
	/* One allocation backs all three per-dimension arrays: dims,
	   then periods, then position, each ndims ints long */
	MPIR_ALLOC(topo->cart.dims,(int *)MALLOC( sizeof(int) * 3 * ndims ),
		   comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
	topo->cart.periods    = topo->cart.dims + ndims;
	topo->cart.position   = topo->cart.periods + ndims;
	for ( i=0; i<ndims; i++ ) {
	    topo->cart.dims[i]    = dims[i];
	    topo->cart.periods[i] = periods[i];
	}

	/* Compute my position: peel off one coordinate per dimension by
	   repeatedly dividing the rank by the size of the remaining
	   (slower-varying) sub-grid */
	MPI_Comm_rank ( (*comm_cart), &rank );
	for ( i=0; i < ndims; i++ ) {
	    num_ranks = num_ranks / dims[i];
	    topo->cart.position[i]  = rank / num_ranks;
	    rank                    = rank % num_ranks;
	}

	/* cache topology information with the new communicator */
	MPI_Attr_put ( (*comm_cart), MPIR_TOPOLOGY_KEYVAL, (void *)topo );
    }
    TR_POP;
    return (mpi_errno);
}