/* Wrap one instance of a named predefined type in a new contiguous datatype
 * and record the F90 combiner and its (r, p) parameters in the contents. */
static int MPIR_Create_unnamed_predefined(MPI_Datatype old, int combiner, int r, int p,
                                          MPI_Datatype * new_ptr)
{
    int mpi_errno;

    /* Create a contiguous type from one instance of the named type */
    mpi_errno = MPIR_Type_contiguous(1, old, new_ptr);
    if (mpi_errno != MPI_SUCCESS)
        return mpi_errno;

    /* Initialize the contents data: which integers are recorded depends on
     * the combiner (F90_INTEGER carries only r; REAL/COMPLEX carry p then r) */
    {
        MPIR_Datatype *dtp;
        int params[2];
        int nparams = 0;

        if (combiner == MPI_COMBINER_F90_INTEGER) {
            nparams = 1;
            params[0] = r;
        } else if (combiner == MPI_COMBINER_F90_REAL || combiner == MPI_COMBINER_F90_COMPLEX) {
            nparams = 2;
            params[0] = p;
            params[1] = r;
        }

        MPIR_Datatype_get_ptr(*new_ptr, dtp);
        mpi_errno = MPIR_Datatype_set_contents(dtp, combiner, nparams, 0, 0, params, NULL, NULL);
    }

    return mpi_errno;
}
/* Implementation of MPI_Type_contiguous: build the new type, then record its
 * envelope contents (combiner, the count, and the old type) so it can later
 * be decoded with MPI_Type_get_contents. */
int MPIR_Type_contiguous_impl(int count, MPI_Datatype oldtype, MPI_Datatype * newtype)
{
    MPI_Datatype handle;
    MPIR_Datatype *dtp;
    int mpi_errno;

    mpi_errno = MPIR_Type_contiguous(count, oldtype, &handle);
    if (mpi_errno != MPI_SUCCESS)
        return mpi_errno;

    MPIR_Datatype_get_ptr(handle, dtp);
    mpi_errno = MPIR_Datatype_set_contents(dtp, MPI_COMBINER_CONTIGUOUS, 1,     /* ints (count) */
                                           0,   /* aints */
                                           1,   /* types */
                                           &count, NULL, &oldtype);
    if (mpi_errno != MPI_SUCCESS)
        return mpi_errno;

    /* Only expose the handle to the caller once it is fully initialized */
    MPIR_OBJ_PUBLISH_HANDLE(*newtype, handle);
    return mpi_errno;
}
/* Implementation of MPI_Type_indexed: build the new type, then stash the
 * count, blocklengths, and (element) displacements in its contents array. */
int MPIR_Type_indexed_impl(int count, const int *array_of_blocklengths,
                           const int *array_of_displacements, MPI_Datatype oldtype,
                           MPI_Datatype * newtype)
{
    int mpi_errno = MPI_SUCCESS;
    MPI_Datatype new_handle;
    MPIR_Datatype *new_dtp;
    int idx, *iarray;
    MPIR_CHKLMEM_DECL(1);

    mpi_errno = MPIR_Type_indexed(count, array_of_blocklengths, array_of_displacements, 0,      /* displacements not in bytes */
                                  oldtype, &new_handle);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* Pack every integer value -- the count, the blocklengths, and the
     * displacements -- into one temporary buffer for the contents record. */
    MPIR_CHKLMEM_MALLOC(iarray, int *, (2 * count + 1) * sizeof(int), mpi_errno,
                        "contents integer array", MPL_MEM_BUFFER);

    iarray[0] = count;
    for (idx = 0; idx < count; idx++) {
        iarray[idx + 1] = array_of_blocklengths[idx];
        iarray[idx + count + 1] = array_of_displacements[idx];
    }

    MPIR_Datatype_get_ptr(new_handle, new_dtp);
    mpi_errno = MPIR_Datatype_set_contents(new_dtp, MPI_COMBINER_INDEXED, 2 * count + 1,        /* ints */
                                           0,   /* aints */
                                           1,   /* types */
                                           iarray, NULL, &oldtype);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    MPIR_OBJ_PUBLISH_HANDLE(*newtype, new_handle);

  fn_exit:
    MPIR_CHKLMEM_FREEALL();
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Return (creating on first use) the unnamed predefined datatype that
 * represents the F90 type described by (combiner, r, p).  Results are cached
 * in the static f90Types[] table so repeated requests for the same
 * (combiner, r, p) triple yield the same committed handle, as the MPI
 * standard requires for MPI_Type_create_f90_* types.
 *
 * On success *new_ptr holds the (committed) datatype; on failure *new_ptr is
 * MPI_DATATYPE_NULL and an MPI error code is returned. */
int MPIR_Create_unnamed_predefined(MPI_Datatype old, int combiner, int r, int p,
                                   MPI_Datatype * new_ptr)
{
    int i;
    int mpi_errno = MPI_SUCCESS;
    F90Predefined *type;

    *new_ptr = MPI_DATATYPE_NULL;

    /* Has this type been defined already?  If so, return the cached handle. */
    for (i = 0; i < nAlloc; i++) {
        type = &f90Types[i];
        if (type->combiner == combiner && type->r == r && type->p == p) {
            *new_ptr = type->d;
            return mpi_errno;
        }
    }

    /* Create a new type and remember it */
    if (nAlloc >= MAX_F90_TYPES) {
        /* Fixed-size cache exhausted -- report an internal error rather than
         * grow the table. */
        return MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                    "MPIF_Create_unnamed_predefined", __LINE__,
                                    MPI_ERR_INTERN, "**f90typetoomany", 0);
    }
    if (nAlloc == 0) {
        /* Install the finalize callback that frees these datatyeps.
         * Set the priority high enough that this will be executed
         * before the handle allocation check */
        MPIR_Add_finalize(MPIR_FreeF90Datatypes, 0, 2);
    }
    /* NOTE(review): the slot is claimed (nAlloc incremented) before the type
     * is successfully built; on an error below the cache retains a partially
     * initialized entry. */
    type = &f90Types[nAlloc++];
    type->combiner = combiner;
    type->r = r;
    type->p = p;

    /* Create a contiguous type from one instance of the named type */
    mpi_errno = MPIR_Type_contiguous(1, old, &type->d);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* Initialize the contents data so the envelope reports the F90 combiner
     * and its parameters instead of MPI_COMBINER_CONTIGUOUS. */
    {
        MPIR_Datatype *new_dtp = NULL;
        int vals[2];
        int nvals = 0;

        switch (combiner) {
            case MPI_COMBINER_F90_INTEGER:
                nvals = 1;
                vals[0] = r;
                break;
            case MPI_COMBINER_F90_REAL:
            case MPI_COMBINER_F90_COMPLEX:
                nvals = 2;
                vals[0] = p;
                vals[1] = r;
                break;
        }

        MPIR_Datatype_get_ptr(type->d, new_dtp);
        mpi_errno = MPIR_Datatype_set_contents(new_dtp, combiner, nvals, 0, 0, vals, NULL, NULL);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        /* FIXME should we be setting type->is_permanent=TRUE here too?  If so,
         * will the cleanup code handle it correctly and not freak out? */

#ifndef NDEBUG
        {
            MPI_Datatype old_basic = MPI_DATATYPE_NULL;
            MPI_Datatype new_basic = MPI_DATATYPE_NULL;
            /* we used MPIR_Type_contiguous and then stomped it's contents
             * information, so make sure that the basic_type is usable by
             * MPIR_Type_commit */
            MPIR_Datatype_get_basic_type(old, old_basic);
            MPIR_Datatype_get_basic_type(new_dtp->handle, new_basic);
            MPIR_Assert(new_basic == old_basic);
        }
#endif

        /* the MPI Standard requires that these types are pre-committed
         * (MPI-2.2, sec 16.2.5, pg 492) */
        mpi_errno = MPIR_Type_commit(&type->d);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }

    *new_ptr = type->d;

  fn_fail:
    return mpi_errno;
}
/*@
   MPI_Type_dup - Duplicate a datatype

Input Parameters:
. oldtype - datatype (handle)

Output Parameters:
. newtype - copy of type (handle)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
@*/
int MPI_Type_dup(MPI_Datatype oldtype, MPI_Datatype * newtype)
{
    int mpi_errno = MPI_SUCCESS;
    MPI_Datatype new_handle;
    MPIR_Datatype *datatype_ptr = NULL;
    MPIR_Datatype *new_dtp;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_TYPE_DUP);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_TYPE_DUP);

    /* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_DATATYPE(oldtype, "datatype", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif

    /* Convert MPI object handles to object pointers */
    MPIR_Datatype_get_ptr(oldtype, datatype_ptr);

    /* Validate parameters and objects (post conversion) */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate datatype_ptr */
            MPIR_Datatype_valid_ptr(datatype_ptr, mpi_errno);
            /* If comm_ptr is not valid, it will be reset to null */
            MPIR_ERRTEST_ARGNULL(newtype, "newtype", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    MPIR_Assert(datatype_ptr != NULL);

    /* ... body of routine ... */

    mpi_errno = MPIR_Type_dup(oldtype, &new_handle);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    MPIR_Datatype_get_ptr(new_handle, new_dtp);
    mpi_errno = MPIR_Datatype_set_contents(new_dtp, MPI_COMBINER_DUP, 0,        /* ints */
                                           0,   /* aints */
                                           1,   /* types */
                                           NULL, NULL, &oldtype);
    /* BUG FIX: previously this error code was silently overwritten by the
     * MPIR_Type_commit call below; check it before proceeding. */
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    mpi_errno = MPIR_Type_commit(&new_handle);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    /* Copy attributes, executing the attribute copy functions */
    /* This accesses the attribute dup function through the perprocess
     * structure to prevent type_dup from forcing the linking of the
     * attribute functions.  The actual function is (by default)
     * MPIR_Attr_dup_list */
    if (mpi_errno == MPI_SUCCESS && MPIR_Process.attr_dup) {
        new_dtp->attributes = 0;
        mpi_errno = MPIR_Process.attr_dup(oldtype, datatype_ptr->attributes, &new_dtp->attributes);
        if (mpi_errno) {
            /* Attribute duplication failed: release the half-built type
             * before reporting the error. */
            MPIR_Datatype_ptr_release(new_dtp);
            goto fn_fail;
        }
    }

    MPIR_OBJ_PUBLISH_HANDLE(*newtype, new_handle);
    /* ... end of body of routine ... */

  fn_exit:
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_TYPE_DUP);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
    *newtype = MPI_DATATYPE_NULL;
#ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno =
            MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, __func__, __LINE__,
                                 MPI_ERR_OTHER, "**mpi_type_dup", "**mpi_type_dup %D %p",
                                 oldtype, newtype);
    }
#endif
    mpi_errno = MPIR_Err_return_comm(NULL, __func__, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
   MPI_Type_create_darray - Create a datatype representing a distributed array

Input Parameters:
+ size - size of process group (positive integer)
. rank - rank in process group (nonnegative integer)
. ndims - number of array dimensions as well as process grid dimensions (positive integer)
. array_of_gsizes - number of elements of type oldtype in each dimension of global array (array of positive integers)
. array_of_distribs - distribution of array in each dimension (array of state)
. array_of_dargs - distribution argument in each dimension (array of positive integers)
. array_of_psizes - size of process grid in each dimension (array of positive integers)
. order - array storage order flag (state)
- oldtype - old datatype (handle)

Output Parameters:
. newtype - new datatype (handle)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_create_darray(int size, int rank, int ndims,
                           const int array_of_gsizes[], const int array_of_distribs[],
                           const int array_of_dargs[], const int array_of_psizes[],
                           int order, MPI_Datatype oldtype, MPI_Datatype * newtype)
{
    int mpi_errno = MPI_SUCCESS, i;
    MPI_Datatype new_handle;

    int procs, tmp_rank, tmp_size, *coords;
    MPI_Aint *st_offsets, orig_extent, disps[3];
    MPI_Datatype type_old, type_new = MPI_DATATYPE_NULL, tmp_type;

#ifdef HAVE_ERROR_CHECKING
    MPI_Aint size_with_aint;
    MPI_Offset size_with_offset;
#endif

    int *ints;
    MPIR_Datatype *datatype_ptr = NULL;
    MPIR_CHKLMEM_DECL(3);
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_TYPE_CREATE_DARRAY);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_TYPE_CREATE_DARRAY);

    /* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_DATATYPE(oldtype, "datatype", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#endif

    /* Convert MPI object handles to object pointers */
    MPIR_Datatype_get_ptr(oldtype, datatype_ptr);
    MPIR_Datatype_get_extent_macro(oldtype, orig_extent);

    /* Validate parameters and objects (post conversion) */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Check parameters */
            MPIR_ERRTEST_ARGNONPOS(size, "size", mpi_errno, MPI_ERR_ARG);
            /* use MPI_ERR_RANK class for PE-MPI compatibility */
            MPIR_ERR_CHKANDJUMP3((rank < 0 || rank >= size), mpi_errno, MPI_ERR_RANK,
                                 "**argrange", "**argrange %s %d %d", "rank", rank, (size - 1));
            MPIR_ERRTEST_ARGNONPOS(ndims, "ndims", mpi_errno, MPI_ERR_DIMS);
            MPIR_ERRTEST_ARGNULL(array_of_gsizes, "array_of_gsizes", mpi_errno);
            MPIR_ERRTEST_ARGNULL(array_of_distribs, "array_of_distribs", mpi_errno);
            MPIR_ERRTEST_ARGNULL(array_of_dargs, "array_of_dargs", mpi_errno);
            MPIR_ERRTEST_ARGNULL(array_of_psizes, "array_of_psizes", mpi_errno);
            if (order != MPI_ORDER_C && order != MPI_ORDER_FORTRAN) {
                mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                                 __func__, __LINE__, MPI_ERR_ARG,
                                                 "**arg", "**arg %s", "order");
                goto fn_fail;
            }

            /* Per-dimension checks; tmp_size accumulates the grid volume so
             * it can be compared with the group size afterwards. */
            tmp_size = 1;
            for (i = 0; mpi_errno == MPI_SUCCESS && i < ndims; i++) {
                MPIR_ERRTEST_ARGNONPOS(array_of_gsizes[i], "gsize", mpi_errno, MPI_ERR_ARG);
                MPIR_ERRTEST_ARGNONPOS(array_of_psizes[i], "psize", mpi_errno, MPI_ERR_ARG);

                if ((array_of_distribs[i] != MPI_DISTRIBUTE_NONE) &&
                    (array_of_distribs[i] != MPI_DISTRIBUTE_BLOCK) &&
                    (array_of_distribs[i] != MPI_DISTRIBUTE_CYCLIC)) {
                    mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                                     __func__, __LINE__, MPI_ERR_ARG,
                                                     "**darrayunknown", 0);
                    goto fn_fail;
                }

                if ((array_of_dargs[i] != MPI_DISTRIBUTE_DFLT_DARG) && (array_of_dargs[i] <= 0)) {
                    mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                                     __func__, __LINE__, MPI_ERR_ARG,
                                                     "**arg", "**arg %s", "array_of_dargs");
                    goto fn_fail;
                }

                /* MPI_DISTRIBUTE_NONE requires exactly one process in that
                 * grid dimension. */
                if ((array_of_distribs[i] == MPI_DISTRIBUTE_NONE) && (array_of_psizes[i] != 1)) {
                    mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                                     __func__, __LINE__, MPI_ERR_ARG,
                                                     "**darraydist", "**darraydist %d %d",
                                                     i, array_of_psizes[i]);
                    goto fn_fail;
                }
                tmp_size *= array_of_psizes[i];
            }

            MPIR_ERR_CHKANDJUMP1((tmp_size != size), mpi_errno, MPI_ERR_ARG,
                                 "**arg", "**arg %s", "array_of_psizes");

            /* TODO: GET THIS CHECK IN ALSO */

            /* check if MPI_Aint is large enough for size of global array.
             * if not, complain.  (The product is computed in both MPI_Aint and
             * MPI_Offset arithmetic; a mismatch means MPI_Aint overflowed.) */
            size_with_aint = orig_extent;
            for (i = 0; i < ndims; i++)
                size_with_aint *= array_of_gsizes[i];
            size_with_offset = orig_extent;
            for (i = 0; i < ndims; i++)
                size_with_offset *= array_of_gsizes[i];
            if (size_with_aint != size_with_offset) {
                mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL,
                                                 __func__, __LINE__, MPI_ERR_ARG,
                                                 "**darrayoverflow", "**darrayoverflow %L",
                                                 size_with_offset);
                goto fn_fail;
            }

            /* Validate datatype_ptr */
            MPIR_Datatype_valid_ptr(datatype_ptr, mpi_errno);
            /* If datatype_ptr is not valid, it will be reset to null */
            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno)
                goto fn_fail;
            /* --END ERROR HANDLING-- */
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* calculate position in Cartesian grid as MPI would (row-major ordering) */
    MPIR_CHKLMEM_MALLOC_ORJUMP(coords, int *, ndims * sizeof(int), mpi_errno,
                               "position is Cartesian grid", MPL_MEM_COMM);

    procs = size;
    tmp_rank = rank;
    for (i = 0; i < ndims; i++) {
        procs = procs / array_of_psizes[i];
        coords[i] = tmp_rank / procs;
        tmp_rank = tmp_rank % procs;
    }

    /* st_offsets[i] receives this process's starting element offset in
     * dimension i, filled in by MPIR_Type_block/MPIR_Type_cyclic below. */
    MPIR_CHKLMEM_MALLOC_ORJUMP(st_offsets, MPI_Aint *, ndims * sizeof(MPI_Aint), mpi_errno,
                               "st_offsets", MPL_MEM_COMM);

    type_old = oldtype;

    if (order == MPI_ORDER_FORTRAN) {
        /* dimension 0 changes fastest; build up the type one dimension at a
         * time, freeing each intermediate type as it is consumed */
        for (i = 0; i < ndims; i++) {
            switch (array_of_distribs[i]) {
                case MPI_DISTRIBUTE_BLOCK:
                    mpi_errno = MPIR_Type_block(array_of_gsizes, i, ndims, array_of_psizes[i],
                                                coords[i], array_of_dargs[i], order,
                                                orig_extent, type_old, &type_new, st_offsets + i);
                    break;
                case MPI_DISTRIBUTE_CYCLIC:
                    mpi_errno = MPIR_Type_cyclic(array_of_gsizes, i, ndims, array_of_psizes[i],
                                                 coords[i], array_of_dargs[i], order,
                                                 orig_extent, type_old, &type_new, st_offsets + i);
                    break;
                case MPI_DISTRIBUTE_NONE:
                    /* treat it as a block distribution on 1 process */
                    mpi_errno = MPIR_Type_block(array_of_gsizes, i, ndims, 1, 0,
                                                MPI_DISTRIBUTE_DFLT_DARG, order,
                                                orig_extent, type_old, &type_new, st_offsets + i);
                    break;
            }
            if (i) {
                /* free the intermediate type from the previous iteration
                 * (never the caller's oldtype, which is i == 0's type_old) */
                MPIR_Type_free_impl(&type_old);
            }
            type_old = type_new;

            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno != MPI_SUCCESS)
                goto fn_fail;
            /* --END ERROR HANDLING-- */
        }

        /* add displacement and UB: fold the per-dimension start offsets into
         * a single linear element displacement (column-major weights) */
        disps[1] = st_offsets[0];
        tmp_size = 1;
        for (i = 1; i < ndims; i++) {
            tmp_size *= array_of_gsizes[i - 1];
            disps[1] += (MPI_Aint) tmp_size *st_offsets[i];
        }
        /* rest done below for both Fortran and C order */
    } else {    /* order == MPI_ORDER_C */
        /* dimension ndims-1 changes fastest */
        for (i = ndims - 1; i >= 0; i--) {
            switch (array_of_distribs[i]) {
                case MPI_DISTRIBUTE_BLOCK:
                    mpi_errno = MPIR_Type_block(array_of_gsizes, i, ndims, array_of_psizes[i],
                                                coords[i], array_of_dargs[i], order,
                                                orig_extent, type_old, &type_new, st_offsets + i);
                    break;
                case MPI_DISTRIBUTE_CYCLIC:
                    mpi_errno = MPIR_Type_cyclic(array_of_gsizes, i, ndims, array_of_psizes[i],
                                                 coords[i], array_of_dargs[i], order,
                                                 orig_extent, type_old, &type_new, st_offsets + i);
                    break;
                case MPI_DISTRIBUTE_NONE:
                    /* treat it as a block distribution on 1 process
                     * (error checking above guarantees psizes[i] == 1 here,
                     * so coords[i] is necessarily 0) */
                    mpi_errno = MPIR_Type_block(array_of_gsizes, i, ndims, array_of_psizes[i],
                                                coords[i], MPI_DISTRIBUTE_DFLT_DARG, order,
                                                orig_extent, type_old, &type_new, st_offsets + i);
                    break;
            }
            if (i != ndims - 1) {
                /* free the intermediate type from the previous iteration */
                MPIR_Type_free_impl(&type_old);
            }
            type_old = type_new;

            /* --BEGIN ERROR HANDLING-- */
            if (mpi_errno != MPI_SUCCESS)
                goto fn_fail;
            /* --END ERROR HANDLING-- */
        }

        /* add displacement and UB (row-major weights this time) */
        disps[1] = st_offsets[ndims - 1];
        tmp_size = 1;
        for (i = ndims - 2; i >= 0; i--) {
            tmp_size *= array_of_gsizes[i + 1];
            disps[1] += (MPI_Aint) tmp_size *st_offsets[i];
        }
    }

    /* disps[1]: starting byte displacement; disps[2]: extent (== total global
     * array size in bytes); disps[0]: lower bound of 0 */
    disps[1] *= orig_extent;

    disps[2] = orig_extent;
    for (i = 0; i < ndims; i++)
        disps[2] *= (MPI_Aint) (array_of_gsizes[i]);

    disps[0] = 0;

    /* Instead of using MPI_LB/MPI_UB, which have been removed from MPI in
     * MPI-3, use MPI_Type_create_resized. Use hindexed_block to set the
     * starting displacement of the datatype (disps[1]) and type_create_resized
     * to set lb to 0 (disps[0]) and extent to disps[2], which makes
     * ub = disps[2]. */
    mpi_errno = MPIR_Type_blockindexed(1, 1, &disps[1], 1,      /* 1 means disp is in bytes */
                                       type_new, &tmp_type);
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;
    /* --END ERROR HANDLING-- */

    mpi_errno = MPIR_Type_create_resized(tmp_type, 0, disps[2], &new_handle);
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;
    /* --END ERROR HANDLING-- */

    MPIR_Type_free_impl(&tmp_type);
    MPIR_Type_free_impl(&type_new);

    /* at this point we have the new type, and we've cleaned up any
     * intermediate types created in the process.  we just need to save
     * all our contents/envelope information. */

    /* Save contents: layout is [size, rank, ndims, gsizes[], distribs[],
     * dargs[], psizes[], order] = 4*ndims + 4 integers. */
    MPIR_CHKLMEM_MALLOC_ORJUMP(ints, int *, (4 * ndims + 4) * sizeof(int), mpi_errno,
                               "content description", MPL_MEM_BUFFER);

    ints[0] = size;
    ints[1] = rank;
    ints[2] = ndims;

    for (i = 0; i < ndims; i++) {
        ints[i + 3] = array_of_gsizes[i];
    }
    for (i = 0; i < ndims; i++) {
        ints[i + ndims + 3] = array_of_distribs[i];
    }
    for (i = 0; i < ndims; i++) {
        ints[i + 2 * ndims + 3] = array_of_dargs[i];
    }
    for (i = 0; i < ndims; i++) {
        ints[i + 3 * ndims + 3] = array_of_psizes[i];
    }
    ints[4 * ndims + 3] = order;

    MPIR_Datatype_get_ptr(new_handle, datatype_ptr);
    mpi_errno = MPIR_Datatype_set_contents(datatype_ptr, MPI_COMBINER_DARRAY, 4 * ndims + 4,
                                           0, 1, ints, NULL, &oldtype);
    /* --BEGIN ERROR HANDLING-- */
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;
    /* --END ERROR HANDLING-- */

    MPIR_OBJ_PUBLISH_HANDLE(*newtype, new_handle);
    /* ... end of body of routine ... */

  fn_exit:
    MPIR_CHKLMEM_FREEALL();
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_TYPE_CREATE_DARRAY);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno =
            MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, __func__, __LINE__,
                                 MPI_ERR_OTHER, "**mpi_type_create_darray",
                                 "**mpi_type_create_darray %d %d %d %p %p %p %p %d %D %p",
                                 size, rank, ndims, array_of_gsizes, array_of_distribs,
                                 array_of_dargs, array_of_psizes, order, oldtype, newtype);
    }
#endif
    mpi_errno = MPIR_Err_return_comm(NULL, __func__, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
   MPI_Type_create_hindexed - Create a datatype for an indexed datatype with
   displacements in bytes

Input Parameters:
+ count - number of blocks --- also number of entries in
  array_of_displacements and array_of_blocklengths (integer)
. array_of_blocklengths - number of elements in each block (array of nonnegative integers)
. array_of_displacements - byte displacement of each block (array of address integers)
- oldtype - old datatype (handle)

Output Parameters:
. newtype - new datatype (handle)

.N ThreadSafe

.N Fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_create_hindexed(int count,
                             const int array_of_blocklengths[],
                             const MPI_Aint array_of_displacements[],
                             MPI_Datatype oldtype, MPI_Datatype * newtype)
{
    /* CONSISTENCY: use __func__ like the other routines in this file instead
     * of a local FCNAME constant (the expanded string is identical). */
    int mpi_errno = MPI_SUCCESS;
    MPI_Datatype new_handle;
    MPIR_Datatype *new_dtp;
    int i, *ints;
    MPIR_CHKLMEM_DECL(1);
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_TYPE_CREATE_HINDEXED);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_TYPE_CREATE_HINDEXED);

    /* Validate parameters, especially handles needing to be converted */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            int j;
            MPIR_Datatype *datatype_ptr = NULL;

            MPIR_ERRTEST_COUNT(count, mpi_errno);
            if (count > 0) {
                MPIR_ERRTEST_ARGNULL(array_of_blocklengths, "array_of_blocklengths", mpi_errno);
                MPIR_ERRTEST_ARGNULL(array_of_displacements, "array_of_displacements", mpi_errno);
            }

            MPIR_ERRTEST_DATATYPE(oldtype, "datatype", mpi_errno);

            if (HANDLE_GET_KIND(oldtype) != HANDLE_KIND_BUILTIN) {
                MPIR_Datatype_get_ptr(oldtype, datatype_ptr);
                MPIR_Datatype_valid_ptr(datatype_ptr, mpi_errno);
                if (mpi_errno != MPI_SUCCESS)
                    goto fn_fail;
            }

            for (j = 0; j < count; j++) {
                MPIR_ERRTEST_ARGNEG(array_of_blocklengths[j], "blocklength", mpi_errno);
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    mpi_errno = MPIR_Type_indexed(count, array_of_blocklengths, array_of_displacements, 1,      /* displacements in bytes */
                                  oldtype, &new_handle);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    /* Record the envelope contents: count and the blocklengths go in the
     * integer array, the byte displacements in the aint array. */
    MPIR_CHKLMEM_MALLOC_ORJUMP(ints, int *, (count + 1) * sizeof(int), mpi_errno,
                               "content description", MPL_MEM_BUFFER);

    ints[0] = count;
    for (i = 0; i < count; i++) {
        ints[i + 1] = array_of_blocklengths[i];
    }

    MPIR_Datatype_get_ptr(new_handle, new_dtp);
    mpi_errno = MPIR_Datatype_set_contents(new_dtp, MPI_COMBINER_HINDEXED, count + 1,   /* ints (count, blocklengths) */
                                           count,       /* aints (displacements) */
                                           1,   /* types */
                                           ints, array_of_displacements, &oldtype);
    if (mpi_errno != MPI_SUCCESS)
        goto fn_fail;

    MPIR_OBJ_PUBLISH_HANDLE(*newtype, new_handle);
    /* ... end of body of routine ... */

  fn_exit:
    MPIR_CHKLMEM_FREEALL();
    MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_TYPE_CREATE_HINDEXED);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno =
            MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, __func__, __LINE__,
                                 MPI_ERR_OTHER, "**mpi_type_create_hindexed",
                                 "**mpi_type_create_hindexed %d %p %p %D %p",
                                 count, array_of_blocklengths, array_of_displacements,
                                 oldtype, newtype);
    }
#endif
    mpi_errno = MPIR_Err_return_comm(NULL, __func__, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}