/*@
  MPI_Get_count - Gets the number of "top level" elements

Input Parameters:
+ status - return status of receive operation (Status)
- datatype - datatype of each receive buffer element (handle)

Output Parameter:
. count - number of received elements (integer)

Notes:
If the size of the datatype is zero, this routine will return a count of
zero.  If the amount of data in 'status' is not an exact multiple of the
size of 'datatype' (so that 'count' would not be integral), a 'count' of
'MPI_UNDEFINED' is returned instead.

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
@*/
int MPI_Get_count( MPI_Status *status, MPI_Datatype datatype, int *count )
{
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_GET_COUNT";
    int mpi_errno = MPI_SUCCESS;

    TR_PUSH(myname);
#ifdef MPID_HAS_GET_COUNT
    /* The device provides its own implementation; defer to it */
    mpi_errno = MPID_Get_count( status, datatype, count );
#else
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    /* NOTE: MPIR_TEST_DTYPE presumably returns on an invalid handle
       (pattern used throughout this file) */
    MPIR_TEST_DTYPE(datatype,dtype_ptr,MPIR_COMM_WORLD,myname);

    /* Check for correct number of bytes: status->count is in bytes,
       *count is in whole datatype elements */
    if (dtype_ptr->size == 0) {
        if (status->count > 0)
            /* Bytes received but element size is zero: count is not
               representable */
            (*count) = MPI_UNDEFINED;
        else
            /* This is ambiguous */
            (*count) = 0;
    }
    else {
        /* Non-integral number of elements yields MPI_UNDEFINED */
        if ((status->count % (dtype_ptr->size)) != 0)
            (*count) = MPI_UNDEFINED;
        else
            (*count) = status->count / (dtype_ptr->size);
    }
#endif
    TR_POP;
    MPIR_RETURN( MPIR_COMM_WORLD, mpi_errno, myname );
}
/*@
    MPI_Send - Performs a basic send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (nonnegative integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Notes:
This routine may block until the message is received.

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK

.seealso: MPI_Isend, MPI_Bsend
@*/
EXPORT_MPI_API int MPI_Send( void *buf, int count, MPI_Datatype datatype,
                             int dest, int tag, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_SEND";

    /* A send to MPI_PROC_NULL is a no-op that succeeds immediately */
    if (dest == MPI_PROC_NULL)
        return MPI_SUCCESS;

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    /* These macros set mpi_errno on failure; checked collectively below */
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno)
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_SendDatatype( comm_ptr, buf, count, dtype_ptr,
                       comm_ptr->local_rank, tag,
                       comm_ptr->send_context,
                       comm_ptr->lrank_to_grank[dest],
                       &mpi_errno );
    MPIR_RETURN(comm_ptr, mpi_errno, myname );
}
/*
 * MPID_Type_commit - commit a datatype with the vendor (underlying) MPI.
 *
 * For non-permanent types, the vendor type is committed via
 * mp_type_commit() and the vendor return code is translated to an
 * MPICH error code.  Permanent/basic types are skipped: they are
 * committed by MPID_Type_permanent_setup().
 *
 * Returns MPI_SUCCESS or a translated vendor error code.
 */
int MPID_Type_commit( MPI_Datatype datatype)
{
    int rc;
    struct MPIR_DATATYPE * dtype_ptr;

    DEBUG_FN_ENTRY(DEBUG_MODULE_TYPES);
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_ARGS,
                 ("datatype=%d\n", datatype));

    rc = MPI_SUCCESS;

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPID_Type_validate(dtype_ptr);
    MPID_Type_validate_vmpi(dtype_ptr);

    /*
     * Do not commit basic/permanent types; these should already have been
     * committed by MPID_Type_permanent_setup()
     */
    if (!dtype_ptr->permanent)
    {
        /* FIX: removed a second, redundant MPID_Type_validate_vmpi()
           call that duplicated the unconditional validation above */
        rc = vmpi_error_to_mpich_error(
            mp_type_commit(dtype_ptr->vmpi_type));
    }

    /* fn_exit: */
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_RC, ("rc=%d\n", rc));
    DEBUG_FN_EXIT(DEBUG_MODULE_TYPES);
    return rc;
}
/*@
MPI_Reduce_scatter - Combines values and scatters the results

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. recvcounts - integer array specifying the number of elements in result
  distributed to each process.  Array must be identical on all calling
  processes.
. datatype - data type of elements of input buffer (handle)
. op - operation (handle)
- comm - communicator (handle)

Output Parameter:
. recvbuf - starting address of receive buffer (choice)

.N fortran
.N collops

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
.N MPI_ERR_OP
.N MPI_ERR_BUFFER_ALIAS
@*/
EXPORT_MPI_API int MPI_Reduce_scatter ( void *sendbuf, void *recvbuf,
                                        int *recvcnts, MPI_Datatype datatype,
                                        MPI_Op op, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_REDUCE_SCATTER";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

    /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    /* MPI forbids aliased send/receive buffers for reductions */
    MPIR_TEST_ALIAS(recvbuf,sendbuf);
    if (mpi_errno)
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    MPIR_ERROR_PUSH(comm_ptr);
    /* Dispatch to the collective-operations table attached to the
       communicator */
    mpi_errno = comm_ptr->collops->Reduce_scatter(sendbuf, recvbuf, recvcnts,
                                                  dtype_ptr, op, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
MPI_Gatherv - Gathers into specified locations from all processes in a group

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. sendcount - number of elements in send buffer (integer)
. sendtype - data type of send buffer elements (handle)
. recvcounts - integer array (of length group size) containing the number of
  elements that are received from each process (significant only at 'root')
. displs - integer array (of length group size).  Entry 'i' specifies the
  displacement relative to recvbuf at which to place the incoming data from
  process 'i' (significant only at root)
. recvtype - data type of recv buffer elements (significant only at 'root')
  (handle)
. root - rank of receiving process (integer)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice, significant only at 'root')

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Gatherv ( void *sendbuf, int sendcnt, MPI_Datatype sendtype,
                  void *recvbuf, int *recvcnts, int *displs,
                  MPI_Datatype recvtype, int root, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    int rank;
    struct MPIR_COMMUNICATOR *comm_ptr;
    /* rtype_ptr stays 0 on non-root ranks; the collop implementation is
       expected to ignore it there */
    struct MPIR_DATATYPE *stype_ptr, *rtype_ptr = 0;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_GATHERV";

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);

    stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr, myname);
    MPIR_TEST_COUNT(sendcnt);
    MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr, myname );
    if (mpi_errno)
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* rtype is significant only at root */
    (void) MPIR_Comm_rank ( comm_ptr, &rank );
    if (rank == root) {
        rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);
#ifndef MPIR_NO_ERROR_CHECKING
        MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr, myname );
        if (mpi_errno)
            return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif
    }

    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Gatherv( sendbuf, sendcnt, stype_ptr,
                                            recvbuf, recvcnts, displs,
                                            rtype_ptr, root, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
    MPI_Type_indexed - Creates an indexed datatype

Input Parameters:
+ count - number of blocks -- also number of entries in indices and blocklens
. blocklens - number of elements in each block (array of nonnegative integers)
. indices - displacement of each block in multiples of old_type (array of
  integers)
- old_type - old datatype (handle)

Output Parameter:
. newtype - new datatype (handle)

.N fortran

The indices are displacements, and are based on a zero origin.  A common error
is to do something like to following
.vb
    integer a(100)
    integer blens(10), indices(10)
    do i=1,10
         blens(i)   = 1
10       indices(i) = 1 + (i-1)*10
    call MPI_TYPE_INDEXED(10,blens,indices,MPI_INTEGER,newtype,ierr)
    call MPI_TYPE_COMMIT(newtype,ierr)
    call MPI_SEND(a,1,newtype,...)
.ve
expecting this to send 'a(1),a(11),...' because the indices have values
'1,11,...'.   Because these are `displacements` from the beginning of 'a',
it actually sends 'a(1+1),a(1+11),...'.

If you wish to consider the displacements as indices into a Fortran array,
consider declaring the Fortran array with a zero origin
.vb
    integer a(0:99)
.ve

.N Errors
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
.N MPI_ERR_EXHAUSTED
@*/
EXPORT_MPI_API int MPI_Type_indexed( int count, int blocklens[],
                                     int indices[], MPI_Datatype old_type,
                                     MPI_Datatype *newtype )
{
    MPI_Aint *hindices;
    int i, mpi_errno = MPI_SUCCESS;
    int total_count;
    struct MPIR_DATATYPE *old_dtype_ptr;
    static char myname[] = "MPI_TYPE_INDEXED";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);

    /* Check for bad arguments.  The &&-with-assignment idiom sets
       mpi_errno only for the first failing test. */
    old_dtype_ptr = MPIR_GET_DTYPE_PTR(old_type);
    MPIR_TEST_DTYPE(old_type,old_dtype_ptr,MPIR_COMM_WORLD,myname);
    if ( ( (count < 0) && (mpi_errno = MPI_ERR_COUNT) ) ||
         ( (old_dtype_ptr->dte_type == MPIR_UB) &&
           (mpi_errno = MPI_ERR_TYPE) ) ||
         ( (old_dtype_ptr->dte_type == MPIR_LB) &&
           (mpi_errno = MPI_ERR_TYPE) ) )
        return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno,myname);

    /* Are we making a null datatype? (also validates each blocklen) */
    total_count = 0;
    for (i=0; i<count; i++) {
        total_count += blocklens[i];
        if (blocklens[i] < 0) {
            mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_ARRAY_VAL,
                                         myname, (char *)0, (char *)0,
                                         "blocklens", i, blocklens[i] );
            return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno,myname);
        }
    }
    if (total_count == 0) {
        /* Zero total elements: represent as an empty contiguous type */
        return MPI_Type_contiguous( 0, MPI_INT, newtype );
    }

    /* Generate a call to MPI_Type_hindexed instead.  This means allocating
       a temporary displacement array, multiplying all displacements
       by extent(old_type), and using that */
    MPIR_ALLOC(hindices,(MPI_Aint *)MALLOC(count*sizeof(MPI_Aint)),
               MPIR_COMM_WORLD,MPI_ERR_EXHAUSTED,myname);
    for (i=0; i<count; i++) {
        /* Convert element displacements to byte displacements */
        hindices[i] = (MPI_Aint)indices[i] * old_dtype_ptr->extent;
    }
    MPIR_ERROR_PUSH(MPIR_COMM_WORLD);
    mpi_errno = MPI_Type_hindexed( count, blocklens, hindices, old_type,
                                   newtype );
    MPIR_ERROR_POP(MPIR_COMM_WORLD);
    FREE(hindices);
    TR_POP;
    MPIR_RETURN(MPIR_COMM_WORLD,mpi_errno, myname);
}
/*@
    MPI_Issend - Starts a nonblocking synchronous send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED
@*/
int MPI_Issend( void *buf, int count, MPI_Datatype datatype, int dest,
                int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_SHANDLE *shandle;
    static char myname[] = "MPI_ISSEND";
    int mpi_errno = MPI_SUCCESS;

    /* Signals are disabled for the duration of the call; every return
       path below re-enables them with revertSignal() */
    disableSignal();

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* Allocate and initialize the send handle, and hand it back to the
       caller as the request */
    MPIR_ALLOCFN(shandle,MPID_SendAlloc,
                 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( shandle, MPIR_SEND );
    /* Save the send parameters in the handle (macro) */
    MPIR_REMEMBER_SEND( shandle, buf, count, datatype, dest, tag, comm_ptr);

    if (dest == MPI_PROC_NULL) {
        /* Send to MPI_PROC_NULL: mark the request complete immediately */
        shandle->is_complete = 1;
        revertSignal();
        return MPI_SUCCESS;
    }

    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_IssendDatatype( comm_ptr, buf, count, dtype_ptr,
                         comm_ptr->local_rank, tag,
                         comm_ptr->send_context,
                         comm_ptr->lrank_to_grank[dest],
                         *request, &mpi_errno );

    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    }
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
/*
 * MPID_Type_hvector - build the vendor-MPI counterpart of an hvector type.
 *
 * Validates oldtype (including its vendor handle), then calls
 * mp_type_hvector() to create the vendor type stored in newtype's
 * descriptor.  On success the descriptor is stamped with
 * MPID_DATATYPE_COOKIE so later calls can recognize a valid vendor type.
 *
 * Returns MPI_SUCCESS or a translated vendor error code.
 */
int MPID_Type_hvector( int count, int blocklen, MPI_Aint stride,
                       MPI_Datatype oldtype, MPI_Datatype newtype)
{
    int rc;
    struct MPIR_DATATYPE * oldtype_ptr;
    struct MPIR_DATATYPE * newtype_ptr;

    DEBUG_FN_ENTRY(DEBUG_MODULE_TYPES);
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_ARGS,
                 ("newtype=%d\n", newtype));

    rc = MPI_SUCCESS;

    /* The old type must already have a valid vendor handle */
    oldtype_ptr = MPIR_GET_DTYPE_PTR(oldtype);
    MPID_Type_validate(oldtype_ptr);
    MPID_Type_validate_vmpi(oldtype_ptr);

    /* The new type only needs basic validation; its vendor handle is
       created below */
    newtype_ptr = MPIR_GET_DTYPE_PTR(newtype);
    MPID_Type_validate(newtype_ptr);

    rc = vmpi_error_to_mpich_error(
        mp_type_hvector(count,
                        blocklen,
                        stride,
                        oldtype_ptr->vmpi_type,
                        newtype_ptr->vmpi_type));

    if (rc == MPI_SUCCESS)
    {
        newtype_ptr->vmpi_cookie = MPID_DATATYPE_COOKIE;
    }

    /* fn_exit: */
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_RC, ("rc=%d\n", rc));
    DEBUG_FN_EXIT(DEBUG_MODULE_TYPES);
    return rc;
}
/*@
    MPI_Ssend_init - Builds a handle for a synchronous send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements sent (integer)
. datatype - type of each element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
@*/
int MPI_Ssend_init( void *buf, int count, MPI_Datatype datatype, int dest,
                    int tag, MPI_Comm comm, MPI_Request *request )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_SSEND_INIT";
    MPIR_PSHANDLE *shandle;

    /* Signals disabled for the duration; all visible return paths call
       revertSignal() */
    disableSignal();

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* This is IDENTICAL to the create_send code except for the send
       function */
    MPIR_ALLOCFN(shandle,MPID_PSendAlloc,
                 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( &(shandle->shandle), MPIR_PERSISTENT_SEND );

    /* Save the information about the operation, being careful with
       ref-counted items */
    /* NOTE(review): unlike sibling routines, the datatype check happens
       AFTER the handle allocation here; if MPIR_TEST_DTYPE returns on
       error it would skip revertSignal() and leak shandle -- confirm
       macro behavior before relying on this path */
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    MPIR_REF_INCR(dtype_ptr);
    shandle->perm_datatype = dtype_ptr;
    shandle->perm_tag      = tag;
    shandle->perm_dest     = dest;
    shandle->perm_count    = count;
    shandle->perm_buf      = buf;
    MPIR_REF_INCR(comm_ptr);
    shandle->perm_comm     = comm_ptr;
    shandle->active        = 0;
    /* Persistent-request start will invoke the synchronous send */
    shandle->send          = MPID_IssendDatatype;
    /* dest of MPI_PROC_NULL handled in start */
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
/*@
    MPI_Type_extent - Returns the extent of a datatype

Input Parameters:
. datatype - datatype (handle)

Output Parameter:
. extent - datatype extent (integer)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
@*/
int MPI_Type_extent( MPI_Datatype datatype, MPI_Aint *extent )
{
    static char myname[] = "MPI_TYPE_EXTENT";
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *type_ptr = MPIR_GET_DTYPE_PTR(datatype);

    /* Validate the handle; the macro handles the error return itself */
    MPIR_TEST_DTYPE(datatype,type_ptr,MPIR_COMM_WORLD,myname);

    /* Hand back the extent recorded in the datatype descriptor */
    *extent = type_ptr->extent;
    return MPI_SUCCESS;
}
/*@
MPI_Scatter - Sends data from one task to all other tasks in a group

Input Parameters:
+ sendbuf - address of send buffer (choice, significant only at 'root')
. sendcount - number of elements sent to each process (integer, significant
  only at 'root')
. sendtype - data type of send buffer elements (significant only at 'root')
  (handle)
. recvcount - number of elements in receive buffer (integer)
. recvtype - data type of receive buffer elements (handle)
. root - rank of sending process (integer)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Scatter ( void *sendbuf, int sendcnt, MPI_Datatype sendtype,
                  void *recvbuf, int recvcnt, MPI_Datatype recvtype,
                  int root, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    /* stype_ptr stays NULL on non-root ranks (sendtype is significant
       only at root) */
    struct MPIR_DATATYPE *stype_ptr = NULL;
    struct MPIR_DATATYPE *rtype_ptr = NULL;
    static char myname[] = "MPI_SCATTER";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    /* Significant only at root */
    if (root == comm_ptr->local_rank) {
        stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);
        MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr,myname);
    }

    rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);
    MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr,myname);

    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Scatter(sendbuf, sendcnt, stype_ptr,
                                           recvbuf, recvcnt, rtype_ptr,
                                           root, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
    MPI_Recv_init - Builds a handle for a receive

Input Parameters:
+ buf - initial address of receive buffer (choice)
. count - number of elements received (integer)
. datatype - type of each element (handle)
. source - rank of source or 'MPI_ANY_SOURCE' (integer)
. tag - message tag or 'MPI_ANY_TAG' (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_RANK
.N MPI_ERR_TAG
.N MPI_ERR_COMM
.N MPI_ERR_EXHAUSTED

.seealso: MPI_Start, MPI_Request_free
@*/
int MPI_Recv_init( void *buf, int count, MPI_Datatype datatype, int source,
                   int tag, MPI_Comm comm, MPI_Request *request )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_RECV_INIT";
    MPIR_PRHANDLE *rhandle;

    /* Signals disabled for the duration; every return path calls
       revertSignal() */
    disableSignal();

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_RECV_TAG(tag);
    MPIR_TEST_RECV_RANK(comm_ptr,source);
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* Allocate the persistent-receive handle and return it as the
       request */
    MPIR_ALLOCFN(rhandle,MPID_PRecvAlloc,
                 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)rhandle;
    MPID_Request_init( &(rhandle->rhandle), MPIR_PERSISTENT_RECV );

    /* Save the information about the operation, being careful with
       ref-counted items */
    MPIR_REF_INCR(dtype_ptr);
    rhandle->perm_datatype = dtype_ptr;
    rhandle->perm_tag      = tag;
    rhandle->perm_source   = source;
    rhandle->perm_count    = count;
    rhandle->perm_buf      = buf;
    MPIR_REF_INCR(comm_ptr);
    rhandle->perm_comm     = comm_ptr;
    rhandle->active        = 0;
    /* dest of MPI_PROC_NULL handled in start */
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
/*@
    MPI_Irsend - Starts a nonblocking ready send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED
@*/
EXPORT_MPI_API int MPI_Irsend( void *buf, int count, MPI_Datatype datatype,
                               int dest, int tag, MPI_Comm comm,
                               MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_SHANDLE *shandle;
    static char myname[] = "MPI_IRSEND";
    int mpi_errno = MPI_SUCCESS;

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno)
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    MPIR_ALLOCFN(shandle,MPID_Send_alloc,
                 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( (MPI_Request)shandle, MPIR_SEND );

    /* we need the rank of dest in MPI_COMM_ALL in
       MPID_Gateway_SendCancelPacket(), so we save it here */
    shandle->partner_grank = comm_ptr->lrank_to_grank[dest];

    /* Save the send parameters in the handle (macro) */
    MPIR_REMEMBER_SEND(shandle, buf, count, datatype, dest, tag, comm_ptr);

    if (dest == MPI_PROC_NULL) {
        /* Ready-send to MPI_PROC_NULL: complete the request immediately */
        shandle->is_complete = 1;
        return MPI_SUCCESS;
    }

    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_IrsendDatatype( comm_ptr, buf, count, dtype_ptr,
                         comm_ptr->local_rank, tag,
                         comm_ptr->send_context,
                         comm_ptr->lrank_to_grank[dest],
                         *request, &mpi_errno, 1 );
    if (mpi_errno)
        return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    TR_POP;
    return MPI_SUCCESS;
}
/*@
MPI_Allgather - Gathers data from all tasks and distribute it to all

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. sendcount - number of elements in send buffer (integer)
. sendtype - data type of send buffer elements (handle)
. recvcount - number of elements received from any process (integer)
. recvtype - data type of receive buffer elements (handle)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice)

Notes:
The MPI standard (1.0 and 1.1) says that

 The jth block of data sent from each proess is received by every process
 and placed in the jth block of the buffer 'recvbuf'.

This is misleading; a better description is

 The block of data sent from the jth process is received by every process
 and placed in the jth block of the buffer 'recvbuf'.

This text was suggested by Rajeev Thakur.

.N fortran

.N Errors
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Allgather ( void *sendbuf, int sendcount, MPI_Datatype sendtype,
                    void *recvbuf, int recvcount, MPI_Datatype recvtype,
                    MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr=0;
    struct MPIR_DATATYPE *stype_ptr=0, *rtype_ptr=0;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_ALLGATHER";

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);
    rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);

    /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr, myname);
    MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr, myname );
    MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr, myname );
    MPIR_TEST_COUNT(sendcount);
    MPIR_TEST_COUNT(recvcount);
    if (mpi_errno)
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    MPIR_ERROR_PUSH(comm_ptr);
    /* Dispatch to the collective-operations table on the communicator */
    mpi_errno = comm_ptr->collops->Allgather( sendbuf, sendcount, stype_ptr,
                                              recvbuf, recvcount, rtype_ptr,
                                              comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
MPI_Allreduce - Combines values from all processes and distribute the result
                back to all processes

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - data type of elements of send buffer (handle)
. op - operation (handle)
- comm - communicator (handle)

Output Parameter:
. recvbuf - starting address of receive buffer (choice)

.N fortran
.N collops

.N Errors
.N MPI_ERR_BUFFER
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_OP
.N MPI_ERR_COMM
@*/
EXPORT_MPI_API int MPI_Allreduce ( void *sendbuf, void *recvbuf, int count,
                                   MPI_Datatype datatype, MPI_Op op,
                                   MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_ALLREDUCE";

#ifdef RED_DEBUG
    /* Debug-only call tracing; compiled out normally */
    char zahl[10];
    static int callcount=0;
    callcount++;
    DBG("Entering Allreduce()");
    DBG(itoa(callcount,zahl,10));
#endif

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    MPIR_TEST_COUNT(count);
    /* MPI forbids aliased send/receive buffers for reductions */
    MPIR_TEST_ALIAS(sendbuf,recvbuf);
    if (mpi_errno) {
#ifdef RED_DEBUG
        DBG("Leaving Allreduce Error No");
        DBG(itoa(mpi_errno,zahl,10));
#endif
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    MPIR_ERROR_PUSH(comm_ptr);
    /* Test for intercommunicator is done when collops is assigned */
    mpi_errno = comm_ptr->collops->Allreduce(sendbuf, recvbuf, count,
                                             dtype_ptr, op, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
#ifdef RED_DEBUG
    DBG("Leaving Allreduce");
#endif
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
    MPI_Recv - Basic receive

Output Parameters:
+ buf - initial address of receive buffer (choice)
- status - status object (Status)

Input Parameters:
+ count - maximum number of elements in receive buffer (integer)
. datatype - datatype of each receive buffer element (handle)
. source - rank of source (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Notes:
The 'count' argument indicates the maximum length of a message; the actual
number can be determined with 'MPI_Get_count'.

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TYPE
.N MPI_ERR_COUNT
.N MPI_ERR_TAG
.N MPI_ERR_RANK
@*/
int MPI_Recv( void *buf, int count, MPI_Datatype datatype, int source,
              int tag, MPI_Comm comm, MPI_Status *status )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_RECV";
    int mpi_errno = MPI_SUCCESS;

    /* Because this is a very common routine, we show how it can be
       optimized to be run "inline"; In addition, this lets us exploit
       features in the ADI to simplify the execution of blocking receive
       calls. */
    if (source != MPI_PROC_NULL) {
        /* Signals disabled only on the real-receive path; both returns
           below re-enable them */
        disableSignal();

        comm_ptr = MPIR_GET_COMM_PTR(comm);
        MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

        dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
        MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
        MPIR_TEST_COUNT(count);
        MPIR_TEST_RECV_TAG(tag);
        MPIR_TEST_RECV_RANK(comm_ptr,source);
        if (mpi_errno) {
            revertSignal();
            return MPIR_ERROR(comm_ptr, mpi_errno, myname );
        }
#endif
        /* Blocking datatype receive via the ADI */
        MPID_RecvDatatype( comm_ptr, buf, count, dtype_ptr, source, tag,
                           comm_ptr->recv_context, status, &mpi_errno );
        revertSignal();
        MPIR_RETURN(comm_ptr, mpi_errno, myname );
    }
    else {
        /* Receive from MPI_PROC_NULL: no data; fill in an empty status */
        if (status != MPI_STATUS_IGNORE) {
            /* See MPI standard section 3.11 */
            MPID_ZERO_STATUS_COUNT(status);
            status->MPI_SOURCE = MPI_PROC_NULL;
            status->MPI_TAG    = MPI_ANY_TAG;
        }
    }
    return MPI_SUCCESS;
}
/*@
    MPI_Type_size - Return the number of bytes occupied by entries
                    in the datatype

Input Parameters:
. datatype - datatype (handle)

Output Parameter:
. size - datatype size (integer)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_size ( MPI_Datatype datatype, int *size )
{
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_TYPE_SIZE";
    struct MPIR_DATATYPE *type_ptr;

    TR_PUSH(myname);

    /* The output pointer must be valid before we write through it */
    MPIR_TEST_ARG(size);
    if (mpi_errno)
        return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );

    /* Validate the handle; the macro handles the error return itself */
    type_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,type_ptr,MPIR_COMM_WORLD,myname);

    /* Hand back the size recorded in the datatype descriptor */
    *size = (int)(type_ptr->size);
    TR_POP;
    return MPI_SUCCESS;
}
/*@
    MPI_Type_ub - Returns the upper bound of a datatype

Input Parameters:
. datatype - datatype (handle)

Output Parameter:
. displacement - displacement of upper bound from origin, in bytes (integer)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_ub ( MPI_Datatype datatype, MPI_Aint *displacement )
{
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_TYPE_UB";
    struct MPIR_DATATYPE *type_ptr;

    TR_PUSH(myname);

    /* The output pointer must be valid before we write through it */
    MPIR_TEST_ARG(displacement);
    if (mpi_errno)
        return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );

    /* Validate the handle, then hand back the recorded upper bound */
    type_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,type_ptr,MPIR_COMM_WORLD, myname);
    *displacement = type_ptr->ub;
    TR_POP;
    return MPI_SUCCESS;
}
/*@
    MPI_Irecv - Begins a nonblocking receive

Input Parameters:
+ buf - initial address of receive buffer (choice)
. count - number of elements in receive buffer (integer)
. datatype - datatype of each receive buffer element (handle)
. source - rank of source (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran
@*/
EXPORT_MPI_API int MPI_Irecv( void *buf, int count, MPI_Datatype datatype,
                              int source, int tag, MPI_Comm comm,
                              MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_RHANDLE *rhandle;
    static char myname[] = "MPI_IRECV";
    int mpi_errno = MPI_SUCCESS;

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_RECV_TAG(tag);
    MPIR_TEST_RECV_RANK(comm_ptr,source);
    if (mpi_errno)
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Allocate and initialize the receive handle, and hand it back as
       the request */
    MPIR_ALLOCFN(rhandle,MPID_Recv_alloc,comm_ptr,
                 MPI_ERR_EXHAUSTED,myname);
    MPID_Request_init ((MPI_Request)rhandle, MPIR_RECV );
    *request = (MPI_Request) rhandle;

    if (source == MPI_PROC_NULL) {
        /* Receive from MPI_PROC_NULL: complete immediately with an
           empty status */
        rhandle->s.MPI_TAG    = MPI_ANY_TAG;
        rhandle->s.MPI_SOURCE = MPI_PROC_NULL;
        rhandle->s.count      = 0;
        rhandle->is_complete  = 1;
        return MPI_SUCCESS;
    }

    MPID_IrecvDatatype( comm_ptr, buf, count, dtype_ptr, source, tag,
                        comm_ptr->recv_context, *request, &mpi_errno );
    if (mpi_errno)
        return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    return MPI_SUCCESS;
}
/*@
MPI_Bcast - Broadcasts a message from the process with rank "root" to
            all other processes of the group.

Input/output Parameters:
+ buffer - starting address of buffer (choice)
. count - number of entries in buffer (integer)
. datatype - data type of buffer (handle)
. root - rank of broadcast root (integer)
- comm - communicator (handle)

Algorithm:
If the underlying device does not take responsibility, this function uses a
tree-like algorithm to broadcast the message to blocks of processes.  A
linear algorithm is then used to broadcast the message from the first process
in a block to all other processes.  'MPIR_BCAST_BLOCK_SIZE' determines the
size of blocks.  If this is set to 1, then this function is equivalent to
using a pure tree algorithm.  If it is set to the size of the group or
greater, it is a pure linear algorithm.  The value should be adjusted to
determine the most efficient value on different machines.

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
.N MPI_ERR_ROOT
@*/
EXPORT_MPI_API int MPI_Bcast ( void *buffer, int count, MPI_Datatype datatype,
                               int root, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_BCAST";
    MPIR_ERROR_DECL;

    /* FIX: added the trailing semicolon missing after TR_PUSH, for
       consistency with every other routine in this file */
    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

    /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

    /* If an intercomm broadcast, the root can also be MPI_ROOT or
       MPI_PROC_NULL */
    if (root < MPI_ROOT || root >= comm_ptr->np) {
        mpi_errno = MPIR_Err_setmsg( MPI_ERR_ROOT, MPIR_ERR_DEFAULT, myname,
                                     (char *)0, (char *)0, root );
    }
    MPIR_TEST_COUNT(count);
    if (mpi_errno)
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* See the overview in Collection Operations for why this is ok */
    if (count == 0) return MPI_SUCCESS;

    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Bcast(buffer, count, dtype_ptr, root,
                                         comm_ptr);
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*
 * MPID_Type_permanent_setup - attach the vendor-MPI handle for a
 * permanent (basic) MPICH datatype.
 *
 * Aborts if MPICH did not mark the type permanent.  On success the
 * descriptor is stamped with MPID_DATATYPE_COOKIE so later calls can
 * recognize a valid vendor type.
 *
 * Returns MPI_SUCCESS or a translated vendor error code.
 */
int MPID_Type_permanent_setup( MPI_Datatype datatype)
{
    int rc;
    struct MPIR_DATATYPE * dtype_ptr;

    DEBUG_FN_ENTRY(DEBUG_MODULE_TYPES);
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_ARGS,
                 ("datatype=%d\n", datatype));

    rc = MPI_SUCCESS;

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPID_Type_validate(dtype_ptr);

    /* This routine must only be called for permanent types; anything
       else is an internal error */
    if (!dtype_ptr->permanent)
    {
        MPID_Abort(NULL,
                   0,
                   "MPICH-G2 (internal error)",
                   "MPID_Type_permanent_setup() - MPICH didn't mark "
                   "this as a permanent type!");
    }

    rc = vmpi_error_to_mpich_error(
        mp_type_permanent_setup(dtype_ptr->vmpi_type,
                                mpich_type_to_vmpi_type(datatype)));

    if (rc == MPI_SUCCESS)
    {
        dtype_ptr->vmpi_cookie = MPID_DATATYPE_COOKIE;
    }

    /* fn_exit: */
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_RC, ("rc=%d\n", rc));
    DEBUG_FN_EXIT(DEBUG_MODULE_TYPES);
    return rc;
}
/*
 * MPID_Type_free
 *
 * Release the vendor-MPI datatype associated with 'datatype'.  Permanent
 * (predefined) types go through the vendor's permanent-free path; derived
 * types are freed directly.  The vendor-side cookie is cleared in either
 * case so later validation catches use-after-free.  Returns an MPICH
 * error code translated from the vendor result.
 */
int MPID_Type_free( MPI_Datatype datatype)
{
    int rc;
    struct MPIR_DATATYPE * dtype_ptr;

    DEBUG_FN_ENTRY(DEBUG_MODULE_TYPES);
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_ARGS,
		 ("datatype=%d\n", datatype));

    rc = MPI_SUCCESS;

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPID_Type_validate(dtype_ptr);
    /* FIX: this validation used to be performed twice in a row; the
       duplicated call was redundant and has been removed. */
    MPID_Type_validate_vmpi(dtype_ptr);

    if (dtype_ptr->permanent)
    {
	rc = vmpi_error_to_mpich_error(
		 mp_type_permanent_free(dtype_ptr->vmpi_type,
					mpich_type_to_vmpi_type(datatype)));
    }
    else
    {
	rc = vmpi_error_to_mpich_error(
		 mp_type_free(dtype_ptr->vmpi_type));
    }

    /* Invalidate the vendor-side handle regardless of the free result. */
    dtype_ptr->vmpi_cookie = 0;

    /* fn_exit: */
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_RC, ("rc=%d\n", rc));
    DEBUG_FN_EXIT(DEBUG_MODULE_TYPES);
    return rc;
}
/*
 * MPID_Type_struct
 *
 * Build the vendor-MPI mirror of an MPICH struct datatype: gather the
 * vendor handles of all constituent types into a packed byte array and
 * hand them to the vendor's type_struct constructor.  On success the
 * new type's vendor cookie is set.  Returns an MPICH error code.
 */
int MPID_Type_struct( int count, int blocklens[], MPI_Aint indices[],
		      MPI_Datatype oldtypes[], MPI_Datatype newtype)
{
    int rc;
    int i;
    struct MPIR_DATATYPE * newtype_ptr;
    globus_byte_t * old_vmpi_types;

    DEBUG_FN_ENTRY(DEBUG_MODULE_TYPES);
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_ARGS,
		 ("newtype=%d\n", newtype));

    rc = MPI_SUCCESS;

    old_vmpi_types = (globus_byte_t *)
	globus_libc_malloc(count * VENDOR_MPI_DATATYPE_SIZE);
    /* FIX: malloc(0) may legally return NULL; only treat NULL as memory
       exhaustion when something was actually requested (count > 0).
       Previously a zero-count call could be misreported as
       MPI_ERR_EXHAUSTED. */
    if (old_vmpi_types == NULL && count > 0)
    {
	rc = MPI_ERR_EXHAUSTED;
	goto fn_exit;
    }

    newtype_ptr = MPIR_GET_DTYPE_PTR(newtype);
    MPID_Type_validate(newtype_ptr);

    /* Pack each constituent type's vendor handle into the flat array
       expected by mp_type_struct. */
    for (i = 0; i < count; i++)
    {
	struct MPIR_DATATYPE * dtype_ptr;

	dtype_ptr = MPIR_GET_DTYPE_PTR(oldtypes[i]);
	MPID_Type_validate(dtype_ptr);
	MPID_Type_validate_vmpi(dtype_ptr);

	memcpy(old_vmpi_types + i * VENDOR_MPI_DATATYPE_SIZE,
	       dtype_ptr->vmpi_type,
	       VENDOR_MPI_DATATYPE_SIZE);
    }

    rc = vmpi_error_to_mpich_error(
	     mp_type_struct(count,
			    blocklens,
			    indices,
			    old_vmpi_types,
			    newtype_ptr->vmpi_type));

    /* globus_libc_free(NULL) is a no-op, so this is safe for count==0. */
    globus_libc_free(old_vmpi_types);

    if (rc == MPI_SUCCESS)
    {
	newtype_ptr->vmpi_cookie = MPID_DATATYPE_COOKIE;
    }

  fn_exit:
    DEBUG_PRINTF(DEBUG_MODULE_TYPES, DEBUG_INFO_RC, ("rc=%d\n", rc));
    DEBUG_FN_EXIT(DEBUG_MODULE_TYPES);
    return rc;
}
/*
 * MPID_Get_elements
 *
 * return into 'elements' the number of basic datatypes that are in
 * the buffer described by status.  for complex 'datatype' this requires
 * counting how many basic datatypes there are, which includes counting
 * those basic datatypes that appear in a potentially partially-filled
 * last datatype.
 *
 * there is a potentially weird scenario:
 * - sizeof(datatype) == 0, in this case the "correct" count cannot be
 *   determined ... *count could be set to anything from 0-infinity,
 *   the MPI standard does not discuss this case (at least i couldn't
 *   find anything on it) so we look at the number of bytes in the
 *   data buffer,
 *   - if sizeof(databuff) == 0 then we take a guess and set
 *     *elements = 0, rc = MPI_SUCCESS, and hope that's what
 *     the user expected.
 *   - if sizeof(databuff) > 0 then things are REALLY messed up and we
 *     give up by simply returning rc = MPI_ERR_INTERN.
 */
int MPID_Get_elements(MPI_Status *status, MPI_Datatype datatype, int *elements)
{
    struct MPIR_DATATYPE *dtype_ptr =
	(struct MPIR_DATATYPE *) MPIR_GET_DTYPE_PTR(datatype);

#   if defined(VMPI)
    /* If the status came from the vendor MPI, let the vendor count the
       elements directly. */
    if (STATUS_INFO_IS_COUNT_VMPI(*status))
    {
	MPID_Type_validate_vmpi(dtype_ptr);
	return vmpi_error_to_mpich_error(
		   mp_get_elements(STATUS_INFO_GET_VMPI_PTR(*status),
				   dtype_ptr->vmpi_type,
				   elements));
    }
    else
#   endif /* defined(VMPI) */
    if (status->count == 0)
    {
	/*
	 * this is more than just an optimization.  if the app calls
	 * MPI_{Recv,Irecv} from MPI_PROC_NULL, then the MPICH code
	 * simply sets status->count=0 and does NOT call our
	 * MPID_{Recv,Irecv}, and therefore we don't get to set
	 * status->private_count to ISLOCAL or ISDATAORIGIN.
	 * without that setting, the rest of the code in this
	 * function will fail.
	 */
	*elements = 0;
    }
    else if (dtype_ptr->size <= 0)
    {
	/*
	 * this is weird ... we're being asked to count how many
	 * 0-byte data elements are in a non-empty buffer ... the
	 * "correct" answer is anywhere from 0-inifinite ... (probably
	 * _countably_ infinite, if that helps ;-))
	 */
	return MPI_ERR_INTERN;
    }
    else
    {
	int unit_size;       /* bytes per full 'datatype' in the status's format */
	int format;
	int nbytes_remaining;

	if (STATUS_INFO_IS_COUNT_LOCAL(*status))
	{
	    /* status->count is the number of bytes in local format */
	    format    = GLOBUS_DC_FORMAT_LOCAL;
	    unit_size = dtype_ptr->size;
	}
	else if (STATUS_INFO_IS_COUNT_REMOTE(*status))
	{
	    /* status->count is the number of bytes in remote format;
	       the per-datatype size must be recomputed for that format. */
	    format = STATUS_INFO_GET_FORMAT(*status);
	    if ((unit_size = remote_size(1, dtype_ptr, format)) <= 0)
	    {
		globus_libc_fprintf(stderr,
		    "ERROR: MPID_Get_elements: datatype %d local size %d, "
		    "remote size %d\n",
		    dtype_ptr->dte_type, dtype_ptr->size, unit_size);
		return MPI_ERR_INTERN;
	    } /* endif */
	}
	else
	{
	    globus_libc_fprintf(stderr,
		"ERROR: MPID_Get_elements: could not interpret "
		"status->private_count %d\n",
		status->extra[0]);
	    return MPI_ERR_INTERN;
	} /* endif */

	/* count the basic datatypes in 'full' ones */
	*elements = (status->count / unit_size) * dtype_ptr->elements;

	if ((nbytes_remaining = status->count-(*elements * unit_size)) > 0)
	{
	    /* last element is only partially filled ... need */
	    /* to count the basic datatypes in that one too   */
	    globus_bool_t done = GLOBUS_FALSE;

	    if (get_elements_from_partial(1,
					  dtype_ptr,
					  format,
					  &nbytes_remaining,
					  elements,
					  &done))
		/* something bad happened */
		return MPI_ERR_INTERN;
	    else if (nbytes_remaining > 0)
	    {
		/*
		 * after counting all the basic element types we can,
		 * decrementing nbytes_remaining along the way, there
		 * are STILL residual bytes left over that could not be
		 * accounted for based on the 'datatype' we were passed.
		 * still going to return *elements and MPI_SUCCESS, but
		 * printing warning message (stderr) here.
		 */
		globus_libc_fprintf(stderr,
		    "WARNING: MPID_Get_elements counted all the basic "
		    "datatypes it could based\n");
		globus_libc_fprintf(stderr,
		    " the specified datatype, but still had %d "
		    "residual bytes that\n", nbytes_remaining);
		globus_libc_fprintf(stderr,
		    " could not be accounted for.\n");
	    } /* endif */
	} /* endif */
    } /* endif */

    return MPI_SUCCESS;
} /* end MPID_Get_elements() */
/*@
MPI_Type_struct - Creates a struct datatype

Input Parameters:
+ count - number of blocks (integer) -- also number of entries in arrays
  array_of_types, array_of_displacements and array_of_blocklengths
. blocklens - number of elements in each block (array)
. indices - byte displacement of each block (array)
- old_types - type of elements in each block (array of handles to
  datatype objects)

Output Parameter:
. newtype - new datatype (handle)

Notes:
If an upperbound is set explicitly by using the MPI datatype 'MPI_UB',
the corresponding index must be positive.

The MPI standard originally made vague statements about padding and
alignment; this was intended to allow the simple definition of structures
that could be sent with a count greater than one.  For example,
.vb
    struct { int a; char b; } foo;
.ve
may have 'sizeof(foo) > sizeof(int) + sizeof(char)'; for example,
'sizeof(foo) == 2*sizeof(int)'.  The initial version of the MPI standard
defined the extent of a datatype as including an `epsilon` that would have
allowed an implementation to make the extent an MPI datatype for this
structure equal to '2*sizeof(int)'.  However, since different systems
might define different paddings, there was much discussion by the MPI
Forum about what was the correct value of epsilon, and one suggestion was
to define epsilon as zero.  This would have been the best thing to do in
MPI 1.0, particularly since the 'MPI_UB' type allows the user to easily
set the end of the structure.  Unfortunately, this change did not make it
into the final document.  Currently, this routine does not add any
padding, since the amount of padding needed is determined by the compiler
that the user is using to build their code, not the compiler used to
construct the MPI library.  A later version of MPICH may provide for some
natural choices of padding (e.g., multiple of the size of the largest
basic member), but users are advised to never depend on this, even with
vendor MPI implementations.

Instead, if you define a structure datatype and wish to send or receive
multiple items, you should explicitly include an 'MPI_UB' entry as the
last member of the structure.  For example, the following code can be used
for the structure foo
.vb
    blen[0] = 1; indices[0] = 0; oldtypes[0] = MPI_INT;
    blen[1] = 1; indices[1] = &foo.b - &foo; oldtypes[1] = MPI_CHAR;
    blen[2] = 1; indices[2] = sizeof(foo); oldtypes[2] = MPI_UB;
    MPI_Type_struct( 3, blen, indices, oldtypes, &newtype );
.ve

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_COUNT
.N MPI_ERR_EXHAUSTED
@*/
int MPI_Type_struct( int count, int blocklens[], MPI_Aint indices[],
		     MPI_Datatype old_types[], MPI_Datatype *newtype )
{
    struct MPIR_DATATYPE* dteptr;
    MPI_Aint ub, lb, high, low, real_ub, real_lb, real_init;
    int high_init = 0, low_init = 0;
    int i, mpi_errno = MPI_SUCCESS;
    MPI_Aint ub_marker = 0, lb_marker = 0; /* to suppress warnings */
    /* NOTE(review): ub_found/lb_found are boolean flags but declared as
       MPI_Aint — harmless, though int would be clearer. */
    MPI_Aint ub_found = 0, lb_found = 0;
    int size, total_count;
    static char myname[] = "MPI_TYPE_STRUCT";

    disableSignal();

    /* Check for bad arguments */
    if ( count < 0 ) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_COUNT, MPIR_ERR_DEFAULT, myname,
				     (char *)0, (char *)0, count );
	revertSignal();
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
    }

    /* An empty struct degenerates to an empty contiguous type. */
    if (count == 0) {
	revertSignal();
	return MPI_Type_contiguous( 0, MPI_INT, newtype );
    }

    /* Check blocklens and old_types arrays and find number of bound */
    /* markers */
    total_count = 0;
    for (i=0; i<count; i++) {
	total_count += blocklens[i];
	if ( blocklens[i] < 0) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_ARRAY_VAL,
					 myname, (char *)0, (char *)0,
					 "blocklens", i, blocklens[i] );
	    revertSignal();
	    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno,myname);
	}
	if ( old_types[i] == MPI_DATATYPE_NULL ) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_TYPE, MPIR_ERR_TYPE_ARRAY_NULL,
					 myname, (char *)0, (char *)0,
					 "old_types", i );
	    revertSignal();
	    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );
	}
    }
    /* All block lengths zero: also an empty type. */
    if (total_count == 0) {
	revertSignal();
	return MPI_Type_contiguous( 0, MPI_INT, newtype );
    }

    /* Create and fill in the datatype */
    MPIR_ALLOC(dteptr,(struct MPIR_DATATYPE *) MPIR_SBalloc( MPIR_dtes ),
	       MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, myname );
    *newtype = (MPI_Datatype) MPIR_FromPointer( dteptr );
    dteptr->self = *newtype;
    MPIR_SET_COOKIE(dteptr,MPIR_DATATYPE_COOKIE)
    dteptr->dte_type    = MPIR_STRUCT;
    dteptr->committed   = 0;
    dteptr->basic       = 0;
    dteptr->permanent   = 0;
    dteptr->is_contig   = 0;
    dteptr->ref_count   = 1;
    dteptr->count       = count;
    dteptr->elements    = 0;
    dteptr->size        = 0;
    dteptr->align       = 1;
    dteptr->has_ub      = 0;
    dteptr->has_lb      = 0;
    /* NOTE(review): this self assignment is redundant (already done above). */
    dteptr->self        = *newtype;

    /* Create indices and blocklens arrays and fill them */
    dteptr->indices     = ( MPI_Aint * ) MALLOC( count * sizeof( MPI_Aint ) );
    dteptr->blocklens   = ( int * )      MALLOC( count * sizeof( int ) );
    dteptr->old_types   =
	( struct MPIR_DATATYPE ** )MALLOC(count*sizeof(struct MPIR_DATATYPE *));
    /* NOTE(review): on this failure path the already-allocated dteptr and
       any arrays that DID allocate are leaked, and *newtype holds a
       half-built handle — worth fixing with a proper cleanup path. */
    if (!dteptr->indices || !dteptr->blocklens || !dteptr->old_types) {
	revertSignal();
	return MPIR_ERROR( MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED,
			   "MPI_TYPE_STRUCT" );
    }
    high = low = ub = lb = 0;
    real_ub   = real_lb = 0;
    real_init = 0;

    /* If data alignment is 2, 4, or 8, then assign dteptr->align to that
       value.  If 0, then assign dteptr->align to the maximal alignment
       requirement (done below) */
    if (ALIGNMENT_VALUE > 0)
	dteptr->align = ALIGNMENT_VALUE;

    /* Walk the members, copying them into the new type and accumulating
       size, element count, alignment, and the various bounds. */
    for (i = 0; i < count; i++) {
	struct MPIR_DATATYPE *old_dtype_ptr;

	old_dtype_ptr   = MPIR_GET_DTYPE_PTR(old_types[i]);
	MPIR_TEST_DTYPE(old_types[i],old_dtype_ptr,MPIR_COMM_WORLD,
			"MPI_TYPE_STRUCT");
	dteptr->old_types[i]  = MPIR_Type_dup (old_dtype_ptr);
	dteptr->indices[i]    = indices[i];
	dteptr->blocklens[i]  = blocklens[i];

	/* Keep track of maximal alignment requirement */
	if (ALIGNMENT_VALUE == 0) {
	    if (dteptr->align < old_dtype_ptr->align)
		dteptr->align = old_dtype_ptr->align;
	}
	if ( old_dtype_ptr->dte_type == MPIR_UB ) {
	    /* Explicit MPI_UB marker: remember the largest such index. */
	    if (ub_found) {
		if (indices[i] > ub_marker)
		    ub_marker = indices[i];
	    }
	    else {
		ub_marker = indices[i];
		ub_found  = 1;
	    }
	}
	else if ( old_dtype_ptr->dte_type == MPIR_LB ) {
	    /* Explicit MPI_LB marker: remember the smallest such index. */
	    if (lb_found) {
		if ( indices[i] < lb_marker ) {
		    lb_marker = indices[i];
		}
	    }
	    else {
		lb_marker = indices[i];
		lb_found  = 1;
	    }
	}
	else {
	    /* Since the datatype is NOT a UB or LB, save the real limits */
	    if (!real_init) {
		real_init = 1;
		real_lb = old_dtype_ptr->real_lb;
		real_ub = old_dtype_ptr->real_ub;
	    }
	    else {
		if (old_dtype_ptr->real_lb < real_lb)
		    real_lb = old_dtype_ptr->real_lb;
		if (old_dtype_ptr->real_ub > real_ub)
		    real_ub = old_dtype_ptr->real_ub;
	    }
	    /* Next, check to see if datatype has an MPI_LB or MPI_UB
	       within it...
	       Make sure to adjust the ub by the selected displacement
	       and blocklens (blocklens is like Type_contiguous) */
	    if (old_dtype_ptr->has_ub) {
		MPI_Aint ub_test;
		ub_test = old_dtype_ptr->ub + indices[i] +
		    (blocklens[i] - 1) * old_dtype_ptr->extent;
		if (ub_marker < ub_test || !ub_found) ub_marker = ub_test;
		ub_found = 1;
	    }
	    if (old_dtype_ptr->has_lb) {
		if (!lb_found || lb_marker > (old_dtype_ptr->lb) + indices[i] )
		    lb_marker = old_dtype_ptr->lb + indices[i];
		lb_found = 1;
	    }
	    /* Get the ub/lb from the datatype (if a MPI_UB or MPI_LB was
	       found, then these values will be ignored).
	       We use the lb of the old type and add the indices
	       value to it */
	    lb = indices[i] + old_dtype_ptr->lb;
	    ub = lb + (blocklens[i] * old_dtype_ptr->extent) ;
	    if (!high_init) { high = ub; high_init = 1; }
	    else if (ub > high) high = ub;
	    if (!low_init ) { low = lb; low_init = 1; }
	    else if (lb < low) low = lb;
	    /* Negative-extent members can put ub below lb; handle both
	       orientations. */
	    if (ub > lb) {
		if ( high < ub ) high = ub;
		if ( low  > lb ) low  = lb;
	    }
	    else {
		if ( high < lb ) high = lb;
		if ( low  > ub ) low  = ub;
	    }
	    dteptr->elements += (blocklens[i] * old_dtype_ptr->elements);
	} /* end else */
	/* NOTE(review): both branches below compute the same value; the
	   split looks like a historical artifact. */
	if (i < count - 1) {
	    size = old_dtype_ptr->size * blocklens[i];
	    dteptr->size += size;
	}
	else {
	    dteptr->size += (blocklens[i] * old_dtype_ptr->size);
	}
    } /* end for loop */

    /* Set the upper/lower bounds and the extent and size */
    if (lb_found) {
	dteptr->lb     = lb_marker;
	dteptr->has_lb = 1;
    }
    else
	dteptr->lb = (low_init ? low : 0);
    if (ub_found) {
	dteptr->ub     = ub_marker;
	dteptr->has_ub = 1;
    }
    else
	dteptr->ub = (high_init ? high: 0);

    dteptr->extent  = dteptr->ub - dteptr->lb ;
    dteptr->real_ub = real_ub;
    dteptr->real_lb = real_lb;

    /* If there is no explicit ub/lb marker, make the extent/ub fit the
       alignment of the largest basic item, if that structure alignment
       is chosen */
    if (!lb_found && !ub_found) {
	MPI_Aint eps_offset;
	/* Since data is always offset by the extent, is the extent that
	   we must adjust. */
	eps_offset = dteptr->extent % dteptr->align;
	if (eps_offset > 0) {
	    dteptr->ub += (dteptr->align - eps_offset);
	    dteptr->extent = dteptr->ub - dteptr->lb;
	}
    }
#   if defined(MPID_HAS_TYPE_STRUCT)
    {
	/* Let the device build its own mirror of the new type. */
	mpi_errno = MPID_Type_struct(count,
				     blocklens,
				     indices,
				     old_types,
				     *newtype);
    }
#   endif

    revertSignal();
    return (mpi_errno);
}
/*@
MPI_Type_commit - Commits the datatype

Input Parameter:
. datatype - datatype (handle)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
@*/
int MPI_Type_commit ( MPI_Datatype *datatype )
{
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_TYPE_COMMIT";
    int mpi_errno = MPI_SUCCESS;

    disableSignal();

    dtype_ptr   = MPIR_GET_DTYPE_PTR(*datatype);
    MPIR_TEST_DTYPE(*datatype,dtype_ptr,MPIR_COMM_WORLD,myname);

    /* We could also complain about committing twice, but we chose not to,
       based on the view that it isn't obviously an error. */

    /* Test for predefined datatypes */
    if (dtype_ptr->basic) {
	revertSignal();
	return MPI_SUCCESS;
    }

    /* Just do the simplest conversion to contiguous where possible */
#if defined(MPID_HAS_HETERO)
    if (!MPID_IS_HETERO)
#endif
    {
	if (!(dtype_ptr)->is_contig) {
	    /* I want to add a test for the struct { contig, UB } form of
	       variable count strided vectors; this will not have
	       size == extent.  Because of this, using the simple test of
	       size == extent as a filter is not useful. */
	    int j, is_contig;
	    MPI_Aint offset;
	    if ((MPI_Aint)dtype_ptr->size == dtype_ptr->extent) {
		switch (dtype_ptr->dte_type) {
		case MPIR_STRUCT:
		    /* A struct is contiguous iff it starts at offset 0,
		       every member type is itself contiguous, and each
		       member ends exactly where the next one begins. */
		    offset = dtype_ptr->indices[0];
		    /* If the initial offset is not 0, then mark as
		       non-contiguous.  This is because many of the quick
		       tests for valid buffers depend on the initial
		       address being valid if is_contig is set */
		    is_contig = (offset == 0);
		    for (j=0;is_contig && j<dtype_ptr->count-1; j++) {
			if (!dtype_ptr->old_types[j]->is_contig) {
			    is_contig = 0;
			    break;
			}
			if (offset +
			    dtype_ptr->old_types[j]->extent *
			    (MPI_Aint)dtype_ptr->blocklens[j] !=
			    dtype_ptr->indices[j+1]) {
			    is_contig = 0;
			    break;
			}
			offset += dtype_ptr->old_types[j]->extent *
			    (MPI_Aint)dtype_ptr->blocklens[j];
		    }
		    /* The loop above stops before the last member; its
		       contiguity still has to be checked. */
		    if (!dtype_ptr->old_types[dtype_ptr->count-1]->is_contig)
			is_contig = 0;
		    if (is_contig) {
			/* Note that since commit is passed the ADDRESS of
			   the datatype, we can replace it.
			   Unfortunately, the initialization code depends
			   on commit NOT changing the datatype value (in
			   the case that it is a predefined datatype).  We
			   could fix this, but it seems easier to just call
			   a common "free struct datatype fields" routine */
			/* MPI_Type_contiguous( ) */
			/* MPIR_Free_struct_internals( dtype_ptr ); */
			dtype_ptr->is_contig = 1;
			dtype_ptr->old_type  = 0;
			/* If we don't set to null, then the code in
			   type_contig.c will use the extent of
			   type->old_types[0] */
			/* dtype_ptr->old_type = dtype_ptr->old_types[0]; */
			/* PRINTF( "Making structure type contiguous..." ); */
			/* Should free all old structure members ... */
		    }
		    break;
		default:
		    /* Just to indicate that we want all the other types
		       to be ignored */
		    break;
		}
	    }
	}
    }

    /* Nothing else to do yet */
    (dtype_ptr)->committed = 1;

#   if defined(MPID_HAS_TYPE_COMMIT)
    {
	/* Give the device a chance to initialization any additional data
	   structures it requires in order to be able to process derived
	   types */
	mpi_errno = MPID_Type_commit(*datatype);
	revertSignal();
	return mpi_errno;
    }
#   else
    {
	revertSignal();
	return MPI_SUCCESS;
    }
#   endif
}
/*@
MPI_Get_elements - Returns the number of basic elements in a datatype

Input Parameters:
+ status - return status of receive operation (Status)
- datatype - datatype used by receive operation (handle)

Output Parameter:
. count - number of received basic elements (integer)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
@*/
EXPORT_MPI_API int MPI_Get_elements ( MPI_Status *status, MPI_Datatype datatype,
				      int *elements )
{
    int count;
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_GET_ELEMENTS";

    dtype_ptr   = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,MPIR_COMM_WORLD,myname);

    /*********** Check to see if datatype is committed ********
     *********** Debbie Swider - 11/17/97 *********************/
    if (!dtype_ptr->committed) {
	return MPIR_ERROR( MPIR_COMM_WORLD,
	       MPIR_ERRCLASS_TO_CODE(MPI_ERR_TYPE,MPIR_ERR_UNCOMMITTED),
			   myname );
    }

#ifdef MPID_HAS_GET_ELEMENTS
    /* The device knows how to count elements itself. */
    mpi_errno = MPID_Get_elements( status, datatype, elements );
#else
    /* Find the number of elements */
    MPI_Get_count (status, datatype, &count);
    if (count == MPI_UNDEFINED) {
	/* To do this correctly, we need to run through the datatype,
	   processing basic types until we run out of data.  We can do
	   this in part by computing how many full versions of datatype
	   will fit, and make use of the datatype->elements field.  If
	   there isn't an EXACT fit, we need to look into the datatype for
	   more details about the exact mapping to elements.  We do this
	   with MPIR_Unpack2. */
#ifdef FOO
	/* Dead code (FOO is never defined); kept for reference. */
	*elements = count;
	/* HACK ALERT -- the code in this if is not correct */
	/* but for now ... */
	double cnt = (double) status->count / (double) dtype_ptr->size;
	(*elements) = (int) ( cnt * (double) dtype_ptr->elements );
#endif
	{
	    int srclen, destlen, used_len;
	    int i_dummy;
	    srclen = status->count;
	    /* Need to set count so that we'll exit when we run out of
	       items.  It could be ceil(status->count/dtype_ptr->size).
	       Alternately, we could check that
	       used_len >= srclen - epsilon (in case there isn't enough
	       for the last item).  Why isn't this correct? */
	    if (dtype_ptr->size > 0)
		count = 1 + (srclen / dtype_ptr->size);
	    else {
		/* Zero-size datatype: zero bytes yields 0 elements,
		   nonzero bytes cannot be resolved -> MPI_UNDEFINED. */
		*elements = srclen ? MPI_UNDEFINED : 0;
		return MPI_SUCCESS;
	    }
	    *elements = 0;
	    used_len  = 0;
	    /* MPIR_Elementcnt increments *elements for each basic item
	       unpacked; presumably the dummy src/dest buffers are never
	       dereferenced for counting — TODO confirm against
	       MPIR_Unpack2. */
	    MPIR_Unpack2( (char *)&i_dummy, count, dtype_ptr,
			  MPIR_Elementcnt, (void *)elements, (char *)&i_dummy,
			  srclen, &destlen, &used_len );
	    /* If anything is left, return undefined */
	    if (used_len != srclen)
		*elements = MPI_UNDEFINED;
	}
    }
    else
	(*elements) = count * dtype_ptr->elements;
#endif
    MPIR_RETURN( MPIR_COMM_WORLD, mpi_errno, myname );
}
/*
 * MPIR_BXOR
 *
 * Predefined reduction op MPI_BXOR: elementwise bitwise exclusive-or,
 * inoutvec[i] = inoutvec[i] ^ invec[i], for *Len elements of the basic
 * integer type selected by *type.  Unsupported types set MPIR_Op_errno
 * and report MPIR_ERR_OP_NOT_DEFINED.
 */
void MPIR_BXOR ( void *invec, void *inoutvec, int *Len, MPI_Datatype *type )
{
    int i, n = *Len;
    struct MPIR_DATATYPE *dtype = MPIR_GET_DTYPE_PTR(*type);

/* Expand one switch arm: apply MPIR_LBXOR elementwise for C type `ctype`. */
#define MPIR_BXOR_CASE(ctype)                                   \
    {                                                           \
        ctype *dst = (ctype *)inoutvec;                         \
        ctype *src = (ctype *)invec;                            \
        for ( i=0; i<n; i++ )                                   \
            dst[i] = MPIR_LBXOR(dst[i],src[i]);                 \
    }                                                           \
    break

    switch (dtype->dte_type) {
    case MPIR_LOGICAL:     MPIR_BXOR_CASE(MPI_Fint);
    case MPIR_INT:         MPIR_BXOR_CASE(int);
    case MPIR_UINT:        MPIR_BXOR_CASE(unsigned);
    case MPIR_LONG:        MPIR_BXOR_CASE(long);
#if defined(HAVE_LONG_LONG_INT)
    case MPIR_LONGLONGINT: MPIR_BXOR_CASE(long long);
#endif
    case MPIR_ULONG:       MPIR_BXOR_CASE(unsigned long);
    case MPIR_SHORT:       MPIR_BXOR_CASE(short);
    case MPIR_USHORT:      MPIR_BXOR_CASE(unsigned short);
    case MPIR_CHAR:        MPIR_BXOR_CASE(char);
    case MPIR_BYTE:
    case MPIR_UCHAR:       MPIR_BXOR_CASE(unsigned char);
    default:
        MPIR_Op_errno = MPIR_ERR_OP_NOT_DEFINED;
        MPIR_ERROR(MPIR_COMM_WORLD,MPIR_ERR_OP_NOT_DEFINED, "MPI_BXOR" );
        break;
    }
#undef MPIR_BXOR_CASE
}
/*
 * MPIR_PROD
 *
 * Predefined reduction op MPI_PROD: elementwise product,
 * inoutvec[i] = inoutvec[i] * invec[i], for *Len elements of the type
 * selected by *type.  Complex types use explicit (re,im) multiplication.
 * Unsupported types set MPIR_Op_errno and report MPIR_ERR_OP_NOT_DEFINED.
 */
void MPIR_PROD ( void *invec, void *inoutvec, int *Len, MPI_Datatype *type )
{
    int i, n = *Len;
    struct MPIR_DATATYPE *dtype = MPIR_GET_DTYPE_PTR(*type);

/* Expand one switch arm: apply MPIR_LPROD elementwise for C type `ctype`. */
#define MPIR_PROD_CASE(ctype)                                   \
    {                                                           \
        ctype *dst = (ctype *)inoutvec;                         \
        ctype *src = (ctype *)invec;                            \
        for ( i=0; i<n; i++ )                                   \
            dst[i] = MPIR_LPROD(dst[i],src[i]);                 \
    }                                                           \
    break

/* Expand one switch arm for a complex struct with .re/.im members; the
   operand is copied first so the multiply reads consistent values. */
#define MPIR_PROD_COMPLEX_CASE(ctype)                           \
    {                                                           \
        ctype *dst = (ctype *)inoutvec;                         \
        ctype *src = (ctype *)invec;                            \
        for ( i=0; i<n; i++ ) {                                 \
            ctype tmp;                                          \
            tmp.re = dst[i].re; tmp.im = dst[i].im;             \
            dst[i].re = tmp.re*src[i].re - tmp.im*src[i].im;    \
            dst[i].im = tmp.im*src[i].re + tmp.re*src[i].im;    \
        }                                                       \
    }                                                           \
    break

    switch (dtype->dte_type) {
    case MPIR_INT:            MPIR_PROD_CASE(int);
    case MPIR_UINT:           MPIR_PROD_CASE(unsigned);
    case MPIR_LONG:           MPIR_PROD_CASE(long);
#if defined(HAVE_LONG_LONG_INT)
    case MPIR_LONGLONGINT:    MPIR_PROD_CASE(long long);
#endif
    case MPIR_ULONG:          MPIR_PROD_CASE(unsigned long);
    case MPIR_SHORT:          MPIR_PROD_CASE(short);
    case MPIR_USHORT:         MPIR_PROD_CASE(unsigned short);
    case MPIR_CHAR:           MPIR_PROD_CASE(char);
    case MPIR_BYTE:
    case MPIR_UCHAR:          MPIR_PROD_CASE(unsigned char);
    case MPIR_FLOAT:          MPIR_PROD_CASE(float);
    case MPIR_DOUBLE:         MPIR_PROD_CASE(double);
#if defined(HAVE_LONG_DOUBLE)
    case MPIR_LONGDOUBLE:     MPIR_PROD_CASE(long double);
#endif
    case MPIR_COMPLEX:        MPIR_PROD_COMPLEX_CASE(s_complex);
    case MPIR_DOUBLE_COMPLEX: MPIR_PROD_COMPLEX_CASE(d_complex);
    default:
        MPIR_Op_errno = MPIR_ERR_OP_NOT_DEFINED;
        MPIR_ERROR(MPIR_COMM_WORLD,MPIR_ERR_OP_NOT_DEFINED, "MPI_PROD" );
        break;
    }
#undef MPIR_PROD_CASE
#undef MPIR_PROD_COMPLEX_CASE
}
/*
 * MPIR_MINLOC
 *
 * Predefined reduction op MPI_MINLOC: keep the smaller value; on equal
 * values keep the smaller location index.  Handles both layouts MPICH
 * uses for value/location pairs:
 *   - a struct datatype whose first member selects the value type
 *     (the MPI_..._INT predefined pair types), and
 *   - a contiguous datatype of count 2 (e.g. MPI_2INT), stored as a
 *     flat value,loc sequence.
 * Anything else sets MPIR_Op_errno and reports MPIR_ERR_OP_NOT_DEFINED.
 */
void MPIR_MINLOC( void *invec, void *inoutvec, int *Len, MPI_Datatype *type )
{
    int i, len = *Len;
    struct MPIR_DATATYPE *dtype = MPIR_GET_DTYPE_PTR(*type);

/* Switch arm for the struct layout: 'pairtype' has .value and .loc. */
#define MPIR_MINLOC_STRUCT_CASE(pairtype)                       \
    {                                                           \
        pairtype *dst = (pairtype *)inoutvec;                   \
        pairtype *src = (pairtype *)invec;                      \
        for (i=0; i<len; i++) {                                 \
            if (dst[i].value == src[i].value)                   \
                dst[i].loc = MPIR_MIN(dst[i].loc,src[i].loc);   \
            else if (dst[i].value > src[i].value) {             \
                dst[i].value = src[i].value;                    \
                dst[i].loc   = src[i].loc;                      \
            }                                                   \
        }                                                       \
    }                                                           \
    break

/* Switch arm for the flat pair layout: even index = value, odd = loc. */
#define MPIR_MINLOC_PAIR_CASE(ctype)                            \
    {                                                           \
        ctype *dst = (ctype *)inoutvec;                         \
        ctype *src = (ctype *)invec;                            \
        for ( i=0; i<len; i+=2 ) {                              \
            if (dst[i] == src[i])                               \
                dst[i+1] = MPIR_MIN(dst[i+1],src[i+1]);         \
            else if (dst[i] > src[i]) {                         \
                dst[i]   = src[i];                              \
                dst[i+1] = src[i+1];                            \
            }                                                   \
        }                                                       \
    }                                                           \
    break

    if (dtype->dte_type == MPIR_STRUCT) {
        /* Perform the operation based on the type of the first type in
           the struct */
        switch (dtype->old_types[0]->dte_type) {
        case MPIR_INT:         MPIR_MINLOC_STRUCT_CASE(MPIR_2int_loctype);
        case MPIR_FLOAT:       MPIR_MINLOC_STRUCT_CASE(MPIR_floatint_loctype);
        case MPIR_LONG:        MPIR_MINLOC_STRUCT_CASE(MPIR_longint_loctype);
#if defined(HAVE_LONG_LONG_INT)
        case MPIR_LONGLONGINT: MPIR_MINLOC_STRUCT_CASE(MPIR_longlongint_loctype);
#endif
        case MPIR_SHORT:       MPIR_MINLOC_STRUCT_CASE(MPIR_shortint_loctype);
        case MPIR_DOUBLE:      MPIR_MINLOC_STRUCT_CASE(MPIR_doubleint_loctype);
#if defined(HAVE_LONG_DOUBLE)
        case MPIR_LONGDOUBLE:  MPIR_MINLOC_STRUCT_CASE(MPIR_longdoubleint_loctype);
#endif
        default:
            MPIR_Op_errno = MPIR_ERR_OP_NOT_DEFINED;
            MPIR_ERROR(MPIR_COMM_WORLD, MPIR_ERR_OP_NOT_DEFINED,
                       "MPI_MINLOC" );
        }
    }
    else if (dtype->dte_type == MPIR_CONTIG && (dtype->count == 2)) {
        struct MPIR_DATATYPE *oldtype = dtype->old_type;
        /* Set the actual length: pairs -> flat element count */
        len = len * dtype->count;
        /* Perform the operation */
        switch (oldtype->dte_type) {
        case MPIR_INT:         MPIR_MINLOC_PAIR_CASE(int);
        case MPIR_LONG:        MPIR_MINLOC_PAIR_CASE(long);
#if defined(HAVE_LONG_LONG_INT)
        case MPIR_LONGLONGINT: MPIR_MINLOC_PAIR_CASE(long long);
#endif
        case MPIR_SHORT:       MPIR_MINLOC_PAIR_CASE(short);
        case MPIR_CHAR:        MPIR_MINLOC_PAIR_CASE(char);
        case MPIR_FLOAT:       MPIR_MINLOC_PAIR_CASE(float);
        case MPIR_DOUBLE:      MPIR_MINLOC_PAIR_CASE(double);
#ifdef HAVE_LONG_DOUBLE
        case MPIR_LONGDOUBLE:  MPIR_MINLOC_PAIR_CASE(long double);
#endif
        default:
            MPIR_Op_errno = MPIR_ERR_OP_NOT_DEFINED;
            MPIR_ERROR(MPIR_COMM_WORLD, MPIR_ERR_OP_NOT_DEFINED,
                       "MPI_MINLOC" );
            break;
        }
    }
    else {
        MPIR_Op_errno = MPIR_ERR_OP_NOT_DEFINED;
        MPIR_ERROR(MPIR_COMM_WORLD, MPIR_ERR_OP_NOT_DEFINED, "MPI_MINLOC" );
    }
#undef MPIR_MINLOC_STRUCT_CASE
#undef MPIR_MINLOC_PAIR_CASE
}