/*@
    MPI_Send - Performs a basic send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (nonnegative integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Notes:
This routine may block until the message is received.

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK

.seealso: MPI_Isend, MPI_Bsend
@*/
EXPORT_MPI_API int MPI_Send( void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    /* Routine name used by the error-reporting macros below. */
    static char myname[] = "MPI_SEND";

    /* A send to MPI_PROC_NULL completes immediately with no communication.
       NOTE(review): this short-circuit happens before comm/datatype
       validation, unlike MPI_Recv which validates first — confirm this
       is intentional. */
    if (dest == MPI_PROC_NULL) return MPI_SUCCESS;

    /* Convert the opaque handles to internal pointers.  The MPIR_TEST_*
       macros validate and (presumably) return an error from this function
       on failure — NOTE(review): confirm macro expansion. */
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    /* These macros set mpi_errno on invalid count/tag/rank; the combined
       result is reported once below. */
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif
    /* This COULD test for the contiguous homogeneous case first .... */
    /* Hand off to the device layer: source is our rank within the
       communicator, destination is translated to a global rank. */
    MPID_SendDatatype( comm_ptr, buf, count, dtype_ptr,
                       comm_ptr->local_rank, tag,
                       comm_ptr->send_context,
                       comm_ptr->lrank_to_grank[dest], &mpi_errno );
    MPIR_RETURN(comm_ptr, mpi_errno, myname );
}
/*@
    MPI_Issend - Starts a nonblocking synchronous send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED
@*/
int MPI_Issend( void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_SHANDLE *shandle;
    static char myname[] = "MPI_ISSEND";
    int mpi_errno = MPI_SUCCESS;

    /* Signals stay disabled for the whole routine; every return path
       below must call revertSignal() first. */
    disableSignal();
    TR_PUSH(myname);

    /* Validate communicator and datatype handles (macros may return on
       error — NOTE(review): confirm macro expansion). */
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* Allocate the send handle; MPIR_ALLOCFN presumably returns
       MPI_ERR_EXHAUSTED from this function on allocation failure. */
    MPIR_ALLOCFN(shandle,MPID_SendAlloc,
                 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( shandle, MPIR_SEND );
    /* Record the arguments so the operation can be cancelled/queried. */
    MPIR_REMEMBER_SEND( shandle, buf, count, datatype, dest, tag, comm_ptr);

    /* Sends to MPI_PROC_NULL complete immediately; the request is
       marked complete so a later Wait/Test succeeds trivially. */
    if (dest == MPI_PROC_NULL) {
        shandle->is_complete = 1;
        revertSignal();
        return MPI_SUCCESS;
    }
    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_IssendDatatype( comm_ptr, buf, count, dtype_ptr,
                         comm_ptr->local_rank, tag,
                         comm_ptr->send_context,
                         comm_ptr->lrank_to_grank[dest],
                         *request, &mpi_errno );
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    }
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
/*@
    MPI_Ssend_init - Builds a handle for a synchronous send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements sent (integer)
. datatype - type of each element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
@*/
int MPI_Ssend_init( void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_SSEND_INIT";
    MPIR_PSHANDLE *shandle;

    /* Signals stay disabled for the whole routine; every return path
       must call revertSignal() first. */
    disableSignal();
    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    /* Validate the datatype BEFORE allocating the persistent handle.
       The previous code validated it after MPIR_ALLOCFN, so an invalid
       datatype leaked the freshly allocated handle (and returned with
       signals still disabled).  This order also matches MPI_Recv_init
       and the other send routines in this file. */
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* This is IDENTICAL to the create_send code except for the send
       function */
    MPIR_ALLOCFN(shandle,MPID_PSendAlloc,
                 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( &(shandle->shandle), MPIR_PERSISTENT_SEND );

    /* Save the information about the operation, being careful with
       ref-counted items: both the datatype and the communicator must
       outlive this persistent request. */
    MPIR_REF_INCR(dtype_ptr);
    shandle->perm_datatype = dtype_ptr;
    shandle->perm_tag	   = tag;
    shandle->perm_dest	   = dest;
    shandle->perm_count	   = count;
    shandle->perm_buf	   = buf;
    MPIR_REF_INCR(comm_ptr);
    shandle->perm_comm	   = comm_ptr;
    shandle->active	   = 0;
    /* The device-level synchronous send routine invoked by MPI_Start. */
    shandle->send	   = MPID_IssendDatatype;
    /* dest of MPI_PROC_NULL handled in start */
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
/*@
    MPI_Recv_init - Builds a handle for a receive

Input Parameters:
+ buf - initial address of receive buffer (choice)
. count - number of elements received (integer)
. datatype - type of each element (handle)
. source - rank of source or 'MPI_ANY_SOURCE' (integer)
. tag - message tag or 'MPI_ANY_TAG' (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_RANK
.N MPI_ERR_TAG
.N MPI_ERR_COMM
.N MPI_ERR_EXHAUSTED

.seealso: MPI_Start, MPI_Request_free
@*/
int MPI_Recv_init( void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Request *request )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_RECV_INIT";
    MPIR_PRHANDLE *rhandle;

    /* Signals stay disabled for the whole routine; every return path
       must call revertSignal() first. */
    disableSignal();
    TR_PUSH(myname);

    /* Validate communicator and datatype handles before any allocation. */
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_RECV_TAG(tag);
    MPIR_TEST_RECV_RANK(comm_ptr,source);
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* Allocate the persistent receive handle (returns MPI_ERR_EXHAUSTED
       on failure via the macro). */
    MPIR_ALLOCFN(rhandle,MPID_PRecvAlloc,
                 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)rhandle;
    MPID_Request_init( &(rhandle->rhandle), MPIR_PERSISTENT_RECV );

    /* Save the information about the operation, being careful with
       ref-counted items: datatype and communicator must outlive the
       persistent request. */
    MPIR_REF_INCR(dtype_ptr);
    rhandle->perm_datatype = dtype_ptr;
    rhandle->perm_tag	   = tag;
    rhandle->perm_source   = source;
    rhandle->perm_count	   = count;
    rhandle->perm_buf	   = buf;
    MPIR_REF_INCR(comm_ptr);
    rhandle->perm_comm	   = comm_ptr;
    rhandle->active	   = 0;
    /* dest of MPI_PROC_NULL handled in start */
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
/*@
    MPI_Irsend - Starts a nonblocking ready send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED
@*/
EXPORT_MPI_API int MPI_Irsend( void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_SHANDLE *shandle;
    static char myname[] = "MPI_IRSEND";
    int mpi_errno = MPI_SUCCESS;

    /* NOTE(review): unlike MPI_Issend/MPI_Bsend in this file, this
       routine does not bracket itself with disableSignal()/
       revertSignal() — confirm whether that is intentional. */
    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Allocate and initialize the nonblocking-send handle. */
    MPIR_ALLOCFN(shandle,MPID_Send_alloc,
                 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( (MPI_Request)shandle, MPIR_SEND );

    /* we need the rank of dest in MPI_COMM_ALL in
       MPID_Gateway_SendCancelPacket(), so we save it here */
    shandle->partner_grank = comm_ptr->lrank_to_grank[dest];

    MPIR_REMEMBER_SEND(shandle, buf, count, datatype, dest, tag, comm_ptr);

    /* A ready-send to MPI_PROC_NULL completes immediately. */
    if (dest == MPI_PROC_NULL) {
        shandle->is_complete = 1;
        return MPI_SUCCESS;
    }

    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_IrsendDatatype( comm_ptr, buf, count, dtype_ptr,
                         comm_ptr->local_rank, tag,
                         comm_ptr->send_context,
                         comm_ptr->lrank_to_grank[dest],
                         *request, &mpi_errno, 1 );
    if (mpi_errno) return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    TR_POP;
    return MPI_SUCCESS;
}
/*@
    MPI_Allgather - Gathers data from all tasks and distribute it to all

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. sendcount - number of elements in send buffer (integer)
. sendtype - data type of send buffer elements (handle)
. recvcount - number of elements received from any process (integer)
. recvtype - data type of receive buffer elements (handle)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice)

Notes:
The MPI standard (1.0 and 1.1) says that

    The jth block of data sent from each proess is received by every process
    and placed in the jth block of the buffer 'recvbuf'.

This is misleading; a better description is

    The block of data sent from the jth process is received by every process
    and placed in the jth block of the buffer 'recvbuf'.

This text was suggested by Rajeev Thakur.

.N fortran
.N Errors
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Allgather ( void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr=0;
    struct MPIR_DATATYPE *stype_ptr=0, *rtype_ptr=0;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_ALLGATHER";

    TR_PUSH(myname);

    /* Convert the opaque handles; validated below. */
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);
    rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);

    /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr, myname);
    MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr, myname );
    MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr, myname );
    MPIR_TEST_COUNT(sendcount);
    MPIR_TEST_COUNT(recvcount);
    if (mpi_errno) return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Dispatch to the communicator's collective-operations table; the
       PUSH/POP pair scopes error handling to the collective call. */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Allgather( sendbuf, sendcount, stype_ptr,
                                              recvbuf, recvcount, rtype_ptr,
                                              comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
    MPI_Recv - Basic receive

Output Parameters:
+ buf - initial address of receive buffer (choice)
- status - status object (Status)

Input Parameters:
+ count - maximum number of elements in receive buffer (integer)
. datatype - datatype of each receive buffer element (handle)
. source - rank of source (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Notes:
The 'count' argument indicates the maximum length of a message; the actual
number can be determined with 'MPI_Get_count'.

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TYPE
.N MPI_ERR_COUNT
.N MPI_ERR_TAG
.N MPI_ERR_RANK
@*/
int MPI_Recv( void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_RECV";
    int mpi_errno = MPI_SUCCESS;

    /* Because this is a very common routine, we show how it can be
       optimized to be run "inline"; In addition, this lets us exploit
       features in the ADI to simplify the execution of blocking receive
       calls. */
    if (source != MPI_PROC_NULL) {
        disableSignal();

        comm_ptr = MPIR_GET_COMM_PTR(comm);
        MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

        dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
        MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
        MPIR_TEST_COUNT(count);
        MPIR_TEST_RECV_TAG(tag);
        MPIR_TEST_RECV_RANK(comm_ptr,source);
        if (mpi_errno) {
            revertSignal();
            return MPIR_ERROR(comm_ptr, mpi_errno, myname );
        }
#endif
        /* Blocking receive at the device layer; fills in status and
           mpi_errno. */
        MPID_RecvDatatype( comm_ptr, buf, count, dtype_ptr,
                           source, tag, comm_ptr->recv_context,
                           status, &mpi_errno );
        revertSignal();
        MPIR_RETURN(comm_ptr, mpi_errno, myname );
    }
    else {
        /* Receive from MPI_PROC_NULL: no data transfer; fill in an
           "empty" status as required. */
        if (status != MPI_STATUS_IGNORE) {
            /* See MPI standard section 3.11 */
            MPID_ZERO_STATUS_COUNT(status);
            status->MPI_SOURCE	   = MPI_PROC_NULL;
            status->MPI_TAG	   = MPI_ANY_TAG;
        }
    }
    return MPI_SUCCESS;
}
/*@
    MPI_Allreduce - Combines values from all processes and distribute the
    result back to all processes

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - data type of elements of send buffer (handle)
. op - operation (handle)
- comm - communicator (handle)

Output Parameter:
. recvbuf - starting address of receive buffer (choice)

.N fortran
.N collops

.N Errors
.N MPI_ERR_BUFFER
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_OP
.N MPI_ERR_COMM
@*/
EXPORT_MPI_API int MPI_Allreduce ( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_ALLREDUCE";
#ifdef RED_DEBUG
    /* Debug-only instrumentation: traces entry/exit and a call counter. */
    char zahl[10];
    static int callcount=0;
    callcount++;
    DBG("Entering Allreduce()");
    DBG(itoa(callcount,zahl,10));
#endif

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    MPIR_TEST_COUNT(count);
    /* MPI forbids sendbuf == recvbuf (use MPI_IN_PLACE in later MPI
       versions); the alias test catches that here. */
    MPIR_TEST_ALIAS(sendbuf,recvbuf);
    if (mpi_errno) {
#ifdef RED_DEBUG
        DBG("Leaving Allreduce Error No");
        DBG(itoa(mpi_errno,zahl,10));
#endif
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif
    MPIR_ERROR_PUSH(comm_ptr);
    /* Test for intercommunicator is done when collops is assigned */
    mpi_errno = comm_ptr->collops->Allreduce(sendbuf, recvbuf, count,
                                             dtype_ptr, op, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
#ifdef RED_DEBUG
    DBG("Leaving Allreduce");
#endif
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
    MPI_Gatherv - Gathers into specified locations from all processes in a
    group

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. sendcount - number of elements in send buffer (integer)
. sendtype - data type of send buffer elements (handle)
. recvcounts - integer array (of length group size) containing the number of
  elements that are received from each process (significant only at 'root')
. displs - integer array (of length group size).  Entry 'i' specifies the
  displacement relative to recvbuf at which to place the incoming data from
  process 'i' (significant only at root)
. recvtype - data type of recv buffer elements (significant only at 'root')
  (handle)
. root - rank of receiving process (integer)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice, significant only at 'root')

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Gatherv ( void *sendbuf, int sendcnt, MPI_Datatype sendtype, void *recvbuf, int *recvcnts, int *displs, MPI_Datatype recvtype, int root, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    int rank;
    struct MPIR_COMMUNICATOR *comm_ptr;
    /* rtype_ptr stays 0 on non-root ranks; only root dereferences it. */
    struct MPIR_DATATYPE *stype_ptr, *rtype_ptr = 0;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_GATHERV";

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr, myname);
    MPIR_TEST_COUNT(sendcnt);
    MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr, myname );
    if (mpi_errno) return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* rtype is significant only at root */
    (void) MPIR_Comm_rank ( comm_ptr, &rank );
    if (rank == root) {
        rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);
#ifndef MPIR_NO_ERROR_CHECKING
        MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr, myname );
        if (mpi_errno) return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif
    }

    /* Dispatch to the communicator's collective-operations table. */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Gatherv( sendbuf, sendcnt, stype_ptr,
                                            recvbuf, recvcnts, displs,
                                            rtype_ptr, root, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
    MPI_Irecv - Begins a nonblocking receive

Input Parameters:
+ buf - initial address of receive buffer (choice)
. count - number of elements in receive buffer (integer)
. datatype - datatype of each receive buffer element (handle)
. source - rank of source (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran
@*/
EXPORT_MPI_API int MPI_Irecv( void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_RHANDLE *rhandle;
    static char myname[] = "MPI_IRECV";
    int mpi_errno = MPI_SUCCESS;

    /* Validate communicator and datatype handles (macros may return on
       error — NOTE(review): confirm macro expansion). */
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_RECV_TAG(tag);
    MPIR_TEST_RECV_RANK(comm_ptr,source);
    if (mpi_errno) return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Allocate and initialize the receive handle. */
    MPIR_ALLOCFN(rhandle,MPID_Recv_alloc,comm_ptr,
                 MPI_ERR_EXHAUSTED,myname);
    MPID_Request_init ((MPI_Request)rhandle, MPIR_RECV );
    *request = (MPI_Request) rhandle;

    /* A receive from MPI_PROC_NULL completes immediately with an empty
       status (source MPI_PROC_NULL, tag MPI_ANY_TAG, count 0). */
    if (source == MPI_PROC_NULL) {
        rhandle->s.MPI_TAG    = MPI_ANY_TAG;
        rhandle->s.MPI_SOURCE = MPI_PROC_NULL;
        rhandle->s.count      = 0;
        rhandle->is_complete  = 1;
        return MPI_SUCCESS;
    }
    MPID_IrecvDatatype( comm_ptr, buf, count, dtype_ptr,
                        source, tag, comm_ptr->recv_context,
                        *request, &mpi_errno );
    if (mpi_errno) return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    return MPI_SUCCESS;
}
/*@
    MPI_Bcast - Broadcasts a message from the process with rank "root" to
    all other processes of the group.

Input/output Parameters:
+ buffer - starting address of buffer (choice)
. count - number of entries in buffer (integer)
. datatype - data type of buffer (handle)
. root - rank of broadcast root (integer)
- comm - communicator (handle)

Algorithm:
If the underlying device does not take responsibility, this function uses a
tree-like algorithm to broadcast the message to blocks of processes.  A
linear algorithm is then used to broadcast the message from the first
process in a block to all other processes.  'MPIR_BCAST_BLOCK_SIZE'
determines the size of blocks.  If this is set to 1, then this function is
equivalent to using a pure tree algorithm.  If it is set to the size of the
group or greater, it is a pure linear algorithm.  The value should be
adjusted to determine the most efficient value on different machines.

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
.N MPI_ERR_ROOT
@*/
EXPORT_MPI_API int MPI_Bcast ( void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_BCAST";
    MPIR_ERROR_DECL;

    /* NOTE: no trailing semicolon here in the original — TR_PUSH
       presumably supplies its own; preserved as-is. */
    TR_PUSH(myname)

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

    /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    /* If an intercomm broadcast, the root can also be MPI_ROOT or
       MPI_PROC_NULL */
    if (root < MPI_ROOT || root >= comm_ptr->np) {
        mpi_errno = MPIR_Err_setmsg( MPI_ERR_ROOT, MPIR_ERR_DEFAULT, myname,
                                     (char *)0, (char *)0, root );
    }
    MPIR_TEST_COUNT(count);
    if (mpi_errno) return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* See the overview in Collection Operations for why this is ok:
       a zero-count broadcast is a no-op on every rank. */
    if (count == 0) return MPI_SUCCESS;

    /* Dispatch to the communicator's collective-operations table. */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Bcast(buffer, count, dtype_ptr, root,
                                         comm_ptr);
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
    MPI_Bsend - Basic send with user-specified buffering

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (nonnegative integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Notes:
This send is provided as a convenience function; it allows the user to send
messages without worring about where they are buffered (because the user
`must` have provided buffer space with 'MPI_Buffer_attach').

In deciding how much buffer space to allocate, remember that the buffer
space is not available for reuse by subsequent 'MPI_Bsend's unless you are
certain that the message has been received (not just that it should have
been received).  For example, this code does not allocate enough buffer
space
.vb
    MPI_Buffer_attach( b, n*sizeof(double) + MPI_BSEND_OVERHEAD );
    for (i=0; i<m; i++) {
        MPI_Bsend( buf, n, MPI_DOUBLE, ... );
    }
.ve
because only enough buffer space is provided for a single send, and the
loop may start a second 'MPI_Bsend' before the first is done making use of
the buffer.

In C, you can force the messages to be delivered by
.vb
    MPI_Buffer_detach( &b, &n );
    MPI_Buffer_attach( b, n );
.ve
(The 'MPI_Buffer_detach' will not complete until all buffered messages are
delivered.)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_RANK
.N MPI_ERR_TAG

.seealso: MPI_Buffer_attach, MPI_Ibsend, MPI_Bsend_init
@*/
int MPI_Bsend( void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm )
{
    MPI_Request handle;
    MPI_Status  status;
    int         mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_BSEND";

    /* Signals stay disabled for the whole routine; every return path
       must call revertSignal() first. */
    disableSignal();

    TR_PUSH(myname);

    /* A send to MPI_PROC_NULL is a no-op; fall through to the common
       TR_POP/revertSignal/return tail. */
    if (dest != MPI_PROC_NULL)
    {
        /* We should let Ibsend find the errors, but
           we will soon add a special case for faster Bsend and we'll
           need these tests then */
        comm_ptr = MPIR_GET_COMM_PTR(comm);

#ifndef MPIR_NO_ERROR_CHECKING
        MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
        MPIR_TEST_COUNT(count);
        MPIR_TEST_SEND_TAG(tag);
        MPIR_TEST_SEND_RANK(comm_ptr,dest);
        if (mpi_errno) {
            revertSignal();
            return MPIR_ERROR(comm_ptr, mpi_errno, myname );
        }
#endif
        /* ? BsendDatatype?
        MPID_BsendContig( comm, buf, len, src_lrank, tag, context_id,
                          dest_grank, msgrep, &mpi_errno );
        if (!mpi_errno) return MPI_SUCCESS;
        if (mpi_errno != MPIR_ERR_MAY_BLOCK)
            return MPIR_ERROR( comm, mpi_errno, myname );
        */
        MPIR_ERROR_PUSH(comm_ptr);
        /* We don't use MPIR_CALL_POP so that we can free the handle */
        handle = MPI_REQUEST_NULL;
        if ((mpi_errno = MPI_Ibsend( buf, count, datatype, dest, tag, comm,
                                     &handle ))) {
            MPIR_ERROR_POP(comm_ptr);
            /* Free the partially-constructed request before reporting
               the error. */
            if (handle != MPI_REQUEST_NULL)
                MPID_SendFree( handle );
            revertSignal();
            return MPIR_ERROR(comm_ptr,mpi_errno,myname);
        }
        /* This Wait only completes the transfer of data into the
           buffer area.  The test/wait in util/bsendutil.c completes
           the actual transfer */
        MPIR_CALL_POP(MPI_Wait( &handle, &status ),comm_ptr,myname);
        MPIR_ERROR_POP(comm_ptr);
    }
    TR_POP;
    revertSignal();
    return mpi_errno;
}