/*@ MPI_Comm_dup - Duplicates an existing communicator with all its cached information Input Parameter: . comm - communicator (handle) Output Parameter: . newcomm - A new communicator over the same group as 'comm' but with a new context. See notes. (handle) Notes: This routine is used to create a new communicator that has a new communication context but contains the same group of processes as the input communicator. Since all MPI communication is performed within a communicator (specifies as the group of processes `plus` the context), this routine provides an effective way to create a private communicator for use by a software module or library. In particular, no library routine should use 'MPI_COMM_WORLD' as the communicator; instead, a duplicate of a user-specified communicator should always be used. For more information, see Using MPI, 2nd edition. Because this routine essentially produces a copy of a communicator, it also copies any attributes that have been defined on the input communicator, using the attribute copy function specified by the 'copy_function' argument to 'MPI_Keyval_create'. This is particularly useful for (a) attributes that describe some property of the group associated with the communicator, such as its interconnection topology and (b) communicators that are given back to the user; the attibutes in this case can track subsequent 'MPI_Comm_dup' operations on this communicator. 
.N fortran .N Errors .N MPI_SUCCESS .N MPI_ERR_COMM .N MPI_ERR_EXHAUSTED .seealso: MPI_Comm_free, MPI_Keyval_create, MPI_Attr_set, MPI_Attr_delete @*/ int MPI_Comm_dup ( MPI_Comm comm, MPI_Comm *comm_out ) { struct MPIR_COMMUNICATOR *new_comm, *comm_ptr; int mpi_errno; MPIR_ERROR_DECL; static char myname[] = "MPI_COMM_DUP"; disableSignal(); TR_PUSH(myname); comm_ptr = MPIR_GET_COMM_PTR(comm); /* Check for non-null communicator */ if ( MPIR_TEST_COMM_NOTOK(comm,comm_ptr) ) { (*comm_out) = MPI_COMM_NULL; revertSignal(); return MPIR_ERROR( comm_ptr, MPI_ERR_COMM, myname); } /* Duplicate the communicator */ MPIR_ALLOC(new_comm,NEW(struct MPIR_COMMUNICATOR),comm_ptr,MPI_ERR_EXHAUSTED, "MPI_COMM_DUP" ); MPIR_Comm_init( new_comm, comm_ptr, comm_ptr->comm_type ); MPIR_Group_dup ( comm_ptr->group, &(new_comm->group) ); MPIR_Group_dup ( comm_ptr->local_group, &(new_comm->local_group) ); new_comm->local_rank = new_comm->local_group->local_rank; new_comm->lrank_to_grank = new_comm->group->lrank_to_grank; new_comm->np = new_comm->group->np; new_comm->comm_name = 0; DBG(FPRINTF(OUTFILE,"Dup:About to copy attr for comm %ld\n",(long)comm);) /* Also free at least some of the parts of the commuicator */ if ((mpi_errno = MPIR_Attr_copy ( comm_ptr, new_comm ) )) {
/*@
  MPI_Get_count - Gets the number of "top level" elements

Input Parameters:
+ status - return status of receive operation (Status)
- datatype - datatype of each receive buffer element (handle)

Output Parameter:
. count - number of received elements (integer)

Notes:
If the size of the datatype is zero, this routine will return a count of
zero.  If the amount of data in 'status' is not an exact multiple of the
size of 'datatype' (so that 'count' would not be integral), a 'count' of
'MPI_UNDEFINED' is returned instead.

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
@*/
int MPI_Get_count( MPI_Status *status, MPI_Datatype datatype, int *count )
{
    static char myname[] = "MPI_GET_COUNT";
    int mpi_errno = MPI_SUCCESS;
#ifndef MPID_HAS_GET_COUNT
    struct MPIR_DATATYPE *dtype_ptr;
#endif

    TR_PUSH(myname);
#ifdef MPID_HAS_GET_COUNT
    /* The device provides its own count computation */
    mpi_errno = MPID_Get_count( status, datatype, count );
#else
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,MPIR_COMM_WORLD,myname);

    if (dtype_ptr->size == 0) {
	/* Zero-size type: any received data makes the count ambiguous;
	   an empty message counts as zero elements */
	*count = (status->count > 0) ? MPI_UNDEFINED : 0;
    }
    else if ((status->count % dtype_ptr->size) != 0) {
	/* Byte count is not a whole number of elements */
	*count = MPI_UNDEFINED;
    }
    else {
	*count = status->count / dtype_ptr->size;
    }
#endif
    TR_POP;
    MPIR_RETURN( MPIR_COMM_WORLD, mpi_errno, myname );
}
/*@
  MPI_Reduce_scatter - Combines values and scatters the results

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. recvcounts - integer array specifying the number of elements in result
  distributed to each process.  Array must be identical on all calling
  processes.
. datatype - data type of elements of input buffer (handle)
. op - operation (handle)
- comm - communicator (handle)

Output Parameter:
. recvbuf - starting address of receive buffer (choice)

.N fortran
.N collops

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
.N MPI_ERR_OP
.N MPI_ERR_BUFFER_ALIAS
@*/
EXPORT_MPI_API int MPI_Reduce_scatter ( void *sendbuf, void *recvbuf,
					int *recvcnts, MPI_Datatype datatype,
					MPI_Op op, MPI_Comm comm )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_REDUCE_SCATTER";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);

    /* Resolve the opaque handles into internal structure pointers */
    comm_ptr  = MPIR_GET_COMM_PTR(comm);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

#ifndef MPIR_NO_ERROR_CHECKING
    /* Validate the handles and reject aliased send/receive buffers */
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    MPIR_TEST_ALIAS(recvbuf,sendbuf);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Dispatch through the collective-operations table attached to the
       communicator, bracketed by the error-handler push/pop */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Reduce_scatter(sendbuf, recvbuf, recvcnts,
						  dtype_ptr, op, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@C
  MPI_Attr_get - Retrieves attribute value by key

Input Parameters:
+ comm - communicator to which attribute is attached (handle)
- keyval - key value (integer)

Output Parameters:
+ attr_value - attribute value, unless 'flag' = false
- flag - true if an attribute value was extracted; false if no attribute
  is associated with the key

Notes:
Attributes must be extracted from the same language as they were inserted
in with 'MPI_ATTR_PUT'.  The notes for C and Fortran below explain why.

Notes for C:
Even though the 'attr_value' argument is declared as 'void *', it is
really the address of a void pointer.  See the rationale in the standard
for more details.

.N fortran
The 'attr_value' in Fortran is a pointer to a Fortran integer, not a
pointer to a 'void *'.

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_KEYVAL
@*/
int MPI_Attr_get ( MPI_Comm comm, int keyval, void *attr_value, int *flag )
{
    MPIR_HBT_node *node;
    struct MPIR_COMMUNICATOR *comm_ptr;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_ATTR_GET";

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    /* An invalid keyval can never match a cached attribute */
    if (keyval == MPI_KEYVAL_INVALID) {
	mpi_errno = MPI_ERR_OTHER;
	return MPIR_ERROR(comm_ptr, mpi_errno, myname);
    }

    /* Attributes are cached per-communicator in a balanced tree */
    MPIR_HBT_lookup(comm_ptr->attr_cache, keyval, &node);
    if (node != (MPIR_HBT_node *)0) {
	*flag = 1;
	/* Give the device a chance to refresh the cached value */
	MPID_ATTR_GET(comm_ptr,keyval,&node->value);
	*(void **)attr_value = node->value;
    }
    else {
	*flag = 0;
	*(void **)attr_value = (void *)0;
    }
    TR_POP;
    return(mpi_errno);
}
/*@
  MPI_Iprobe - Nonblocking test for a message

Input Parameters:
+ source - source rank, or 'MPI_ANY_SOURCE' (integer)
. tag - tag value or 'MPI_ANY_TAG' (integer)
- comm - communicator (handle)

Output Parameters:
+ flag - true if a matching message is available (logical)
- status - status object (Status)

Notes:
A probe with 'source' equal to 'MPI_PROC_NULL' succeeds immediately,
returning 'flag' = true and an empty status ('MPI_SOURCE' =
'MPI_PROC_NULL', 'MPI_TAG' = 'MPI_ANY_TAG', zero count).

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TAG
.N MPI_ERR_RANK
@*/
EXPORT_MPI_API int MPI_Iprobe( int source, int tag, MPI_Comm comm, int *flag,
			       MPI_Status *status )
{
    MPI_Status probe_status;
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_IPROBE";

    TR_PUSH(myname);

    /* Substitute a local status object so the device always has one to
       fill in */
    if (status == MPI_STATUS_IGNORE) status = &probe_status;

    comm_ptr = MPIR_GET_COMM_PTR(comm);
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_RECV_TAG(tag);
    MPIR_TEST_RECV_RANK(comm_ptr,source);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    if (source == MPI_PROC_NULL) {
	/* BUG FIX: per the MPI standard ("Null Processes"), a probe on
	   MPI_PROC_NULL must succeed immediately with an empty status.
	   The previous code returned without setting *flag at all,
	   leaving the caller with an indeterminate result; it also
	   skipped TR_POP, unbalancing the trace stack. */
	*flag = 1;
	status->MPI_SOURCE = MPI_PROC_NULL;
	status->MPI_TAG = MPI_ANY_TAG;
	MPID_ZERO_STATUS_COUNT(status);
	TR_POP;
	return MPI_SUCCESS;
    }
    /* Ask the device whether a matching message is pending */
    MPID_Iprobe( comm_ptr, tag, comm_ptr->recv_context, source, flag,
		 &mpi_errno, status );
    TR_POP;
    MPIR_RETURN( comm_ptr, mpi_errno, myname );
}
/*@ MPI_Finalize - Terminates MPI execution environment Notes: All processes must call this routine before exiting. The number of processes running `after` this routine is called is undefined; it is best not to perform much more than a 'return rc' after calling 'MPI_Finalize'. .N fortran @*/ int MPI_Finalize() { TR_PUSH("MPI_Finalize"); DBG(FPRINTF( stderr, "Entering system finalize\n" ); fflush(stderr);) /* Complete any remaining buffered sends first */ { void *a; int b;
/*@
  MPI_Attr_delete - Deletes attribute value associated with a key

Input Parameters:
+ comm - communicator to which attribute is attached (handle)
- keyval - The key value of the deleted attribute (integer)

.N fortran

.N Errors
.N MPI_ERR_COMM
.N MPI_ERR_PERM_KEY
@*/
EXPORT_MPI_API int MPI_Attr_delete ( MPI_Comm comm, int keyval )
{
  MPIR_HBT_node *attr;
  MPIR_Attr_key *attr_key;
  int mpi_errno = MPI_SUCCESS;
  struct MPIR_COMMUNICATOR *comm_ptr;
  static char myname[] = "MPI_ATTR_DELETE";
  TR_PUSH(myname);
  comm_ptr = MPIR_GET_COMM_PTR(comm);
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
  /* Reject an invalid keyval before touching the attribute cache.
     (The && idiom both tests the condition and sets mpi_errno.) */
  if ( ( (keyval == MPI_KEYVAL_INVALID) && (mpi_errno = MPI_ERR_OTHER) ) )
	return MPIR_ERROR(comm_ptr, mpi_errno, myname);
  attr_key = MPIR_GET_KEYVAL_PTR( keyval );
  MPIR_TEST_MPI_KEYVAL(keyval,attr_key,comm_ptr,myname);
  /* Predefined (permanent) keys on MPI_COMM_WORLD may not be deleted */
  if (comm == MPI_COMM_WORLD && attr_key->permanent)
	return MPIR_ERROR( comm_ptr,
	     MPIR_ERRCLASS_TO_CODE(MPI_ERR_ARG,MPIR_ERR_PERM_KEY),myname );
  MPIR_HBT_lookup(comm_ptr->attr_cache, keyval, &attr);
  if (attr != (MPIR_HBT_node *)0) {
	/* Run the user delete callback first; the attribute is only
	   removed from the cache if the callback succeeds */
	if ( attr_key->delete_fn.c_delete_fn ) {
	    if (attr_key->FortranCalling) {
		/* Fortran callbacks pass the value by reference as an
		   INTEGER; narrow through MPI_Aint and write any change
		   back into the cached value */
		MPI_Aint invall = (MPI_Aint)attr->value;
		int inval = (int)invall;
		(*attr_key->delete_fn.f77_delete_fn)(comm, &keyval, &inval,
					attr_key->extra_state, &mpi_errno );
		attr->value = (void *)(MPI_Aint)inval;
	    }
	    else
		mpi_errno = (*attr_key->delete_fn.c_delete_fn)(comm, keyval,
					attr->value,
					attr_key->extra_state );
	    if (mpi_errno)
		return MPIR_ERROR( comm_ptr, mpi_errno, myname );
	}
	MPIR_HBT_delete(comm_ptr->attr_cache, keyval, &attr);
	/* This communicator now holds one less reference to the keyval */
	MPIR_REF_DECR(attr_key);
	/* MPIR_HBT_delete hands back the removed node for us to free */
	if ( attr != (MPIR_HBT_node *)0 )
	    (void) MPIR_HBT_free_node ( attr );
  }
  else {
	/* No attribute with this key is attached to the communicator */
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_NOKEY, myname,
				 "Key not in communicator",
				 "Key %d not in communicator", keyval );
	return MPIR_ERROR( comm_ptr, mpi_errno, myname );
  }
  TR_POP;
  return(mpi_errno);
}
/*@
  MPI_Type_indexed - Creates an indexed datatype

Input Parameters:
+ count - number of blocks -- also number of entries in indices and
  blocklens
. blocklens - number of elements in each block (array of nonnegative
  integers)
. indices - displacement of each block in multiples of old_type (array of
  integers)
- old_type - old datatype (handle)

Output Parameter:
. newtype - new datatype (handle)

.N fortran

The indices are displacements, and are based on a zero origin.  A common
error is to do something like to following
.vb
    integer a(100)
    integer blens(10), indices(10)
    do i=1,10
         blens(i) = 1
10       indices(i) = 1 + (i-1)*10
    call MPI_TYPE_INDEXED(10,blens,indices,MPI_INTEGER,newtype,ierr)
    call MPI_TYPE_COMMIT(newtype,ierr)
    call MPI_SEND(a,1,newtype,...)
.ve
expecting this to send 'a(1),a(11),...' because the indices have values
'1,11,...'.  Because these are `displacements` from the beginning of 'a',
it actually sends 'a(1+1),a(1+11),...'.

If you wish to consider the displacements as indices into a Fortran array,
consider declaring the Fortran array with a zero origin
.vb
    integer a(0:99)
.ve

.N Errors
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
.N MPI_ERR_EXHAUSTED
@*/
EXPORT_MPI_API int MPI_Type_indexed( int count, int blocklens[],
				     int indices[], MPI_Datatype old_type,
				     MPI_Datatype *newtype )
{
    MPI_Aint *hindices;
    int i, mpi_errno = MPI_SUCCESS;
    int total_count;
    struct MPIR_DATATYPE *old_dtype_ptr;
    static char myname[] = "MPI_TYPE_INDEXED";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);

    /* Check for bad arguments: the base type must be valid and must not
       be one of the pseudo-types MPI_UB / MPI_LB.  (The && idiom both
       tests the condition and sets mpi_errno.) */
    old_dtype_ptr = MPIR_GET_DTYPE_PTR(old_type);
    MPIR_TEST_DTYPE(old_type,old_dtype_ptr,MPIR_COMM_WORLD,myname);
    if ( ( (count < 0) && (mpi_errno = MPI_ERR_COUNT) ) ||
	 ( (old_dtype_ptr->dte_type == MPIR_UB) &&
	   (mpi_errno = MPI_ERR_TYPE) ) ||
	 ( (old_dtype_ptr->dte_type == MPIR_LB) &&
	   (mpi_errno = MPI_ERR_TYPE) ) )
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno,myname);

    /* Are we making a null datatype?  Sum the block lengths, rejecting
       any negative entry with an error that names the offending index */
    total_count = 0;
    for (i=0; i<count; i++) {
	total_count += blocklens[i];
	if (blocklens[i] < 0) {
	    mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_ARG_ARRAY_VAL,
					 myname, (char *)0, (char *)0,
					 "blocklens", i, blocklens[i] );
	    return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno,myname);
	}
    }
    if (total_count == 0) {
	/* An empty type: represent it as a zero-length contiguous type */
	return MPI_Type_contiguous( 0, MPI_INT, newtype );
    }

    /* Generate a call to MPI_Type_hindexed instead.  This means
       allocating a temporary displacement array, multiplying all
       displacements by extent(old_type), and using that */
    MPIR_ALLOC(hindices,(MPI_Aint *)MALLOC(count*sizeof(MPI_Aint)),
	       MPIR_COMM_WORLD,MPI_ERR_EXHAUSTED,myname);
    for (i=0; i<count; i++) {
	hindices[i] = (MPI_Aint)indices[i] * old_dtype_ptr->extent;
    }
    MPIR_ERROR_PUSH(MPIR_COMM_WORLD);
    mpi_errno = MPI_Type_hindexed( count, blocklens, hindices, old_type,
				   newtype );
    MPIR_ERROR_POP(MPIR_COMM_WORLD);
    FREE(hindices);
    TR_POP;
    MPIR_RETURN(MPIR_COMM_WORLD,mpi_errno, myname);
}
/*@
  MPI_Issend - Starts a nonblocking synchronous send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED
@*/
int MPI_Issend( void *buf, int count, MPI_Datatype datatype, int dest,
		int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_SHANDLE *send_handle;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_ISSEND";

    /* Signals stay disabled until every exit path calls revertSignal() */
    disableSignal();
    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) {
	revertSignal();
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* Allocate a send handle and hand it back as the request */
    MPIR_ALLOCFN(send_handle,MPID_SendAlloc,
		 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)send_handle;
    MPID_Request_init( send_handle, MPIR_SEND );
    MPIR_REMEMBER_SEND( send_handle, buf, count, datatype, dest, tag,
			comm_ptr);

    /* A send to MPI_PROC_NULL completes at once with no data transfer */
    if (dest == MPI_PROC_NULL) {
	send_handle->is_complete = 1;
	revertSignal();
	return MPI_SUCCESS;
    }

    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_IssendDatatype( comm_ptr, buf, count, dtype_ptr,
			 comm_ptr->local_rank, tag,
			 comm_ptr->send_context,
			 comm_ptr->lrank_to_grank[dest],
			 *request, &mpi_errno );
    if (mpi_errno) {
	revertSignal();
	return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    }
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
/*@
  MPI_Ssend_init - Builds a handle for a synchronous send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements sent (integer)
. datatype - type of each element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
@*/
int MPI_Ssend_init( void *buf, int count, MPI_Datatype datatype, int dest,
		    int tag, MPI_Comm comm, MPI_Request *request )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_SSEND_INIT";
    MPIR_PSHANDLE *shandle;

    disableSignal();
    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    /* BUG FIX: validate the datatype BEFORE allocating the persistent
       handle, matching the order used by MPI_Recv_init.  Previously the
       datatype was only checked after MPIR_ALLOCFN, so an invalid
       datatype leaked the freshly allocated handle on the error path. */
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) {
	revertSignal();
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* This is IDENTICAL to the create_send code except for the send
       function installed below */
    MPIR_ALLOCFN(shandle,MPID_PSendAlloc,
		 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( &(shandle->shandle), MPIR_PERSISTENT_SEND );

    /* Save the information about the operation, being careful with
       ref-counted items */
    MPIR_REF_INCR(dtype_ptr);
    shandle->perm_datatype = dtype_ptr;
    shandle->perm_tag      = tag;
    shandle->perm_dest     = dest;
    shandle->perm_count    = count;
    shandle->perm_buf      = buf;
    MPIR_REF_INCR(comm_ptr);
    shandle->perm_comm     = comm_ptr;
    shandle->active        = 0;
    /* The synchronous-mode send is performed when the request is started */
    shandle->send          = MPID_IssendDatatype;
    /* dest of MPI_PROC_NULL handled in start */
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
/*@
  MPI_Recv_init - Builds a handle for a receive

Input Parameters:
+ buf - initial address of receive buffer (choice)
. count - number of elements received (integer)
. datatype - type of each element (handle)
. source - rank of source or 'MPI_ANY_SOURCE' (integer)
. tag - message tag or 'MPI_ANY_TAG' (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_RANK
.N MPI_ERR_TAG
.N MPI_ERR_COMM
.N MPI_ERR_EXHAUSTED

.seealso: MPI_Start, MPI_Request_free
@*/
int MPI_Recv_init( void *buf, int count, MPI_Datatype datatype, int source,
		   int tag, MPI_Comm comm, MPI_Request *request )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_RECV_INIT";
    MPIR_PRHANDLE *recv_handle;

    /* Signals stay disabled until every exit path calls revertSignal() */
    disableSignal();
    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_RECV_TAG(tag);
    MPIR_TEST_RECV_RANK(comm_ptr,source);
    if (mpi_errno) {
	revertSignal();
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* Allocate the persistent receive handle; the handle IS the request */
    MPIR_ALLOCFN(recv_handle,MPID_PRecvAlloc,
		 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)recv_handle;
    MPID_Request_init( &(recv_handle->rhandle), MPIR_PERSISTENT_RECV );

    /* Record the operation, taking references on the ref-counted
       datatype and communicator so they outlive the caller's handles */
    MPIR_REF_INCR(dtype_ptr);
    recv_handle->perm_datatype = dtype_ptr;
    recv_handle->perm_tag      = tag;
    recv_handle->perm_source   = source;
    recv_handle->perm_count    = count;
    recv_handle->perm_buf      = buf;
    MPIR_REF_INCR(comm_ptr);
    recv_handle->perm_comm     = comm_ptr;
    recv_handle->active        = 0;
    /* source of MPI_PROC_NULL is handled when the request is started */
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
/*@
  MPI_Irsend - Starts a nonblocking ready send

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Output Parameter:
. request - communication request (handle)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED
@*/
EXPORT_MPI_API int MPI_Irsend( void *buf, int count, MPI_Datatype datatype,
			       int dest, int tag, MPI_Comm comm,
			       MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_SHANDLE *shandle;
    static char myname[] = "MPI_IRSEND";
    int mpi_errno = MPI_SUCCESS;

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Allocate a send handle; the handle doubles as the request */
    MPIR_ALLOCFN(shandle,MPID_Send_alloc,
		 comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( (MPI_Request)shandle, MPIR_SEND );
    /* we need the rank of dest in MPI_COMM_ALL in
       MPID_Gateway_SendCancelPacket(), so we save it here */
    shandle->partner_grank = comm_ptr->lrank_to_grank[dest];
    MPIR_REMEMBER_SEND(shandle, buf, count, datatype, dest, tag, comm_ptr);

    /* A ready send to MPI_PROC_NULL completes immediately */
    if (dest == MPI_PROC_NULL) {
	shandle->is_complete = 1;
	return MPI_SUCCESS;
    }

    /* This COULD test for the contiguous homogeneous case first ....
       The trailing 1 is presumably a ready-mode indicator for the
       device -- TODO confirm against MPID_IrsendDatatype's prototype */
    MPID_IrsendDatatype( comm_ptr, buf, count, dtype_ptr,
			 comm_ptr->local_rank, tag,
			 comm_ptr->send_context,
			 comm_ptr->lrank_to_grank[dest],
			 *request, &mpi_errno, 1 );
    if (mpi_errno)
	return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    TR_POP;
    return MPI_SUCCESS;
}
/*@
  MPI_Graph_get - Retrieves graph topology information associated with a
                  communicator

Input Parameters:
+ comm - communicator with graph structure (handle)
. maxindex - length of vector 'index' in the calling program (integer)
- maxedges - length of vector 'edges' in the calling program (integer)

Output Parameters:
+ index - array of integers containing the graph structure (for details
  see the definition of 'MPI_GRAPH_CREATE')
- edges - array of integers containing the graph structure

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Graph_get ( MPI_Comm comm, int maxindex, int maxedges,
		    int *index, int *edges )
{
    int i, limit, flag;
    int mpi_errno = MPI_SUCCESS;
    MPIR_TOPOLOGY *topo;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_GRAPH_GET";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_ARG(index);
    MPIR_TEST_ARG(edges);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* The topology is stored as a cached attribute of the communicator */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = MPI_Attr_get ( comm, MPIR_TOPOLOGY_KEYVAL, (void **)&topo,
			       &flag );
    MPIR_ERROR_POP(comm_ptr);

    /* The attribute must be present and must describe a graph; topo is
       only dereferenced once flag confirms it is valid */
    if ( (flag != 1) || (topo->type != MPI_GRAPH) ) {
	mpi_errno = MPI_ERR_TOPOLOGY;
	return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    }

    /* Copy out as much of the index array as the caller has room for */
    if ( index != (int *)0 ) {
	limit = (maxindex < topo->graph.nnodes) ? maxindex
						: topo->graph.nnodes;
	for ( i=0; i<limit; i++ )
	    index[i] = topo->graph.index[i];
    }
    /* Likewise for the edge list */
    if ( edges != (int *)0 ) {
	limit = (maxedges < topo->graph.nedges) ? maxedges
						: topo->graph.nedges;
	for ( i=0; i<limit; i++ )
	    edges[i] = topo->graph.edges[i];
    }
    TR_POP;
    return (mpi_errno);
}
/*@
  MPI_Allreduce - Combines values from all processes and distribute the
                  result back to all processes

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. count - number of elements in send buffer (integer)
. datatype - data type of elements of send buffer (handle)
. op - operation (handle)
- comm - communicator (handle)

Output Parameter:
. recvbuf - starting address of receive buffer (choice)

.N fortran
.N collops

.N Errors
.N MPI_ERR_BUFFER
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_OP
.N MPI_ERR_COMM
@*/
EXPORT_MPI_API int MPI_Allreduce ( void *sendbuf, void *recvbuf, int count,
				   MPI_Datatype datatype, MPI_Op op,
				   MPI_Comm comm )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_ALLREDUCE";
    MPIR_ERROR_DECL;
#ifdef RED_DEBUG
    char zahl[10];
    static int callcount=0;
    callcount++;
    DBG("Entering Allreduce()");
    DBG(itoa(callcount,zahl,10));
#endif

    TR_PUSH(myname);

    /* Resolve the opaque handles into internal structure pointers */
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    MPIR_TEST_COUNT(count);
    MPIR_TEST_ALIAS(sendbuf,recvbuf);
    if (mpi_errno) {
#ifdef RED_DEBUG
	DBG("Leaving Allreduce Error No");
	DBG(itoa(mpi_errno,zahl,10));
#endif
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    MPIR_ERROR_PUSH(comm_ptr);
    /* Test for intercommunicator is done when collops is assigned */
    mpi_errno = comm_ptr->collops->Allreduce(sendbuf, recvbuf, count,
					     dtype_ptr, op, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
#ifdef RED_DEBUG
    DBG("Leaving Allreduce");
#endif
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
  MPI_Graphdims_get - Retrieves graph topology information associated
                      with a communicator

Input Parameters:
. comm - communicator for group with graph structure (handle)

Output Parameters:
+ nnodes - number of nodes in graph (integer)
- nedges - number of edges in graph (integer)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
EXPORT_MPI_API int MPI_Graphdims_get ( MPI_Comm comm, int *nnodes,
				       int *nedges )
{
    int mpi_errno = MPI_SUCCESS, flag;
    int is_graph;
    MPIR_TOPOLOGY *topo;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_GRAPHDIMS_GET";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_ARG(nnodes);
    MPIR_TEST_ARG(nedges);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Fetch the cached topology attribute, if any */
    MPIR_ERROR_PUSH( comm_ptr );
    mpi_errno = MPI_Attr_get ( comm, MPIR_TOPOLOGY_KEYVAL, (void **)&topo,
			       &flag );
    MPIR_ERROR_POP( comm_ptr );
    if (mpi_errno) {
	return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    }

    /* Short-circuit on flag so topo is only dereferenced when the
       attribute was actually found */
    is_graph = (flag == 1) && (topo->type == MPI_GRAPH);

    /* A non-graph topology reports MPI_UNDEFINED for both dimensions */
    if ( nnodes != (int *)0 )
	*nnodes = is_graph ? topo->graph.nnodes : MPI_UNDEFINED;
    if ( nedges != (int *)0 )
	*nedges = is_graph ? topo->graph.nedges : MPI_UNDEFINED;

    TR_POP;
    return (MPI_SUCCESS);
}
/*@
  MPI_Gatherv - Gathers into specified locations from all processes in a
                group

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. sendcount - number of elements in send buffer (integer)
. sendtype - data type of send buffer elements (handle)
. recvcounts - integer array (of length group size) containing the number
  of elements that are received from each process (significant only at
  'root')
. displs - integer array (of length group size).  Entry 'i' specifies the
  displacement relative to recvbuf at which to place the incoming data
  from process 'i' (significant only at root)
. recvtype - data type of recv buffer elements (significant only at
  'root') (handle)
. root - rank of receiving process (integer)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice, significant only at
  'root')

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Gatherv ( void *sendbuf, int sendcnt, MPI_Datatype sendtype,
		  void *recvbuf, int *recvcnts, int *displs,
		  MPI_Datatype recvtype, int root, MPI_Comm comm )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *stype_ptr, *rtype_ptr = 0;
    int my_rank;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_GATHERV";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr, myname);
    MPIR_TEST_COUNT(sendcnt);
    MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr, myname );
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* The receive type is significant (and therefore validated) only at
       the root; non-root ranks pass a null rtype_ptr to the device */
    (void) MPIR_Comm_rank ( comm_ptr, &my_rank );
    if (my_rank == root) {
	rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);
#ifndef MPIR_NO_ERROR_CHECKING
	MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr, myname );
	if (mpi_errno)
	    return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif
    }

    /* Dispatch to the collective-operations table for this communicator */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Gatherv( sendbuf, sendcnt, stype_ptr,
					    recvbuf, recvcnts, displs,
					    rtype_ptr, root, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
  MPI_Type_size - Return the number of bytes occupied by entries in the
                  datatype

Input Parameters:
. datatype - datatype (handle)

Output Parameter:
. size - datatype size (integer)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_size ( MPI_Datatype datatype, int *size )
{
    struct MPIR_DATATYPE *dtype_ptr;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_TYPE_SIZE";

    TR_PUSH(myname);

    /* The output pointer must be valid before anything is stored there */
    MPIR_TEST_ARG(size);
    if (mpi_errno)
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,MPIR_COMM_WORLD,myname);

    /* Report the cached size of the datatype */
    *size = (int)(dtype_ptr->size);
    TR_POP;
    return (MPI_SUCCESS);
}
/*@
  MPI_Type_ub - Returns the upper bound of a datatype

Input Parameters:
. datatype - datatype (handle)

Output Parameter:
. displacement - displacement of upper bound from origin, in bytes
  (integer)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TYPE
.N MPI_ERR_ARG
@*/
int MPI_Type_ub ( MPI_Datatype datatype, MPI_Aint *displacement )
{
    struct MPIR_DATATYPE *dtype_ptr;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_TYPE_UB";

    TR_PUSH(myname);

    /* The output pointer must be valid before anything is stored there */
    MPIR_TEST_ARG(displacement);
    if (mpi_errno)
	return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname );

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,MPIR_COMM_WORLD, myname);

    /* Report the cached upper bound of the datatype */
    *displacement = dtype_ptr->ub;
    TR_POP;
    return (MPI_SUCCESS);
}
/*@ MPI_Buffer_attach - Attaches a user-defined buffer for sending Input Parameters: + buffer - initial buffer address (choice) - size - buffer size, in bytes (integer) Notes: The size given should be the sum of the sizes of all outstanding Bsends that you intend to have, plus a few hundred bytes for each Bsend that you do. For the purposes of calculating size, you should use 'MPI_Pack_size'. In other words, in the code .vb MPI_Buffer_attach( buffer, size ); MPI_Bsend( ..., count=20, datatype=type1, ... ); ... MPI_Bsend( ..., count=40, datatype=type2, ... ); .ve the value of 'size' in the MPI_Buffer_attach call should be greater than the value computed by .vb MPI_Pack_size( 20, type1, comm, &s1 ); MPI_Pack_size( 40, type2, comm, &s2 ); size = s1 + s2 + 2 * MPI_BSEND_OVERHEAD; .ve The 'MPI_BSEND_OVERHEAD' gives the maximum amount of space that may be used in the buffer for use by the BSEND routines in using the buffer. This value is in 'mpi.h' (for C) and 'mpif.h' (for Fortran). .N fortran .N Errors .N MPI_SUCCESS .N MPI_ERR_BUFFER .N MPI_ERR_INTERN .seealso: MPI_Buffer_detach, MPI_Bsend @*/ EXPORT_MPI_API int MPI_Buffer_attach( void *buffer, int size ) { int mpi_errno; static char myname[] = "MPI_BUFFER_ATTACH"; TR_PUSH(myname); #ifndef MPIR_NO_ERROR_CHECKING if (size < 0) { mpi_errno = MPIR_Err_setmsg( MPI_ERR_BUFFER, MPIR_ERR_BUFFER_SIZE, myname, (char *)0, (char *)0, size ); return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname ); } #endif if ((mpi_errno = MPIR_BsendInitBuffer( buffer, size ))) return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, myname ); TR_POP; return MPI_SUCCESS; }
/*@
  MPI_Comm_remote_group - Accesses the remote group associated with the
                          given inter-communicator

Input Parameter:
. comm - Communicator (must be intercommunicator)

Output Parameter:
. group - remote group of communicator

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
@*/
int MPI_Comm_remote_group ( MPI_Comm comm, MPI_Group *group )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_GROUP *remote_grp;
    int is_inter;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_COMM_REMOTE_GROUP";

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname );

    /* A remote group exists only for an intercommunicator */
    MPI_Comm_test_inter ( comm, &is_inter );
    if (!is_inter)
	return MPIR_ERROR(comm_ptr,
		MPIR_ERRCLASS_TO_CODE(MPI_ERR_COMM,MPIR_ERR_COMM_INTRA),
		myname);

    /* NOTE(review): the group handed back is comm_ptr->group; for an
       intercommunicator this field appears to hold the remote group
       (the local group lives in comm_ptr->local_group) -- confirm
       against the MPIR_COMMUNICATOR definition.  Duplicating gives the
       caller its own reference. */
    MPIR_Group_dup( comm_ptr->group, &remote_grp );
    *group = remote_grp->self;
    TR_POP;
    return (MPI_SUCCESS);
}
/*@
MPI_Bcast - Broadcasts a message from the process with rank "root" to
            all other processes of the group.

Input/output Parameters:
+ buffer - starting address of buffer (choice)
. count - number of entries in buffer (integer)
. datatype - data type of buffer (handle)
. root - rank of broadcast root (integer)
- comm - communicator (handle)

Algorithm:
If the underlying device does not take responsibility, this function
uses a tree-like algorithm to broadcast the message to blocks of
processes.  A linear algorithm is then used to broadcast the message
from the first process in a block to all other processes.
'MPIR_BCAST_BLOCK_SIZE' determines the size of blocks.  If this is set
to 1, then this function is equivalent to using a pure tree algorithm.
If it is set to the size of the group or greater, it is a pure linear
algorithm.  The value should be adjusted to determine the most efficient
value on different machines.

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
.N MPI_ERR_ROOT
@*/
EXPORT_MPI_API int MPI_Bcast ( void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE     *dtype_ptr;
    static char myname[] = "MPI_BCAST";
    MPIR_ERROR_DECL;

    TR_PUSH(myname)

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

    /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    /* NOTE(review): these macros set mpi_errno and may return on error —
       expansion defined elsewhere */
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    /* If an intercomm broadcast, the root can also be MPI_ROOT or
       MPI_PROC_NULL, hence the lower bound of MPI_ROOT rather than 0 */
    if (root < MPI_ROOT || root >= comm_ptr->np) {
        mpi_errno = MPIR_Err_setmsg( MPI_ERR_ROOT, MPIR_ERR_DEFAULT, myname,
                                     (char *)0, (char *)0, root );
    }
    MPIR_TEST_COUNT(count);
    if (mpi_errno)
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* See the overview in Collection Operations for why this is ok:
       a zero-count broadcast moves no data, so it may complete locally */
    if (count == 0) return MPI_SUCCESS;

    /* Dispatch to the collective-operations table attached to the
       communicator; the error-handler push/pop brackets the call so that
       errors inside the collective are reported against this routine */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Bcast(buffer, count, dtype_ptr, root, comm_ptr);
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
MPI_Scatter - Sends data from one task to all other tasks in a group

Input Parameters:
+ sendbuf - address of send buffer (choice, significant only at 'root')
. sendcount - number of elements sent to each process (integer,
              significant only at 'root')
. sendtype - data type of send buffer elements (significant only
             at 'root') (handle)
. recvcount - number of elements in receive buffer (integer)
. recvtype - data type of receive buffer elements (handle)
. root - rank of sending process (integer)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Scatter ( void *sendbuf, int sendcnt, MPI_Datatype sendtype, void *recvbuf, int recvcnt, MPI_Datatype recvtype, int root, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    /* stype_ptr deliberately stays NULL at non-root ranks; the send
       arguments are significant only at root */
    struct MPIR_DATATYPE *stype_ptr = NULL;
    struct MPIR_DATATYPE *rtype_ptr = NULL;
    static char myname[] = "MPI_SCATTER";
    MPIR_ERROR_DECL;

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    /* Significant only at root: resolve and validate the send datatype
       only on the root rank, since other ranks may pass any value */
    if (root == comm_ptr->local_rank) {
        stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);
        MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr,myname);
    }
    rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);
    MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr,myname);

    /* Dispatch to the communicator's collective-operations table; the
       push/pop brackets attribute internal errors to this routine */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Scatter(sendbuf, sendcnt, stype_ptr, recvbuf, recvcnt, rtype_ptr, root, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
MPI_Graph_map - Maps process to graph topology information

Input Parameters:
+ comm - input communicator (handle)
. nnodes - number of graph nodes (integer)
. index - integer array specifying the graph structure, see 'MPI_GRAPH_CREATE'
- edges - integer array specifying the graph structure

Output Parameter:
. newrank - reordered rank of the calling process; 'MPI_UNDEFINED' if the
  calling process does not belong to graph (integer)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
EXPORT_MPI_API int MPI_Graph_map ( MPI_Comm comm_old, int nnodes, int *index, int *edges, int *newrank )
{
    int my_rank, comm_size;
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_old_ptr;
    static char myname[] = "MPI_GRAPH_MAP";

    TR_PUSH(myname);
    comm_old_ptr = MPIR_GET_COMM_PTR(comm_old);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm_old,comm_old_ptr,comm_old_ptr,myname);
    /* The topology must contain at least one node */
    if (nnodes < 1) mpi_errno = MPI_ERR_ARG;
    MPIR_TEST_ARG(newrank);
    MPIR_TEST_ARG(index);
    MPIR_TEST_ARG(edges);
    if (mpi_errno)
        return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
#endif

    /* The communicator must hold at least as many processes as the graph
       has nodes */
    MPIR_Comm_size( comm_old_ptr, &comm_size );
    if (comm_size < nnodes)
        return MPIR_ERROR( comm_old_ptr, MPI_ERR_ARG, myname );

    /* No reordering is attempted: the first nnodes ranks keep their rank,
       everyone else is outside the topology */
    MPIR_Comm_rank ( comm_old_ptr, &my_rank );
    *newrank = ( my_rank < nnodes ) ? my_rank : MPI_UNDEFINED;

    TR_POP;
    return (mpi_errno);
}
/*@
MPI_Allgather - Gathers data from all tasks and distribute it to all

Input Parameters:
+ sendbuf - starting address of send buffer (choice)
. sendcount - number of elements in send buffer (integer)
. sendtype - data type of send buffer elements (handle)
. recvcount - number of elements received from any process (integer)
. recvtype - data type of receive buffer elements (handle)
- comm - communicator (handle)

Output Parameter:
. recvbuf - address of receive buffer (choice)

Notes:
The MPI standard (1.0 and 1.1) says that

The jth block of data sent from each process is received by every process
and placed in the jth block of the buffer 'recvbuf'.

This is misleading; a better description is

The block of data sent from the jth process is received by every process
and placed in the jth block of the buffer 'recvbuf'.

This text was suggested by Rajeev Thakur.

.N fortran
.N Errors
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Allgather ( void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr=0;
    struct MPIR_DATATYPE *stype_ptr=0, *rtype_ptr=0;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_ALLGATHER";

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);
    rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);

    /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    /* NOTE(review): these macros set mpi_errno and may return on error —
       expansion defined elsewhere */
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr, myname);
    MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr, myname );
    MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr, myname );
    MPIR_TEST_COUNT(sendcount);
    MPIR_TEST_COUNT(recvcount);
    if (mpi_errno)
        return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* Dispatch to the collective-operations table on the communicator,
       bracketed so internal errors are attributed to this routine */
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Allgather( sendbuf, sendcount, stype_ptr, recvbuf, recvcount, rtype_ptr, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
/*@
MPI_Testsome - Tests for some given communications to complete

Input Parameters:
+ incount - length of array_of_requests (integer)
- array_of_requests - array of requests (array of handles)

Output Parameters:
+ outcount - number of completed requests (integer)
. array_of_indices - array of indices of operations that completed (array
  of integers)
- array_of_statuses - array of status objects for operations that completed
  (array of Status).  May be 'MPI_STATUSES_IGNORE'.

.N waitstatus
.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_IN_STATUS
@*/
int MPI_Testsome( int incount, MPI_Request array_of_requests[], int *outcount, int array_of_indices[], MPI_Status array_of_statuses[] )
{
    int i, j, mpi_errno = MPI_SUCCESS;
    int nfound = 0;          /* completed requests found so far */
    int nnull  = 0;          /* inactive/null handles seen so far */
    int mpi_lerr;            /* per-request local error from the device */
    MPI_Request request;
    static char myname[] = "MPI_TESTSOME";

    disableSignal();
    TR_PUSH(myname);

    /* NOTE: This implementation will not work correctly if the device
       requires messages to be received in some particular order.  In that
       case, this routine needs to try and complete the messages in ANY
       order.  The same is true for testall.c . */
    MPID_DeviceCheck( MPID_NOTBLOCKING );
    for (i = 0; i < incount; i++) {
        /* Skip over null handles.  We need this for handles generated when
           MPI_PROC_NULL is the source or destination of an operation */
        request = array_of_requests[i];
        if (!request) {/* || !request->chandle.active) { */
            nnull ++;
            continue;
        }
        mpi_lerr = 0;
        switch (request->handle_type) {
        case MPIR_SEND:
            /* Cancelled sends complete immediately with a cancelled tag */
            if (MPID_SendRequestCancelled(request)) {
                if (array_of_statuses) {
                    array_of_statuses[i].MPI_TAG = MPIR_MSG_CANCELLED;
                    array_of_statuses[i].MPI_ERROR = MPI_SUCCESS;
                }
                nfound++;
            }
            else {
                if (request->shandle.is_complete ||
                    MPID_SendIcomplete( request, &mpi_lerr )) {
                    array_of_indices[nfound] = i;
                    if (mpi_lerr) {
                        /* First error: clear the whole status error array
                           then flag MPI_ERR_IN_STATUS, per MPI semantics */
                        if (mpi_errno == MPI_SUCCESS) {
                            if (array_of_statuses) {
                                for (j=0; j<incount; j++)
                                    array_of_statuses[j].MPI_ERROR = MPI_SUCCESS;
                            }
                            mpi_errno = MPI_ERR_IN_STATUS;
                        }
                        if (array_of_statuses)
                            array_of_statuses[nfound].MPI_ERROR = mpi_lerr;
                    }
                    /* Completed non-persistent send: release the request and
                       null out the caller's handle */
                    MPIR_FORGET_SEND( &request->shandle );
                    MPID_SendFree( request );
                    array_of_requests[i] = 0;
                    nfound++;
                }
            }
            break;
        case MPIR_RECV:
            if (request->rhandle.s.MPI_TAG == MPIR_MSG_CANCELLED) {
                if (array_of_statuses)
                    array_of_statuses[i].MPI_TAG = MPIR_MSG_CANCELLED;
                nfound++;
            }
            else {
                if (request->rhandle.is_complete ||
                    MPID_RecvIcomplete( request, (MPI_Status *)0, &mpi_lerr )) {
                    array_of_indices[nfound] = i;
                    /* Receive errors (e.g. truncation) live in the saved
                       status rather than mpi_lerr */
                    if (request->rhandle.s.MPI_ERROR) {
                        if (mpi_errno == MPI_SUCCESS) {
                            if (array_of_statuses) {
                                for (j=0; j<incount; j++)
                                    array_of_statuses[j].MPI_ERROR = MPI_SUCCESS;
                            }
                            mpi_errno = MPI_ERR_IN_STATUS;
                        }
                    }
                    if (array_of_statuses)
                        array_of_statuses[nfound] = request->rhandle.s;
                    MPID_RecvFree( request );
                    array_of_requests[i] = 0;
                    nfound++;
                }
            }
            break;
        case MPIR_PERSISTENT_SEND:
            /* Inactive persistent requests count as null (unless cancelled,
               which counts as a completion) */
            if (!request->persistent_shandle.active) {
                if (MPID_SendRequestCancelled(&request->persistent_shandle)) {
                    if (array_of_statuses)
                        array_of_statuses[i].MPI_TAG = MPIR_MSG_CANCELLED;
                    nfound++;
                }
                else
                    nnull++;
            }
            else if (request->persistent_shandle.shandle.is_complete ||
                     MPID_SendIcomplete( request, &mpi_lerr )) {
                array_of_indices[nfound] = i;
                if (mpi_lerr) {
                    if (mpi_errno == MPI_SUCCESS) {
                        if (array_of_statuses) {
                            for (j=0; j<incount; j++)
                                array_of_statuses[j].MPI_ERROR = MPI_SUCCESS;
                        }
                        mpi_errno = MPI_ERR_IN_STATUS;
                    }
                    if (array_of_statuses)
                        array_of_statuses[nfound].MPI_ERROR = mpi_lerr;
                }
                /* Persistent requests are deactivated, not freed */
                request->persistent_shandle.active = 0;
                nfound++;
            }
            break;
        case MPIR_PERSISTENT_RECV:
            if (!request->persistent_rhandle.active) {
                if (request->persistent_rhandle.rhandle.s.MPI_TAG ==
                    MPIR_MSG_CANCELLED) {
                    if (array_of_statuses)
                        array_of_statuses[i].MPI_TAG = MPIR_MSG_CANCELLED;
                    nfound++;
                }
                else
                    nnull++;
            }
            else if (request->persistent_rhandle.rhandle.is_complete ||
                     MPID_RecvIcomplete( request, (MPI_Status *)0, &mpi_lerr )) {
                array_of_indices[nfound] = i;
                if (mpi_lerr) {
                    if (mpi_errno == MPI_SUCCESS) {
                        if (array_of_statuses) {
                            for (j=0; j<incount; j++)
                                array_of_statuses[j].MPI_ERROR = MPI_SUCCESS;
                        }
                        mpi_errno = MPI_ERR_IN_STATUS;
                    }
                }
                if (array_of_statuses)
                    array_of_statuses[nfound] =
                        request->persistent_rhandle.rhandle.s;
                request->persistent_rhandle.active = 0;
                nfound++;
            }
            break;
        }
    }
    /* If every handle was null/inactive, MPI requires outcount of
       MPI_UNDEFINED */
    if (nnull == incount)
        *outcount = MPI_UNDEFINED;
    else
        *outcount = nfound;
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR(MPIR_COMM_WORLD, mpi_errno, myname );
    }
    TR_POP;
    revertSignal();
    return mpi_errno;
}
/*@
MPI_Cart_create - Makes a new communicator to which topology information
                  has been attached

Input Parameters:
+ comm_old - input communicator (handle)
. ndims - number of dimensions of cartesian grid (integer)
. dims - integer array of size ndims specifying the number of processes in
  each dimension
. periods - logical array of size ndims specifying whether the grid is
  periodic (true) or not (false) in each dimension
- reorder - ranking may be reordered (true) or not (false) (logical)

Output Parameter:
. comm_cart - communicator with new cartesian topology (handle)

Algorithm:
We ignore 'reorder' info currently.

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_DIMS
.N MPI_ERR_ARG
@*/
int MPI_Cart_create ( MPI_Comm comm_old, int ndims, int *dims, int *periods, int reorder, MPI_Comm *comm_cart )
{
    int range[1][3];
    MPI_Group group_old, group;
    int i, rank, num_ranks = 1;
    int mpi_errno = MPI_SUCCESS;
    int flag, size;
    MPIR_TOPOLOGY *topo;
    struct MPIR_COMMUNICATOR *comm_old_ptr;
    static char myname[] = "MPI_CART_CREATE";

    TR_PUSH(myname);
    comm_old_ptr = MPIR_GET_COMM_PTR(comm_old);

    /* Check validity of arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm_old,comm_old_ptr,comm_old_ptr,myname);
    MPIR_TEST_ARG(comm_cart);
    MPIR_TEST_ARG(periods);
    if (ndims < 1 || dims == (int *)0) mpi_errno = MPI_ERR_DIMS;
    if (mpi_errno)
        return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );

    /* Check for Intra-communicator: topologies may only be built on
       intra-communicators */
    MPI_Comm_test_inter ( comm_old, &flag );
    if (flag)
        return MPIR_ERROR(comm_old_ptr,
            MPIR_ERRCLASS_TO_CODE(MPI_ERR_COMM,MPIR_ERR_COMM_INTER), myname );
#endif

    /* Determine number of ranks in topology; negative dims entries are
       treated by absolute value here */
    for ( i=0; i<ndims; i++ )
        num_ranks *= (dims[i]>0)?dims[i]:-dims[i];
    if ( num_ranks < 1 ) {
        (*comm_cart) = MPI_COMM_NULL;
        return MPIR_ERROR( comm_old_ptr, MPI_ERR_TOPOLOGY, myname );
    }

    /* Is the old communicator big enough? */
    MPIR_Comm_size (comm_old_ptr, &size);
    if (num_ranks > size) {
        mpi_errno = MPIR_Err_setmsg( MPI_ERR_TOPOLOGY, MPIR_ERR_TOPO_TOO_LARGE,
                        myname,
                        "Topology size is larger than size of communicator",
                        "Topology size %d is greater than communicator size %d",
                        num_ranks, size );
        return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
    }

    /* Make new comm: take the first num_ranks processes of the old
       communicator (range triple is {first, last, stride}) */
    range[0][0] = 0; range[0][1] = num_ranks - 1; range[0][2] = 1;
    MPI_Comm_group ( comm_old, &group_old );
    MPI_Group_range_incl ( group_old, 1, range, &group );
    MPI_Comm_create ( comm_old, group, comm_cart );
    MPI_Group_free( &group );
    MPI_Group_free( &group_old );

    /* Store topology information in new communicator; processes excluded
       from the topology get MPI_COMM_NULL and skip this */
    if ( (*comm_cart) != MPI_COMM_NULL ) {
        MPIR_ALLOC(topo,(MPIR_TOPOLOGY *) MPIR_SBalloc ( MPIR_topo_els ),
                   comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
        MPIR_SET_COOKIE(&topo->cart,MPIR_CART_TOPOL_COOKIE)
        topo->cart.type   = MPI_CART;
        topo->cart.nnodes = num_ranks;
        topo->cart.ndims  = ndims;
        /* Single allocation of 3*ndims ints; dims, periods, and position
           are consecutive ndims-sized slices of it */
        MPIR_ALLOC(topo->cart.dims,(int *)MALLOC( sizeof(int) * 3 * ndims ),
                   comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
        topo->cart.periods  = topo->cart.dims + ndims;
        topo->cart.position = topo->cart.periods + ndims;
        for ( i=0; i<ndims; i++ ) {
            topo->cart.dims[i]    = dims[i];
            topo->cart.periods[i] = periods[i];
        }

        /* Compute my position: peel off coordinates from the rank using
           row-major order (num_ranks is consumed as a running divisor) */
        MPI_Comm_rank ( (*comm_cart), &rank );
        for ( i=0; i < ndims; i++ ) {
            num_ranks = num_ranks / dims[i];
            topo->cart.position[i] = rank / num_ranks;
            rank                   = rank % num_ranks;
        }

        /* cache topology information */
        MPI_Attr_put ( (*comm_cart), MPIR_TOPOLOGY_KEYVAL, (void *)topo );
    }
    TR_POP;
    return (mpi_errno);
}
/*@
MPI_Attr_put - Stores attribute value associated with a key

Input Parameters:
+ comm - communicator to which attribute will be attached (handle)
. keyval - key value, as returned by 'MPI_KEYVAL_CREATE' (integer)
- attribute_val - attribute value

Notes:
Values of the permanent attributes 'MPI_TAG_UB', 'MPI_HOST', 'MPI_IO', and
'MPI_WTIME_IS_GLOBAL' may not be changed.

The type of the attribute value depends on whether C or Fortran is being
used.  In C, an attribute value is a pointer ('void *'); in Fortran, it is
a single integer (`not` a pointer, since Fortran has no pointers and there
are systems for which a pointer does not fit in an integer (e.g., any > 32
bit address system that uses 64 bits for Fortran 'DOUBLE PRECISION').

If an attribute is already present, the delete function (specified when the
corresponding keyval was created) will be called.

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_KEYVAL
.N MPI_ERR_PERM_KEY

.seealso MPI_Attr_get, MPI_Keyval_create, MPI_Attr_delete
@*/
EXPORT_MPI_API int MPI_Attr_put ( MPI_Comm comm, int keyval, void *attr_value )
{
    MPIR_HBT_node *attr;
    MPIR_Attr_key *attr_key;
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_ATTR_PUT";

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    attr_key = MPIR_GET_KEYVAL_PTR( keyval );
    MPIR_TEST_MPI_KEYVAL(keyval,attr_key,comm_ptr,myname);

    /* Check for valid arguments (the && form both tests the keyval and
       sets mpi_errno in one expression) */
    if ( ( (keyval == MPI_KEYVAL_INVALID) && (mpi_errno = MPI_ERR_OTHER) ) )
        return MPIR_ERROR( comm_ptr, mpi_errno, myname);

    /* The predefined (permanent) attributes on MPI_COMM_WORLD may not be
       overwritten by the user */
    if (comm == MPI_COMM_WORLD && attr_key->permanent)
        return MPIR_ERROR( comm_ptr,
            MPIR_ERRCLASS_TO_CODE(MPI_ERR_ARG,MPIR_ERR_PERM_KEY),myname );

    /* Attributes are cached in a height-balanced tree keyed by keyval */
    MPIR_HBT_lookup(comm_ptr->attr_cache, keyval, &attr);
    if (attr == (MPIR_HBT_node *)0) {
        /* No previous value: insert a fresh node */
        (void) MPIR_HBT_new_node ( attr_key, attr_value, &attr );
        (void) MPIR_HBT_insert ( comm_ptr->attr_cache, attr );
        /* Every update to the attr_key must be counted! */
        MPIR_REF_INCR(attr_key);
    }
    else {
        /* This is an unclear part of the standard.  Under MPI_KEYVAL_CREATE,
           it is claimed that ONLY MPI_COMM_FREE and MPI_ATTR_DELETE can cause
           the delete routine to be called.  Under MPI_ATTR_PUT, however, the
           delete routine IS called. */
        if ( attr_key->delete_fn.c_delete_fn ) {
            if (attr_key->FortranCalling) {
                /* Fortran callbacks exchange the value as an int, so the
                   stored pointer value is narrowed for the call and widened
                   back afterwards */
                MPI_Aint invall = (MPI_Aint)attr->value;
                int inval = (int)invall;
                (void) (*attr_key->delete_fn.f77_delete_fn)(comm, &keyval, &inval, attr_key->extra_state, &mpi_errno );
                attr->value = (void *)(MPI_Aint)inval;
            }
            else
                mpi_errno = (*attr_key->delete_fn.c_delete_fn)(comm, keyval, attr->value, attr_key->extra_state );
            /* A failing delete callback aborts the put, leaving the old
               attribute in place */
            if (mpi_errno)
                return MPIR_ERROR( comm_ptr, mpi_errno, myname);
        }
        attr->value = attr_value;
    }
    /* The device may want to know about attributes */
    MPID_ATTR_SET(comm_ptr,keyval,attr_value);
    TR_POP;
    return (mpi_errno);
}
/*@
MPI_Group_union - Produces a group by combining two groups

Input Parameters:
+ group1 - first group (handle)
- group2 - second group (handle)

Output Parameter:
. newgroup - union group (handle)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_GROUP
.N MPI_ERR_EXHAUSTED

.seealso: MPI_Group_free
@*/
EXPORT_MPI_API int MPI_Group_union ( MPI_Group group1, MPI_Group group2, MPI_Group *group_out )
{
    int i, j, global_rank;
    struct MPIR_GROUP *group1_ptr, *group2_ptr, *new_group_ptr;
    int n;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_GROUP_UNION";

    TR_PUSH(myname);
    group1_ptr = MPIR_GET_GROUP_PTR(group1);
    group2_ptr = MPIR_GET_GROUP_PTR(group2);
/*  MPIR_TEST_MPI_GROUP(group1,group1_ptr,MPIR_COMM_WORLD,myname);
    MPIR_TEST_MPI_GROUP(grou2p,group2_ptr,MPIR_COMM_WORLD,myname); */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_GROUP(group1_ptr);
    MPIR_TEST_GROUP(group2_ptr);
    if (mpi_errno)
        return MPIR_ERROR(MPIR_COMM_WORLD, mpi_errno, myname );
#endif

    /* Check for EMPTY groups: union with an empty group is just a dup of
       the other group */
    if ( (group1 == MPI_GROUP_EMPTY) && (group2 == MPI_GROUP_EMPTY) ) {
        MPIR_Group_dup ( MPIR_GROUP_EMPTY, &new_group_ptr );
        TR_POP;
        *group_out = new_group_ptr->self;
        return (mpi_errno);
    }
    if ( group1 == MPI_GROUP_EMPTY ) {
        MPIR_Group_dup ( group2_ptr, &new_group_ptr );
        *group_out = new_group_ptr->self;
        TR_POP;
        return (mpi_errno);
    }
    if ( group2 == MPI_GROUP_EMPTY ) {
        MPIR_Group_dup ( group1_ptr, &new_group_ptr );
        *group_out = new_group_ptr->self;
        TR_POP;
        return (mpi_errno);
    }

    /* Create the new group */
    MPIR_ALLOC(new_group_ptr,NEW(struct MPIR_GROUP),MPIR_COMM_WORLD,
               MPI_ERR_EXHAUSTED, "MPI_GROUP_UNION" );
    *group_out = (MPI_Group) MPIR_FromPointer( new_group_ptr );
    new_group_ptr->self = *group_out;
    MPIR_SET_COOKIE(new_group_ptr,MPIR_GROUP_COOKIE)
    new_group_ptr->ref_count  = 1;
    new_group_ptr->permanent  = 0;
    /* Group1 members keep their ranks, so my rank carries over if I am in
       group1; may be fixed up below otherwise */
    new_group_ptr->local_rank = group1_ptr->local_rank;
    new_group_ptr->set_mark   = (int *)0;

    /* Set the number in the union (upper bound, trimmed below as
       duplicates are found) */
    n = group1_ptr->np + group2_ptr->np;

    /* Allocate set marking space for group2 if necessary.
       NOTE(review): set_mark is scratch space cached on the group object
       itself — presumably safe because these set operations are not
       reentrant; confirm against callers */
    if (group2_ptr->set_mark == NULL) {
        MPIR_ALLOC(group2_ptr->set_mark,(int *) MALLOC( group2_ptr->np * sizeof(int) ),
                   MPIR_COMM_WORLD,MPI_ERR_EXHAUSTED,"MPI_GROUP_UNION");
    }

    /* Mark the union: MPIR_MARKED means "only in group2" (so include it);
       members of group2 also present in group1 are unmarked and reduce n */
    for ( j=0; j<group2_ptr->np; j++ ) {
        group2_ptr->set_mark[j] = MPIR_MARKED;
        for ( i=0; i<group1_ptr->np; i++ )
            if ( group1_ptr->lrank_to_grank[i] == group2_ptr->lrank_to_grank[j] ) {
                group2_ptr->set_mark[j] = MPIR_UNMARKED;
                n--;
                break;
            }
    }

    /* Alloc the memory */
    new_group_ptr->np = n;
    MPIR_ALLOC(new_group_ptr->lrank_to_grank,(int *) MALLOC( n * sizeof(int) ),
               MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_GROUP_UNION" );

    /* Fill in the space: all of group1 first, then the group2-only members */
    n = group1_ptr->np;
    memcpy(new_group_ptr->lrank_to_grank,group1_ptr->lrank_to_grank,n*sizeof(int));
    for ( j=0; j<group2_ptr->np; j++ )
        if ( (group2_ptr->set_mark[j]==MPIR_MARKED) && (n < new_group_ptr->np) )
            new_group_ptr->lrank_to_grank[n++] = group2_ptr->lrank_to_grank[j];

    /* Find the local rank only if local rank not defined in group 1 */
    if ( new_group_ptr->local_rank == MPI_UNDEFINED ) {
        global_rank = MPID_MyWorldRank;
        for( i=group1_ptr->np; i<new_group_ptr->np; i++ )
            if ( global_rank == new_group_ptr->lrank_to_grank[i] ) {
                new_group_ptr->local_rank = i;
                break;
            }
    }

    /* Determine the previous and next powers of 2 (cached for the
       collective algorithms) */
    MPIR_Powers_of_2 ( new_group_ptr->np, &(new_group_ptr->N2_next), &(new_group_ptr->N2_prev) );

    TR_POP;
    return (mpi_errno);
}
/*@
MPI_Testall - Tests for the completion of all previously initiated
              communications

Input Parameters:
+ count - lists length (integer)
- array_of_requests - array of requests (array of handles)

Output Parameters:
+ flag - (logical)
- array_of_statuses - array of status objects (array of Status)

Notes:
'flag' is true only if all requests have completed.  Otherwise, flag is
false and neither the 'array_of_requests' nor the 'array_of_statuses' is
modified.

.N waitstatus
.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_IN_STATUS
@*/
EXPORT_MPI_API int MPI_Testall( int count, MPI_Request array_of_requests[], int *flag, MPI_Status array_of_statuses[] )
{
    int i, mpi_errno = MPI_SUCCESS;
    MPI_Request request;
    int nready;      /* number of requests observed complete in pass 1 */
    static char myname[] = "MPI_TESTALL";

    TR_PUSH(myname);
    MPID_DeviceCheck( MPID_NOTBLOCKING );
    /* It is a good thing that the receive requests contain the status
       object!  We need this to save the status information in the case
       where not all of the requests have completed.

       Note that this routine forces some changes on the ADI test routines.
       It must be possible to test a completed request multiple times; once
       the "is_complete" field is set, the data must be saved until the
       request is explicitly freed.  That is, unlike the MPI tests, the ADI
       tests must be nondestructive. */

    /* Pass 1: count completed requests without freeing anything, so that a
       partial completion leaves all requests intact */
    nready = 0;
    for (i = 0; i < count; i++ ) {
        request = array_of_requests[i];
        if (!request) {
            /* Null handles count as ready */
            nready ++;
            continue;
        }
        switch (request->handle_type) {
        case MPIR_SEND:
            if (MPID_SendRequestCancelled(request)) {
                array_of_statuses[i].MPI_TAG = MPIR_MSG_CANCELLED;
                nready++;
            }
            else {
                if (!request->shandle.is_complete) {
                    if (MPID_SendIcomplete( request, &mpi_errno ))
                        nready++;
                }
                else nready++;
            }
            break;
        case MPIR_RECV:
            if (request->rhandle.s.MPI_TAG == MPIR_MSG_CANCELLED) {
                array_of_statuses[i].MPI_TAG = MPIR_MSG_CANCELLED;
                nready++;
            }
            else {
                if (!request->rhandle.is_complete) {
                    if (MPID_RecvIcomplete( request, (MPI_Status *)0, &mpi_errno ))
                        nready++;
                }
                else nready++;
            }
            break;
        case MPIR_PERSISTENT_SEND:
            /* Inactive persistent requests count as ready */
            if (request->persistent_shandle.active &&
                !request->persistent_shandle.shandle.is_complete) {
                if (MPID_SendIcomplete( request, &mpi_errno ))
                    nready++;
            }
            else nready++;
            break;
        case MPIR_PERSISTENT_RECV:
            if (request->persistent_rhandle.active &&
                !request->persistent_rhandle.rhandle.is_complete) {
                if (MPID_RecvIcomplete( request, (MPI_Status *)0, &mpi_errno ))
                    nready++;
            }
            else nready++;
            break;
        }
        if (mpi_errno) {
            /* Device-level failure: mark per-request errors in the status
               array and report MPI_ERR_IN_STATUS */
            MPIR_Set_Status_error_array( array_of_requests, count, i, mpi_errno, array_of_statuses );
            mpi_errno = MPI_ERR_IN_STATUS;
            TR_POP;
            MPIR_RETURN(MPIR_COMM_WORLD, mpi_errno, myname );
        }
    }
    *flag = (nready == count);
    /* Because a request may have completed with an error (such as
       MPI_ERR_TRUNCATE), we need to check here as well */
    /* Pass 2: only when everything completed, fill in statuses and free or
       deactivate the requests */
    if (nready == count) {
        for (i=0; i<count; i++) {
            request = array_of_requests[i];
            if (!request) {
                /* See MPI Standard, 3.7: empty status for null handles */
                array_of_statuses[i].MPI_TAG    = MPI_ANY_TAG;
                array_of_statuses[i].MPI_SOURCE = MPI_ANY_SOURCE;
                array_of_statuses[i].MPI_ERROR  = MPI_SUCCESS;
                array_of_statuses[i].count      = 0;
                continue;
            }
            switch (request->handle_type) {
            case MPIR_SEND:
                if (array_of_statuses[i].MPI_TAG != MPIR_MSG_CANCELLED) {
                    MPIR_FORGET_SEND( &request->shandle );
                    MPID_Send_free( array_of_requests[i] );
                    array_of_requests[i] = 0;
                }
                break;
            case MPIR_RECV:
                if (array_of_statuses[i].MPI_TAG != MPIR_MSG_CANCELLED) {
                    /* Completion with error (e.g. truncation) is carried in
                       the saved status */
                    if (request->rhandle.s.MPI_ERROR)
                        mpi_errno = request->rhandle.s.MPI_ERROR;
                    /* if (request->rhandle.s.MPI_ERROR && mpi_errno == MPI_SUCCESS) {
                           for (j=0; j<count; j++) {
                               if (!array_of_requests[i] || array_of_requests[i].is_complete)
                                   array_of_statuses[j].MPI_ERROR = MPI_SUCCESS;
                               else
                                   array_of_statuses[j].MPI_ERROR = MPI_ERR_PENDING;
                           }
                           mpi_errno = MPI_ERR_IN_STATUS;
                       } */
                    array_of_statuses[i] = request->rhandle.s;
                    MPID_Recv_free( array_of_requests[i] );
                    array_of_requests[i] = 0;
                }
                break;
            case MPIR_PERSISTENT_SEND:
                if (request->persistent_shandle.active) {
                    /* array_of_statuses[i] = request->persistent_shandle.shandle.s; */
                    array_of_statuses[i].MPI_ERROR =
                        MPID_SendRequestErrval(&request->persistent_shandle.shandle);
                    /* Persistent requests are deactivated, not freed */
                    request->persistent_shandle.active = 0;
                }
                else {
                    /* See MPI Standard, 3.7 */
                    /* Thanks to [email protected] for this fix */
                    if (MPID_SendRequestCancelled(&request->persistent_shandle))
                        array_of_statuses[i].MPI_TAG = MPIR_MSG_CANCELLED;
                    else
                        array_of_statuses[i].MPI_TAG = MPI_ANY_TAG;
                    array_of_statuses[i].MPI_SOURCE = MPI_ANY_SOURCE;
                    array_of_statuses[i].MPI_ERROR  = MPI_SUCCESS;
                    array_of_statuses[i].count      = 0;
                }
                break;
            case MPIR_PERSISTENT_RECV:
                if (request->persistent_rhandle.active) {
                    array_of_statuses[i] = request->persistent_rhandle.rhandle.s;
                    mpi_errno = request->persistent_rhandle.rhandle.s.MPI_ERROR;
                    request->persistent_rhandle.active = 0;
                }
                else {
                    /* See MPI Standard, 3.7 */
                    /* Thanks to [email protected] for this fix */
                    if (request->persistent_rhandle.rhandle.s.MPI_TAG ==
                        MPIR_MSG_CANCELLED)
                        array_of_statuses[i].MPI_TAG = MPIR_MSG_CANCELLED;
                    else
                        array_of_statuses[i].MPI_TAG = MPI_ANY_TAG;
                    array_of_statuses[i].MPI_SOURCE = MPI_ANY_SOURCE;
                    array_of_statuses[i].MPI_ERROR  = MPI_SUCCESS;
                    array_of_statuses[i].count      = 0;
                }
                break;
            }
            if (mpi_errno) {
                MPIR_Set_Status_error_array( array_of_requests, count, i, mpi_errno, array_of_statuses );
                mpi_errno = MPI_ERR_IN_STATUS;
                TR_POP;
                MPIR_RETURN(MPIR_COMM_WORLD, mpi_errno, myname );
            }
        }
    }
    TR_POP;
    MPIR_RETURN(MPIR_COMM_WORLD, mpi_errno, myname );
}
/*@
MPI_Bsend - Basic send with user-specified buffering

Input Parameters:
+ buf - initial address of send buffer (choice)
. count - number of elements in send buffer (nonnegative integer)
. datatype - datatype of each send buffer element (handle)
. dest - rank of destination (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Notes:
This send is provided as a convenience function; it allows the user to
send messages without worrying about where they are buffered (because the
user `must` have provided buffer space with 'MPI_Buffer_attach').

In deciding how much buffer space to allocate, remember that the buffer
space is not available for reuse by subsequent 'MPI_Bsend's unless you are
certain that the message has been received (not just that it should have
been received).  For example, this code does not allocate enough buffer
space
.vb
    MPI_Buffer_attach( b, n*sizeof(double) + MPI_BSEND_OVERHEAD );
    for (i=0; i<m; i++) {
        MPI_Bsend( buf, n, MPI_DOUBLE, ... );
    }
.ve
because only enough buffer space is provided for a single send, and the
loop may start a second 'MPI_Bsend' before the first is done making use of
the buffer.

In C, you can force the messages to be delivered by
.vb
    MPI_Buffer_detach( &b, &n );
    MPI_Buffer_attach( b, n );
.ve
(The 'MPI_Buffer_detach' will not complete until all buffered messages are
delivered.)

.N fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_RANK
.N MPI_ERR_TAG

.seealso: MPI_Buffer_attach, MPI_Ibsend, MPI_Bsend_init
@*/
int MPI_Bsend( void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm )
{
    MPI_Request handle;
    MPI_Status  status;
    int         mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_BSEND";

    disableSignal();
    TR_PUSH(myname);

    /* A send to MPI_PROC_NULL is a no-op that succeeds immediately */
    if (dest != MPI_PROC_NULL) {
        /* We should let Ibsend find the errors, but we will soon add a
           special case for faster Bsend and we'll need these tests then */
        comm_ptr = MPIR_GET_COMM_PTR(comm);
#ifndef MPIR_NO_ERROR_CHECKING
        MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
        MPIR_TEST_COUNT(count);
        MPIR_TEST_SEND_TAG(tag);
        MPIR_TEST_SEND_RANK(comm_ptr,dest);
        if (mpi_errno) {
            /* Re-enable signals before every early exit */
            revertSignal();
            return MPIR_ERROR(comm_ptr, mpi_errno, myname );
        }
#endif
        /* ? BsendDatatype?
           MPID_BsendContig( comm, buf, len, src_lrank, tag, context_id,
                             dest_grank, msgrep, &mpi_errno );
           if (!mpi_errno) return MPI_SUCCESS;
           if (mpi_errno != MPIR_ERR_MAY_BLOCK)
               return MPIR_ERROR( comm, mpi_errno, myname ); */
        MPIR_ERROR_PUSH(comm_ptr);
        /* We don't use MPIR_CALL_POP so that we can free the handle */
        handle = MPI_REQUEST_NULL;
        if ((mpi_errno = MPI_Ibsend( buf, count, datatype, dest, tag, comm, &handle ))) {
            MPIR_ERROR_POP(comm_ptr);
            if (handle != MPI_REQUEST_NULL)
                MPID_SendFree( handle );
            revertSignal();
            return MPIR_ERROR(comm_ptr,mpi_errno,myname);
        }
        /* This Wait only completes the transfer of data into the buffer
           area.  The test/wait in util/bsendutil.c completes the actual
           transfer */
        MPIR_CALL_POP(MPI_Wait( &handle, &status ),comm_ptr,myname);
        MPIR_ERROR_POP(comm_ptr);
    }
    TR_POP;
    revertSignal();
    return mpi_errno;
}