Example #1
/*@
    MPI_Iprobe - Nonblocking test for a message

Input Parameters:
+ source - source rank, or  'MPI_ANY_SOURCE' (integer) 
. tag - tag value or  'MPI_ANY_TAG' (integer) 
- comm - communicator (handle) 

Output Parameters:
+ flag - true if there is a matching message that can be received (logical) 
- status - status object (Status) 
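
A minimal usage sketch, probing for any pending message and then
receiving it (here 'buf' is assumed to be an 'int' array large enough
for the incoming data, and the tag value '0' is an arbitrary choice):
.vb
    int flag, count;
    MPI_Status status;
    MPI_Iprobe( MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &flag, &status );
    if (flag) {
        MPI_Get_count( &status, MPI_INT, &count );
        MPI_Recv( buf, count, MPI_INT, status.MPI_SOURCE, 0,
                  MPI_COMM_WORLD, &status );
    }
.ve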

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TAG
.N MPI_ERR_RANK

@*/
EXPORT_MPI_API int MPI_Iprobe( int source, int tag, MPI_Comm comm, int *flag, 
		MPI_Status *status )
{
    MPI_Status __status;
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_IPROBE";

    TR_PUSH(myname);

    if(status == MPI_STATUS_IGNORE) status = &__status;

    comm_ptr = MPIR_GET_COMM_PTR(comm);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_RECV_TAG(tag);
    MPIR_TEST_RECV_RANK(comm_ptr,source);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    if (source == MPI_PROC_NULL) {
	status->MPI_SOURCE = MPI_PROC_NULL;
	status->MPI_TAG	   = MPI_ANY_TAG;
	MPID_ZERO_STATUS_COUNT(status);
	return MPI_SUCCESS;
	}
    MPID_Iprobe( comm_ptr, tag, comm_ptr->recv_context, source, flag, 
		 &mpi_errno, status );
    TR_POP;
    MPIR_RETURN( comm_ptr, mpi_errno, myname );
}
Example #2
/*@C

MPI_Attr_get - Retrieves attribute value by key

Input Parameters:
+ comm - communicator to which attribute is attached (handle) 
- keyval - key value (integer) 

Output Parameters:
+ attr_value - attribute value, unless 'flag' = false 
- flag -  true if an attribute value was extracted;  false if no attribute is
  associated with the key 

Notes:
    Attributes must be extracted from the same language as they were inserted  
    in with 'MPI_ATTR_PUT'.  The notes for C and Fortran below explain why.

Notes for C:
    Even though the 'attr_value' argument is declared as 'void *', it is
    really the address of a void pointer.  See the rationale in the 
    standard for more details. 

.N fortran

    The 'attr_value' in Fortran is a pointer to a Fortran integer, not
    a pointer to a 'void *'.  
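
As an illustration of the C binding, a sketch that retrieves the
predefined 'MPI_TAG_UB' attribute; note that the address of a pointer
variable is passed:
.vb
    int *tag_ub_ptr, flag;
    MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, &tag_ub_ptr, &flag );
    if (flag) printf( "Maximum tag value is %d\n", *tag_ub_ptr );
.ve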

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_KEYVAL
@*/
int MPI_Attr_get ( 
	MPI_Comm comm, 
	int keyval, 
	void *attr_value, 
	int *flag )
{
  MPIR_HBT_node *attr;
  int mpi_errno = MPI_SUCCESS;
  struct MPIR_COMMUNICATOR *comm_ptr;
  static char myname[] = "MPI_ATTR_GET";

  TR_PUSH(myname);
  comm_ptr = MPIR_GET_COMM_PTR(comm);
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

  if ( ( (keyval == MPI_KEYVAL_INVALID) && (mpi_errno = MPI_ERR_OTHER) ) )
	return MPIR_ERROR(comm_ptr, mpi_errno, myname);
		  
  MPIR_HBT_lookup(comm_ptr->attr_cache, keyval, &attr);
  if ( attr == (MPIR_HBT_node *)0 ) {
	(*flag) = 0;
	(*(void **)attr_value) = (void *)0; 
  }
  else {
	(*flag) = 1;
	/* Device may want to update attribute */
	MPID_ATTR_GET(comm_ptr,keyval,&attr->value);
	(*(void **)attr_value) = attr->value;
  }
  TR_POP;
  return(mpi_errno);
}
Example #3
/*@
    MPI_Send - Performs a basic send

Input Parameters:
+ buf - initial address of send buffer (choice) 
. count - number of elements in send buffer (nonnegative integer) 
. datatype - datatype of each send buffer element (handle) 
. dest - rank of destination (integer) 
. tag - message tag (integer) 
- comm - communicator (handle) 

Notes:
This routine may block until the message is received.
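
A minimal sketch ('dest' and 'tag' are assumed to be chosen by the
caller):
.vb
    double work[10];
    MPI_Send( work, 10, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD );
.ve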

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK

.seealso: MPI_Isend, MPI_Bsend
@*/
EXPORT_MPI_API int MPI_Send( void *buf, int count, MPI_Datatype datatype, int dest, 
	      int tag, MPI_Comm comm )
{
    int          mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    static char myname[] = "MPI_SEND";

    if (dest == MPI_PROC_NULL) 
	return MPI_SUCCESS;

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_SendDatatype( comm_ptr, buf, count, dtype_ptr, comm_ptr->local_rank, tag, 
		       comm_ptr->send_context, comm_ptr->lrank_to_grank[dest], &mpi_errno );
    MPIR_RETURN(comm_ptr, mpi_errno, myname );
}
Example #4
/*@

MPI_Reduce_scatter - Combines values and scatters the results

Input Parameters:
+ sendbuf - starting address of send buffer (choice) 
. recvcounts - integer array specifying the 
number of elements in result distributed to each process.
Array must be identical on all calling processes. 
. datatype - data type of elements of input buffer (handle) 
. op - operation (handle) 
- comm - communicator (handle) 

Output Parameter:
. recvbuf - starting address of receive buffer (choice) 
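
For illustration, a sketch that sums distributed vectors and leaves one
'blk'-element block of the result on each process ('MAX_NP', 'blk',
'sendbuf', and 'recvbuf' are illustrative names supplied by the caller;
'sendbuf' holds 'size * blk' doubles):
.vb
    int i, size, recvcounts[MAX_NP];
    MPI_Comm_size( comm, &size );
    for (i=0; i<size; i++) recvcounts[i] = blk;
    MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_DOUBLE,
                        MPI_SUM, comm );
.ve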

.N fortran

.N collops

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
.N MPI_ERR_OP
.N MPI_ERR_BUFFER_ALIAS
@*/
EXPORT_MPI_API int MPI_Reduce_scatter ( void *sendbuf, void *recvbuf, int *recvcnts, 
			 MPI_Datatype datatype, MPI_Op op, MPI_Comm comm )
{
  int   mpi_errno = MPI_SUCCESS;
  struct MPIR_COMMUNICATOR *comm_ptr;
  struct MPIR_DATATYPE     *dtype_ptr;
  static char myname[] = "MPI_REDUCE_SCATTER";
  MPIR_ERROR_DECL;

  TR_PUSH(myname);

  comm_ptr = MPIR_GET_COMM_PTR(comm);

  dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

  /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
  MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
  MPIR_TEST_ALIAS(recvbuf,sendbuf);
  if (mpi_errno)
      return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

  MPIR_ERROR_PUSH(comm_ptr);
  mpi_errno = comm_ptr->collops->Reduce_scatter(sendbuf, recvbuf, recvcnts, 
						dtype_ptr, op, comm_ptr );
  MPIR_ERROR_POP(comm_ptr);
  TR_POP;
  MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
Example #5
/*@

MPI_Attr_delete - Deletes attribute value associated with a key

Input Parameters:
+ comm - communicator to which attribute is attached (handle) 
- keyval - The key value of the deleted attribute (integer) 

.N fortran

.N Errors
.N MPI_ERR_COMM
.N MPI_ERR_PERM_KEY
@*/
EXPORT_MPI_API int MPI_Attr_delete ( MPI_Comm comm, int keyval )
{
  MPIR_HBT_node *attr;
  MPIR_Attr_key *attr_key;
  int            mpi_errno    = MPI_SUCCESS;
  struct MPIR_COMMUNICATOR *comm_ptr;
  static char myname[] = "MPI_ATTR_DELETE";

  TR_PUSH(myname);

  comm_ptr = MPIR_GET_COMM_PTR(comm);
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
  
  if ( ( (keyval == MPI_KEYVAL_INVALID) && (mpi_errno = MPI_ERR_OTHER) ) )
	return MPIR_ERROR(comm_ptr, mpi_errno, myname);

  attr_key = MPIR_GET_KEYVAL_PTR( keyval );
  MPIR_TEST_MPI_KEYVAL(keyval,attr_key,comm_ptr,myname);

  if (comm == MPI_COMM_WORLD && attr_key->permanent) 
	return MPIR_ERROR( comm_ptr, 
	     MPIR_ERRCLASS_TO_CODE(MPI_ERR_ARG,MPIR_ERR_PERM_KEY),myname );

  MPIR_HBT_lookup(comm_ptr->attr_cache, keyval, &attr);
  if (attr != (MPIR_HBT_node *)0) {
	if ( attr_key->delete_fn.c_delete_fn ) {
	    if (attr_key->FortranCalling) {
		MPI_Aint  invall = (MPI_Aint)attr->value;
		int inval = (int)invall;
		(*attr_key->delete_fn.f77_delete_fn)(comm, 
					   &keyval, &inval,
					   attr_key->extra_state, &mpi_errno );
		attr->value = (void *)(MPI_Aint)inval;
	    }
	    else
		mpi_errno = (*attr_key->delete_fn.c_delete_fn)(comm, 
					    keyval, attr->value,
					    attr_key->extra_state );
	    if (mpi_errno) 
		return MPIR_ERROR( comm_ptr, mpi_errno, myname );
	    }
	MPIR_HBT_delete(comm_ptr->attr_cache, keyval, &attr);
	/* We will now have one less reference to keyval */
	MPIR_REF_DECR(attr_key);
	if ( attr != (MPIR_HBT_node *)0 ) 
	  (void) MPIR_HBT_free_node ( attr );
	}
  else {
      mpi_errno = MPIR_Err_setmsg( MPI_ERR_ARG, MPIR_ERR_NOKEY, myname,
				   "Key not in communicator", 
				   "Key %d not in communicator", keyval );
      return MPIR_ERROR( comm_ptr, mpi_errno, myname );
      /* "Error in MPI_ATTR_DELETE: key not in communicator" ); */
  }

  TR_POP;
  return(mpi_errno);
}
Example #6
/*@
    MPI_Sendrecv - Sends and receives a message

Input Parameters:
+ sendbuf - initial address of send buffer (choice) 
. sendcount - number of elements in send buffer (integer) 
. sendtype - type of elements in send buffer (handle) 
. dest - rank of destination (integer) 
. sendtag - send tag (integer) 
. recvcount - number of elements in receive buffer (integer) 
. recvtype - type of elements in receive buffer (handle) 
. source - rank of source (integer) 
. recvtag - receive tag (integer) 
- comm - communicator (handle) 

Output Parameters:
+ recvbuf - initial address of receive buffer (choice) 
- status - status object (Status).  This refers to the receive operation.
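
A common use is a shift along a ring; a sketch in which every process
of 'MPI_COMM_WORLD' participates ('N' is an illustrative buffer
length):
.vb
    int rank, size, right, left;
    double sbuf[N], rbuf[N];
    MPI_Status status;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    right = (rank + 1) % size;
    left  = (rank + size - 1) % size;
    MPI_Sendrecv( sbuf, N, MPI_DOUBLE, right, 0,
                  rbuf, N, MPI_DOUBLE, left,  0,
                  MPI_COMM_WORLD, &status );
.ve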
  
.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK

@*/
EXPORT_MPI_API int MPI_Sendrecv( void *sendbuf, int sendcount, MPI_Datatype sendtype, 
		  int dest, int sendtag, 
                  void *recvbuf, int recvcount, MPI_Datatype recvtype, 
		  int source, int recvtag, MPI_Comm comm, MPI_Status *status )
{
    MPI_Status __status;
    int               mpi_errno = MPI_SUCCESS;
    MPI_Status        status_array[2];
    MPI_Request       req[2];
    MPIR_ERROR_DECL;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_SENDRECV";

    if(status == MPI_STATUS_IGNORE) status = &__status;

    /* Let the Isend/Irecv check arguments */
    /* Comments on this:
       We can probably do an Irecv/Send/Wait on Irecv (blocking send)
       but what we really like to do is "send if odd, recv if even, 
       followed by send if even, recv if odd".  We can't do that, 
       because we don't require that these match up in any particular
       way (that is, there is no way to assert the "parity" of the 
       partners).  Note that the IBM "mp_bsendrecv" DOES require that
       only mp_bsendrecv be used.  

       Should there be a send/recv bit in the send mode? 

       Note that in this implementation, if the error handler is "return",
       these will return the error to the caller.  If the handler causes
       an abort or message, then that will occur in the called routine.
       Thus, this code need not call the error handler AGAIN.
     */

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    MPIR_ERROR_PUSH(comm_ptr);
    MPIR_CALL_POP(MPI_Irecv ( recvbuf, recvcount, recvtype,
		    source, recvtag, comm, &req[1] ),comm_ptr,myname);
    MPIR_CALL_POP(MPI_Isend ( sendbuf, sendcount, sendtype, dest,   
			    sendtag, comm, &req[0] ),comm_ptr,myname);
    /* FPRINTF( stderr, "[%d] Starting waitall\n", MPIR_tid );*/
    mpi_errno = MPI_Waitall( 2, req, status_array );
    /* We don't use MPIR_CALL_POP because we want to convert
       error in status to the direct error */
    /* MPIR_CALL_POP(MPI_Waitall ( 2, req, status_array ),comm_ptr,myname); */
    MPIR_ERROR_POP(comm_ptr);
    /*FPRINTF( stderr, "[%d] Ending waitall\n", MPIR_tid );*/

    if (mpi_errno == MPI_ERR_IN_STATUS) {
	if (status_array[0].MPI_ERROR) mpi_errno = status_array[0].MPI_ERROR;
	if (status_array[1].MPI_ERROR) mpi_errno = status_array[1].MPI_ERROR;
    }
    (*status) = status_array[1];
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
Example #7
/*@
    MPI_Issend - Starts a nonblocking synchronous send

Input Parameters:
+ buf - initial address of send buffer (choice) 
. count - number of elements in send buffer (integer) 
. datatype - datatype of each send buffer element (handle) 
. dest - rank of destination (integer) 
. tag - message tag (integer) 
- comm - communicator (handle) 

Output Parameter:
. request - communication request (handle) 
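
As with any nonblocking send, the request must later be completed (for
example with 'MPI_Wait'), and 'buf' must not be modified before then; a
sketch ('buf', 'count', 'dest', and 'tag' are the caller's):
.vb
    MPI_Request request;
    MPI_Status  status;
    MPI_Issend( buf, count, MPI_INT, dest, tag, comm, &request );
    ...
    MPI_Wait( &request, &status );
.ve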

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED
@*/
int MPI_Issend( void *buf, int count, MPI_Datatype datatype, int dest, 
		int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE     *dtype_ptr;
    MPIR_SHANDLE             *shandle;
    static char myname[] = "MPI_ISSEND";
    int mpi_errno = MPI_SUCCESS;

    disableSignal();

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) {
        revertSignal();
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    MPIR_ALLOCFN(shandle,MPID_SendAlloc,
	       comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( shandle, MPIR_SEND );

    MPIR_REMEMBER_SEND( shandle, buf, count, datatype, dest, tag, comm_ptr);

    if (dest == MPI_PROC_NULL) {
	shandle->is_complete = 1;
        revertSignal();
	return MPI_SUCCESS;
    }
    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_IssendDatatype( comm_ptr, buf, count, dtype_ptr, 
			 comm_ptr->local_rank, tag, 
			 comm_ptr->send_context, 
			 comm_ptr->lrank_to_grank[dest], 
			 *request, &mpi_errno );
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    }
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
Example #8
/*@
    MPI_Ssend_init - Builds a handle for a synchronous send

Input Parameters:
+ buf - initial address of send buffer (choice) 
. count - number of elements sent (integer) 
. datatype - type of each element (handle) 
. dest - rank of destination (integer) 
. tag - message tag (integer) 
- comm - communicator (handle) 

Output Parameter:
. request - communication request (handle) 
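
A sketch of the usual persistent-request cycle; refilling 'buf' each
iteration is elided, and 'niter' is illustrative:
.vb
    MPI_Request request;
    MPI_Status  status;
    MPI_Ssend_init( buf, count, MPI_INT, dest, tag, comm, &request );
    for (i=0; i<niter; i++) {
        MPI_Start( &request );
        MPI_Wait( &request, &status );
    }
    MPI_Request_free( &request );
.ve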

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
@*/
int MPI_Ssend_init( void *buf, int count, MPI_Datatype datatype, int dest, 
		    int tag, MPI_Comm comm, MPI_Request *request )
{
    int         mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_SSEND_INIT";
    MPIR_PSHANDLE *shandle;

    disableSignal();

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) {
        revertSignal();
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    /* This is IDENTICAL to the create_send code except for the send
       function */
    MPIR_ALLOCFN(shandle,MPID_PSendAlloc,
	       comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( &(shandle->shandle), MPIR_PERSISTENT_SEND );
    /* Save the information about the operation, being careful with
       ref-counted items */
    dtype_ptr   = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    MPIR_REF_INCR(dtype_ptr);
    shandle->perm_datatype = dtype_ptr;
    shandle->perm_tag	   = tag;
    shandle->perm_dest	   = dest;
    shandle->perm_count	   = count;
    shandle->perm_buf	   = buf;
    MPIR_REF_INCR(comm_ptr);
    shandle->perm_comm	   = comm_ptr;
    shandle->active	   = 0;
    shandle->send          = MPID_IssendDatatype;
    /* dest of MPI_PROC_NULL handled in start */

    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
Example #9
/*@
    MPI_Irsend - Starts a nonblocking ready send

Input Parameters:
+ buf - initial address of send buffer (choice) 
. count - number of elements in send buffer (integer) 
. datatype - datatype of each send buffer element (handle) 
. dest - rank of destination (integer) 
. tag - message tag (integer) 
- comm - communicator (handle) 

Output Parameter:
. request - communication request (handle) 
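
A ready send may be started only after the matching receive has been
posted.  In this sketch, rank 1 posts the receive and then tells rank 0
(with an empty message of tag 1) that the ready send may begin; the
buffer names are illustrative:
.vb
    if (rank == 0) {
        MPI_Recv( NULL, 0, MPI_INT, 1, 1, comm, &status );
        MPI_Irsend( buf, count, MPI_INT, 1, 0, comm, &request );
        MPI_Wait( &request, &status );
    }
    else if (rank == 1) {
        MPI_Irecv( buf, count, MPI_INT, 0, 0, comm, &request );
        MPI_Send( NULL, 0, MPI_INT, 0, 1, comm );
        MPI_Wait( &request, &status );
    }
.ve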

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED

@*/
EXPORT_MPI_API int MPI_Irsend( void *buf, int count, MPI_Datatype datatype, int dest, 
		int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE     *dtype_ptr;
    MPIR_SHANDLE             *shandle;
    static char myname[] = "MPI_IRSEND";
    int mpi_errno = MPI_SUCCESS;

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    MPIR_ALLOCFN(shandle,MPID_Send_alloc,
	       comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( (MPI_Request)shandle, MPIR_SEND );

    /* we need the rank of dest in MPI_COMM_ALL in MPID_Gateway_SendCancelPacket(),
       so we save it here */
    shandle->partner_grank = comm_ptr->lrank_to_grank[dest];

    MPIR_REMEMBER_SEND(shandle, buf, count, datatype, dest, tag, comm_ptr);

    if (dest == MPI_PROC_NULL) {
	shandle->is_complete = 1;
	return MPI_SUCCESS;
    }
    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_IrsendDatatype( comm_ptr, buf, count, dtype_ptr, 
			 comm_ptr->local_rank, tag, 
			 comm_ptr->send_context, 
			 comm_ptr->lrank_to_grank[dest], 
			 *request, &mpi_errno, 1 );
    if (mpi_errno) return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    TR_POP;
    return MPI_SUCCESS;
}
Example #10
/*@
    MPI_Recv_init - Builds a handle for a receive

Input Parameters:
+ buf - initial address of receive buffer (choice) 
. count - number of elements received (integer) 
. datatype - type of each element (handle) 
. source - rank of source or 'MPI_ANY_SOURCE' (integer) 
. tag - message tag or 'MPI_ANY_TAG' (integer) 
- comm - communicator (handle) 

Output Parameter:
. request - communication request (handle) 
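
A sketch of the persistent-receive cycle; processing 'buf' after each
'MPI_Wait' is elided, and the loop condition 'more_work' is
illustrative:
.vb
    MPI_Request request;
    MPI_Status  status;
    MPI_Recv_init( buf, count, MPI_INT, source, tag, comm, &request );
    while (more_work) {
        MPI_Start( &request );
        MPI_Wait( &request, &status );
    }
    MPI_Request_free( &request );
.ve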

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_RANK
.N MPI_ERR_TAG
.N MPI_ERR_COMM
.N MPI_ERR_EXHAUSTED

.seealso: MPI_Start, MPI_Request_free
@*/
int MPI_Recv_init( void *buf, int count, MPI_Datatype datatype, int source, 
		   int tag, MPI_Comm comm, MPI_Request *request )
{
    int         mpi_errno = MPI_SUCCESS;
    struct MPIR_DATATYPE *dtype_ptr;
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_RECV_INIT";
    MPIR_PRHANDLE *rhandle;

    disableSignal();

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    
    dtype_ptr   = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
	MPIR_TEST_COUNT(count);
	MPIR_TEST_RECV_TAG(tag);
	MPIR_TEST_RECV_RANK(comm_ptr,source);
	if (mpi_errno) {
            revertSignal();
	    return MPIR_ERROR(comm_ptr, mpi_errno, myname );
        }
#endif

    MPIR_ALLOCFN(rhandle,MPID_PRecvAlloc,
	       comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)rhandle;
    MPID_Request_init( &(rhandle->rhandle), MPIR_PERSISTENT_RECV );
    /* Save the information about the operation, being careful with
       ref-counted items */
    MPIR_REF_INCR(dtype_ptr);
    rhandle->perm_datatype = dtype_ptr;
    rhandle->perm_tag	   = tag;
    rhandle->perm_source   = source;
    rhandle->perm_count	   = count;
    rhandle->perm_buf	   = buf;
    MPIR_REF_INCR(comm_ptr);
    rhandle->perm_comm	   = comm_ptr;
    rhandle->active	   = 0;
    /* dest of MPI_PROC_NULL handled in start */

    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
Example #11
/*@

MPI_Graph_get - Retrieves graph topology information associated with a 
                communicator

Input Parameters:
+ comm - communicator with graph structure (handle) 
. maxindex - length of vector 'index' in the calling program  (integer) 
- maxedges - length of vector 'edges' in the calling program  (integer) 

Output Parameters:
+ index - array of integers containing the graph structure (for details see the definition of 'MPI_GRAPH_CREATE') 
- edges - array of integers containing the graph structure 
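
Typically 'MPI_GRAPHDIMS_GET' is called first to size the arrays; a
sketch:
.vb
    int nnodes, nedges;
    int *index, *edges;
    MPI_Graphdims_get( comm, &nnodes, &nedges );
    index = (int *)malloc( nnodes * sizeof(int) );
    edges = (int *)malloc( nedges * sizeof(int) );
    MPI_Graph_get( comm, nnodes, nedges, index, edges );
.ve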

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Graph_get ( MPI_Comm comm, int maxindex, int maxedges, 
		    int *index, int *edges )
{
  int i, num, flag;
  int *array;
  int mpi_errno = MPI_SUCCESS;
  MPIR_TOPOLOGY *topo;
  struct MPIR_COMMUNICATOR *comm_ptr;
  static char myname[] = "MPI_GRAPH_GET";
  MPIR_ERROR_DECL;

  TR_PUSH(myname);
  comm_ptr = MPIR_GET_COMM_PTR(comm);

#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
  MPIR_TEST_ARG(index);
  MPIR_TEST_ARG(edges);
  if (mpi_errno)
      return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

  /* Get topology information from the communicator */
  MPIR_ERROR_PUSH(comm_ptr);
  mpi_errno = MPI_Attr_get ( comm, MPIR_TOPOLOGY_KEYVAL, (void **)&topo, &flag );
  MPIR_ERROR_POP(comm_ptr);
  if ( ( (flag != 1)               && (mpi_errno = MPI_ERR_TOPOLOGY) ) ||
       ( (topo->type != MPI_GRAPH) && (mpi_errno = MPI_ERR_TOPOLOGY) )  )
      return MPIR_ERROR( comm_ptr, mpi_errno, myname );

  /* Get index */
  num = topo->graph.nnodes;
  array = topo->graph.index;
  if ( index != (int *)0 )
    for ( i=0; (i<maxindex) && (i<num); i++ )
      (*index++) = (*array++);

  /* Get edges */
  num = topo->graph.nedges;
  array = topo->graph.edges;
  if ( edges != (int *)0 )
    for ( i=0; (i<maxedges) && (i<num); i++ )
      (*edges++) = (*array++);

  TR_POP;
  return (mpi_errno);
}
Example #12
/*@
    MPI_Recv - Basic receive

Output Parameters:
+ buf - initial address of receive buffer (choice)
- status - status object (Status)

Input Parameters:
+ count - maximum number of elements in receive buffer (integer)
. datatype - datatype of each receive buffer element (handle)
. source - rank of source (integer)
. tag - message tag (integer)
- comm - communicator (handle)

Notes:
The 'count' argument indicates the maximum length of a message; the actual
number can be determined with 'MPI_Get_count'.
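
A sketch that receives at most 'MAXLEN' integers and then recovers the
actual message length in 'n' ('MAXLEN', 'source', and 'tag' are
illustrative):
.vb
    int buf[MAXLEN], n;
    MPI_Status status;
    MPI_Recv( buf, MAXLEN, MPI_INT, source, tag, MPI_COMM_WORLD,
              &status );
    MPI_Get_count( &status, MPI_INT, &n );
.ve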

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TYPE
.N MPI_ERR_COUNT
.N MPI_ERR_TAG
.N MPI_ERR_RANK

@*/
int MPI_Recv( void *buf, int count, MPI_Datatype datatype, int source,
              int tag, MPI_Comm comm, MPI_Status *status )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE     *dtype_ptr;
    static char myname[] = "MPI_RECV";
    int         mpi_errno = MPI_SUCCESS;
    /*
       Because this is a very common routine, we show how it can be
       optimized to be run "inline"; In addition, this lets us exploit
       features in the ADI to simplify the execution of blocking receive
       calls.
     */
    if (source != MPI_PROC_NULL)
    {
        disableSignal();
        comm_ptr = MPIR_GET_COMM_PTR(comm);
        MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

        dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
        MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
        MPIR_TEST_COUNT(count);
        MPIR_TEST_RECV_TAG(tag);
        MPIR_TEST_RECV_RANK(comm_ptr,source);
        if (mpi_errno) {
            revertSignal();
            return MPIR_ERROR(comm_ptr, mpi_errno, myname );
        }
#endif

        MPID_RecvDatatype( comm_ptr, buf, count, dtype_ptr, source, tag,
                           comm_ptr->recv_context, status, &mpi_errno );
        revertSignal();
        MPIR_RETURN(comm_ptr, mpi_errno, myname );
    }
    else {
        if (status != MPI_STATUS_IGNORE) {
            /* See MPI standard section 3.11 */
            MPID_ZERO_STATUS_COUNT(status);
            status->MPI_SOURCE     = MPI_PROC_NULL;
            status->MPI_TAG        = MPI_ANY_TAG;
        }
    }
    return MPI_SUCCESS;
}
Example #13
/*@

MPI_Allreduce - Combines values from all processes and distributes the result
                back to all processes

Input Parameters:
+ sendbuf - starting address of send buffer (choice) 
. count - number of elements in send buffer (integer) 
. datatype - data type of elements of send buffer (handle) 
. op - operation (handle) 
- comm - communicator (handle) 

Output Parameter:
. recvbuf - starting address of receive buffer (choice) 
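
A minimal sketch computing a global sum that every process receives;
each process is assumed to have computed its own 'local_sum':
.vb
    double local_sum, global_sum;
    MPI_Allreduce( &local_sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM,
                   MPI_COMM_WORLD );
.ve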

.N fortran

.N collops

.N Errors
.N MPI_ERR_BUFFER
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_OP
.N MPI_ERR_COMM
@*/
EXPORT_MPI_API int MPI_Allreduce ( void *sendbuf, void *recvbuf, int count, 
		    MPI_Datatype datatype, MPI_Op op, MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE     *dtype_ptr;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_ALLREDUCE";
#ifdef RED_DEBUG
	char zahl[10];
	static int callcount=0;
	callcount++;
	DBG("Entering Allreduce()");
	DBG(itoa(callcount,zahl,10));
#endif
    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    MPIR_TEST_COUNT(count);
    MPIR_TEST_ALIAS(sendbuf,recvbuf);
    if (mpi_errno)
	{
#ifdef RED_DEBUG
		DBG("Leaving Allreduce Error No");
		DBG(itoa(mpi_errno,zahl,10));
#endif
		return MPIR_ERROR(comm_ptr, mpi_errno, myname );
	}
#endif
    
    MPIR_ERROR_PUSH(comm_ptr);
    /* Test for intercommunicator is done when collops is assigned */  
    mpi_errno = comm_ptr->collops->Allreduce(sendbuf, recvbuf, count, 
					     dtype_ptr, op, comm_ptr );
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
#ifdef RED_DEBUG
	DBG("Leaving Allreduce");
#endif
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
Example #14
/*@

MPI_Graphdims_get - Retrieves graph topology information associated with a 
                    communicator

Input Parameters:
. comm - communicator for group with graph structure (handle) 

Output Parameters:
+ nnodes - number of nodes in graph (integer) 
- nedges - number of edges in graph (integer) 

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
EXPORT_MPI_API int MPI_Graphdims_get ( MPI_Comm comm, int *nnodes, int *nedges )
{
  int mpi_errno = MPI_SUCCESS, flag;
  MPIR_TOPOLOGY *topo;
  struct MPIR_COMMUNICATOR *comm_ptr;
  static char myname[] = "MPI_GRAPHDIMS_GET";
  MPIR_ERROR_DECL;

  TR_PUSH(myname);

  comm_ptr = MPIR_GET_COMM_PTR(comm);

#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
  MPIR_TEST_ARG(nnodes);
  MPIR_TEST_ARG(nedges);
  if (mpi_errno)
      return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

  /* Get topology information from the communicator */
  MPIR_ERROR_PUSH( comm_ptr );
  mpi_errno = 
      MPI_Attr_get ( comm, MPIR_TOPOLOGY_KEYVAL, (void **)&topo, &flag );
  MPIR_ERROR_POP( comm_ptr );
  if (mpi_errno) {
      return MPIR_ERROR( comm_ptr, mpi_errno, myname );
  }

  /* Set nnodes */
  if ( nnodes != (int *)0 )
    if ( (flag == 1) && (topo->type == MPI_GRAPH) )
      (*nnodes) = topo->graph.nnodes;
    else
      (*nnodes) = MPI_UNDEFINED;

  /* Set nedges */
  if ( nedges != (int *)0 ) 
    if ( (flag == 1) && (topo->type == MPI_GRAPH) )
      (*nedges) = topo->graph.nedges;
    else
      (*nedges) = MPI_UNDEFINED;

  TR_POP;
  return (MPI_SUCCESS);
}
Example #15
/*@

MPI_Gatherv - Gathers into specified locations from all processes in a group

Input Parameters:
+ sendbuf - starting address of send buffer (choice) 
. sendcount - number of elements in send buffer (integer) 
. sendtype - data type of send buffer elements (handle) 
. recvcounts - integer array (of length group size) 
containing the number of elements that are received from each process
(significant only at 'root') 
. displs - integer array (of length group size). Entry 
 'i'  specifies the displacement relative to recvbuf  at
which to place the incoming data from process  'i'  (significant only
at root) 
. recvtype - data type of recv buffer elements 
(significant only at 'root') (handle) 
. root - rank of receiving process (integer) 
- comm - communicator (handle) 

Output Parameter:
. recvbuf - address of receive buffer (choice, significant only at 'root') 
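
A common pattern is to gather the counts first and then build 'displs'
so that the contributions are placed contiguously at the root; a sketch
('mycount', 'sendbuf', and 'recvbuf' are supplied by the caller):
.vb
    MPI_Gather( &mycount, 1, MPI_INT, recvcounts, 1, MPI_INT,
                root, comm );
    if (rank == root) {
        displs[0] = 0;
        for (i=1; i<size; i++)
            displs[i] = displs[i-1] + recvcounts[i-1];
    }
    MPI_Gatherv( sendbuf, mycount, MPI_DOUBLE, recvbuf, recvcounts,
                 displs, MPI_DOUBLE, root, comm );
.ve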

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Gatherv ( void *sendbuf, int sendcnt, MPI_Datatype sendtype, 
                  void *recvbuf, int *recvcnts, int *displs, 
		  MPI_Datatype recvtype, 
                  int root, MPI_Comm comm )
{
  int        mpi_errno = MPI_SUCCESS;
  int        rank;
  struct MPIR_COMMUNICATOR *comm_ptr;
  struct MPIR_DATATYPE     *stype_ptr, *rtype_ptr = 0;
  MPIR_ERROR_DECL;
  static char myname[] = "MPI_GATHERV";

  TR_PUSH(myname);

  comm_ptr = MPIR_GET_COMM_PTR(comm);

  stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);

#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr, myname);
  MPIR_TEST_COUNT(sendcnt);
  MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr, myname );
  if (mpi_errno)
      return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

  /* rtype is significant only at root */
  (void) MPIR_Comm_rank ( comm_ptr, &rank );
  if (rank == root) {
      rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);
#ifndef MPIR_NO_ERROR_CHECKING
      MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr, myname );
      if (mpi_errno)
          return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif
  }

  MPIR_ERROR_PUSH(comm_ptr);
  mpi_errno = comm_ptr->collops->Gatherv( sendbuf, sendcnt,  stype_ptr, 
                  recvbuf, recvcnts, displs, rtype_ptr, 
                  root, comm_ptr );
  MPIR_ERROR_POP(comm_ptr);
  TR_POP;
  MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
Example #16
/*@
   MPI_Abort - Terminates MPI execution environment

Input Parameters:
+ comm - communicator of tasks to abort 
- errorcode - error code to return to invoking environment 

Notes:
Terminates all MPI processes associated with the communicator 'comm'; in
most systems (all to date), terminates `all` processes.

.N fortran
@*/
int MPI_Abort( MPI_Comm comm, int errorcode )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    static char myname[] = "MPI_ABORT";
    int mpi_errno = MPI_SUCCESS;

    disableSignal();
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    
    MPID_Abort( comm_ptr, errorcode, "MPI Abort by user", (char *)0 );

/* If for some reason we get here, force an abort */
    abort( );

    revertSignal();
/* This keeps lint happy */
    return MPI_ERR_UNKNOWN;
}
Example #17
/*@
    MPI_Irecv - Begins a nonblocking receive

Input Parameters:
+ buf - initial address of receive buffer (choice) 
. count - number of elements in receive buffer (integer) 
. datatype - datatype of each receive buffer element (handle) 
. source - rank of source (integer) 
. tag - message tag (integer) 
- comm - communicator (handle) 

Output Parameter:
. request - communication request (handle) 
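
A minimal sketch; the receive must later be completed, for example
with 'MPI_Wait', before 'buf' may be read ('buf', 'count', 'source',
and 'tag' are the caller's):
.vb
    MPI_Request request;
    MPI_Status  status;
    MPI_Irecv( buf, count, MPI_INT, source, tag, comm, &request );
    ...
    MPI_Wait( &request, &status );
.ve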

.N fortran
@*/
EXPORT_MPI_API int MPI_Irecv( void *buf, int count, MPI_Datatype datatype, int source, 
	       int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE *dtype_ptr;
    MPIR_RHANDLE *rhandle;
    static char myname[] = "MPI_IRECV";
    int mpi_errno = MPI_SUCCESS;

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_RECV_TAG(tag);
    MPIR_TEST_RECV_RANK(comm_ptr,source);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    MPIR_ALLOCFN(rhandle,MPID_Recv_alloc,comm_ptr,
	       MPI_ERR_EXHAUSTED,myname);
    MPID_Request_init ((MPI_Request)rhandle, MPIR_RECV );
    *request = (MPI_Request) rhandle;

    if (source == MPI_PROC_NULL) {
	rhandle->s.MPI_TAG    = MPI_ANY_TAG;
	rhandle->s.MPI_SOURCE = MPI_PROC_NULL;
	rhandle->s.count      = 0;
	rhandle->is_complete  = 1;
	return MPI_SUCCESS;
    }
    MPID_IrecvDatatype( comm_ptr, buf, count, dtype_ptr, source, tag, 
			comm_ptr->recv_context, *request, &mpi_errno );
    if (mpi_errno) return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    return MPI_SUCCESS;
}
Example #18
/*@

MPI_Comm_remote_group - Accesses the remote group associated with 
                        the given inter-communicator

Input Parameter:
. comm - Communicator (must be intercommunicator)

Output Parameter:
. group - remote group of communicator

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
@*/
int MPI_Comm_remote_group ( MPI_Comm comm, MPI_Group *group )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_GROUP *group_ptr;
    int flag;
    int mpi_errno = MPI_SUCCESS;
    static char myname[] = "MPI_COMM_REMOTE_GROUP";

    TR_PUSH(myname);
    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname );

    /* Verify that this is an inter-communicator */
    MPI_Comm_test_inter ( comm, &flag );
    if (!flag) return MPIR_ERROR(comm_ptr,
	 MPIR_ERRCLASS_TO_CODE(MPI_ERR_COMM,MPIR_ERR_COMM_INTRA),myname);

    MPIR_Group_dup( comm_ptr->group, &group_ptr );
    *group = group_ptr->self;
    TR_POP;
    return (MPI_SUCCESS);
}
Example #19
/*@

MPI_Bcast - Broadcasts a message from the process with rank "root" to
            all other processes of the group. 

Input/output Parameters:
+ buffer - starting address of buffer (choice) 
. count - number of entries in buffer (integer) 
. datatype - data type of buffer (handle) 
. root - rank of broadcast root (integer) 
- comm - communicator (handle) 

Algorithm:  
If the underlying device does not take responsibility, this function
uses a tree-like algorithm to broadcast the message to blocks of
processes.  A linear algorithm is then used to broadcast the message
from the first process in a block to all other processes.
'MPIR_BCAST_BLOCK_SIZE' determines the size of blocks.  If this is set
to 1, then this function is equivalent to using a pure tree algorithm.
If it is set to the size of the group or greater, it is a pure linear
algorithm.  The value should be adjusted to determine the most
efficient value on different machines.
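
A minimal sketch in which rank 0 obtains a value and distributes it to
every process (how the root obtains 'n' is left open):
.vb
    int n;
    if (rank == 0) n = ...;
    MPI_Bcast( &n, 1, MPI_INT, 0, MPI_COMM_WORLD );
.ve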

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
.N MPI_ERR_ROOT
@*/
EXPORT_MPI_API int MPI_Bcast ( void *buffer, int count, MPI_Datatype datatype, int root, 
		MPI_Comm comm )
{
    int mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE     *dtype_ptr;
    static char myname[] = "MPI_BCAST";
    MPIR_ERROR_DECL;

    TR_PUSH(myname)
    comm_ptr = MPIR_GET_COMM_PTR(comm);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);

    /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);
    /* If an intercomm broadcast, the root can also be MPI_ROOT or 
       MPI_PROC_NULL */
    if (root < MPI_ROOT || root >= comm_ptr->np) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_ROOT, MPIR_ERR_DEFAULT, myname, 
				     (char *)0, (char *)0, root );
    }
    MPIR_TEST_COUNT(count);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    /* See the overview in Collection Operations for why this is ok */
    if (count == 0) return MPI_SUCCESS;
    
    MPIR_ERROR_PUSH(comm_ptr);
    mpi_errno = comm_ptr->collops->Bcast(buffer, count, dtype_ptr, root, 
					 comm_ptr);
    MPIR_ERROR_POP(comm_ptr);
    TR_POP;
    MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
Example #20
/*@

MPI_Scatter - Sends data from one task to all other tasks in a group

Input Parameters:
+ sendbuf - address of send buffer (choice, significant 
only at 'root') 
. sendcount - number of elements sent to each process 
(integer, significant only at 'root') 
. sendtype - data type of send buffer elements (significant only at 'root') 
(handle) 
. recvcount - number of elements in receive buffer (integer) 
. recvtype - data type of receive buffer elements (handle) 
. root - rank of sending process (integer) 
- comm - communicator (handle) 

Output Parameter:
. recvbuf - address of receive buffer (choice) 
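
A minimal sketch in which the root hands one 100-element block to each
process; at the root, 'sendbuf' must hold 100 doubles for every member
of 'comm':
.vb
    double recvbuf[100];
    MPI_Scatter( sendbuf, 100, MPI_DOUBLE,
                 recvbuf, 100, MPI_DOUBLE, root, comm );
.ve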

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Scatter ( 
	void *sendbuf, 
	int sendcnt, 
	MPI_Datatype sendtype, 
	void *recvbuf, 
	int recvcnt, 
	MPI_Datatype recvtype, 
	int root, 
	MPI_Comm comm )
{
  int        mpi_errno = MPI_SUCCESS;
  struct MPIR_COMMUNICATOR *comm_ptr;
  struct MPIR_DATATYPE     *stype_ptr = NULL;
  struct MPIR_DATATYPE     *rtype_ptr = NULL;
  static char myname[] = "MPI_SCATTER";
  MPIR_ERROR_DECL;  

  TR_PUSH(myname);
  comm_ptr = MPIR_GET_COMM_PTR(comm);
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

  /* Significant only at root */
  if (root == comm_ptr->local_rank) {
      stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);
      MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr,myname);
  }

  rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);
  MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr,myname);

  MPIR_ERROR_PUSH(comm_ptr);
  mpi_errno = comm_ptr->collops->Scatter(sendbuf, sendcnt, stype_ptr, 
				recvbuf, recvcnt, rtype_ptr, 
				root, comm_ptr );
  MPIR_ERROR_POP(comm_ptr);
  TR_POP;
  MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
Example #21
/*@

MPI_Graph_map - Maps process to graph topology information

Input Parameters:
+ comm - input communicator (handle) 
. nnodes - number of graph nodes (integer) 
. index - integer array specifying the graph structure, see 'MPI_GRAPH_CREATE' 
- edges - integer array specifying the graph structure 

Output Parameter:
. newrank - reordered rank of the calling process; 'MPI_UNDEFINED' if the 
calling process does not belong to graph (integer) 
 
.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
EXPORT_MPI_API int MPI_Graph_map ( MPI_Comm comm_old, int nnodes, int *index, int *edges, 
		    int *newrank )
{
  int rank, size;
  int mpi_errno = MPI_SUCCESS;
  struct MPIR_COMMUNICATOR *comm_old_ptr;
  static char myname[] = "MPI_GRAPH_MAP";

  TR_PUSH(myname);
  comm_old_ptr = MPIR_GET_COMM_PTR(comm_old);

#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm_old,comm_old_ptr,comm_old_ptr,myname);
  if (nnodes < 1) mpi_errno = MPI_ERR_ARG;
  MPIR_TEST_ARG(newrank);
  MPIR_TEST_ARG(index);
  MPIR_TEST_ARG(edges);
  if (mpi_errno)
      return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
#endif
  
  /* Test that the communicator is large enough */
  MPIR_Comm_size( comm_old_ptr, &size );
  if (size < nnodes) {
      return MPIR_ERROR( comm_old_ptr, MPI_ERR_ARG, myname );
  }

  /* Am I in this topology? */
  MPIR_Comm_rank ( comm_old_ptr, &rank );
  if ( rank < nnodes )
    (*newrank) = rank;
  else
    (*newrank) = MPI_UNDEFINED;

  TR_POP;
  return (mpi_errno);
}
Example #22
/*@

MPI_Allgather - Gathers data from all tasks and distributes it to all tasks

Input Parameters:
+ sendbuf - starting address of send buffer (choice) 
. sendcount - number of elements in send buffer (integer) 
. sendtype - data type of send buffer elements (handle) 
. recvcount - number of elements received from any process (integer) 
. recvtype - data type of receive buffer elements (handle) 
- comm - communicator (handle) 

Output Parameter:
. recvbuf - address of receive buffer (choice) 

Notes:
 The MPI standard (1.0 and 1.1) says that 

 The jth block of data sent from 
 each process is received by every process and placed in the jth block of the 
 buffer 'recvbuf'.  

 This is misleading; a better description is

 The block of data sent from the jth process is received by every
 process and placed in the jth block of the buffer 'recvbuf'.

 This text was suggested by Rajeev Thakur.
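
A sketch building a table whose entry 'j' holds the value contributed
by rank 'j' ('MAX_NP' is an illustrative bound on the group size):
.vb
    int myval, table[MAX_NP];
    MPI_Allgather( &myval, 1, MPI_INT, table, 1, MPI_INT, comm );
.ve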

.N fortran

.N Errors
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_BUFFER
@*/
int MPI_Allgather ( void *sendbuf, int sendcount, MPI_Datatype sendtype,
                    void *recvbuf, int recvcount, MPI_Datatype recvtype, 
		    MPI_Comm comm )
{
  int mpi_errno = MPI_SUCCESS;
  struct MPIR_COMMUNICATOR *comm_ptr=0;
  struct MPIR_DATATYPE     *stype_ptr=0, *rtype_ptr=0;
  MPIR_ERROR_DECL;
  static char myname[] = "MPI_ALLGATHER";

  TR_PUSH(myname);
  comm_ptr = MPIR_GET_COMM_PTR(comm);

  stype_ptr = MPIR_GET_DTYPE_PTR(sendtype);

  rtype_ptr = MPIR_GET_DTYPE_PTR(recvtype);

  /* Check for invalid arguments */
#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr, myname);
  MPIR_TEST_DTYPE(sendtype,stype_ptr,comm_ptr, myname );
  MPIR_TEST_DTYPE(recvtype,rtype_ptr,comm_ptr, myname );
  MPIR_TEST_COUNT(sendcount);
  MPIR_TEST_COUNT(recvcount);
  if (mpi_errno)
      return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

  MPIR_ERROR_PUSH(comm_ptr);
  mpi_errno = comm_ptr->collops->Allgather( sendbuf, sendcount, stype_ptr,
					    recvbuf, recvcount, rtype_ptr, 
					    comm_ptr );
  MPIR_ERROR_POP(comm_ptr);
  TR_POP;
  MPIR_RETURN(comm_ptr,mpi_errno,myname);
}
Example #23
/*@

MPI_Intercomm_create - Creates an intercommunicator from two intracommunicators

Input Parameters:
+ local_comm - Local (intra)communicator
. local_leader - Rank in local_comm of leader (often 0)
. peer_comm - Remote communicator
. remote_leader - Rank in peer_comm of remote leader (often 0)
- tag - Message tag to use in constructing intercommunicator; if multiple
  'MPI_Intercomm_creates' are being made, they should use different tags (more
  precisely, ensure that the local and remote leaders are using different
  tags for each 'MPI_intercomm_create').

Output Parameter:
. comm_out - Created intercommunicator

Notes:
  The MPI 1.1 Standard contains two mutually exclusive comments on the
  input intracommunicators.  One says that their respective groups must be
  disjoint; the other that the leaders can be the same process.  After
  some discussion by the MPI Forum, it has been decided that the groups must
  be disjoint.  Note that the `reason` given for this in the standard is
  `not` the reason for this choice; rather, the `other` operations on 
  intercommunicators (like 'MPI_Intercomm_merge') do not make sense if the
  groups are not disjoint.

.N fortran

Algorithm:
+ 1) Allocate a send context, an inter-coll context, and an intra-coll context
. 2) Send "send_context" and lrank_to_grank list from local comm group 
     if I''m the local_leader.
. 3) If I''m the local leader, then wait on the posted sends and receives
     to complete.  Post the receive for the remote group information and
	 wait for it to complete.
. 4) Broadcast information received from the remote leader.  
. 5) Create the inter_communicator from the information we now have.
-    An inter-communicator ends up with three levels of communicators. 
     The inter-communicator returned to the user, a "collective" 
     inter-communicator that can be used for safe communications between
     local & remote groups, and a collective intra-communicator that can 
     be used to allocate new contexts during the merge and dup operations.

	 For the resulting inter-communicator, 'comm_out'

.vb
       comm_out                       = inter-communicator
       comm_out->comm_coll            = "collective" inter-communicator
       comm_out->comm_coll->comm_coll = safe collective intra-communicator
.ve
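
For illustration, a sketch that splits 'MPI_COMM_WORLD' into two halves
and joins them with an intercommunicator; the local leaders are world
ranks 0 and 'size/2', and the tag '99' is an arbitrary choice:
.vb
    MPI_Comm local_comm, inter_comm;
    int rank, size, color;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    color = (rank < size/2) ? 0 : 1;
    MPI_Comm_split( MPI_COMM_WORLD, color, rank, &local_comm );
    MPI_Intercomm_create( local_comm, 0, MPI_COMM_WORLD,
                          (color == 0) ? size/2 : 0, 99, &inter_comm );
.ve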

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_TAG
.N MPI_ERR_EXHAUSTED
.N MPI_ERR_RANK

.seealso: MPI_Intercomm_merge, MPI_Comm_free, MPI_Comm_remote_group, 
          MPI_Comm_remote_size
@*/
EXPORT_MPI_API int MPI_Intercomm_create ( MPI_Comm local_comm, int local_leader, 
			   MPI_Comm peer_comm, int remote_leader, int tag, 
			   MPI_Comm *comm_out )
{
  int              local_size, local_rank, peer_size, peer_rank;
  int              remote_size;
  int              mpi_errno = MPI_SUCCESS;
  MPIR_CONTEXT     context, send_context;
  struct MPIR_GROUP *remote_group_ptr;
  struct MPIR_COMMUNICATOR *new_comm, *local_comm_ptr, *peer_comm_ptr;
  MPI_Request      req[6];
  MPI_Status       status[6];
  MPIR_ERROR_DECL;
  static char myname[]="MPI_INTERCOMM_CREATE";

  TR_PUSH(myname);
  local_comm_ptr = MPIR_GET_COMM_PTR(local_comm);

  
#ifndef MPIR_NO_ERROR_CHECKING
  /* Check for valid arguments to function */
  MPIR_TEST_MPI_COMM(local_comm,local_comm_ptr,local_comm_ptr,myname);
  MPIR_TEST_SEND_TAG(tag);
  if (mpi_errno)
      return MPIR_ERROR(local_comm_ptr, mpi_errno, myname );
#endif

  if (local_comm  == MPI_COMM_NULL) {
      mpi_errno = MPIR_Err_setmsg( MPI_ERR_COMM, MPIR_ERR_LOCAL_COMM, myname, 
		   "Local communicator must not be MPI_COMM_NULL", (char *)0 );
      return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
  }

  (void) MPIR_Comm_size ( local_comm_ptr, &local_size );
  (void) MPIR_Comm_rank ( local_comm_ptr, &local_rank );

  if ( local_leader == local_rank ) {
      /* Peer_comm need be valid only at local_leader */
      peer_comm_ptr = MPIR_GET_COMM_PTR(peer_comm);
      if ((MPIR_TEST_COMM_NOTOK(peer_comm,peer_comm_ptr) || 
	   (peer_comm == MPI_COMM_NULL))) {
	  mpi_errno = MPIR_Err_setmsg( MPI_ERR_COMM, MPIR_ERR_PEER_COMM,
			       myname, "Peer communicator is not valid", 
				       (char *)0 );
      return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
      }

    (void) MPIR_Comm_size ( peer_comm_ptr,  &peer_size  );
    (void) MPIR_Comm_rank ( peer_comm_ptr,  &peer_rank  );

    if (((peer_rank     == MPI_UNDEFINED) && (mpi_errno = MPI_ERR_RANK)))
	return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );

    if (((remote_leader >= peer_size)     && (mpi_errno = MPI_ERR_RANK)) || 
        ((remote_leader <  0)             && (mpi_errno = MPI_ERR_RANK))) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_RANK, MPIR_ERR_REMOTE_RANK, 
				     myname, 
				     "Error specifying remote_leader", 
"Error specifying remote_leader; value %d not between 0 and %d", remote_leader, peer_size );
       return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
    }
  }

  if (((local_leader  >= local_size)    && (mpi_errno = MPI_ERR_RANK)) || 
      ((local_leader  <  0)             && (mpi_errno = MPI_ERR_RANK))) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_RANK, MPIR_ERR_LOCAL_RANK, 
				     myname, 
				     "Error specifying local_leader", 
"Error specifying local_leader; value %d not in between 0 and %d", local_leader, local_size );
       return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
    }

  /* Allocate send context, inter-coll context and intra-coll context */
  MPIR_Context_alloc ( local_comm_ptr, 3, &context );

  
  /* If I'm the local leader, then exchange information */
  if (local_rank == local_leader) {
      MPIR_ERROR_PUSH(peer_comm_ptr);

      /* Post the receives for the information from the remote_leader */
      /* We don't post a receive for the remote group yet, because we */
      /* don't know how big it is yet. */
      MPIR_CALL_POP(MPI_Irecv (&remote_size, 1, MPI_INT, remote_leader, tag,
			       peer_comm, &(req[2])),peer_comm_ptr,myname);
      MPIR_CALL_POP(MPI_Irecv (&send_context, 1, MPIR_CONTEXT_TYPE, 
			       remote_leader,tag, peer_comm, &(req[3])),
		    peer_comm_ptr,myname);
    
      /* Send the lrank_to_grank table of the local_comm and an allocated */
      /* context. Currently I use multiple messages to send this info.    */
      /* Eventually, this will change(?) */
      MPIR_CALL_POP(MPI_Isend (&local_size, 1, MPI_INT, remote_leader, tag, 
               peer_comm, &(req[0])),peer_comm_ptr,myname);
      MPIR_CALL_POP(MPI_Isend (&context, 1, MPIR_CONTEXT_TYPE, remote_leader, 
               tag, peer_comm, &(req[1])),peer_comm_ptr,myname);
    
      /* Wait on the communication requests to finish */
      MPIR_CALL_POP(MPI_Waitall ( 4, req, status ),peer_comm_ptr,myname);
    
      /* We now know how big the remote group is, so create it */
      remote_group_ptr = MPIR_CreateGroup ( remote_size );
      remote_group_ptr->self = 
	  (MPI_Group) MPIR_FromPointer( remote_group_ptr );

      /* Post the receive for the group information */
      MPIR_CALL_POP(MPI_Irecv (remote_group_ptr->lrank_to_grank, remote_size, 
			       MPI_INT, remote_leader, tag, peer_comm, 
			       &(req[5])),peer_comm_ptr,myname);
    
      /* Send the local group info to the remote group */
      MPIR_CALL_POP(MPI_Isend (local_comm_ptr->group->lrank_to_grank, local_size, 
			       MPI_INT, remote_leader, tag, peer_comm, 
			       &(req[4])),peer_comm_ptr,myname);
    
      /* wait on the send and the receive for the group information */
      MPIR_CALL_POP(MPI_Waitall ( 2, &(req[4]), &(status[4]) ),peer_comm_ptr,
		    myname);
      MPIR_ERROR_POP(peer_comm_ptr);

      /* Now we can broadcast the group information to the other local comm */
      /* members. */
      MPIR_ERROR_PUSH(local_comm_ptr);
      MPIR_CALL_POP(MPI_Bcast(&remote_size,1,MPI_INT,local_rank,local_comm),
		    local_comm_ptr,myname);
      MPIR_CALL_POP(MPI_Bcast(remote_group_ptr->lrank_to_grank, remote_size, 
			      MPI_INT, local_rank, local_comm),local_comm_ptr,
		    myname);
      MPIR_ERROR_POP(local_comm_ptr);
  }
  /* Else I'm just an ordinary comm member, so receive the bcast'd */
  /* info about the remote group */
  else {
      MPIR_ERROR_PUSH(local_comm_ptr);
      MPIR_CALL_POP(MPI_Bcast(&remote_size, 1, MPI_INT, local_leader,
			      local_comm),local_comm_ptr,myname);
    
      /* We now know how big the remote group is, so create it */
      remote_group_ptr = MPIR_CreateGroup ( remote_size );
      remote_group_ptr->self = 
	  (MPI_Group) MPIR_FromPointer( remote_group_ptr );
	
      /* Receive the group info */
      MPIR_CALL_POP(MPI_Bcast(remote_group_ptr->lrank_to_grank, remote_size, 
			      MPI_INT, local_leader, local_comm),
		    local_comm_ptr,myname );
      MPIR_ERROR_POP(local_comm_ptr);
  }

  MPIR_ERROR_PUSH(local_comm_ptr);
  /* Broadcast the send context */
  MPIR_CALL_POP(MPI_Bcast(&send_context, 1, MPIR_CONTEXT_TYPE, 
			  local_leader, local_comm),local_comm_ptr,myname);
  MPIR_ERROR_POP(local_comm_ptr);

  /* We all now have all the information necessary, start building the */
  /* inter-communicator */
  MPIR_ALLOC(new_comm,NEW(struct MPIR_COMMUNICATOR),local_comm_ptr, 
	     MPI_ERR_EXHAUSTED,myname );
  MPIR_Comm_init( new_comm, local_comm_ptr, MPIR_INTER );
  *comm_out = new_comm->self;
  new_comm->group = remote_group_ptr;
  MPIR_Group_dup( local_comm_ptr->group, &(new_comm->local_group) );
  new_comm->local_rank	   = new_comm->local_group->local_rank;
  new_comm->lrank_to_grank = new_comm->group->lrank_to_grank;
  new_comm->np             = new_comm->group->np;
  new_comm->send_context   = send_context;
  new_comm->recv_context   = context;
  new_comm->comm_name      = 0;
  if ((mpi_errno = MPID_CommInit( local_comm_ptr, new_comm )) )
      return mpi_errno;
  (void) MPIR_Attr_create_tree ( new_comm );

  /* Build the collective inter-communicator */
  MPIR_Comm_make_coll( new_comm, MPIR_INTER );
  MPIR_Comm_make_onesided( new_comm, MPIR_INTER );
  
  /* Build the collective intra-communicator.  Note that we require
     an intra-communicator for the "coll_comm" so that MPI_COMM_DUP
     can use it for some collective operations (do we need this
     for MPI-2 with intercommunicator collective?) 
     
     Note that this really isn't the right thing to do; we need to replace
     *all* of the Mississippi state collective code.
   */
  MPIR_Comm_make_coll( new_comm->comm_coll, MPIR_INTRA );
#if 0
  MPIR_Comm_make_coll( new_comm->comm_onesided, MPIR_INTRA );
#endif
  
  /* Remember it for the debugger */
  MPIR_Comm_remember ( new_comm );

  TR_POP;
  return (mpi_errno);
}
Example #24
/*@

MPI_Cart_create - Makes a new communicator to which topology information
                  has been attached

Input Parameters:
+ comm_old - input communicator (handle) 
. ndims - number of dimensions of cartesian grid (integer) 
. dims - integer array of size ndims specifying the number of processes in 
  each dimension 
. periods - logical array of size ndims specifying whether the grid is 
  periodic (true) or not (false) in each dimension 
- reorder - ranking may be reordered (true) or not (false) (logical) 

Output Parameter:
. comm_cart - communicator with new cartesian topology (handle) 

Algorithm:
We ignore 'reorder' info currently.
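
For illustration, a sketch creating a 4 x 3 grid that is periodic in
the first dimension only (at least 12 processes are assumed):
.vb
    MPI_Comm grid;
    int dims[2] = { 4, 3 }, periods[2] = { 1, 0 };
    MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 0, &grid );
.ve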

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_DIMS
.N MPI_ERR_ARG
@*/
int MPI_Cart_create ( MPI_Comm comm_old, int ndims, int *dims, int *periods, 
		      int reorder, MPI_Comm *comm_cart )
{
  int range[1][3];
  MPI_Group group_old, group;
  int i, rank, num_ranks = 1;
  int mpi_errno = MPI_SUCCESS;
  int flag, size;
  MPIR_TOPOLOGY *topo;
  struct MPIR_COMMUNICATOR *comm_old_ptr;
  static char myname[] = "MPI_CART_CREATE";

  TR_PUSH(myname);
  comm_old_ptr = MPIR_GET_COMM_PTR(comm_old);

  /* Check validity of arguments */
#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm_old,comm_old_ptr,comm_old_ptr,myname);
  MPIR_TEST_ARG(comm_cart);
  MPIR_TEST_ARG(periods);
  if (ndims < 1 || dims == (int *)0) mpi_errno = MPI_ERR_DIMS;
  if (mpi_errno)
	return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
  
  /* Check for Intra-communicator */
  MPI_Comm_test_inter ( comm_old, &flag );
  if (flag)
    return MPIR_ERROR(comm_old_ptr, 
            MPIR_ERRCLASS_TO_CODE(MPI_ERR_COMM,MPIR_ERR_COMM_INTER), myname );
#endif

  /* Determine number of ranks in topology */
  for ( i=0; i<ndims; i++ )
    num_ranks    *= (dims[i]>0)?dims[i]:-dims[i];
  if ( num_ranks < 1 ) {
    (*comm_cart)  = MPI_COMM_NULL;
    return MPIR_ERROR( comm_old_ptr, MPI_ERR_TOPOLOGY, myname );
  }

  /* Is the old communicator big enough? */
  MPIR_Comm_size (comm_old_ptr, &size);
  if (num_ranks > size) {
      mpi_errno = MPIR_Err_setmsg( MPI_ERR_TOPOLOGY, MPIR_ERR_TOPO_TOO_LARGE, 
				   myname, 
                  "Topology size is larger than size of communicator",
		  "Topology size %d is greater than communicator size %d", 
				   num_ranks, size );
	return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
  }
	
  /* Make new comm */
  range[0][0] = 0; range[0][1] = num_ranks - 1; range[0][2] = 1;
  MPI_Comm_group ( comm_old, &group_old );
  MPI_Group_range_incl ( group_old, 1, range, &group );
  MPI_Comm_create  ( comm_old, group, comm_cart );
  MPI_Group_free( &group );
  MPI_Group_free( &group_old );

  /* Store topology information in new communicator */
  if ( (*comm_cart) != MPI_COMM_NULL ) {
      MPIR_ALLOC(topo,(MPIR_TOPOLOGY *) MPIR_SBalloc ( MPIR_topo_els ),
		 comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
      MPIR_SET_COOKIE(&topo->cart,MPIR_CART_TOPOL_COOKIE)
      topo->cart.type         = MPI_CART;
      topo->cart.nnodes       = num_ranks;
      topo->cart.ndims        = ndims;
      MPIR_ALLOC(topo->cart.dims,(int *)MALLOC( sizeof(int) * 3 * ndims ),
		 comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
      topo->cart.periods      = topo->cart.dims + ndims;
      topo->cart.position     = topo->cart.periods + ndims;
      for ( i=0; i<ndims; i++ ) {
	  topo->cart.dims[i]    = dims[i];
	  topo->cart.periods[i] = periods[i];
      }

    /* Compute my position */
    MPI_Comm_rank ( (*comm_cart), &rank );
    for ( i=0; i < ndims; i++ ) {
      num_ranks = num_ranks / dims[i];
      topo->cart.position[i]  = rank / num_ranks;
      rank   = rank % num_ranks;
    }

    /* cache topology information */
    MPI_Attr_put ( (*comm_cart), MPIR_TOPOLOGY_KEYVAL, (void *)topo );
  }
  TR_POP;
  return (mpi_errno);
}
Example #25
/*@

MPI_Attr_put - Stores attribute value associated with a key

Input Parameters:
+ comm - communicator to which attribute will be attached (handle) 
. keyval - key value, as returned by  'MPI_KEYVAL_CREATE' (integer) 
- attr_value - attribute value 

Notes:
Values of the permanent attributes 'MPI_TAG_UB', 'MPI_HOST', 'MPI_IO', and
'MPI_WTIME_IS_GLOBAL' may not be changed. 

The type of the attribute value depends on whether C or Fortran is being used.
In C, an attribute value is a pointer ('void *'); in Fortran, it is a single 
integer (`not` a pointer, since Fortran has no pointers and there are systems 
for which a pointer does not fit in an integer (e.g., any > 32 bit address 
system that uses 64 bits for Fortran 'DOUBLE PRECISION')).

If an attribute is already present, the delete function (specified when the
corresponding keyval was created) will be called.
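
A sketch of a put/get round trip with a user-defined key; the variable
holding the attribute value must outlive its use:
.vb
    static int value = 42;
    int keyval, *vp, flag;
    MPI_Keyval_create( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
                       &keyval, (void *)0 );
    MPI_Attr_put( comm, keyval, &value );
    MPI_Attr_get( comm, keyval, &vp, &flag );
.ve
After these calls, 'flag' is true and '*vp' is 42.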

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_KEYVAL
.N MPI_ERR_PERM_KEY

.seealso: MPI_Attr_get, MPI_Keyval_create, MPI_Attr_delete
@*/
EXPORT_MPI_API int MPI_Attr_put ( MPI_Comm comm, int keyval, void *attr_value )
{
  MPIR_HBT_node *attr;
  MPIR_Attr_key *attr_key;
  int mpi_errno = MPI_SUCCESS;
  struct MPIR_COMMUNICATOR *comm_ptr;
  static char myname[] = "MPI_ATTR_PUT";

  TR_PUSH(myname);

  comm_ptr = MPIR_GET_COMM_PTR(comm);
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

  attr_key = MPIR_GET_KEYVAL_PTR( keyval );
  MPIR_TEST_MPI_KEYVAL(keyval,attr_key,comm_ptr,myname);

  /* Check for valid arguments */
  if ( ( (keyval == MPI_KEYVAL_INVALID) && (mpi_errno = MPI_ERR_OTHER) ) )
	return MPIR_ERROR( comm_ptr, mpi_errno, myname);

  if (comm == MPI_COMM_WORLD && attr_key->permanent) 
	return MPIR_ERROR( comm_ptr, 
	     MPIR_ERRCLASS_TO_CODE(MPI_ERR_ARG,MPIR_ERR_PERM_KEY),myname );

  MPIR_HBT_lookup(comm_ptr->attr_cache, keyval, &attr);
  if (attr == (MPIR_HBT_node *)0) {
	(void) MPIR_HBT_new_node ( attr_key, attr_value, &attr );
	(void) MPIR_HBT_insert ( comm_ptr->attr_cache, attr );
	/* Every update to the attr_key must be counted! */
	MPIR_REF_INCR(attr_key);
  }
  else {
      /* 
	 This is an unclear part of the standard.  Under MPI_KEYVAL_CREATE,
	 it is claimed that ONLY MPI_COMM_FREE and MPI_ATTR_DELETE
	 can cause the delete routine to be called.  Under 
	 MPI_ATTR_PUT, however, the delete routine IS called.
       */
	if ( attr_key->delete_fn.c_delete_fn ) {
	    if (attr_key->FortranCalling) {
		MPI_Aint  invall = (MPI_Aint)attr->value;
		int inval = (int)invall;
		(void) (*attr_key->delete_fn.f77_delete_fn)(comm, 
					   &keyval, &inval,
					   attr_key->extra_state, &mpi_errno );
		attr->value = (void *)(MPI_Aint)inval;
	    }
	    else
		mpi_errno = (*attr_key->delete_fn.c_delete_fn)(comm, keyval, 
					   attr->value,
					   attr_key->extra_state );
	    if (mpi_errno) 
		return MPIR_ERROR( comm_ptr, mpi_errno, myname);
	    }
	attr->value = attr_value;
  }
  /* The device may want to know about attributes */
  MPID_ATTR_SET(comm_ptr,keyval,attr_value);

  TR_POP;
  return (mpi_errno);
}
Example #26
/*@
    MPI_Bsend - Basic send with user-specified buffering

Input Parameters:
+ buf - initial address of send buffer (choice) 
. count - number of elements in send buffer (nonnegative integer) 
. datatype - datatype of each send buffer element (handle) 
. dest - rank of destination (integer) 
. tag - message tag (integer) 
- comm - communicator (handle) 

Notes:
This send is provided as a convenience function; it allows the user to 
send messages without worrying about where they are buffered (because the
user `must` have provided buffer space with 'MPI_Buffer_attach').  

In deciding how much buffer space to allocate, remember that the buffer space 
is not available for reuse by subsequent 'MPI_Bsend's unless you are certain 
that the message
has been received (not just that it should have been received).  For example,
this code does not allocate enough buffer space
.vb
    MPI_Buffer_attach( b, n*sizeof(double) + MPI_BSEND_OVERHEAD );
    for (i=0; i<m; i++) {
        MPI_Bsend( buf, n, MPI_DOUBLE, ... );
    }
.ve
because only enough buffer space is provided for a single send, and the
loop may start a second 'MPI_Bsend' before the first is done making use of the
buffer.  

In C, you can 
force the messages to be delivered by 
.vb
    MPI_Buffer_detach( &b, &n );
    MPI_Buffer_attach( b, n );
.ve
(The 'MPI_Buffer_detach' will not complete until all buffered messages are 
delivered.)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_RANK
.N MPI_ERR_TAG

.seealso: MPI_Buffer_attach, MPI_Ibsend, MPI_Bsend_init
@*/
int MPI_Bsend( 
	void *buf, 
	int count, 
	MPI_Datatype datatype, 
	int dest, 
	int tag, 
	MPI_Comm comm )
{
    MPI_Request handle;
    MPI_Status  status;
    int         mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_BSEND";

    disableSignal();

    TR_PUSH(myname);

    if (dest != MPI_PROC_NULL)
    {
        /* We should let Ibsend find the errors, but
	   we will soon add a special case for faster Bsend and we'll
	   need these tests then 
	 */

	comm_ptr = MPIR_GET_COMM_PTR(comm);
#ifndef MPIR_NO_ERROR_CHECKING
	MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

	MPIR_TEST_COUNT(count);
	MPIR_TEST_SEND_TAG(tag);
	MPIR_TEST_SEND_RANK(comm_ptr,dest);
	if (mpi_errno) {
            revertSignal();
	    return MPIR_ERROR(comm_ptr, mpi_errno, myname );
        }
#endif
	/* 
	   ? BsendDatatype?
	   MPID_BsendContig( comm, buf, len, src_lrank, tag, context_id,
	   dest_grank, msgrep, &mpi_errno );
	   if (!mpi_errno) return MPI_SUCCESS;
	   if (mpi_errno != MPIR_ERR_MAY_BLOCK) 
	   return MPIR_ERROR( comm, mpi_errno, myname );
	 */
	MPIR_ERROR_PUSH(comm_ptr);
	/* We don't use MPIR_CALL_POP so that we can free the handle */
	handle = MPI_REQUEST_NULL;
	if ((mpi_errno = MPI_Ibsend( buf, count, datatype, dest, tag, comm, 
				  &handle ))) {
	    MPIR_ERROR_POP(comm_ptr);
	    if (handle != MPI_REQUEST_NULL) 
		MPID_SendFree( handle );
            revertSignal();
	    return MPIR_ERROR(comm_ptr,mpi_errno,myname);
	}

	/* This Wait only completes the transfer of data into the 
	   buffer area.  The test/wait in util/bsendutil.c completes
	   the actual transfer 
	 */
	MPIR_CALL_POP(MPI_Wait( &handle, &status ),comm_ptr,myname);
	MPIR_ERROR_POP(comm_ptr);
    }
    TR_POP;
    revertSignal();
    return mpi_errno;
}