Example #1
/*@
    MPI_Issend - Starts a nonblocking synchronous send

Input Parameters:
+ buf - initial address of send buffer (choice) 
. count - number of elements in send buffer (integer) 
. datatype - datatype of each send buffer element (handle) 
. dest - rank of destination (integer) 
. tag - message tag (integer) 
- comm - communicator (handle) 

Output Parameter:
. request - communication request (handle) 

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED
@*/
int MPI_Issend( void *buf, int count, MPI_Datatype datatype, int dest, 
		int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE     *dtype_ptr;
    MPIR_SHANDLE             *shandle;
    static char myname[] = "MPI_ISSEND";
    int mpi_errno = MPI_SUCCESS;

    disableSignal();

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno) {
        revertSignal();
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
    }
#endif

    MPIR_ALLOCFN(shandle,MPID_SendAlloc,
	       comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( shandle, MPIR_SEND );

    MPIR_REMEMBER_SEND( shandle, buf, count, datatype, dest, tag, comm_ptr);

    if (dest == MPI_PROC_NULL) {
	shandle->is_complete = 1;
        revertSignal();
	return MPI_SUCCESS;
    }
    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_IssendDatatype( comm_ptr, buf, count, dtype_ptr, 
			 comm_ptr->local_rank, tag, 
			 comm_ptr->send_context, 
			 comm_ptr->lrank_to_grank[dest], 
			 *request, &mpi_errno );
    if (mpi_errno) {
        revertSignal();
        return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    }
    TR_POP;
    revertSignal();
    return MPI_SUCCESS;
}
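
A minimal usage sketch (illustrative, not part of the original source; run with at least two ranks): rank 0 posts a synchronous nonblocking send with MPI_Issend, overlaps work with it, and completes the request with MPI_Wait. All names here are standard MPI.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, payload = 42;
    MPI_Request req;
    MPI_Status  status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        /* The request only completes once the receiver has started
           to receive the message (synchronous-mode semantics). */
        MPI_Issend(&payload, 1, MPI_INT, 1, 99, MPI_COMM_WORLD, &req);
        /* ... computation can overlap with the send here ... */
        MPI_Wait(&req, &status);
    } else if (rank == 1) {
        MPI_Recv(&payload, 1, MPI_INT, 0, 99, MPI_COMM_WORLD, &status);
        printf("rank 1 received %d\n", payload);
    }

    MPI_Finalize();
    return 0;
}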
Example #2
/*@
    MPI_Irsend - Starts a nonblocking ready send

Input Parameters:
+ buf - initial address of send buffer (choice) 
. count - number of elements in send buffer (integer) 
. datatype - datatype of each send buffer element (handle) 
. dest - rank of destination (integer) 
. tag - message tag (integer) 
- comm - communicator (handle) 

Output Parameter:
. request - communication request (handle) 

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_TAG
.N MPI_ERR_RANK
.N MPI_ERR_EXHAUSTED

@*/
EXPORT_MPI_API int MPI_Irsend( void *buf, int count, MPI_Datatype datatype, int dest, 
		int tag, MPI_Comm comm, MPI_Request *request )
{
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE     *dtype_ptr;
    MPIR_SHANDLE             *shandle;
    static char myname[] = "MPI_IRSEND";
    int mpi_errno = MPI_SUCCESS;

    TR_PUSH(myname);

    comm_ptr = MPIR_GET_COMM_PTR(comm);
    MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

    dtype_ptr = MPIR_GET_DTYPE_PTR(datatype);
    MPIR_TEST_DTYPE(datatype,dtype_ptr,comm_ptr,myname);

#ifndef MPIR_NO_ERROR_CHECKING
    MPIR_TEST_COUNT(count);
    MPIR_TEST_SEND_TAG(tag);
    MPIR_TEST_SEND_RANK(comm_ptr,dest);
    if (mpi_errno)
	return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

    MPIR_ALLOCFN(shandle,MPID_Send_alloc,
	       comm_ptr,MPI_ERR_EXHAUSTED,myname );
    *request = (MPI_Request)shandle;
    MPID_Request_init( (MPI_Request)shandle, MPIR_SEND );

    /* we need the rank of dest in MPI_COMM_ALL in MPID_Gateway_SendCancelPacket(),
       so we save it here */
    shandle->partner_grank = comm_ptr->lrank_to_grank[dest];

    MPIR_REMEMBER_SEND(shandle, buf, count, datatype, dest, tag, comm_ptr);

    if (dest == MPI_PROC_NULL) {
	shandle->is_complete = 1;
	return MPI_SUCCESS;
    }
    /* This COULD test for the contiguous homogeneous case first .... */
    MPID_IrsendDatatype( comm_ptr, buf, count, dtype_ptr, 
			 comm_ptr->local_rank, tag, 
			 comm_ptr->send_context, 
			 comm_ptr->lrank_to_grank[dest], 
			 *request, &mpi_errno, 1 );
    if (mpi_errno) return MPIR_ERROR( comm_ptr, mpi_errno, myname );
    TR_POP;
    return MPI_SUCCESS;
}
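
A ready-mode send is only correct if the matching receive is already posted when the send starts. A hedged usage sketch (standard MPI names only, not from the original source): the receiver posts MPI_Irecv first, and a barrier guarantees the posting order before MPI_Irsend is called.

#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, payload = 7;
    MPI_Request req;
    MPI_Status  status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 1)
        MPI_Irecv(&payload, 1, MPI_INT, 0, 5, MPI_COMM_WORLD, &req);

    /* The barrier guarantees the receive is posted before the ready
       send starts; without it the MPI_Irsend below is erroneous. */
    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) {
        MPI_Irsend(&payload, 1, MPI_INT, 1, 5, MPI_COMM_WORLD, &req);
        MPI_Wait(&req, &status);
    } else if (rank == 1) {
        MPI_Wait(&req, &status);
    }

    MPI_Finalize();
    return 0;
}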
Example #3
int IMPI_Tunnel_import(int src_comm_lrank, int dest_grank, int tag, size_t length, void **buffer, int get_buffer_flag)
{
  int i;

  static Meta_Header **meta_msg_i;
  static size_t *meta_msg_i_size;
  static int *meta_header_sent;
  
  static MPI_Status  *snd_status;
  static MPI_Request *snd_request;
    
  static IntQueue *availQ;
  static IntQueue *pendingQ;

  static IntQueue _availQ;
  static IntQueue _pendingQ;

  static size_t size;
  static int req_id;

  static char   **router_msg;
  static size_t *bufsize;
  static int    flag;

  static int procs_on_metahost = 3;

  static int firstcall=1;

  DBG("This is IMPI_Import_msgs");

  if(firstcall)
  {
    
    /* !!! This must be determined !!! */
    procs_on_metahost = 3;

    meta_msg_i = (Meta_Header **) malloc( procs_on_metahost * sizeof( Meta_Header * ) );
    meta_msg_i_size = (size_t *) malloc( procs_on_metahost * sizeof( size_t ) );
    meta_header_sent = (int *) malloc( procs_on_metahost * sizeof( int ) );

    DBG("Import_msgs: meta_msg buffers allocated");
    
    for( i = 0; i < procs_on_metahost; i++ )
    {
      if( !(MPIR_meta_cfg.isRouter[i]) )
      {
	meta_msg_i[i] = (Meta_Header *) malloc( INIT_ROUTER_BUFFER_SIZE * sizeof(char) );
	meta_msg_i_size[i] = INIT_ROUTER_BUFFER_SIZE * sizeof(char);
	meta_header_sent[i] = 0;
      }
    }

    snd_status  = (MPI_Status *) malloc (MPIR_RouterConfig.isend_num * sizeof (MPI_Status));
    snd_request = (MPI_Request *) malloc (MPIR_RouterConfig.isend_num * sizeof (MPI_Request));
      
    Qinit (&_availQ, MPIR_RouterConfig.isend_num, 1);
    Qinit (&_pendingQ, MPIR_RouterConfig.isend_num, 0);

    availQ = &_availQ;
    pendingQ = &_pendingQ;

    if( !(router_msg = (char **)malloc( MPIR_RouterConfig.isend_num * sizeof(char *))) ) {
      PRERROR( "Could not allocate enough local memory" );
      ROUTER_ABORT;
    }	
    bufsize = (size_t *)malloc( MPIR_RouterConfig.isend_num * sizeof(size_t) );
    for (i = 0; i < MPIR_RouterConfig.isend_num; i++) {
      if( !( router_msg[i]  = (char *)malloc( INIT_ROUTER_BUFFER_SIZE * sizeof(char))) ) {
	PRERROR( "Could not allocate enough local memory" );
	ROUTER_ABORT;
      }
      bufsize[i]   = INIT_ROUTER_BUFFER_SIZE;
    }
    
    DBG("Import_msgs: router_msg buffer allocated");
    
    firstcall = 0;

    DBG("Import_msg --> first call");
  }

  /* size of the meta packet to be tunneled: */
  size = length + sizeof(Meta_Header);

  if(get_buffer_flag)
  {
    DBG("This is a 'get_buffer' call to IMPI_Send_tunnel");

    /* before we get a req_id for this transaction (the id of the buffer to be used), we must make sure
       that not all buffers are full; if they are, we block until at least one buffer is available */
    while (Qfull (pendingQ))
    {
      for (i = Qfirst(pendingQ); i >= 0; i = Qnext(pendingQ))
      {	
	MPI_Test(&snd_request[i], &flag, &snd_status[i]);
	
	if (flag)
	{
	  /* message has been sent */
	  Qput (availQ, i);
	  Qremove (pendingQ, i);
	}
      }
    }

    /* get id for this transaction */
    req_id = Qget (availQ);
    Qput (pendingQ, req_id);
  
    router_msg[req_id] = IMPI_adjustbuffer (router_msg[req_id], bufsize[req_id], size);
    if( bufsize[req_id] < size )  bufsize[req_id] = size;
  
    *buffer = (Meta_Header *)router_msg[req_id] + 1;   /* payload starts just after the meta header */

    DBG("Leaving Send_tunnel");
    return 0;
  }
  else
  {     
    DBG("This is a 'send_call' to IMPI_Send_tunnel");

    /*
     |   This is a Send_Call!
     */
    int dest = dest_grank;
    struct MPIR_COMMUNICATOR *comm_ptr;
    struct MPIR_DATATYPE     *dtype_ptr;
    MPIR_SHANDLE             *shandle;
    static char myname[] = "MPI_ISSEND";
    int mpi_errno = MPI_SUCCESS;
    int my_all_rank, my_all_size;

    /* Create MetaHeader: */
    memset((Meta_Header *)router_msg[req_id], 0, sizeof(Meta_Header));
    ((Meta_Header *)router_msg[req_id])->msg.MPI.dest_grank     = dest;
    ((Meta_Header *)router_msg[req_id])->msg.MPI.src_comm_lrank = src_comm_lrank;
    ((Meta_Header *)router_msg[req_id])->msg.MPI.tag            = tag;
    ((Meta_Header *)router_msg[req_id])->msg.MPI.count          = length;
    ((Meta_Header *)router_msg[req_id])->msg.MPI.msgrep         = 1;
    
#if 0
    /* even more to fake ??? */
    typedef struct _GW_MPI_msg {
      int src_comm_lrank;
      int dest_grank;
      int tag;         
      int context_id;
      MPI_Sendmode mode;
      unsigned int count;            /* byte-size of the original msg (appended to this struct) */
      int msgrep;      
      unsigned int msgid;                 /* id for cancelling */
    } GW_MPI_msg;
    
    typedef struct _Meta_Header {
      MPIR_GW_mode mode;
      union {
	GW_MPI_msg     MPI;
	GW_Router_msg  Rout;
      } msg;
      unsigned char dummychar;
    } Meta_Header;

#endif
    
    DBG4("Gateway-msg for [a%d] from [m%d], tag %d, MPI size %d",
	 ((Meta_Header *)router_msg[req_id])->msg.MPI.dest_grank,
	 ((Meta_Header *)router_msg[req_id])->msg.MPI.src_comm_lrank,
	 ((Meta_Header *)router_msg[req_id])->msg.MPI.tag,
	 ((Meta_Header *)router_msg[req_id])->msg.MPI.count);
    
    TR_PUSH(myname);
      
    MPI_Comm_rank(MPI_COMM_ALL, &my_all_rank);
    MPI_Comm_size(MPI_COMM_ALL, &my_all_size);
      
    comm_ptr = MPIR_GET_COMM_PTR(MPI_COMM_ALL);	  
    dtype_ptr = MPIR_GET_DTYPE_PTR(MPI_BYTE);

    MPIR_ALLOCFN(shandle, MPID_Send_alloc, comm_ptr, MPI_ERR_EXHAUSTED, myname);
      
    snd_request[req_id] = (MPI_Request)shandle;
    MPID_Request_init( (MPI_Request)shandle, MPIR_SEND );
	  
    /* we need the rank of dest in MPI_COMM_ALL in MPID_Gateway_SendCancelPacket(),
       so we save it here */
    shandle->partner_grank = comm_ptr->lrank_to_grank[dest];
      
    MPIR_REMEMBER_SEND( shandle, router_msg[req_id], size, MPI_BYTE, dest, MPIR_MPIMSG_TAG, comm_ptr);
      
    if (dest == MPI_PROC_NULL)
    {
      shandle->is_complete = 1;
    }
    else
    {
      DBG("Going to tunnel the msg..");
      MPID_IsendDatatype( comm_ptr, router_msg[req_id], size, dtype_ptr, 
			  comm_ptr->local_rank, MPIR_MPIMSG_TAG, 
			  comm_ptr->send_context, 
			  comm_ptr->lrank_to_grank[dest], 
			  snd_request[req_id], &mpi_errno, 0 );
      DBG("Msg tunneld!");
    }
  }

  /* wait for completion of pending sends */
  DBG("Waiting for pending sends...");
  if (!Qempty(pendingQ))
  {
    for (i = Qfirst(pendingQ); i >= 0; i = Qnext(pendingQ))
    {
      MPI_Test(&snd_request[i], &flag, &snd_status[i]);
      if (flag)
      {
	/* message has been sent */
	Qput (availQ, i);
	Qremove (pendingQ, i);
      }
    }
  }

  DBG("Leaving Send_tunnel");
  return 0;
}
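
The availQ/pendingQ logic above implements a fixed pool of send buffers whose slots are recycled as MPI_Test reports completion. Below is a condensed sketch of the same pattern using only standard MPI and a plain in-use array; the IntQueue helpers (Qget, Qput, Qremove) are internal to this codebase, and acquire_slot is a hypothetical name introduced here for illustration.

#include <mpi.h>

#define NSLOTS 8                       /* pool size (hypothetical) */

static MPI_Request reqs[NSLOTS];
static int         in_use[NSLOTS];

/* Return the index of a free slot, polling the pending sends with
   MPI_Test until one completes if the pool is currently exhausted. */
static int acquire_slot(void)
{
    int i, flag;
    MPI_Status st;

    for (;;) {
        for (i = 0; i < NSLOTS; i++) {
            if (!in_use[i])
                return i;              /* slot was never claimed: free */
            MPI_Test(&reqs[i], &flag, &st);
            if (flag) {
                in_use[i] = 0;         /* send finished: recycle slot */
                return i;
            }
        }
    }
}

A caller would claim a slot, start the nonblocking send into that slot's buffer, and mark it busy: s = acquire_slot(); MPI_Isend(buf[s], n, MPI_BYTE, dest, tag, comm, &reqs[s]); in_use[s] = 1;. This is the same "block only when every buffer is pending" behavior as the Qfull loop in IMPI_Tunnel_import.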