Example #1
void *sender_ibsend(void *ptr)
{
    /* Assumes rank, size, and MSGSIZE are defined at file scope, and that
       buffer space was attached earlier with MPI_Buffer_attach; calling
       MPI_Ibsend without an attached buffer is erroneous. */
    char buffer[MSGSIZE] = {0};
    MPI_Request req;

    /* Non-blocking buffered send to the next rank in the ring. */
    MPI_Ibsend(buffer, MSGSIZE, MPI_CHAR, (rank + 1) % size, 0, MPI_COMM_WORLD, &req);
    /* Completes as soon as the message is copied into the attached buffer. */
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    return NULL;
}
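A minimal self-contained sketch of the same pattern, showing the buffer attach/detach that Example #1 leaves to its caller (the ring pattern and the MSGSIZE value here are illustrative, not taken from the original test):

#include <stdlib.h>
#include <mpi.h>

#define MSGSIZE 128  /* illustrative */

int main(int argc, char **argv)
{
    int rank, size, bufsize;
    char msg[MSGSIZE] = {0}, *attach_buf;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Attach space for one message plus the per-message overhead. */
    bufsize = MSGSIZE + MPI_BSEND_OVERHEAD;
    attach_buf = malloc(bufsize);
    MPI_Buffer_attach(attach_buf, bufsize);

    /* Send to the next rank in the ring; MPI_Wait only completes the
       copy into the attached buffer, not the delivery. */
    MPI_Ibsend(msg, MSGSIZE, MPI_CHAR, (rank + 1) % size, 0,
               MPI_COMM_WORLD, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    /* Matching receive from the previous rank. */
    MPI_Recv(msg, MSGSIZE, MPI_CHAR, (rank - 1 + size) % size, 0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    /* Detach blocks until all buffered messages are delivered. */
    MPI_Buffer_detach(&attach_buf, &bufsize);
    free(attach_buf);
    MPI_Finalize();
    return 0;
}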
Example #2
/**
 * vsg_packed_msg_ibsend:
 * @pm: a #VsgPackedMsg.
 * @dst: the destination task id.
 * @tag: an integer message tag.
 * @request: the corresponding request object.
 *
 * Sends the stored message to the specified destination with the specified
 * tag in non-blocking buffered mode. The request handle for the send is
 * returned in @request.
 */
void vsg_packed_msg_ibsend (VsgPackedMsg *pm, gint dst, gint tag,
                            MPI_Request *request)
{
  gint ierr;

  _trace_write_msg_send (pm, "ibsend", dst, tag);

  ierr = MPI_Ibsend (pm->buffer, pm->position, MPI_PACKED, dst, tag,
                     pm->communicator, request);

  if (ierr != MPI_SUCCESS) vsg_mpi_error_output (ierr);
}
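Since the excerpt gives only the sender, here is a hypothetical caller sketch (only vsg_packed_msg_ibsend itself comes from the excerpt; the packing step is elided because that part of the API is not shown):

MPI_Request request;
/* ... pack data into pm with the library's packing calls ... */
vsg_packed_msg_ibsend (pm, dst, tag, &request);
/* overlap computation with the send, then complete it */
MPI_Wait (&request, MPI_STATUS_IGNORE);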
Example #3
int main( int argc, char *argv[] )
{
    MPI_Status status;
    MPI_Request request;
    int a[10], b[10];
    int buf[BUFSIZE], *bptr, bl, i, j, rank, size, errs=0;

    MTest_Init( 0, 0 );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    /* Attach BUFSIZE bytes of the int array as bsend buffer space; each
       buffered send below (10 ints plus MPI_BSEND_OVERHEAD) must fit,
       and each send is waited on before the buffer is reused. */
    MPI_Buffer_attach( buf, BUFSIZE );

    for (j=0; j<10; j++) {
	for (i=0; i<10; i++) {
	    a[i] = (rank + 10 * j) * size + i;
	}
	MPI_Ibsend( a, 10, MPI_INT, 0, 27+j, MPI_COMM_WORLD, &request );
	MPI_Wait( &request, &status );
    }
    if (rank == 0) {

	for (i=0; i<size; i++) {
	    for (j=0; j<10; j++) {
		int k;
		status.MPI_TAG = -10;
		status.MPI_SOURCE = -20;
		MPI_Recv( b, 10, MPI_INT, i, 27+j, MPI_COMM_WORLD, &status );
    
		if (status.MPI_TAG != 27+j) {
		    errs++;
		    printf( "Wrong tag = %d\n", status.MPI_TAG );
		}
		if (status.MPI_SOURCE != i) {
		    errs++;
		    printf( "Wrong source = %d\n", status.MPI_SOURCE );
		}
		for (k=0; k<10; k++) {
		    if (b[k] != (i + 10 * j) * size + k) {
			errs ++;
			printf( "received b[%d] = %d from %d tag %d\n",
				k, b[k], i, 27+j );
		    }
		}
	    }
	}
    }
    MPI_Buffer_detach( &bptr, &bl );

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #4
void mpi_ibsend_f(char *buf, MPI_Fint *count, MPI_Fint *datatype,
		  MPI_Fint *dest, MPI_Fint *tag, MPI_Fint *comm, 
		  MPI_Fint *request, MPI_Fint *ierr)
{
   /* Translate the Fortran handles to their C equivalents, invoke the C
      MPI_Ibsend, and hand the resulting request back as a Fortran handle. */
   MPI_Datatype c_type = MPI_Type_f2c(*datatype);
   MPI_Request c_req;
   MPI_Comm c_comm;

   c_comm = MPI_Comm_f2c (*comm);

   *ierr = OMPI_INT_2_FINT(MPI_Ibsend(OMPI_F2C_BOTTOM(buf), OMPI_FINT_2_INT(*count),
                                      c_type, OMPI_FINT_2_INT(*dest),
                                      OMPI_FINT_2_INT(*tag),
                                      c_comm, &c_req));

   if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
      *request = MPI_Request_c2f(c_req);
   }
}
Example #5
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size, /* source, */ dest;
    MPI_Comm      comm;
    MPI_Status    status;
    MPI_Request   req;
    static int bufsizes[4] = { 1, 100, 10000, 1000000 };
    char *buf;
#ifdef TEST_IRSEND
    int veryPicky = 0;   /* Set to 1 to test "quality of implementation" in
			    a tricky part of cancel */
#endif
    int  cs, flag, n;

    MTest_Init( &argc, &argv );

    comm = MPI_COMM_WORLD;
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

    /* source = 0; */
    dest   = size - 1;

    MTestPrintfMsg( 1, "Starting scancel test\n" );
    for (cs=0; cs<4; cs++) {
	if (rank == 0) {
	    n = bufsizes[cs];
	    buf = (char *)malloc( n );
	    if (!buf) {
		fprintf( stderr, "Unable to allocate %d bytes\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    MTestPrintfMsg( 1, "(%d) About to create isend and cancel\n",cs );
	    MPI_Isend( buf, n, MPI_CHAR, dest, cs+n+1, comm, &req );
	    MPI_Cancel( &req );
	    MPI_Wait( &req, &status );
	    MTestPrintfMsg( 1, "Completed wait on isend\n" );
	    MPI_Test_cancelled( &status, &flag );
	    if (!flag) {
		errs ++;
		printf( "Failed to cancel an Isend request\n" );
		fflush(stdout);
	    }
	    else
	    {
		n = 0;
	    }
	    /* Send the size, zero for successfully cancelled */
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    /* Send the tag so the message can be received */
	    n = cs+n+1;
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    free( buf );
	}
	else if (rank == dest)
	{
	    int nn, tag;
	    char *btemp;
	    MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
	    MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
	    if (nn > 0)
	    {
		/* If the message was not cancelled, receive it here */
		btemp = (char*)malloc( nn );
		if (!btemp)
		{
		    fprintf( stderr, "Unable to allocate %d bytes\n", nn );
		    MPI_Abort( MPI_COMM_WORLD, 1 );
                    exit(1);
		}
		MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
		free(btemp);
	    }
	}
	MPI_Barrier( comm );

	if (rank == 0) {
	    char *bsendbuf;
	    int bsendbufsize;
	    int bf, bs;
	    n = bufsizes[cs];
	    buf = (char *)malloc( n );
	    if (!buf) {
		fprintf( stderr, "Unable to allocate %d bytes\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    bsendbufsize = n + MPI_BSEND_OVERHEAD;
	    bsendbuf = (char *)malloc( bsendbufsize );
	    if (!bsendbuf) {
		fprintf( stderr, "Unable to allocate %d bytes for bsend\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    MPI_Buffer_attach( bsendbuf, bsendbufsize );
	    MTestPrintfMsg( 1, "About to create and cancel ibsend\n" );
	    MPI_Ibsend( buf, n, MPI_CHAR, dest, cs+n+2, comm, &req );
	    MPI_Cancel( &req );
	    MPI_Wait( &req, &status );
	    MPI_Test_cancelled( &status, &flag );
	    if (!flag) {
		errs ++;
		printf( "Failed to cancel an Ibsend request\n" );
		fflush(stdout);
	    }
	    else
	    {
		n = 0;
	    }
	    /* Send the size, zero for successfully cancelled */
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    /* Send the tag so the message can be received */
	    n = cs+n+2;
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    free( buf );
	    MPI_Buffer_detach( &bf, &bs );
	    free( bsendbuf );
	}
	else if (rank == dest)
	{
	    int nn, tag;
	    char *btemp;
	    MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
	    MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
	    if (nn > 0)
	    {
		/* If the message was not cancelled, receive it here */
		btemp = (char*)malloc( nn );
		if (!btemp)
		{
		    fprintf( stderr, "Unable to allocate %d bytes\n", nn);
		    MPI_Abort( MPI_COMM_WORLD, 1 );
                    exit(1);
		}
		MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
		free(btemp);
	    }
	}
	MPI_Barrier( comm );

	/* Because this test is erroneous, we do not perform it unless
	   TEST_IRSEND is defined.  */
#ifdef TEST_IRSEND
	/* We avoid ready send to self because an implementation
	   is free to detect the error in delivering a message to
	   itself without a pending receive; we could also check
	   for an error return from the MPI_Irsend */
	if (rank == 0 && dest != rank) {
	    n = bufsizes[cs];
	    buf = (char *)malloc( n );
	    if (!buf) {
		fprintf( stderr, "Unable to allocate %d bytes\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    MTestPrintfMsg( 1, "About to create and cancel irsend\n" );
	    MPI_Irsend( buf, n, MPI_CHAR, dest, cs+n+3, comm, &req );
	    MPI_Cancel( &req );
	    MPI_Wait( &req, &status );
	    MPI_Test_cancelled( &status, &flag );
	    /* This can be pretty ugly.  The standard is clear (Section 3.8)
	       that either a sent message is received or the 
	       sent message is successfully cancelled.  Since this message
	       can never be received, the cancel must complete
	       successfully.  

	       However, since there is no matching receive, this
	       program is erroneous.  In this case, we can't really
	       flag this as an error */
	    if (!flag && veryPicky) {
		errs ++;
		printf( "Failed to cancel an Irsend request\n" );
		fflush(stdout);
	    }
	    if (flag)
	    {
		n = 0;
	    }
	    /* Send the size, zero for successfully cancelled */
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    /* Send the tag so the message can be received */
	    n = cs+n+3;
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    free( buf );
	}
	else if (rank == dest)
	{
	    int n, tag;
	    char *btemp;
	    MPI_Recv( &n, 1, MPI_INT, 0, 123, comm, &status );
	    MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
	    if (n > 0)
	    {
		/* If the message was not cancelled, receive it here */
		btemp = (char*)malloc( n );
		if (!btemp)
		{
		    fprintf( stderr, "Unable to allocate %d bytes\n", n);
		    MPI_Abort( MPI_COMM_WORLD, 1 );
                    exit(1);
		}
		MPI_Recv( btemp, n, MPI_CHAR, 0, tag, comm, &status );
		free(btemp);
	    }
	}
	MPI_Barrier( comm );
#endif

	if (rank == 0) {
	    n = bufsizes[cs];
	    buf = (char *)malloc( n );
	    if (!buf) {
		fprintf( stderr, "Unable to allocate %d bytes\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    MTestPrintfMsg( 1, "About to create and cancel issend\n" );
	    MPI_Issend( buf, n, MPI_CHAR, dest, cs+n+4, comm, &req );
	    MPI_Cancel( &req );
	    MPI_Wait( &req, &status );
	    MPI_Test_cancelled( &status, &flag );
	    if (!flag) {
		errs ++;
		printf( "Failed to cancel an Issend request\n" );
		fflush(stdout);
	    }
	    else
	    {
		n = 0;
	    }
	    /* Send the size, zero for successfully cancelled */
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    /* Send the tag so the message can be received */
	    n = cs+n+4;
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    free( buf );
	}
	else if (rank == dest)
	{
	    int nn, tag;
	    char *btemp;
	    MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
	    MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
	    if (nn > 0)
	    {
		/* If the message was not cancelled, receive it here */
		btemp = (char*)malloc( nn );
		if (!btemp)
		{
		    fprintf( stderr, "Unable to allocate %d bytes\n", nn);
		    MPI_Abort( MPI_COMM_WORLD, 1 );
                    exit(1);
		}
		MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
		free(btemp);
	    }
	}
	MPI_Barrier( comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #6
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  MPI_Comm comm = MPI_COMM_WORLD;
  char processor_name[128];
  int namelen = 128;
  int bbuf[(BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES];
  int buf[BUF_SIZE * 2 * NUM_SEND_TYPES];
  int i, j, k, at_size, send_t_number, index, outcount, total, flag;
  int num_errors, error_count, indices[2 * NUM_SEND_TYPES];
  MPI_Request aReq[2 * NUM_SEND_TYPES];
  MPI_Status aStatus[2 * NUM_SEND_TYPES];

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Buffer_attach (bbuf, sizeof(int) * 
		     (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES);

  if (rank == 0) {
    /* set up persistent sends... */
    send_t_number = NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES;

    MPI_Send_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Send_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Bsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Bsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);


    send_t_number++;

    MPI_Rsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Rsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Ssend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Ssend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);
  }

  for (k = 0; k < (NUM_COMPLETION_MECHANISMS * 2); k++) {
    if (rank == 0) {
      /* initialize all of the send buffers */
      for (j = 0; j < NUM_SEND_TYPES; j++) {
	for (i = 0; i < BUF_SIZE; i++) {
	  buf[2 * j * BUF_SIZE + i] = i;
	  buf[((2 * j + 1) * BUF_SIZE) + i] = BUF_SIZE - 1 - i;
	}
      }
    }
    else if (rank == 1) {
      /* zero out all of the receive buffers (memset is the portable
	 equivalent of the legacy bzero) */
      memset (buf, 0, sizeof(int) * BUF_SIZE * 2 * NUM_SEND_TYPES);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) {
      /* set up transient sends... */
      send_t_number = 0;
    
      MPI_Isend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Isend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		 BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		 comm, &aReq[send_t_number * 2 + 1]);

      send_t_number++;
      
      MPI_Ibsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		  1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Ibsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		  BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		  comm, &aReq[send_t_number * 2 + 1]);

      send_t_number++;

      /* Barrier to ensure receives are posted for rsends... */
      MPI_Barrier(MPI_COMM_WORLD);

      MPI_Irsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		  1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Irsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		  BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		  comm, &aReq[send_t_number * 2 + 1]);

      send_t_number++;

      MPI_Issend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		  1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Issend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		  BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		  comm, &aReq[send_t_number * 2 + 1]);

      /* just to be paranoid */
      send_t_number++;
      assert (send_t_number == NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES);

      /* start the persistent sends... */
      if (k % 2) {
	MPI_Startall (NUM_PERSISTENT_SEND_TYPES * 2, &aReq[2 * send_t_number]);
      }
      else {
	for (j = 0; j < NUM_PERSISTENT_SEND_TYPES * 2; j++) {
	  MPI_Start (&aReq[2 * send_t_number + j]);
	}
      }
    
      /* NOTE: Changing the send buffer of a Bsend is NOT an error... */
      for (j = 0; j < NUM_SEND_TYPES; j++) {
	/* muck the buffers */
	buf[j * 2 * BUF_SIZE + (BUF_SIZE >> 1)] = BUF_SIZE;
      }

      printf ("USER MSG: 6 change send buffer errors in iteration #%d:\n", k);

      /* complete the sends */
      switch (k/2) {
      case 0:
	/* use MPI_Wait */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  MPI_Wait (&aReq[j], &aStatus[j]);
	}
	break;
	
      case 1:
	/* use MPI_Waitall */
	MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus);
	break;

      case 2:
	/* use MPI_Waitany */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus);
	}

	break;
	
      case 3:
	/* use MPI_Waitsome */
	total = 0;
	while (total < NUM_SEND_TYPES * 2) {
	  MPI_Waitsome (NUM_SEND_TYPES * 2, aReq, &outcount, indices, aStatus);

	  total += outcount;
	}

	break;

      case 4:
	/* use MPI_Test */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  flag = 0;

	  while (!flag) {
	    MPI_Test (&aReq[j], &flag, &aStatus[j]);
	  }
	}

	break;
	
      case 5:
	/* use MPI_Testall */
	flag = 0;
	while (!flag) {
	  MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus);
	}

	break;

      case 6:
	/* use MPI_Testany */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  flag = 0;
	  while (!flag) {
	    MPI_Testany (NUM_SEND_TYPES * 2, aReq, &index, &flag, aStatus);
	  }
	}

	break;
	
      case 7:
	/* use MPI_Testsome */
	total = 0;
	while (total < NUM_SEND_TYPES * 2) {
	  outcount = 0;

	  while (!outcount) {
	    MPI_Testsome (NUM_SEND_TYPES * 2, aReq, 
			  &outcount, indices, aStatus);
	  }

	  total += outcount;
	}

	break;

      default:
	assert (0);
	break;
      }
    }
    else if (rank == 1) {
Example #7
File: MPI-api.c Project: 8l/rose
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag; /* a tag is an int, not an MPI_Datatype */
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
Example #8
int MPIR_Ibsend_cdesc(CFI_cdesc_t* x0, int x1, MPI_Datatype x2, int x3, int x4, MPI_Comm x5, MPI_Request * x6)
{
    int err = MPI_SUCCESS;
    void *buf0 = x0->base_addr;
    int count0 = x1;
    MPI_Datatype dtype0 = x2;

    /* MPI_BOTTOM arrives from Fortran as the address of a sentinel;
       translate it to the C MPI_BOTTOM. */
    if (buf0 == &MPIR_F08_MPI_BOTTOM) {
        buf0 = MPI_BOTTOM;
    }

    /* A non-contiguous Fortran array section: build a matching derived
       datatype from the CFI descriptor and send one element of it. */
    if (x0->rank != 0 && !CFI_is_contiguous(x0)) {
        err = cdesc_create_datatype(x0, x1, x2, &dtype0);
        count0 = 1;
    }

    err = MPI_Ibsend(buf0, count0, dtype0, x3, x4, x5, x6);

    if (dtype0 != x2)  MPI_Type_free(&dtype0);
    return err;
}
Example #9
/*@
    MPI_Bsend - Basic send with user-specified buffering

Input Parameters:
+ buf - initial address of send buffer (choice) 
. count - number of elements in send buffer (nonnegative integer) 
. datatype - datatype of each send buffer element (handle) 
. dest - rank of destination (integer) 
. tag - message tag (integer) 
- comm - communicator (handle) 

Notes:
This send is provided as a convenience function; it allows the user to
send messages without worrying about where they are buffered (because the
user `must` have provided buffer space with 'MPI_Buffer_attach').

In deciding how much buffer space to allocate, remember that the buffer space
is not available for reuse by subsequent 'MPI_Bsend's unless you are certain
that the message has been received (not just that it should have been
received).  For example, this code does not allocate enough buffer space
.vb
    MPI_Buffer_attach( b, n*sizeof(double) + MPI_BSEND_OVERHEAD );
    for (i=0; i<m; i++) {
        MPI_Bsend( buf, n, MPI_DOUBLE, ... );
    }
.ve
because only enough buffer space is provided for a single send, and the
loop may start a second 'MPI_Bsend' before the first is done making use of the
buffer.  

In C, you can 
force the messages to be delivered by 
.vb
    MPI_Buffer_detach( &b, &n );
    MPI_Buffer_attach( b, n );
.ve
(The 'MPI_Buffer_detach' will not complete until all buffered messages are 
delivered.)
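
For the loop above, a sketch of a sufficient allocation for all 'm' sends
('MPI_Pack_size' gives an upper bound on the packed size of one message;
the malloc error check is omitted for brevity) is
.vb
    int packsize, bufsize;
    MPI_Pack_size( n, MPI_DOUBLE, MPI_COMM_WORLD, &packsize );
    bufsize = m * (packsize + MPI_BSEND_OVERHEAD);
    MPI_Buffer_attach( malloc( bufsize ), bufsize );
.ve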

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_COMM
.N MPI_ERR_COUNT
.N MPI_ERR_TYPE
.N MPI_ERR_RANK
.N MPI_ERR_TAG

.seealso: MPI_Buffer_attach, MPI_Ibsend, MPI_Bsend_init
@*/
int MPI_Bsend( 
	void *buf, 
	int count, 
	MPI_Datatype datatype, 
	int dest, 
	int tag, 
	MPI_Comm comm )
{
    MPI_Request handle;
    MPI_Status  status;
    int         mpi_errno = MPI_SUCCESS;
    struct MPIR_COMMUNICATOR *comm_ptr;
    MPIR_ERROR_DECL;
    static char myname[] = "MPI_BSEND";

    disableSignal();

    TR_PUSH(myname);

    if (dest != MPI_PROC_NULL)
    {
        /* We should let Ibsend find the errors, but
	   we will soon add a special case for faster Bsend and we'll
	   need these tests then 
	 */

	comm_ptr = MPIR_GET_COMM_PTR(comm);
#ifndef MPIR_NO_ERROR_CHECKING
	MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);

	MPIR_TEST_COUNT(count);
	MPIR_TEST_SEND_TAG(tag);
	MPIR_TEST_SEND_RANK(comm_ptr,dest);
	if (mpi_errno) {
            revertSignal();
	    return MPIR_ERROR(comm_ptr, mpi_errno, myname );
        }
#endif
	/* 
	   ? BsendDatatype?
	   MPID_BsendContig( comm, buf, len, src_lrank, tag, context_id,
	   dest_grank, msgrep, &mpi_errno );
	   if (!mpi_errno) return MPI_SUCCESS;
	   if (mpi_errno != MPIR_ERR_MAY_BLOCK) 
	   return MPIR_ERROR( comm, mpi_errno, myname );
	 */
	MPIR_ERROR_PUSH(comm_ptr);
	/* We don't use MPIR_CALL_POP so that we can free the handle */
	handle = MPI_REQUEST_NULL;
	if ((mpi_errno = MPI_Ibsend( buf, count, datatype, dest, tag, comm, 
				  &handle ))) {
	    MPIR_ERROR_POP(comm_ptr);
	    if (handle != MPI_REQUEST_NULL) 
		MPID_SendFree( handle );
            revertSignal();
	    return MPIR_ERROR(comm_ptr,mpi_errno,myname);
	}

	/* This Wait only completes the transfer of data into the 
	   buffer area.  The test/wait in util/bsendutil.c completes
	   the actual transfer 
	 */
	MPIR_CALL_POP(MPI_Wait( &handle, &status ),comm_ptr,myname);
	MPIR_ERROR_POP(comm_ptr);
    }
    TR_POP;
    revertSignal();
    return mpi_errno;
}