Example no. 1
0
 void TreeCommunicatorLevel::close_recv(void)
 {
     check_mpi(MPI_Cancel(&m_policy_request));
     if (m_rank == 0) {
         for (auto request_it = m_sample_request.begin(); request_it < m_sample_request.end(); ++request_it) {
             check_mpi(MPI_Cancel(&(*request_it)));
         }
     }
 }
Example no. 2
0
EXPORT_MPI_API void FORTRAN_API mpi_cancel_( MPI_Fint *request, MPI_Fint *__ierr )
{
    MPI_Request lrequest;

    lrequest = MPI_Request_f2c(*request);  
    *__ierr = MPI_Cancel(&lrequest); 
}
Example no. 3
0
int async_mpi_session_reset(async_mpi_session* ses, void* buf, int count, MPI_Datatype datatype, int peer, int tag, MPI_Comm comm, MPI_Request* request, unsigned char flags) {
	if(ses->buf != NULL && ses->buf != buf && (ses->flags & ASYNC_MPI_FLAG_FREE_BUF) != 0) {
		safe_free(ses->buf);
		ses->buf = NULL;
	}
	if(ses->request != NULL) {
		if(ses->state > ASYNC_MPI_STATE_SEND_OR_RECV && ses->state < ASYNC_MPI_STATE_SUCCESS && (ses->flags & ASYNC_MPI_FLAG_IS_SENDING) == 0) {
			MPI_Status status;
			int flag = 0;
			//printf("Canceling in reset.\n");
			MPI_Cancel(ses->request);
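			// A cancelled request must still be completed; poll MPI_Test until it finishes.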
			while(!flag) {
				MPI_Test(ses->request, &flag, &status);
			}
		}
		if((ses->flags & ASYNC_MPI_FLAG_FREE_REQUEST) != 0) {
			safe_free(ses->request);
		}
	}
	ses->buf = buf;
	ses->count = count;
	ses->datatype = datatype;
	ses->peer = peer;
	ses->tag = tag;
	ses->comm = comm;
	ses->request = request;
	ses->flags = flags;
	ses->state = ASYNC_MPI_STATE_SEND_OR_RECV;
	return 1;
}
Example no. 4
0
JNIEXPORT void JNICALL Java_mpi_Request_cancel(
        JNIEnv *env, jobject jthis, jlong handle)
{
    MPI_Request req = (MPI_Request)handle;
    int rc = MPI_Cancel(&req);
    ompi_java_exceptionCheck(env, rc);
}
Example no. 5
0
		bool Communicator::GetPacked(int bufSize, CommType::Type commType, unsigned source, unsigned messageTag, /*out*/void *buf)
		{
			MPI_Status status;
			MPI_Request *request = 0;
			int flag;

			if (commType == CommType::Synch)
			{            
				MPI_Recv(buf, bufSize, MPI_PACKED, source, messageTag, MPI_COMM_WORLD, &status); 

				this->recvPosition = 0;
	            
				flag = 0;
			}
			else
			{
				request = new MPI_Request();
	            
				MPI_Irecv(buf, bufSize, MPI_PACKED, source, /*messageTag*/messageTag, MPI_COMM_WORLD, request);

				this->recvPosition = 0;

				//This is necessary to cancel
				sem_wait(&this->cancelSem);
				this->messageRequest = request;

				if (this->cancel)
				{
					if (!Validator::IsNull(request, NAME("request")))
					{
						MPI_Cancel(request);
					}
				}
				sem_post(&this->cancelSem);
				
				if (!Validator::IsNull(request, NAME("request")))
				{
					MPI_Wait(request, &status);

					MPI_Test_cancelled( &status, &flag );            
				}

				sem_wait(&this->cancelSem);
				delete request; 
				request = NULL;

				this->messageRequest = 0;
				sem_post(&this->cancelSem);
			}

			#ifdef Debug
			if (!Validator::IsNull(log, NAME("log")))
			{
				log->Log(&typeid(this), "Communicator::GetPacked: source: %u, Tag: %d", source, messageTag);
			}
			#endif

			return (flag != 0);
		}
Example no. 6
0
void event_queue_destroy(event_queue_t *queue){
    for(int i=0;i<(*queue)->pending;i++){
        MPI_Cancel(&(*queue)->request[i]);
    }
    destroy_manager ((*queue)->man);
    RTfree(*queue);
    *queue=NULL;
}
Example no. 7
0
void f2crequest_( MPI_Fint * req )
{
    MPI_Request cReq;

    MPI_Irecv( NULL, 0, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, 
	       MPI_COMM_WORLD, &cReq );
    MPI_Cancel( &cReq );
    *req = MPI_Request_c2f( cReq );
    
}
Example no. 8
0
GrouppingMpiAsync::~GrouppingMpiAsync() {
	if(groupsMap.size() > 0) {
		LOG(WARNING) << "Pending requests left in GrouppingMpiAsync, but destructor has been called. Canceling them";

		for(auto& kv: groupsMap) {
			El& el = kv.second;
			MPI_Cancel(el.rq);
			cleaner(el.rq);
		}
	}
}
Example no. 9
0
		void Communicator::CancelASynch()
		{
			sem_wait(&this->cancelSem);			
			if (!Validator::IsNull(this->messageRequest, NAME("this->messageRequest")))
			{	
				MPI_Cancel(this->messageRequest);
			}
			else
			{
				cancel = true;
			}
			sem_post(&this->cancelSem);
		}
Example no. 10
0
void requests_t::cancel_and_waitall() {
#if PENTAGO_MPI_FUNNEL
  spin_t spin(immediate_lock);
  GEODE_ASSERT(!immediate_count);
#endif
  callbacks.clear();
  for (int i=0;i<requests.size();i++) {
    GEODE_ASSERT(cancellables[i]);
    CHECK(MPI_Cancel(&requests[i]));
  }
  CHECK(MPI_Waitall(requests.size(),requests.data(),MPI_STATUSES_IGNORE));
  requests.clear();
  cancellables.clear();
}
Example no. 11
0
/*!
    Cancels the receive associated to the specified rank.

    \param rank is the rank associated to the receive to cancel
*/
void DataCommunicator::cancelRecv(int rank)
{
    if (m_recvIds.count(rank) == 0) {
        return;
    }

    int id = m_recvIds[rank];
    if (m_recvRequests[id] == MPI_REQUEST_NULL) {
        return;
    }

    MPI_Cancel(&m_recvRequests[id]);
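    // Free the request without waiting for the cancelled receive to complete.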
    MPI_Request_free(&m_recvRequests[id]);
}
Example no. 12
0
void MPIWorker::receive_order() {
    int work_test = 0;
    MPI_Test(&WorkReq, &work_test, MPI_STATUS_IGNORE);
    if (Status == pMPI::Pending && work_test) {
        Status = pMPI::Work;
        return;
    };
    int finish_test = 0;
    MPI_Test(&FinishReq, &finish_test, MPI_STATUS_IGNORE);
    if (Status == pMPI::Pending && finish_test) {
        Status = pMPI::Finish;
        //WorkReq.cancel();
        MPI_Cancel(&WorkReq);
        return;
    };
}
Example no. 13
0
void memheap_oob_destruct(void)
{
    int i;
    oob_comm_request_t *r;

    opal_progress_unregister(oshmem_mkey_recv_cb);

    for (i = 0; i < MEMHEAP_RECV_REQS_MAX; i++) { 
        r = &memheap_oob.req_pool[i];
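        /* Cancel the outstanding receive and release its request */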
        MPI_Cancel(&r->recv_req);
        MPI_Request_free(&r->recv_req);
    }

    OBJ_DESTRUCT(&memheap_oob.req_list);
    OBJ_DESTRUCT(&memheap_oob.lck);
    OBJ_DESTRUCT(&memheap_oob.cond);
}
Example no. 14
0
/* This is a program to enable testing and demonstration of the debugger
   interface, particularly in terms of showing message queues.  To use
   this, run with a few processes and attach with the debugger when the
   program stops.  You can change the variable "hold" to 0 to allow the
   program to complete. */
int main( int argc, char *argv[] )
{
    int wsize, wrank;
    int source, dest, i;
    int buf1[10], buf2[10], buf3[10];
    MPI_Request r[3];
    volatile int hold = 1; 
    MPI_Comm dupcomm;
    MPI_Status status;

    MPI_Init( &argc, &argv );

    MPI_Comm_size( MPI_COMM_WORLD, &wsize );
    MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
    
    /* Set the source and dest in a ring */
    source = (wrank + 1) % wsize;
    dest   = (wrank + wsize - 1) % wsize;

    MPI_Comm_dup( MPI_COMM_WORLD, &dupcomm );
    MPI_Comm_set_name( dupcomm, "Dup of comm world" );

    for (i=0; i<3; i++) {
	MPI_Irecv( MPI_BOTTOM, 0, MPI_INT, source, i + 100, MPI_COMM_WORLD, 
		   &r[i] );
    }

    MPI_Send( buf2, 8, MPI_INT, dest, 1, MPI_COMM_WORLD );
    MPI_Send( buf3, 4, MPI_INT, dest, 2, dupcomm );
    
    while (hold) ;

    MPI_Recv( buf1, 10, MPI_INT, source, 1, MPI_COMM_WORLD, &status );
    MPI_Recv( buf1, 10, MPI_INT, source, 1, dupcomm, &status );

    for (i=0; i<3; i++) {
	MPI_Cancel( &r[i] );
    }

    MPI_Comm_free( &dupcomm );
    
    
    MPI_Finalize();
    
    return 0;
}
Example no. 15
0
/**
 * Function that checks for the arrival of a message.
 * If the message does not arrive within TIEMPO (tenths of a second),
 * it is discarded. Returns 0 if the message was received
 * and 1 if the wait for the message was unsuccessful.
 *
 * @return Depending on the return value, the request is considered successful or not.
 */
int MPI_timer ( MPI_Request *request, MPI_Status *status ){
	int flag = 0, time = 0;
	while ( !flag ){
        /* Do some work ... */
        MPI_Test( request, &flag, status );
        usleep( 100000 );
        time ++;
        if ( time == TIEMPO2 ){
        	printf( "no se recibio mensaje\n" );
        	MPI_Cancel( request );
        	flag = 1;
        }
	}
	if( time != TIEMPO2 )
		return 0;
	else
		return 1;
}
Example no. 16
0
int main( int argc, char *argv[] )
{
    MPI_Fint handleA, handleB;
    int      rc;
    int      errs = 0;
    int      buf[1];
    MPI_Request cRequest;
    MPI_Status st;
    int        tFlag;

    MTest_Init( &argc, &argv );

    /* Request */
    rc = MPI_Irecv( buf, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &cRequest );
    if (rc) {
	errs++;
	printf( "Unable to create request\n" );
    }
    else {
	handleA = MPI_Request_c2f( cRequest );
	handleB = MPI_Request_c2f( cRequest );
	if (handleA != handleB) {
	    errs++;
	    printf( "MPI_Request_c2f does not give the same handle twice on the same MPI_Request\n" );
	}
    }
    MPI_Cancel( &cRequest );
    MPI_Test( &cRequest, &tFlag, &st );
    MPI_Test_cancelled( &st, &tFlag );
    if (!tFlag) {
	errs++;
	printf( "Unable to cancel MPI_Irecv request\n" );
    }
    /* Using MPI_Request_free should be ok, but some MPI implementations
       object to it immediately after the cancel and that isn't essential to
       this test */

    MTest_Finalize( errs );
    MPI_Finalize();
    
    return 0;
}
Example no. 17
0
void async_mpi_session_destroy(async_mpi_session* ses) {
	if((ses->flags & ASYNC_MPI_FLAG_FREE_BUF) != 0) {
		safe_free(ses->buf);
	}
	if(ses->request != NULL) {
		if(ses->state > ASYNC_MPI_STATE_SEND_OR_RECV && ses->state < ASYNC_MPI_STATE_SUCCESS) {
			MPI_Status status;
			int flag = 0;
			//printf("Canceling in destroy.\n");
			MPI_Cancel(ses->request);
			while(!flag) {
				MPI_Test(ses->request, &flag, &status);
			}
		}
		if((ses->flags & ASYNC_MPI_FLAG_FREE_REQUEST) != 0) {
			safe_free(ses->request);
		}
	}
	free(ses->tmp);
	safe_free(ses);
}
Example no. 18
0
int main(int argc, char **argv) {
    int size, rank, msg, cancelled;
    MPI_Request request;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (size != 2) {
        fprintf(stderr,"ERROR: must be run with 2 processes");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 0) {
        msg = -1;
        /* Post, then cancel MPI_ANY_SOURCE recv */
        MPI_Irecv(&msg, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &request);
        MPI_Cancel(&request);
        MPI_Wait(&request, &status);
        MPI_Test_cancelled(&status, &cancelled);
        assert(cancelled);

        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Irecv(&msg, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &request);
        MPI_Wait(&request, &status);
        assert(msg == 42);
    } else {
        MPI_Barrier(MPI_COMM_WORLD);
        msg = 42;
        MPI_Send(&msg, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
    }

    if (rank == 0)
        printf(" No Errors\n");

    MPI_Finalize();
}
Example no. 19
0
int Timeout_recv(void* buf, int count, MPI_Datatype datatype, int source,
              int tag, MPI_Comm comm, MPI_Status *status, double timeout) {
	MPI_Request request;
	
	double start_time = MPI_Wtime();
	double interval = 0.0;
	MPI_Irecv(buf, count, datatype, source, tag, comm, &request);
	while(1){
		// Better sleep for a short time interval
		int flag = 0;
		MPI_Test(&request, &flag, status);
		//printf("Flag: %d\n", flag);
		if(flag) { 
			return 1;
		}else {
			double curr_time = MPI_Wtime();
			interval = curr_time - start_time;
			if(interval > timeout) {
				MPI_Cancel(&request);
				// Complete the cancelled request before returning
				MPI_Wait(&request, status);
				return 0;
			}
		}
	}
}
Example no. 20
0
void *receiveWithTimeout(void *src) {
    float time = 0.0;
    int recvMsg = 0;
    MPI_Request request;
    int flag = 0;

    MPI_Irecv(&recvMsgs[(int)src], 1, MPI_INT, (int)src, 1, MPI_COMM_WORLD, &request);
    printf("Waiting to receive from %d\n", src);

    while (1) {
        usleep(TIMEOUT_INTERVAL);
        MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
        if (flag) {
            break;
        } else if (time >= TIMEOUT_TIME) {
            printf("Believe %d is dead\n", (int)src);
            MPI_Cancel(&request);
            recvMsgs[(int)src] = MSG_TIMEOUT;
            break;
        } else {
            time += TIMEOUT_INTERVAL;
        }
    }
    return NULL;
}
Example no. 21
0
double pingpongtest(int iters, int msgsz) {
  int i;
  int64_t starttime, endtime;
  int iamsender = (rank % 2 == 0);
  int iamreceiver = !iamsender || peerid == rank; /* handle loopback */
  char *sendMsgbuffer = (char*)malloc(msgsz);
  char *sendAckbuffer = (char*)malloc(msgsz);
  char *recvMsgbuffer = (char*)malloc(msgsz);
  char *recvAckbuffer = (char*)malloc(msgsz);
  MPI_Request recvMsgHandle = MPI_REQUEST_NULL;
  MPI_Request recvAckHandle = MPI_REQUEST_NULL;
  MPI_Request sendMsgHandle = MPI_REQUEST_NULL;
  MPI_Request sendAckHandle = MPI_REQUEST_NULL;
  MPI_Status status;

  #if USE_ZERO_BYTE_ACK
    #define ACKSZ 0
  #else
    #define ACKSZ msgsz
  #endif

  if (iamreceiver) {
    /* prepost a recv */
    MPI_SAFE(MPI_Irecv(recvMsgbuffer, msgsz, MPI_BYTE, 
              peerid, MPI_ANY_TAG, MPI_COMM_WORLD, 
              &recvMsgHandle));
  }

  barrier();

  starttime = getMicrosecondTimeStamp();

  for (i=0; i < iters; i++) {

    if (iamsender) {
      /* send message */
      WRITEMSG(sendMsgbuffer, msgsz);
   #if USE_ISEND
      MPI_SAFE(MPI_Isend(sendMsgbuffer, msgsz, MPI_BYTE, peerid, peermpitag, MPI_COMM_WORLD, &sendMsgHandle));
   #else
      MPI_SAFE(MPI_Send(sendMsgbuffer, msgsz, MPI_BYTE, peerid, peermpitag, MPI_COMM_WORLD));
   #endif

      /* prepost a recv for acknowledgement */
      MPI_SAFE(MPI_Irecv(recvAckbuffer, ACKSZ, MPI_BYTE, 
                peerid, MPI_ANY_TAG, MPI_COMM_WORLD, 
                &recvAckHandle));

   #if USE_ISEND
      MPI_SAFE(MPI_Wait(&sendMsgHandle, &status));
   #endif
    }

    if (iamreceiver) {
      /* wait for message */
     #if USE_TEST
      int flag = 0;
      while (!flag) MPI_SAFE(MPI_Test(&recvMsgHandle, &flag, &status)); 
     #else
      MPI_SAFE(MPI_Wait(&recvMsgHandle, &status));
     #endif
      CHECKTAG(status.MPI_TAG);

      READMSG(recvMsgbuffer, msgsz);

      /* send acknowledgement */
      WRITEMSG(sendAckbuffer, 1);
    #if USE_ISEND
      MPI_SAFE(MPI_Isend(sendAckbuffer, ACKSZ, MPI_BYTE, peerid, peermpitag, MPI_COMM_WORLD, &sendAckHandle));
    #else
      MPI_SAFE(MPI_Send(sendAckbuffer, ACKSZ, MPI_BYTE, peerid, peermpitag, MPI_COMM_WORLD));
    #endif

      /* pre-post recv for next message */
      MPI_SAFE(MPI_Irecv(recvMsgbuffer, msgsz, MPI_BYTE, 
                peerid, MPI_ANY_TAG, MPI_COMM_WORLD, 
                &recvMsgHandle));
    #if USE_ISEND
      MPI_SAFE(MPI_Wait(&sendAckHandle, &status));
    #endif
    }

    if (iamsender) {
      /* wait for acknowledgement */
      MPI_SAFE(MPI_Wait(&recvAckHandle, &status));
      CHECKTAG(status.MPI_TAG);
      READMSG(recvAckbuffer, 1);
    }

  }

  endtime = getMicrosecondTimeStamp();

  /* last recv must be cancelled (not included in timing) */
  #if 0
      if (iamreceiver) MPI_SAFE(MPI_Cancel(&recvMsgHandle));
  #else
      /* apparently some MPI impls don't implement cancel at all.. (grr..) */
      /* use an extra send instead to get the same effect */
      if (iamsender) 
        MPI_SAFE(MPI_Send(sendMsgbuffer, msgsz, MPI_BYTE, peerid, peermpitag, MPI_COMM_WORLD));
  #endif

  if (iamreceiver) MPI_SAFE(MPI_Wait(&recvMsgHandle, &status));

  free(sendMsgbuffer);
  free(sendAckbuffer);
  free(recvMsgbuffer);
  free(recvAckbuffer);

  return (double)(endtime - starttime);
}
Example no. 22
0
static int exchanger_waitall(exchanger_t* ex, mpi_message_t* msg)
{
  // Allocate storage for statuses of sends/receives.
  int num_requests = msg->num_requests;
  MPI_Status statuses[num_requests];
  
  int err = 0;
  if (ex->dl_thresh <= 0.0)
  {
    // If we're not using deadlock detection, simply call MPI_Waitall. 
    err = MPI_Waitall(num_requests, msg->requests, statuses);
  }
  else
  {
    // Otherwise, we get all fancy.
    int finished[num_requests];
    memset(finished, 0, num_requests*sizeof(int));
    bool expecting_data = (msg->num_receives > 0);
    bool sent_data = (msg->num_sends > 0);

    // Start the deadlock clock.
    real_t t1 = (real_t)MPI_Wtime();

    // Now poll the transmissions till they complete.
    bool all_finished;
    do
    {
      all_finished = true;
      for (int i = 0; i < num_requests; ++i)
      {
        if (!finished[i])
        {
          if (MPI_Test(&(msg->requests[i]), &(finished[i]), &(statuses[i])) != MPI_SUCCESS)
            return -1;
          if (!finished[i]) all_finished = false;
        }
      }

      // If the transmissions have finished at this point, we 
      // can break out of the loop. 
      if (all_finished) break;

      // Take a look at the time. 
      real_t t2 = (real_t)MPI_Wtime();

      // If we've passed the deadlock threshold, set our error flag and 
      // and gather some diagnostic data. 
      if ((t2 - t1) > ex->dl_thresh)
      {
        // Cancel all unfinished communications. 
        for (int i = 0; i < num_requests; ++i)
        {
          if (!finished[i])
            MPI_Cancel(&(msg->requests[i]));
        }

        // Now generate a comprehensive report. 
        err = -1;

        int num_outstanding_sends = 0, num_outstanding_receives = 0,
            num_completed_sends = 0, num_completed_receives = 0;
        int outstanding_send_procs[msg->num_sends],
            outstanding_send_bytes[msg->num_sends],
            outstanding_receive_procs[msg->num_receives],
            outstanding_receive_bytes[msg->num_receives],
            completed_send_procs[msg->num_sends],
            completed_send_bytes[msg->num_sends],
            completed_receive_procs[msg->num_receives],
            completed_receive_bytes[msg->num_receives];
        for (int i = 0; i < num_requests; ++i)
        {
          if (!finished[i])
          {
            if (expecting_data && (i < ex->receive_map->size)) // outstanding receive 
            {
              outstanding_receive_procs[num_outstanding_receives] = msg->source_procs[i];
              outstanding_receive_bytes[num_outstanding_receives] = msg->receive_buffer_sizes[i] * msg->data_size;
              ++num_outstanding_receives;
            }
            else if (sent_data) // outstanding send 
            {
              outstanding_send_procs[num_outstanding_sends] = msg->dest_procs[i - msg->num_receives];
              outstanding_send_bytes[num_outstanding_sends] = msg->send_buffer_sizes[i - msg->num_receives] * msg->data_size;
              ++num_outstanding_sends;
            }
          }
          else
          {
            if (expecting_data && (i < msg->num_receives)) // completed receive 
            {
              completed_receive_procs[num_completed_receives] = msg->source_procs[i];
              completed_receive_bytes[num_completed_receives] = msg->receive_buffer_sizes[i] * msg->data_size;
              ++num_completed_receives;
            }
            else if (sent_data) // completed send 
            {
              completed_send_procs[num_completed_sends] = msg->dest_procs[i - msg->num_receives];
              completed_send_bytes[num_completed_sends] = msg->send_buffer_sizes[i - msg->num_receives] * msg->data_size;
              ++num_completed_sends;
            }
          }
        }

        // At this point, there must be at least one uncompleted 
        // send and/or receive. 
        ASSERT((num_outstanding_sends > 0) || (num_outstanding_receives > 0));

        // Format the report.
        fprintf(ex->dl_output_stream, "%d: MPI Deadlock:\n", ex->rank);
        if (num_completed_sends > 0)
        {
          fprintf(ex->dl_output_stream, "Completed sending data to:\n");
          for (int i = 0; i < num_completed_sends; ++i)
            fprintf(ex->dl_output_stream, "  %d (%d bytes)\n", completed_send_procs[i], completed_send_bytes[i]);
        }
        if (num_completed_receives > 0)
        {
          fprintf(ex->dl_output_stream, "Completed receiving data from:\n");
          for (int i = 0; i < num_completed_receives; ++i)
            fprintf(ex->dl_output_stream, "  %d (%d bytes)\n", completed_receive_procs[i], completed_receive_bytes[i]);
        }
        if (num_outstanding_sends > 0)
        {
          fprintf(ex->dl_output_stream, "Still sending data to:\n");
          for (int i = 0; i < num_outstanding_sends; ++i)
            fprintf(ex->dl_output_stream, "  %d (%d bytes)\n", outstanding_send_procs[i], outstanding_send_bytes[i]);
        }
        if (num_outstanding_receives > 0)
        {
          fprintf(ex->dl_output_stream, "Still expecting data from:\n");
          for (int i = 0; i < num_outstanding_receives; ++i)
            fprintf(ex->dl_output_stream, "  %d (%d bytes)\n", outstanding_receive_procs[i], outstanding_receive_bytes[i]);
        }
        fprintf(ex->dl_output_stream, "Grace period: %g seconds\n", ex->dl_thresh);

        // Bug out. 
        return -1;
      }
      // Otherwise, slog onward. 
    }
    while (!all_finished && (err == 0));
  }

  // If the status buffer contains any errors, check it out. 
  if (err == MPI_ERR_IN_STATUS)
  {
    char errstr[MPI_MAX_ERROR_STRING];
    int errlen;
    for (int i = 0; i < num_requests; ++i)
    {
      if (statuses[i].MPI_ERROR != MPI_SUCCESS)
      {
        MPI_Error_string(statuses[i].MPI_ERROR, errstr, &errlen);
        if (i < msg->num_receives)
        {
          // Now we can really get nitty-gritty and try to diagnose the
          // problem carefully! 
          if (statuses[i].MPI_ERROR == MPI_ERR_TRUNCATE)
          {
            fprintf(ex->dl_output_stream, "%d: MPI error receiving from %d (%d) %s\n"
                    "(Expected %d bytes)\n", ex->rank, msg->source_procs[i], statuses[i].MPI_ERROR, 
                    errstr, msg->receive_buffer_sizes[i]);
          }
          else
          {
            fprintf(ex->dl_output_stream, "%d: MPI error receiving from %d (%d) %s\n",
                    ex->rank, msg->source_procs[i], statuses[i].MPI_ERROR, errstr);
          }
        }
        else 
        {
          fprintf(ex->dl_output_stream, "%d: MPI error sending to %d (%d) %s\n",
                  ex->rank, msg->dest_procs[i - msg->num_receives], statuses[i].MPI_ERROR, errstr);
        }
        return -1;
      }
      // We shouldn't get here. 
    }
  }

  // That's it.
  return err;
}
Example no. 23
0
int main(int argc, char * argv[]) {

	// initialize MPI

	int i, rank, size, rec;
	int arr[100];

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	// populate a sample array,
	// i.e. simulated input

	if(rank == 0) {

		printf("\r\n\r\nHello from r00t. Starting\r\n\r\n");

	} else {

		int i;

		for(i=0;i<100;i++) {
			srand(time(NULL) * rank * i);
			arr[i] = rand() % 100;
		}

	}

	MPI_Request sreq, rreq;
	MPI_Status status;

	if(rank > 0) {

		int source, dest;

		if(rank == 1) { source = size - 1; } else { source = rank - 1; }
		if(rank == size - 1) { dest = 1; } else { dest = rank + 1; }

		// create a persistent send and a persistent receive request
		MPI_Send_init(&arr[rank], 1, MPI_INT, dest, 0, MPI_COMM_WORLD, &sreq);
		MPI_Recv_init(&rec, 1, MPI_INT, source, 0, MPI_COMM_WORLD, &rreq);

		// once created we can use them over and over again...

		for(i=0; i<100; i++) {

			MPI_Start(&rreq);
			MPI_Start(&sreq);

			MPI_Wait(&rreq, &status);

			printf("My rank is %d and I received %d from %d\n", rank, rec, source);

			MPI_Wait(&sreq, &status);


		}

		MPI_Cancel(&rreq);
		MPI_Cancel(&sreq);

	}

	MPI_Finalize();

	return 0;
}
Example no. 24
0
void doWorkQueueManager(int size, StealStack *s)
{
	MPI_Request request[size*3]; //make one array so we can do a Waitall on all comm
	MPI_Request *req_make_global = &request[0];
	MPI_Request *req_work_request = &request[size];
	MPI_Request *req_response = &request[2*size];
	MPI_Status request_status; //, send_status;
	MPI_Status wait_all_status[3*size];
	void *shared_work_buf[size];
	unsigned long work_request_buf[size];
	int flag, who, i;
	struct waiting_entry waiting[size];
	unsigned long timestamps[size];
	unsigned long msg_counts[size];
	int work_response_send_count=0;
	int done=0;

#ifdef TRACE_RELEASES
	/* Track releases */
	ss_setState(s, SS_WORK);
#else
	/* Attribute the WQM's time to overhead */
	ss_setState(s, SS_WORK);
	ss_setState(s, SS_IDLE);
#endif 

	/* Init the receive buffers */
	for(i = 0; i < size; i++) {
		waiting[i].flag = 0;   /*init waiting to not waiting*/
		waiting[i].buf  = NULL;    /*init waiting to not waiting*/
		timestamps[i] = 0;
		msg_counts[i] = 0;
		shared_work_buf[i] = malloc(s->work_size*s->chunk_size);
	}

	/* Set up non-blocking receives for communicating with workers */
	for(i=0; i < size; i++) {
		/* Listen for work releases */
		MPI_Irecv(shared_work_buf[i], s->work_size*s->chunk_size, MPI_BYTE, i,
				MPI_MAKEWORKGLOBAL_TAG, MPI_COMM_WORLD, &req_make_global[i]);

		/* Listen for work requests (A WORKREQUEST should be the chunksize requested) */
		MPI_Irecv(&work_request_buf[i], 1, MPI_LONG, i, MPI_WORKREQUEST_TAG,
				MPI_COMM_WORLD, &req_work_request[i]);
	}

	/** BEGIN WORK MANAGEMENT LOOP */
	while(!done) {
		/* Wait for someone to send work or to request work */
		MPI_Waitany(2*size, request, &who, &request_status);

		/* Sending shared work to the queue */
		if(who < size) {
			void *w = malloc(s->work_size*s->chunk_size);

#ifdef TRACE_RELEASES
			/* Mark this release as a "steal" event */
			ss_markSteal(s, who);
			ss_setState(s, SS_SEARCH);
			ss_setState(s, SS_WORK);
#endif 
			/* Update timestamp */
			msg_counts[who]++;

			memcpy(w, shared_work_buf[who], s->work_size*s->chunk_size);
			deq_pushFront(globalQueue, w);
			s->globalWork += s->chunk_size;

			MPI_Irecv(shared_work_buf[who], s->work_size*s->chunk_size, MPI_BYTE, who,
					MPI_MAKEWORKGLOBAL_TAG, MPI_COMM_WORLD, &req_make_global[who]);
		}

		/* Requesting shared work from the queue */
		else { // (who >= size)
			who -= size;
			/* mark this id is waiting for work */
			waiting[who].flag = 1;

			/* Update timestamp */
			msg_counts[who]++;
			timestamps[who] = work_request_buf[who];
			/* This should be an invariant.. */
			if (timestamps[who] < msg_counts[who]) {
				ss_error("WQM: message delivery failure!\n", 10);
			}


			MPI_Irecv(&work_request_buf[who], 1, MPI_LONG, who, MPI_WORKREQUEST_TAG, MPI_COMM_WORLD, &req_work_request[who]);
		}

		/* finish last round of sends before start to send more data */
		if (work_response_send_count) {
			MPI_Waitall(work_response_send_count, req_response, wait_all_status);

                        // Free all the buffers used in the last round
                        for (i = 0; i < size; i++) {
                          if (waiting[i].buf != NULL) {
                            free(waiting[i].buf);
                            waiting[i].buf = NULL;
                          }
                        }
		}

		/* Attempt to send work to everyone who is waiting. */
		work_response_send_count = 0;
		for (i = 0; i < size; i++) {
			if (waiting[i].flag && !deq_isEmpty(globalQueue)) {
				void* work_ptr = deq_popFront(globalQueue);

				MPI_Isend(work_ptr, s->work_size*s->chunk_size, MPI_BYTE, i,
						MPI_RESPONDWORK_TAG, MPI_COMM_WORLD, &req_response[work_response_send_count]);

				work_response_send_count++;
				s->globalWork -= s->chunk_size;
				waiting[i].flag = 0;
                                waiting[i].buf  = work_ptr;
			}
		}

		/** Check for termination **/
		/* If everyone is still waiting and there are no outstanding messages
		   then we are done.  */
		done = 1;
		for(i=0; i < size; i++) {
			if(!waiting[i].flag || (msg_counts[i] != timestamps[i])) {
				done=0;
				break; //no need to check everyone else
			}
		}

		/* Sanity check */
		if(done && !deq_isEmpty(globalQueue)) {
			ss_error("WQM: Something evil happened.  We are terminating but I still have work!", 13);
		}
	} /* END: while (!done) */

	if (DEBUG & 2) printf("Queue Manager: We are done.  Letting everyone know.\n");

	/* This is a sanity test to make sure our prioritization above works.  If this testany were to
	   return true, the cancels below would error out. */
	MPI_Testany(2*size, request, &who, &flag, &request_status);
	if (flag) {
		ss_error("WQM: Attempted to terminate with inbound work!", 13);
	}

	/* Cancel the outstanding MPI_Irecvs */
	for (i = 0; i < size; i++) {
		MPI_Cancel(&req_make_global[i]);
		MPI_Cancel(&req_work_request[i]);
	}

	/* send a msg to everyone that no work exists, everyone should be waiting on an MPI_recv here */
	work_response_send_count = 0;
	for(i=0; i < size; i++) {
		MPI_Isend(NULL, 0, MPI_BYTE, i, MPI_RESPONDWORK_TAG, MPI_COMM_WORLD, &req_response[i]);
		work_response_send_count++;
	}

	MPI_Waitall(work_response_send_count, req_response, wait_all_status);

	ss_setState(s, SS_IDLE);
}
Example no. 25
0
int main( int argc, char *argv[] )
{

    double       sbuf[20000];
#ifdef FOO
    double rbuf[20000];
#endif
    int          rank;
    int          n, flag, size;
    int          err = 0;
    int          verbose = 0;
    MPI_Status   status;
    MPI_Request  req;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    if (size < 2) {
	printf( "Cancel test requires at least 2 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* Short Message Test */
    n = 200;

    if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req); 
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag) {
	    err++;
	    printf( "Cancelling a short message failed where it should succeed.\n" );
	}
	else if (verbose)
	{
	    printf("Cancelling a short message succeeded.\n");
	}
    }  /* end if rank == 1 */

#ifdef FOO
/* Note that MPI-2 specifies that status.MPI_ERROR is only set by
   multiple completion (e.g., MPI_Waitsome) and not by test_cancelled.
*/
    MPI_Barrier(MPI_COMM_WORLD); 

    if (rank == 0) {  /* begin if rank == 0 */
	MPI_Recv( rbuf, n, MPI_DOUBLE, 1, 1, MPI_COMM_WORLD, &status);
    }  /* end if rank = 0 */
    else if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req); 
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag && status.MPI_ERROR != MPI_SUCCESS) {
	    err++;
	    printf( "Cancel of a send returned an error in the status field.\n" );
	}
	  /* end if status.MPI_ERROR */
    }  /* end if rank == 1 */
#endif

    MPI_Barrier(MPI_COMM_WORLD);

    /* Eager Message Test */
    n = 3000;

    if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req);
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag) {
	    err++;
	    printf( "Cancelling an eager message (3000 doubles) failed where it should succeed.\n" );
	}
	else if (verbose)
	{
	    printf("Cancelling an eager message (3000 doubles) succeeded.\n");
	}
    }  /* end if rank == 1 */

#ifdef FOO
    MPI_Barrier(MPI_COMM_WORLD); 

    if (rank == 0) {  /* begin if rank == 0 */
	MPI_Irecv(rbuf, n, MPI_DOUBLE, 1, 1, MPI_COMM_WORLD, &req );
	MPI_Wait( &req, &status);
    }  /* end if rank = 0 */
    else if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req);
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag && status.MPI_ERROR != MPI_SUCCESS) {
	    err++;
	    printf( "Cancel of a send returned an error in the status field.\n" );
	}
	/* end if status.MPI_ERROR */
    }  /* end if rank == 1 */
#endif

    MPI_Barrier(MPI_COMM_WORLD);

    /* Rndv Message Test */
    n = 20000;

    if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req);
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag) {
	    err++;
	    printf( "Cancelling a rendezvous message failed (20000 doubles) where it should succeed.\n" );
	}
	else if (verbose)
	{
	    printf("Cancelling an rendezvous message (20000 doubles) succeeded.\n");
	}
    }  /* end if rank == 1 */

#ifdef FOO
    MPI_Barrier(MPI_COMM_WORLD); 

    if (rank == 0) {  /* begin if rank == 0 */
	MPI_Irecv(rbuf, n, MPI_DOUBLE, 1, 1, MPI_COMM_WORLD, &req );
	MPI_Wait( &req, &status); 
    }  /* end if rank = 0 */
    else if (rank == 1) { /* begin if rank = 1 */
	MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req );
	MPI_Cancel(&req);
	MPI_Wait(&req, &status);
	MPI_Test_cancelled(&status, &flag);
	if (!flag && status.MPI_ERROR != MPI_SUCCESS) {
	    err++;
	    printf( "Cancel of a send returned an error in the status field.\n" );
	}
	/* end if status.MPI_ERROR */
    }  /* end if rank == 1 */
#endif

    MPI_Barrier(MPI_COMM_WORLD); 

    if (rank == 1) {  /* begin if rank = 1 */
	if (err) {
	    printf( "Test failed with %d errors.\n", err );
	}
	else {
	    printf( " No Errors\n" );
	}
    }

    MPI_Finalize( );

    return 0;
}
Example no. 26
0
 MpiChannelInterface::~MpiChannelInterface()
 {
     MPI_Cancel( &m_request );
     delete m_ser1;
     delete m_ser2;
 }
Example no. 27
0
int main (int argc, char *argv[])
{
  int ierr;
  int rank;
  static char buffer[80];
  MPI_Request req = MPI_REQUEST_NULL;
  MPI_Status status, status2;
#ifdef V_T
  double ts;
  int messageframe;
#endif

  ierr = MPI_Init(&argc,&argv);
#ifdef V_T
  ts = VT_timestamp();
#endif



  /* this used to be buggy... */
  MPI_Wait( &req, &status );

  ierr = MPI_Barrier(MPI_COMM_WORLD);
  test_pair();





  MPI_Comm_rank ( MPI_COMM_WORLD, &rank );
  if ( getenv ("VT_ABORT_BEFORE_FINALIZE") ) {
      if ( atoi ( getenv ("VT_ABORT_BEFORE_FINALIZE") ) < 2 )
          MPI_Abort( MPI_COMM_WORLD, 10 );

      if ( !rank ) {
	  *((char *)NULL) = 0;
      } else {
	  MPI_Barrier ( MPI_COMM_WORLD );
      }
  }

  /* test some other aspects of message transfer: persistent send with MPI_PROC_NULL */
  MPI_Send_init( &ierr, 1, MPI_INT, MPI_PROC_NULL, 100, MPI_COMM_WORLD, &req );
  MPI_Start( &req );
  MPI_Wait( &req, &status );
  MPI_Start( &req );
  MPI_Wait( &req, &status );
  MPI_Request_free( &req );

  /* persistent receive with MPI_PROC_NULL */
  MPI_Recv_init( &ierr, 1, MPI_INT, MPI_PROC_NULL, 100, MPI_COMM_WORLD, &req );
  MPI_Start( &req );
  MPI_Wait( &req, &status );
  MPI_Start( &req );
  MPI_Wait( &req, &status );
  MPI_Request_free( &req );

  /* real reuse of persistent communication */
  if( rank & 1 ) {
      MPI_Recv_init( buffer, sizeof(buffer), MPI_CHAR, rank^1, 101, MPI_COMM_WORLD, &req );
  } else {
      MPI_Send_init( buffer, sizeof(buffer), MPI_CHAR, rank^1, 101, MPI_COMM_WORLD, &req );
  }
  MPI_Start( &req );
  MPI_Wait( &req, &status );
  MPI_Start( &req );
  MPI_Wait( &req, &status );
  MPI_Request_free( &req );

  /* send to MPI_PROC_NULL */
  MPI_Send( buffer, sizeof(buffer), MPI_CHAR, MPI_PROC_NULL, 103, MPI_COMM_WORLD );

  /* cancelled receive */
  MPI_Irecv( buffer, sizeof(buffer), MPI_CHAR, rank^1, 105, MPI_COMM_WORLD, &req );
  MPI_Cancel( &req );
  MPI_Wait( &req, &status2 );

#ifdef V_T
  printf( "Time: %f\n", VT_timestamp()-ts );
#endif
  ierr = MPI_Finalize();

  return ierr;
}
Example no. 28
0
static int intra_Alltoallv(void *sendbuf,
                           int *sendcnts,
                           int *sdispls,
                           struct MPIR_DATATYPE *sendtype,
                           void *recvbuf,
                           int *recvcnts,
                           int *rdispls,
                           struct MPIR_DATATYPE *recvtype,
                           struct MPIR_COMMUNICATOR *comm)
{
    int size, i, j, rcnt;
    MPI_Aint send_extent, recv_extent;
    int mpi_errno = MPI_SUCCESS;
    MPI_Status *starray;
    MPI_Request *reqarray;

    /* Get size and switch to collective communicator */
    MPIR_Comm_size(comm, &size);
    comm = comm->comm_coll;

    /* Get extent of send and recv types */
    MPI_Type_extent(sendtype->self, &send_extent);
    MPI_Type_extent(recvtype->self, &recv_extent);

    /* Lock for collective operation */
    MPID_THREAD_LOCK(comm->ADIctx, comm);

    /* 1st, get some storage from the heap to hold handles, etc. */
    MPIR_ALLOC(starray,
               (MPI_Status *) MALLOC(2 * size * sizeof(MPI_Status)), comm,
               MPI_ERR_EXHAUSTED, "MPI_ALLTOALLV");

    MPIR_ALLOC(reqarray,
               (MPI_Request *) MALLOC(2 * size * sizeof(MPI_Request)),
               comm, MPI_ERR_EXHAUSTED, "MPI_ALLTOALLV");

    /* do the communication -- post *all* sends and receives: */
    rcnt = 0;
    for (i = 0; i < size; i++) {
        reqarray[2 * i] = MPI_REQUEST_NULL;
        if ((mpi_errno = MPI_Irecv((void *) ((char *) recvbuf +
                                             rdispls[i] * recv_extent),
                                   recvcnts[i], recvtype->self, i,
                                   MPIR_ALLTOALLV_TAG, comm->self,
                                   &reqarray[2 * i + 1]))
            )
            break;
        rcnt++;
        if ((mpi_errno = MPI_Isend((void *) ((char *) sendbuf +
                                             sdispls[i] * send_extent),
                                   sendcnts[i], sendtype->self, i,
                                   MPIR_ALLTOALLV_TAG, comm->self,
                                   &reqarray[2 * i]))
            )
            break;
        rcnt++;
    }

    /* ... then wait for *all* of them to finish: */
    if (mpi_errno) {
        /* We should really cancel all of the active requests */
        for (j = 0; j < rcnt; j++) {
            MPI_Cancel(&reqarray[j]);
        }
    } else {
        mpi_errno = MPI_Waitall(2 * size, reqarray, starray);
        if (mpi_errno == MPI_ERR_IN_STATUS) {
            for (j = 0; j < 2 * size; j++) {
                if (starray[j].MPI_ERROR != MPI_SUCCESS)
                    mpi_errno = starray[j].MPI_ERROR;
            }
        }
    }

    /* clean up */
    FREE(reqarray);
    FREE(starray);

    /* Unlock for collective operation */
    MPID_THREAD_UNLOCK(comm->ADIctx, comm);

    return (mpi_errno);
}
Example no. 29
0
 ~CommLayer() {
     MPI_Cancel(&m_request);
     delete[] m_rxBuffer;
 }
Example no. 30
0
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size, /* source, */ dest;
    MPI_Comm      comm;
    MPI_Status    status;
    MPI_Request   req;
    static int bufsizes[4] = { 1, 100, 10000, 1000000 };
    char *buf;
#ifdef TEST_IRSEND
    int veryPicky = 0;   /* Set to 1 to test "quality of implementation" in
			    a tricky part of cancel */
#endif
    int  cs, flag, n;

    MTest_Init( &argc, &argv );

    comm = MPI_COMM_WORLD;
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

    /* source = 0; */
    dest   = size - 1;

    MTestPrintfMsg( 1, "Starting scancel test\n" );
    for (cs=0; cs<4; cs++) {
	if (rank == 0) {
	    n = bufsizes[cs];
	    buf = (char *)malloc( n );
	    if (!buf) {
		fprintf( stderr, "Unable to allocate %d bytes\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    MTestPrintfMsg( 1, "(%d) About to create isend and cancel\n",cs );
	    MPI_Isend( buf, n, MPI_CHAR, dest, cs+n+1, comm, &req );
	    MPI_Cancel( &req );
	    MPI_Wait( &req, &status );
	    MTestPrintfMsg( 1, "Completed wait on isend\n" );
	    MPI_Test_cancelled( &status, &flag );
	    if (!flag) {
		errs ++;
		printf( "Failed to cancel an Isend request\n" );
		fflush(stdout);
	    }
	    else
	    {
		n = 0;
	    }
	    /* Send the size, zero for successfully cancelled */
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    /* Send the tag so the message can be received */
	    n = cs+n+1;
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    free( buf );
	}
	else if (rank == dest)
	{
	    int nn, tag;
	    char *btemp;
	    MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
	    MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
	    if (nn > 0)
	    {
		/* If the message was not cancelled, receive it here */
		btemp = (char*)malloc( nn );
		if (!btemp)
		{
		    fprintf( stderr, "Unable to allocate %d bytes\n", nn );
		    MPI_Abort( MPI_COMM_WORLD, 1 );
                    exit(1);
		}
		MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
		free(btemp);
	    }
	}
	MPI_Barrier( comm );

	if (rank == 0) {
	    char *bsendbuf;
	    int bsendbufsize;
	    int bf, bs;
	    n = bufsizes[cs];
	    buf = (char *)malloc( n );
	    if (!buf) {
		fprintf( stderr, "Unable to allocate %d bytes\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    bsendbufsize = n + MPI_BSEND_OVERHEAD;
	    bsendbuf = (char *)malloc( bsendbufsize );
	    if (!bsendbuf) {
		fprintf( stderr, "Unable to allocate %d bytes for bsend\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    MPI_Buffer_attach( bsendbuf, bsendbufsize );
	    MTestPrintfMsg( 1, "About to create and cancel ibsend\n" );
	    MPI_Ibsend( buf, n, MPI_CHAR, dest, cs+n+2, comm, &req );
	    MPI_Cancel( &req );
	    MPI_Wait( &req, &status );
	    MPI_Test_cancelled( &status, &flag );
	    if (!flag) {
		errs ++;
		printf( "Failed to cancel an Ibsend request\n" );
		fflush(stdout);
	    }
	    else
	    {
		n = 0;
	    }
	    /* Send the size, zero for successfully cancelled */
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    /* Send the tag so the message can be received */
	    n = cs+n+2;
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    free( buf );
	    MPI_Buffer_detach( &bf, &bs );
	    free( bsendbuf );
	}
	else if (rank == dest)
	{
	    int nn, tag;
	    char *btemp;
	    MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
	    MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
	    if (nn > 0)
	    {
		/* If the message was not cancelled, receive it here */
		btemp = (char*)malloc( nn );
		if (!btemp)
		{
		    fprintf( stderr, "Unable to allocate %d bytes\n", nn);
		    MPI_Abort( MPI_COMM_WORLD, 1 );
                    exit(1);
		}
		MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
		free(btemp);
	    }
	}
	MPI_Barrier( comm );

	/* Because this test is erroneous, we do not perform it unless
	   TEST_IRSEND is defined.  */
#ifdef TEST_IRSEND
	/* We avoid ready send to self because an implementation
	   is free to detect the error in delivering a message to
	   itself without a pending receive; we could also check
	   for an error return from the MPI_Irsend */
	if (rank == 0 && dest != rank) {
	    n = bufsizes[cs];
	    buf = (char *)malloc( n );
	    if (!buf) {
		fprintf( stderr, "Unable to allocate %d bytes\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    MTestPrintfMsg( 1, "About to create and cancel irsend\n" );
	    MPI_Irsend( buf, n, MPI_CHAR, dest, cs+n+3, comm, &req );
	    MPI_Cancel( &req );
	    MPI_Wait( &req, &status );
	    MPI_Test_cancelled( &status, &flag );
	    /* This can be pretty ugly.  The standard is clear (Section 3.8)
	       that either a sent message is received or the 
	       sent message is successfully cancelled.  Since this message
	       can never be received, the cancel must complete
	       successfully.  

	       However, since there is no matching receive, this
	       program is erroneous.  In this case, we can't really
	       flag this as an error */
	    if (!flag && veryPicky) {
		errs ++;
		printf( "Failed to cancel an Irsend request\n" );
		fflush(stdout);
	    }
	    if (flag)
	    {
		n = 0;
	    }
	    /* Send the size, zero for successfully cancelled */
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    /* Send the tag so the message can be received */
	    n = cs+n+3;
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    free( buf );
	}
	else if (rank == dest)
	{
	    int n, tag;
	    char *btemp;
	    MPI_Recv( &n, 1, MPI_INT, 0, 123, comm, &status );
	    MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
	    if (n > 0)
	    {
		/* If the message was not cancelled, receive it here */
		btemp = (char*)malloc( n );
		if (!btemp)
		{
		    fprintf( stderr, "Unable to allocate %d bytes\n", n);
		    MPI_Abort( MPI_COMM_WORLD, 1 );
                    exit(1);
		}
		MPI_Recv( btemp, n, MPI_CHAR, 0, tag, comm, &status );
		free(btemp);
	    }
	}
	MPI_Barrier( comm );
#endif

	if (rank == 0) {
	    n = bufsizes[cs];
	    buf = (char *)malloc( n );
	    if (!buf) {
		fprintf( stderr, "Unable to allocate %d bytes\n", n );
		MPI_Abort( MPI_COMM_WORLD, 1 );
                exit(1);
	    }
	    MTestPrintfMsg( 1, "About to create and cancel issend\n" );
	    MPI_Issend( buf, n, MPI_CHAR, dest, cs+n+4, comm, &req );
	    MPI_Cancel( &req );
	    MPI_Wait( &req, &status );
	    MPI_Test_cancelled( &status, &flag );
	    if (!flag) {
		errs ++;
		printf( "Failed to cancel an Issend request\n" );
		fflush(stdout);
	    }
	    else
	    {
		n = 0;
	    }
	    /* Send the size, zero for successfully cancelled */
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    /* Send the tag so the message can be received */
	    n = cs+n+4;
	    MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
	    free( buf );
	}
	else if (rank == dest)
	{
	    int nn, tag;
	    char *btemp;
	    MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
	    MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
	    if (nn > 0)
	    {
		/* If the message was not cancelled, receive it here */
		btemp = (char*)malloc( nn );
		if (!btemp)
		{
		    fprintf( stderr, "Unable to allocate %d bytes\n", nn);
		    MPI_Abort( MPI_COMM_WORLD, 1 );
                    exit(1);
		}
		MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
		free(btemp);
	    }
	}
	MPI_Barrier( comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}