Code Example #1
File: req.c Project: Katetc/cime
FC_FUNC(mpi_testany, MPI_TESTANY)
         (int * count, int * array_of_requests,
          int * index, int * flag, int *status, int * ierr)
{
  *ierr = MPI_Testany(*count, array_of_requests, index,
                      flag, mpi_c_status(status));
}
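The wrapper above forwards its Fortran arguments straight to MPI_Testany. For reference, here is a minimal, self-contained sketch of the C-level contract it relies on (not taken from the cime project; the ranks and tag are arbitrary): flag is set once some request completes, and index reports which one.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int value = 0, index, flag;
    MPI_Request req;
    MPI_Status status;

    if (rank == 0) {
        value = 42;
        MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Irecv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &req);
        flag = 0;
        while (!flag)   /* poll until the receive completes */
            MPI_Testany(1, &req, &index, &flag, &status);
        printf("request %d completed, value = %d\n", index, value);
    }

    MPI_Finalize();
    return 0;
}

Run with at least two ranks, e.g. mpirun -np 2.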
Code Example #2
File: mpi_event_loop.c Project: Meijuh/ltsmin
static void event_loop(event_queue_t queue,int block){
    while(queue->pending){
        Debug("MPI waiting for %d events",queue->pending);
        int index[queue->pending];
        int completed;
        MPI_Status status[queue->pending];
        if (block) {
            Debug("MPI_Waitsome");
            //int res = MPI_Waitsome(queue->pending,queue->request,&completed,index,status);
            int res = MPI_Waitany(queue->pending,queue->request,index,status);
            completed=1;
            Debug("MPI_Waitsome : %d",res);
            if (res != MPI_SUCCESS) Abort("MPI_Waitsome");
            queue->wait_some_calls++;
            if (completed>1) queue->wait_some_multi++;
            block=0;
        } else {
            Debug("MPI_Testsome");
            //int res = MPI_Testsome(queue->pending,queue->request,&completed,index,status);
            int flag;
            int res = MPI_Testany(queue->pending,queue->request,index,&flag,status);
            completed=flag?1:0;
            Debug("MPI_Testsome : %d",res);
            if (res != MPI_SUCCESS) Abort("MPI_Testsome");
            queue->test_some_calls++;
            if (completed==0) {
                queue->test_some_none++;
                Debug("MPI exit event loop");
                return;
            }
            if (completed>1) queue->test_some_multi++;
        }
        Debug("MPI completion of %d events",completed);
        event_callback cb[completed];
        void *ctx[completed];
        for(int i=0;i<completed;i++){
            cb[i]=queue->cb[index[i]];
            queue->cb[index[i]]=NULL;
            ctx[i]=queue->context[index[i]];
        }
        int k=0;
        for(int i=0;i<queue->pending;i++){
            if (queue->cb[i]) {
                if (k<i) {
                    queue->request[k]=queue->request[i];
                    queue->cb[k]=queue->cb[i];
                    queue->context[k]=queue->context[i];
                }
                k++;
            }
        }
        queue->pending=k;
        for(int i=0;i<completed;i++) {
            Debug("MPI call back");
            cb[i](ctx[i],&status[i]);
            Debug("MPI call back done");
        }
    }
    Debug("MPI exit loop");
}
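One subtlety in polling loops like event_loop() above: the MPI standard lets MPI_Testany return with flag set and index == MPI_UNDEFINED when the list contains no active requests. A defensive poll, as a sketch (not part of ltsmin), folds that case into "nothing completed":

#include <mpi.h>

/* Returns the index of a completed request, or -1 if nothing completed.
 * Treats "flag set but index == MPI_UNDEFINED" (no active requests in
 * the list) the same as an unsuccessful test. */
static int poll_one(int n, MPI_Request *reqs, MPI_Status *st)
{
    int index, flag;
    MPI_Testany(n, reqs, &index, &flag, st);
    return (flag && index != MPI_UNDEFINED) ? index : -1;
}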
Code Example #3
File: communicator.cpp Project: pengtaoxie/dml
int communicator::check_status()
{
  if(num_pending==0)
    return -1;
  MPI_Status  stat;
  int flag=0;
  MPI_Request * reqtmp=new MPI_Request[num_pending];
  int idx=0;
  std::unordered_map<int,int> map_checkid_connid;
  for(int i=0;i<num_connections;i++){
    if(status[i]==1){
      reqtmp[idx]=reqs[i];
      map_checkid_connid[idx]=i;
      idx++;
    }
  }
  int hit_idx=-1;
  MPI_Testany( num_pending, reqtmp, &hit_idx, &flag, &stat);
  if(flag!=0&&hit_idx>=0&&hit_idx<num_pending){
    delete[]reqtmp;

    return map_checkid_connid[hit_idx];
  }
  delete[]reqtmp;
  return -1;
}
Code Example #4
File: GOAL_Tran_MPI.hpp Project: sriram87/mpi-goal
/* tests for outstanding elements and returns the finished index or
 * SCHED_NONE  */
static inline int scheduler_testany(sched_req *reqs, int count) {

  int index, flag;
  int ret = MPI_Testany(count, reqs, &index, &flag, MPI_STATUS_IGNORE);
  if(flag) {
    return index;
  } else {
    return SCHED_NONE;
  }
}
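The same caveat applies to scheduler_testany() above: when every entry of reqs is inactive, MPI_Testany sets flag with index == MPI_UNDEFINED, which the function would return as if it were a valid index. A hedged variant (using MPI_Request where the excerpt uses its sched_req alias; the real SCHED_NONE value lives in the project, -1 here is an assumption):

#include <mpi.h>

#define SCHED_NONE (-1)   /* assumed sentinel; the project defines the real one */

/* Map both "nothing completed" and "no active requests" to SCHED_NONE. */
static inline int scheduler_testany_checked(MPI_Request *reqs, int count) {
  int index, flag;
  MPI_Testany(count, reqs, &index, &flag, MPI_STATUS_IGNORE);
  return (flag && index != MPI_UNDEFINED) ? index : SCHED_NONE;
}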
Code Example #5
JNIEXPORT jint JNICALL Java_mpi_Request_testAny(
        JNIEnv *env, jclass clazz, jlongArray requests)
{
    int count = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    int index, flag;
    int rc = MPI_Testany(count, cReq, &index, &flag, MPI_STATUS_IGNORE);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    return index;
}
Code Example #6
File: misc_socket.c Project: binarycode/mandor2
/**
  * \brief Seeks socket which has completed operation (\b irecv or \b isend). Used to process data in the arrival order.
  * <b>Unlocking of the socket must be done by client to free the socket</b>.
  */
socket_t *socket_seekTest(const channel_t * ch, int direction)
{
	int done, num;
	MPI_Status status;
	MPI_Testany(ch->socketsN[direction], ch->requests[direction], &num, &done, &status);
	if(done && num != MPI_UNDEFINED) {
		socket_t *s = ch->sockets[direction] + num;
		if(!s->locked) error("socket_seekTest: MPI_Testany pointed to the unlocked socket (cpu = %d, direction = %s).", s->cpu, (s->direction) ? "outcome" : "income");
		return s;
	}

	return NULL;
}
Code Example #7
File: testanyf.c Project: agrimaldi/pmap
FORT_DLL_SPEC void FORT_CALL mpi_testany_ ( MPI_Fint *v1, MPI_Fint v2[], MPI_Fint *v3, MPI_Fint *v4, MPI_Fint *v5, MPI_Fint *ierr ){
    int l3;
    int l4;

#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif

    if (v5 == MPI_F_STATUS_IGNORE) { v5 = (MPI_Fint*)MPI_STATUS_IGNORE; }
    *ierr = MPI_Testany( (int)*v1, (MPI_Request *)(v2),  &l3, &l4, (MPI_Status *)v5 );
    *v3 = (MPI_Fint)l3;
    if (l3 >= 0) *v3 = *v3 + 1;
    if (*ierr == MPI_SUCCESS) *v4 = MPIR_TO_FLOG(l4);
}
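The *v3 = *v3 + 1 adjustment above exists because MPI_Testany reports a 0-based C index while Fortran callers expect 1-based indexing, and MPI_UNDEFINED (negative in MPICH) must pass through unchanged. The rule in isolation, as a sketch rather than MPICH code:

#include <mpi.h>

/* Convert a C request index to the Fortran convention:
 * 0-based becomes 1-based; MPI_UNDEFINED is passed through untouched. */
static int c_index_to_fortran(int c_index)
{
    return (c_index >= 0) ? c_index + 1 : c_index;
}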
Code Example #8
JNIEXPORT jobject JNICALL Java_mpi_Request_testAnyStatus(
        JNIEnv *env, jclass clazz, jlongArray requests)
{
    int count = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    int index, flag;
    MPI_Status status;
    int rc = MPI_Testany(count, cReq, &index, &flag, &status);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    return flag ? ompi_java_status_newIndex(env, &status, index) : NULL;
}
Code Example #9
File: req.c Project: Katetc/cime
int MPI_Waitany(int count, MPI_Request *array_of_requests,
                int *index, MPI_Status *status)
{
  int flag;

  MPI_Testany(count, array_of_requests, index, &flag, status);

  if (!flag)
  {
    /* none are completed */

    fprintf(stderr,"MPI_Waitany: no requests complete, deadlock\n");
    abort();

  }

  return(MPI_SUCCESS);
}
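This mpi-serial shim may abort when flag comes back false because, with a single process, no other rank can ever complete the request later. On a real multi-process MPI the same layering would poll instead; a minimal sketch under that assumption:

#include <mpi.h>

/* Waitany layered on Testany by polling (a sketch; a production version
 * would yield or back off rather than spin hot). */
int waitany_via_testany(int count, MPI_Request *reqs,
                        int *index, MPI_Status *status)
{
    int flag = 0;
    while (!flag) {
        int rc = MPI_Testany(count, reqs, index, &flag, status);
        if (rc != MPI_SUCCESS)
            return rc;
    }
    /* flag is also set when no active requests remain; *index is then
     * MPI_UNDEFINED, matching MPI_Waitany's own behavior. */
    return MPI_SUCCESS;
}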
Code Example #10
File: testany_f.c Project: 315234/OpenFOAM-2.2.x-OSX
void mpi_testany_f(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *index, ompi_fortran_logical_t *flag, MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_Request *c_req;
    MPI_Status c_status;
    int i;
    OMPI_LOGICAL_NAME_DECL(flag);
    OMPI_SINGLE_NAME_DECL(index);

    c_req = (MPI_Request *) malloc(OMPI_FINT_2_INT(*count) * sizeof(MPI_Request));
    if (c_req == NULL) {
        *ierr = OMPI_INT_2_FINT(OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, 
                                                       MPI_ERR_NO_MEM,
                                                       FUNC_NAME));
        return;
    }

    for (i = 0; i < OMPI_FINT_2_INT(*count); ++i) {
        c_req[i] = MPI_Request_f2c(array_of_requests[i]);
    }

    *ierr = OMPI_INT_2_FINT(MPI_Testany(OMPI_FINT_2_INT(*count), c_req,
                                        OMPI_SINGLE_NAME_CONVERT(index),
                                        OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
                                        &c_status));

    OMPI_SINGLE_INT_2_LOGICAL(flag);
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {

        /* Increment index by one for fortran conventions */

        OMPI_SINGLE_INT_2_FINT(index);
        if (*flag &&
            MPI_UNDEFINED != *(OMPI_SINGLE_NAME_CONVERT(index))) {
            array_of_requests[OMPI_INT_2_FINT(*index)] =
                c_req[OMPI_INT_2_FINT(*index)]->req_f_to_c_index;
            ++(*index);
        }
        if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
            MPI_Status_c2f(&c_status, status); 
        }
    }
    free(c_req);
}
Code Example #11
File: waittestnull.c Project: Julio-Anjos/simgrid
int main(int argc, char **argv)
{
    int errs = 0;
    MPI_Status status, *status_array = 0;
    int count = 0, flag, idx, rc, errlen, *indices=0, outcnt;
    MPI_Request *reqs = 0;
    char errmsg[MPI_MAX_ERROR_STRING];

    MTest_Init(&argc, &argv);

    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    rc = MPI_Testall( count, reqs, &flag, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Testall returned failure: %s\n", errmsg );
	errs ++;
    }
    else if (!flag) {
	printf( "MPI_Testall( 0, ... ) did not return a true flag\n") ;
	errs++;
    }

    rc = MPI_Waitall( count, reqs, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Waitall returned failure: %s\n", errmsg );
	errs ++;
    }

    rc = MPI_Testany( count, reqs, &idx, &flag, &status );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Testany returned failure: %s\n", errmsg );
	errs ++;
    }
    else if (!flag) {
	printf( "MPI_Testany( 0, ... ) did not return a true flag\n") ;
	errs++;
    }

    rc = MPI_Waitany( count, reqs, &idx, &status );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Waitany returned failure: %s\n", errmsg );
	errs ++;
    }

    rc = MPI_Testsome( count, reqs, &outcnt, indices, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Testsome returned failure: %s\n", errmsg );
	errs ++;
    }

    rc = MPI_Waitsome( count, reqs, &outcnt, indices, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Waitsome returned failure: %s\n", errmsg );
	errs ++;
    }
    
    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
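The zero-length behavior this test exercises can be stated in one assertion: with count == 0 (a NULL request array is fine, as the test itself shows), MPI_Testany must complete immediately with flag set and index == MPI_UNDEFINED. A sketch:

#include <assert.h>
#include <mpi.h>

static void check_zero_length(void)   /* call between MPI_Init and MPI_Finalize */
{
    int idx = -2, flag = 0;
    MPI_Status st;
    MPI_Testany(0, NULL, &idx, &flag, &st);
    assert(flag && idx == MPI_UNDEFINED);
}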
Code Example #12
File: dynamic.cpp Project: JordanBlocher/openmpi
void master(int numprocs, Matrix<T>& img)
{

    // Buffers
    Matrix<T> buf(numprocs+1, cols+1);
    //MPI_Request request;
    std::stringstream ss;
    MPI_Status status;
    MPI_Request request[numprocs+1];
  
    int i, j, k, err=0, complete, index;
    
    // Initialize processes
    for(i=0; i<numprocs; ++i)
    {
        buf[i][0] = i;
        std::cout<<"giving row "<<buf[i][0] << " to rank "<<i<<std::endl;
        err = MPI_Isend(buf[i], 1, MPI_Vector, i, WORKTAG, MPI_COMM_WORLD, &request[i]);
        CHECKMPI(err); 
    }
    
    k = 0;
    // Assign processes more rows
    int stop = std::ceil((rows+1)/2);
    while(k + numprocs < stop)
    {
            err = MPI_Testany(numprocs, request, &index, &complete, &status);
            CHECKMPI(err); 
            if(complete && index != MPI_UNDEFINED)
            {
                err = MPI_Recv(buf[index], 1, MPI_Vector, index, WORKTAG, MPI_COMM_WORLD, &status);
                std::cout<<"got row " << buf[index][0]<<" from " <<index + 1<< " \n";
                CHECKMPI(err); 
                
                    // Add data
                for(j=0; j<cols; j++)
                {
                    img[buf[index][0]][j] = (buf[index][j+1] < 0.001) ? 0 : buf[index][j+1];
             //       std::cout<<img[k][j]<<" "<< img[i][j] <<" " << img[i][j] << " ";
                }
                buf[index][0] = k + numprocs - 1;
                std::cout<<"giving row "<<buf[index][0] << " to index "<<index<<std::endl;
                err = MPI_Isend(buf[index], 1 ,MPI_Vector, index, WORKTAG, MPI_COMM_WORLD, &request[index]);
                CHECKMPI(err); 
                k++;
            }
    }

    //std::cout<<"No more rows\n";
    // No more rows, wait for processes to finish.
    for(i=0; i<numprocs; ++i)
    {
        err = MPI_Recv(buf[i], 1, MPI_Vector, MPI_ANY_SOURCE, WORKTAG, MPI_COMM_WORLD, &status);
            std::cout<<"got row " << buf[i][0]<<" from " <<status.MPI_SOURCE<< " \n";
        CHECKMPI(err); 
        for(j=0; j<cols; j++)
           img[buf[i][0]][j] = (buf[i][j+1] < 0.001) ? 0 : buf[i][j+1];
    }

    // Exit all slaves.
    for(i=0; i<numprocs; ++i)
    {
        err = MPI_Send(0, 0, MPI_INT, i, DIETAG, MPI_COMM_WORLD);
        CHECKMPI(err); 
    }

    return;
}
Code Example #13
File: boggle.c Project: dsj36/FB-Engineering-Puzzles
int doBoards(const node* dictionary, const boggleBoard* boards,
             int boardCount, int rank, int size, int* pStart, int timing)
{
    assert(size > 1);

    /* Use tag 0 for completion messages, 1 for steal requests, and 2 for work given */
    int i, j, total, boardSize, target, completed, someoneWants, who, remaining, work;
    int gotWork, donorFinished;
    point pt;
    queue* queue = qinit();
    int done[size-1];
    int want[size-1];
    int notDone[size-1];
    MPI_Request doneRequests[size-1];
    MPI_Request wantRequests[size-1];
    MPI_Request workRequest;

    /* We don't want everyone choosing the same increments randomly, so add the rank to the seed */
    struct mt19937p state;
    sgenrand(10302011UL + rank, &state);

    /* Index the requests such that current thread is -1 (i.e. not present), and all */
    /* the other threads follow round robin, wrapping around at 'size'. We're not    */
    /* explicitly interested in the contents of 'done' or 'want', only the signals.  */
    for (i = 1; i < size; ++i) {
        want[i-1] = done[i-1] = 0;
        target = (rank + i) % size;

        MPI_Irecv(done + i - 1, 1, MPI_INT, target, 0, MPI_COMM_WORLD, doneRequests + i - 1);
        MPI_Irecv(want + i - 1, 1, MPI_INT, target, 1, MPI_COMM_WORLD, wantRequests + i - 1);
    }

    completed = 0;
    total = 0;
    boardSize = boards[0].n;

    /* Maintain a local work queue with assigned board indices */
    for (i = pStart[rank]; i < pStart[rank+1]; ++i)
        qpush(queue, i);

    /* Do assigned work and listen for requests for extra work */
    while (!qempty(queue)) {
        i = qpop(queue);

        /* Try to find someone who's asking for work */
        MPI_Testany(size-1, wantRequests, &who, &someoneWants, MPI_STATUS_IGNORE);

        if (someoneWants) {
            /* Map the completed request slot back to a rank; 'who' is only
             * meaningful when someoneWants is set (otherwise MPI_UNDEFINED). */
            who = (rank + 1 + who) % size;
            /* printf("Who let thread %d steal my (thread %d) work (#%d)?!\n", who, rank, i); */
            MPI_Send(&i, 1, MPI_INT, who, 2, MPI_COMM_WORLD);

            /* Reopen the asynchronous receive for that thread; the buffer must
             * refer to the same round-robin slot as the request it pairs with */
            MPI_Irecv(want - 1 + (size + who - rank) % size, 1, MPI_INT, who, 1,
                      MPI_COMM_WORLD, wantRequests - 1 + (size + who - rank) % size);
        }
        else {
            for (j = 0; j < boardSize * boardSize; ++j) {
                pt.x = j / boardSize;
                pt.y = j % boardSize;
                total += exploreOne(dictionary, boards + i, pt, timing);
            }
        }
    }

    /* Broadcast that we are done (MPI_Scatter is a little annoying) */
    completed = 1;
    for (i = 1; i < size; ++i)
        MPI_Send(&completed, 1, MPI_INT, (rank + i) % size, 0, MPI_COMM_WORLD);

    /* Loop while everyone isn't done */
    while(!allDone(doneRequests, size)) {
        remaining = 0;

        /* Find all of the workers that have not completed */
        for (i = 0; i < size - 1; ++i) {
            MPI_Test(doneRequests + i, &donorFinished, MPI_STATUS_IGNORE);

            if (!donorFinished)
                notDone[remaining++] = i;
        }

        if (remaining > 0) {
            i = notDone[genrand(&state) % remaining];
            target = (rank + i + 1) % size;

            /* Pick one of the guys at random and send him a request */
            MPI_Send(&target, 1, MPI_INT, target, 1, MPI_COMM_WORLD);
            MPI_Irecv(&work, 1, MPI_INT, target, 2, MPI_COMM_WORLD, &workRequest);

            gotWork = donorFinished = 0;

            /* Check if he wrote back or if he finished in the meantime */
            while (!gotWork && !donorFinished) {
                MPI_Test(&workRequest, &gotWork, MPI_STATUS_IGNORE);
                MPI_Test(doneRequests + i, &donorFinished, MPI_STATUS_IGNORE);

                if (gotWork) {
                    for (j = 0; j < boardSize * boardSize; ++j) {
                        pt.x = j / boardSize;
                        pt.y = j % boardSize;
                        total += exploreOne(dictionary, boards + work, pt, timing);
                    }
                }
            }
        }
    }

    qdest(queue);

    return total;
}
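The termination loop above calls an allDone() helper that the excerpt does not include. Presumably it tests the completion receives as a group; a plausible reconstruction (an assumption about the project, not its actual code):

#include <mpi.h>

/* Hypothetical: true once every "done" message (one per peer, i.e.
 * size-1 requests) has arrived; already-completed requests test as done
 * again on later calls because they have become inactive. */
static int allDone(MPI_Request *doneRequests, int size)
{
    int flag;
    MPI_Testall(size - 1, doneRequests, &flag, MPI_STATUSES_IGNORE);
    return flag;
}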
Code Example #14
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  MPI_Comm comm = MPI_COMM_WORLD;
  char processor_name[128];
  int namelen = 128;
  int buf[BUF_SIZE * 2];
  int i, j, k, index, outcount, flag;
  int indices[2];
  MPI_Request aReq[2];
  MPI_Status aStatus[2];

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  if (rank == 0) {
    /* set up persistent sends... */
    MPI_Send_init (&buf[0], BUF_SIZE, MPI_INT, 1, 0, comm, &aReq[0]);
    MPI_Send_init (&buf[BUF_SIZE], BUF_SIZE, MPI_INT, 1, 1, comm, &aReq[1]);

    /* initialize the send buffers */
    for (i = 0; i < BUF_SIZE; i++) {
      buf[i] = i;
      buf[BUF_SIZE + i] = BUF_SIZE - 1 - i;
    }
  }

  for (k = 0; k < 4; k++) {
    if (rank == 1) {
      /* zero out the receive buffers */
      bzero (buf, sizeof(int) * BUF_SIZE * 2);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) {
      /* start the persistent sends... */
      if (k % 2) {
	MPI_Startall (2, &aReq[0]);
      }
      else {
	for (j = 0; j < 2; j++) {
	  MPI_Start (&aReq[j]);
	}
      }

      /* complete the sends */
      if (k < 2) {
	/* use MPI_Testany */
	for (j = 0; j < 2; j++) {
	  flag = 0;
	  while (!flag) {
	    MPI_Testany (2, aReq, &index, &flag, aStatus);
	  }
	}
      }
      else {
	/* use MPI_Testsome */
	j = 0;
	while (j < 2) {
	  outcount = 0;
	  while (!outcount) {
	    MPI_Testsome (2, aReq, &outcount, indices, aStatus);
	  }
	  j += outcount;
	}
      }
    }
    else if (rank == 1) {
      /* set up receives for all of the sends */
      for (j = 0; j < 2; j++) {
	MPI_Irecv (&buf[j * BUF_SIZE], BUF_SIZE,
		   MPI_INT, 0, j, comm, &aReq[j]);
      }
      /* complete all of the receives... */
      MPI_Waitall (2, aReq, aStatus);
    }
  }

  MPI_Barrier(MPI_COMM_WORLD);

  if (rank == 0) {
    /* free the persistent requests */
    for (i = 0 ; i < 2; i++) {
      MPI_Request_free (&aReq[i]);
    }
  }

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);
  return 0;
}
Code Example #15
File: cancel3.c Project: MartinLidh/tddc78
int main( int argc, char **argv )
{
    MPI_Request r1;
    int         size, rank;
    int         err = 0;
    int         partner, buf[10], flag, idx, index;
    MPI_Status  status;

    MPI_Init( &argc, &argv );

    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    
    if (size < 2) {
	printf( "Cancel test requires at least 2 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* 
     * Here is the test.  First, we ensure an unsatisfied Irecv:
     *       process 0             process size-1
     *       Sendrecv              Sendrecv
     *       Irecv                    ----
     *       Cancel                   ----
     *       Sendrecv              Sendrecv
     * Next, we confirm receipt before canceling
     *       Irecv                 Send
     *       Sendrecv              Sendrecv
     *       Cancel
     */
    if (rank == 0) {
	partner = size - 1;
	/* Cancel succeeds for wait/waitall */
	MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Wait( &r1, &status );
	MPI_Test_cancelled( &status, &flag ); 
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++; 
	    printf( "Cancel of a send failed where it should succeed (Wait).\n" );
	}
	MPI_Request_free( &r1 ); 

	/* Cancel fails for test/testall */
	buf[0] = 3;
	MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Test( &r1, &flag, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a send succeeded where it shouldn't (Test).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel succeeds for waitany */
	MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Waitany( 1, &r1, &idx, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++;
	    printf( "Cancel of a send failed where it should succeed (Waitany).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel fails for testany */
        buf[0] = 3;
	MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Testany( 1, &r1, &idx, &flag, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a send succeeded where it shouldn't (Testany).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel succeeds for waitsome */
	MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Waitsome( 1, &r1, &idx, &index, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++;
	    printf( "Cancel of a send failed where it should succeed (Waitsome).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel fails for testsome*/
        buf[0] = 3;
	MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Testsome( 1, &r1, &idx, &index, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a send succeeded where it shouldn't (Testsome).\n" );
	}
	MPI_Request_free( &r1 );

	if (err) {
	    printf( "Test failed with %d errors.\n", err );
	}
	else {
	    printf( "Test passed\n" );
	}
    }
    else if (rank == size - 1) {
	partner = 0;
	/* Cancel succeeds for wait/waitall */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	/* Cancel fails for test/testall */
	buf[0] = -1;
	MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	if (buf[0] == -1) {
	    printf( "Receive buffer did not change even though cancel should not have succeeded! (Test).\n" );
	}

	/* Cancel succeeds for waitany */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for testany */
	buf[0] = -1;
	MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (buf[0] == -1) {
	    printf( "Receive buffer did not change even though cancel should not have succeeded! (Testany).\n" );
	}

	/* Cancel succeeds for waitsome */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for testsome */
	buf[0] = -1;
	MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	if (buf[0] == -1) {
	    printf( "Receive buffer did not change even though cancel should not have succeeded! (Testsome).\n" );
	}

    }

    MPI_Finalize();
    return 0;
}
Code Example #16
File: nonblocking3.c Project: R7R8/simgrid
static void complete_something_somehow(unsigned int rndnum, int numreqs, MPI_Request reqs[], int *outcount, int indices[])
{
    int i, idx, flag;

#define COMPLETION_CASES (8)
    switch (rand_range(rndnum, 0, COMPLETION_CASES)) {
        case 0:
            MPI_Waitall(numreqs, reqs, MPI_STATUSES_IGNORE);
            *outcount = numreqs;
            for (i = 0; i < numreqs; ++i) {
                indices[i] = i;
            }
            break;

        case 1:
            MPI_Testsome(numreqs, reqs, outcount, indices, MPI_STATUSES_IGNORE);
            if (*outcount == MPI_UNDEFINED) {
                *outcount = 0;
            }
            break;

        case 2:
            MPI_Waitsome(numreqs, reqs, outcount, indices, MPI_STATUSES_IGNORE);
            if (*outcount == MPI_UNDEFINED) {
                *outcount = 0;
            }
            break;

        case 3:
            MPI_Waitany(numreqs, reqs, &idx, MPI_STATUS_IGNORE);
            if (idx == MPI_UNDEFINED) {
                *outcount = 0;
            }
            else {
                *outcount = 1;
                indices[0] = idx;
            }
            break;

        case 4:
            MPI_Testany(numreqs, reqs, &idx, &flag, MPI_STATUS_IGNORE);
            if (idx == MPI_UNDEFINED) {
                *outcount = 0;
            }
            else {
                *outcount = 1;
                indices[0] = idx;
            }
            break;

        case 5:
            MPI_Testall(numreqs, reqs, &flag, MPI_STATUSES_IGNORE);
            if (flag) {
                *outcount = numreqs;
                for (i = 0; i < numreqs; ++i) {
                    indices[i] = i;
                }
            }
            else {
                *outcount = 0;
            }
            break;

        case 6:
            /* select a new random index and wait on it */
            rndnum = gen_prn(rndnum);
            idx = rand_range(rndnum, 0, numreqs);
            MPI_Wait(&reqs[idx], MPI_STATUS_IGNORE);
            *outcount = 1;
            indices[0] = idx;
            break;

        case 7:
            /* select a new random index and wait on it */
            rndnum = gen_prn(rndnum);
            idx = rand_range(rndnum, 0, numreqs);
            MPI_Test(&reqs[idx], &flag, MPI_STATUS_IGNORE);
            *outcount = (flag ? 1 : 0);
            indices[0] = idx;
            break;

        default:
            assert(0);
            break;
    }
#undef COMPLETION_CASES
}
Code Example #17
File: allpairc.c Project: Jiawen1991/GitHub
static void test_pair (void)
{
  int prev, next, count, tag, index, i, outcount, indices[2];
  int rank, size, flag, ierr, reqcount;
  double send_buf[TEST_SIZE], recv_buf[TEST_SIZE];
  double buffered_send_buf[TEST_SIZE * 2 + MPI_BSEND_OVERHEAD]; /* factor of two is based on guessing - only dynamic allocation would be safe */
  void *buffer;
  MPI_Status statuses[2];
  MPI_Status status;
  MPI_Request requests[2];
  MPI_Comm dupcom, intercom;
#ifdef V_T

  struct _VT_FuncFrameHandle {
      char *name;
      int func;
      int frame;
  };
  typedef struct _VT_FuncFrameHandle VT_FuncFrameHandle_t;

  VT_FuncFrameHandle_t normal_sends,
      buffered_sends,
      buffered_persistent_sends,
      ready_sends,
      sync_sends,
      nblock_sends,
      nblock_rsends,
      nblock_ssends,
      pers_sends,
      pers_rsends,
      pers_ssends,
      sendrecv,
      sendrecv_repl,
      intercomm;

  int classid;
  VT_classdef( "Application:test_pair", &classid );


#define VT_REGION_DEF( _name, _nameframe, _class ) \
        (_nameframe).name=_name; \
        VT_funcdef( (_nameframe).name, _class, &((_nameframe).func) );
#define VT_BEGIN_REGION( _nameframe ) \
        LOCDEF(); \
        VT_begin( (_nameframe).func )
#define VT_END_REGION( _nameframe ) \
        LOCDEF(); VT_end( (_nameframe).func )
#else
#define VT_REGION_DEF( _name, _nameframe, _class )
#define VT_BEGIN_REGION( _nameframe )
#define VT_END_REGION( _nameframe )

#endif




  ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  ierr = MPI_Comm_size(MPI_COMM_WORLD, &size);
  if ( size < 2 ) {
      if ( rank == 0 ) {
	  printf("Program needs to be run on at least 2 processes.\n");
      }
      ierr = MPI_Abort( MPI_COMM_WORLD, 66 );
  }
  ierr = MPI_Comm_dup(MPI_COMM_WORLD, &dupcom);

  if ( rank >= 2 ) {
      /*      printf( "%d Calling finalize.\n", rank ); */
      ierr = MPI_Finalize( );
      exit(0);
  }

  next = rank + 1;
  if (next >= 2)
    next = 0;

  prev = rank - 1;
  if (prev < 0)
    prev = 1;

  VT_REGION_DEF( "Normal_Sends", normal_sends, classid );
  VT_REGION_DEF( "Buffered_Sends", buffered_sends, classid );
  VT_REGION_DEF( "Buffered_Persistent_Sends", buffered_persistent_sends, classid );
  VT_REGION_DEF( "Ready_Sends", ready_sends, classid );
  VT_REGION_DEF( "Sync_Sends", sync_sends, classid );
  VT_REGION_DEF( "nblock_Sends", nblock_sends, classid );
  VT_REGION_DEF( "nblock_RSends", nblock_rsends, classid );
  VT_REGION_DEF( "nblock_SSends", nblock_ssends, classid );
  VT_REGION_DEF( "Pers_Sends", pers_sends, classid );
  VT_REGION_DEF( "Pers_RSends", pers_rsends, classid );
  VT_REGION_DEF( "Pers_SSends", pers_ssends, classid );
  VT_REGION_DEF( "SendRecv", sendrecv, classid );
  VT_REGION_DEF( "SendRevc_Repl", sendrecv_repl, classid );
  VT_REGION_DEF( "InterComm", intercomm, classid );



/*
 * Normal sends
 */

  VT_BEGIN_REGION( normal_sends );

  if (rank == 0)
    printf ("Send\n");

  tag = 0x100;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Send(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( normal_sends );


/*
 * Buffered sends
 */

  VT_BEGIN_REGION( buffered_sends );

  if (rank == 0)
    printf ("Buffered Send\n");

  tag = 138;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf));
    MPI_Bsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Buffer_detach(&buffer, &size);
    if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) {
        printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf));
        MPI_Abort(MPI_COMM_WORLD, 201);
    }
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( buffered_sends );


/*
 * Buffered sends
 */

  VT_BEGIN_REGION( buffered_persistent_sends );

  if (rank == 0)
    printf ("Buffered Persistent Send\n");

  tag = 238;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf));
    MPI_Bsend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests);
    MPI_Start(requests);
    MPI_Wait(requests, statuses);
    MPI_Request_free(requests);
    MPI_Buffer_detach(&buffer, &size);
    if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) {
        printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf));
        MPI_Abort(MPI_COMM_WORLD, 201);
    }
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( buffered_persistent_sends );


/*
 * Ready sends.  Note that we must insure that the receive is posted
 * before the rsend; this requires using Irecv.
 */


  VT_BEGIN_REGION( ready_sends );

  if (rank == 0)
    printf ("Rsend\n");

  tag = 1456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Recv(MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD, &status);
    MPI_Rsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Probe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status);
    if (status.MPI_SOURCE != prev)
      printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE);

    if (status.MPI_TAG != tag)
      printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG);

    MPI_Get_count(&status, MPI_DOUBLE, &i);
    if (i != count)
      printf ("Incorrect count, expected %d, got %d\n",count,i);

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "rsend and recv");
  }
  else {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    MPI_Send( MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD);
    MPI_Wait(requests, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "rsend and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( ready_sends );

/*
 * Synchronous sends
 */

  VT_BEGIN_REGION( sync_sends );

  if (rank == 0)
    printf ("Ssend\n");

  tag = 1789;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status);
    if (flag)
      printf ("Iprobe succeeded! source %d, tag %d\n",status.MPI_SOURCE,
                                                      status.MPI_TAG);

    MPI_Ssend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

    while (!flag)
      MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status);

    if (status.MPI_SOURCE != prev)
      printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE);

    if (status.MPI_TAG != tag)
      printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG);

    MPI_Get_count(&status, MPI_DOUBLE, &i);

    if (i != count)
      printf ("Incorrect count, expected %d, got %d\n",count,i);

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Ssend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( sync_sends );

/*
 * Nonblocking normal sends
 */

  VT_BEGIN_REGION( nblock_sends );

  if (rank == 0)
    printf ("Isend\n");

  tag = 2123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Isend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
              (requests+1));
    MPI_Waitall(2, requests, statuses);
    rq_check( requests, 2, "isend and irecv" );

    msg_check(recv_buf,prev,tag,count,statuses, TEST_SIZE,"isend and irecv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"isend and irecv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Isend(recv_buf, count, MPI_DOUBLE, next, tag,MPI_COMM_WORLD,
              (requests));
    MPI_Wait((requests), &status);
    rq_check(requests, 1, "isend (and recv)");
  }



  VT_END_REGION( nblock_sends );

/*
 * Nonblocking ready sends
 */


  VT_BEGIN_REGION( nblock_rsends );

  if (rank == 0)
    printf ("Irsend\n");

  tag = 2456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0,
                  MPI_BOTTOM, 0, MPI_INT, next, 0,
                  dupcom, &status);
    MPI_Irsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
               (requests+1));
    reqcount = 0;
    while (reqcount != 2) {
      MPI_Waitany( 2, requests, &index, statuses);
      if( index == 0 ) {
	  memcpy( &status, statuses, sizeof(status) );
      }
      reqcount++;
    }

    rq_check( requests, 1, "irsend and irecv");
    msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"irsend and irecv");
  }
  else {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0,
                  MPI_BOTTOM, 0, MPI_INT, next, 0,
                  dupcom, &status);
    flag = 0;
    while (!flag)
      MPI_Test(requests, &flag, &status);

    rq_check( requests, 1, "irsend and irecv (test)");
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "irsend and irecv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Irsend(recv_buf, count, MPI_DOUBLE, next, tag,
               MPI_COMM_WORLD, requests);
    MPI_Waitall(1, requests, statuses);
    rq_check( requests, 1, "irsend and irecv");
  }

  VT_END_REGION( nblock_rsends );

/*
 * Nonblocking synchronous sends
 */

  VT_BEGIN_REGION( nblock_ssends );

  if (rank == 0)
    printf ("Issend\n");

  tag = 2789;
  count = TEST_SIZE / 3;
  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests );
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Issend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
               (requests+1));
    flag = 0;
    while (!flag)
      MPI_Testall(2, requests, &flag, statuses);

    rq_check( requests, 2, "issend and irecv (testall)");
    msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, 
               "issend and recv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "issend and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Issend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,requests);

    flag = 0;
    while (!flag)
      MPI_Testany(1, requests, &index, &flag, statuses);

    rq_check( requests, 1, "issend and recv (testany)");
  }


  VT_END_REGION( nblock_ssends );


/*
 * Persistent normal sends
 */

  VT_BEGIN_REGION( pers_sends );

  if (rank == 0)
    printf ("Send_init\n");

  tag = 3123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Send_init(send_buf, count, MPI_DOUBLE, next, tag,
                MPI_COMM_WORLD, requests);
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
                MPI_COMM_WORLD, (requests+1));

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Startall(2, requests);
    MPI_Waitall(2, requests, statuses);
    msg_check( recv_buf, prev, tag, count, (statuses+1),
               TEST_SIZE, "persistent send/recv");
  }
  else {
    MPI_Start((requests+1));
    MPI_Wait((requests+1), &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "persistent send/recv");
    init_test_data(send_buf,TEST_SIZE,1);


    MPI_Start(requests);
    MPI_Wait(requests, &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_sends );

/*
 * Persistent ready sends
 */

  VT_BEGIN_REGION( pers_rsends );

  if (rank == 0)
    printf ("Rsend_init\n");

  tag = 3456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Rsend_init(send_buf, count, MPI_DOUBLE, next, tag,
                  MPI_COMM_WORLD, requests);
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, (requests+1));

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
      MPI_Barrier( MPI_COMM_WORLD );
    MPI_Startall(2, requests);
    reqcount = 0;
    while (reqcount != 2) {
      MPI_Waitsome(2, requests, &outcount, indices, statuses);
      for (i=0; i<outcount; i++) {
        if (indices[i] == 1) {
          msg_check( recv_buf, prev, tag, count, (statuses+i),
                     TEST_SIZE, "waitsome");
        }
	reqcount++;
      }
    }
  }
  else {
    MPI_Start((requests+1));
    MPI_Barrier( MPI_COMM_WORLD );
    flag = 0;
    while (!flag)
      MPI_Test((requests+1), &flag, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "test");

    init_test_data(send_buf,TEST_SIZE,1);

 
    MPI_Start(requests);
    MPI_Wait(requests, &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_rsends );


/*
 * Persistent synchronous sends
 */


  VT_BEGIN_REGION( pers_ssends );

  if (rank == 0)
    printf ("Ssend_init\n");

  tag = 3789;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Ssend_init(send_buf, count, MPI_DOUBLE, next, tag,
                 MPI_COMM_WORLD, (requests+1));
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, requests);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Startall(2, requests);

    reqcount = 0;
    while (reqcount != 2) {
      MPI_Testsome(2, requests, &outcount, indices, statuses);
      for (i=0; i<outcount; i++) {
        if (indices[i] == 0) {
          msg_check( recv_buf, prev, tag, count, (statuses+i),
                     TEST_SIZE, "testsome");
        }
	reqcount++;
      }
    }
  }
  else {
    MPI_Start(requests);
    flag = 0;
    while (!flag)
      MPI_Testany(1, requests, &index, &flag, statuses);

    msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, "testany" );

    init_test_data(send_buf,TEST_SIZE,1);


     MPI_Start((requests+1));
     MPI_Wait((requests+1), &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_ssends );


/*
 * Send/receive.
 */


  VT_BEGIN_REGION( sendrecv );

  if (rank == 0)
    printf ("Sendrecv\n");

  tag = 4123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Sendrecv(send_buf, count, MPI_DOUBLE, next, tag,
                 recv_buf, count, MPI_DOUBLE, prev, tag,
                 MPI_COMM_WORLD, &status );

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "sendrecv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
             MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "recv/send");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }


  VT_END_REGION( sendrecv );

#ifdef V_T
  VT_flush();
#endif


/*
 * Send/receive replace.
 */

  VT_BEGIN_REGION( sendrecv_repl );

  if (rank == 0)
    printf ("Sendrecv_replace\n");

  tag = 4456;
  count = TEST_SIZE / 3;

  if (rank == 0) {
      init_test_data(recv_buf, TEST_SIZE,0);
    for (i=count; i< TEST_SIZE; i++)
      recv_buf[i] = 0.0;

    MPI_Sendrecv_replace(recv_buf, count, MPI_DOUBLE,
                         next, tag, prev, tag, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "sendrecvreplace");
  }
  else {
    clear_test_data(recv_buf,TEST_SIZE);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
             MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "recv/send for replace");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( sendrecv_repl );


/*
 * Send/Receive via inter-communicator
 */

  VT_BEGIN_REGION( intercomm );

  MPI_Intercomm_create(MPI_COMM_SELF, 0, MPI_COMM_WORLD, next, 1, &intercom);

  if (rank == 0)
    printf ("Send via inter-communicator\n");

  tag = 4018;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Send(send_buf, count, MPI_DOUBLE, 0, tag, intercom);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, intercom, &status);
    msg_check(recv_buf, 0, tag, count, &status, TEST_SIZE, "send and recv via inter-communicator");
  }
  else if (rank == 1) {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             intercom, &status);
    msg_check( recv_buf, 0, tag, count, &status, TEST_SIZE,"send and recv via inter-communicator");
    init_test_data(recv_buf,TEST_SIZE,0);
    MPI_Send(recv_buf, count, MPI_DOUBLE, 0, tag, intercom);

  }

  VT_END_REGION( intercomm );



  MPI_Comm_free(&intercom);
  MPI_Comm_free(&dupcom);
} 
Code Example #18
File: MPI-api.c Project: 8l/rose
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag;
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, &attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
Code Example #19
void doWorkQueueManager(int size, StealStack *s)
{
	MPI_Request request[size*3]; //make one array so we can do a Waitall on all comm
	MPI_Request *req_make_global = &request[0];
	MPI_Request *req_work_request = &request[size];
	MPI_Request *req_response = &request[2*size];
	MPI_Status request_status; //, send_status;
	MPI_Status wait_all_status[3*size];
	void *shared_work_buf[size];
	unsigned long work_request_buf[size];
	int flag, who, i;
	struct waiting_entry waiting[size];
	unsigned long timestamps[size];
	unsigned long msg_counts[size];
	int work_response_send_count=0;
	int done=0;

#ifdef TRACE_RELEASES
	/* Track releases */
	ss_setState(s, SS_WORK);
#else
	/* Attribute the WQM's time to overhead */
	ss_setState(s, SS_WORK);
	ss_setState(s, SS_IDLE);
#endif 

	/* Init the receive buffers */
	for(i = 0; i < size; i++) {
		waiting[i].flag = 0;     /* init: not waiting */
		waiting[i].buf  = NULL;  /* no response buffer outstanding */
		timestamps[i] = 0;
		msg_counts[i] = 0;
		shared_work_buf[i] = malloc(s->work_size*s->chunk_size);
	}

	/* Set up non-blocking receives for communicating with workers */
	for(i=0; i < size; i++) {
		/* Listen for work releases */
		MPI_Irecv(shared_work_buf[i], s->work_size*s->chunk_size, MPI_BYTE, i,
				MPI_MAKEWORKGLOBAL_TAG, MPI_COMM_WORLD, &req_make_global[i]);

		/* Listen for work requests (A WORKREQUEST should be the chunksize requested) */
		MPI_Irecv(&work_request_buf[i], 1, MPI_LONG, i, MPI_WORKREQUEST_TAG,
				MPI_COMM_WORLD, &req_work_request[i]);
	}

	/** BEGIN WORK MANAGEMENT LOOP */
	while(!done) {
		/* Wait for someone to send work or to request work */
		MPI_Waitany(2*size, request, &who, &request_status);

		/* Sending shared work to the queue */
		if(who < size) {
			void *w = malloc(s->work_size*s->chunk_size);

#ifdef TRACE_RELEASES
			/* Mark this release as a "steal" event */
			ss_markSteal(s, who);
			ss_setState(s, SS_SEARCH);
			ss_setState(s, SS_WORK);
#endif 
			/* Update timestamp */
			msg_counts[who]++;

			memcpy(w, shared_work_buf[who], s->work_size*s->chunk_size);
			deq_pushFront(globalQueue, w);
			s->globalWork += s->chunk_size;

			MPI_Irecv(shared_work_buf[who], s->work_size*s->chunk_size, MPI_BYTE, who,
					MPI_MAKEWORKGLOBAL_TAG, MPI_COMM_WORLD, &req_make_global[who]);
		}

		/* Requesting shared work from the queue */
		else { // (who >= size)
			who -= size;
			/* mark this id is waiting for work */
			waiting[who].flag = 1;

			/* Update timestamp */
			msg_counts[who]++;
			timestamps[who] = work_request_buf[who];
			/* This should be an invariant.. */
			if (timestamps[who] < msg_counts[who]) {
				ss_error("WQM: message delivery failure!\n", 10);
			}


			MPI_Irecv(&work_request_buf[who], 1, MPI_LONG, who, MPI_WORKREQUEST_TAG, MPI_COMM_WORLD, &req_work_request[who]);
		}

		/* finish last round of sends before start to send more data */
		if (work_response_send_count) {
			MPI_Waitall(work_response_send_count, req_response, wait_all_status);

			// Free all the buffers used in the last round
			for (i = 0; i < size; i++) {
				if (waiting[i].buf != NULL) {
					free(waiting[i].buf);
					waiting[i].buf = NULL;
				}
			}
		}

		/* Attempt to send work to everyone who is waiting. */
		work_response_send_count = 0;
		for (i = 0; i < size; i++) {
			if (waiting[i].flag && !deq_isEmpty(globalQueue)) {
				void* work_ptr = deq_popFront(globalQueue);

				MPI_Isend(work_ptr, s->work_size*s->chunk_size, MPI_BYTE, i,
						MPI_RESPONDWORK_TAG, MPI_COMM_WORLD, &req_response[work_response_send_count]);

				work_response_send_count++;
				s->globalWork -= s->chunk_size;
				waiting[i].flag = 0;
				waiting[i].buf  = work_ptr;
			}
		}

		/** Check for termination **/
		/* If everyone is still waiting and there are no outstanding messages
		   then we are done.  */
		done = 1;
		for(i=0; i < size; i++) {
			if(!waiting[i].flag || (msg_counts[i] != timestamps[i])) {
				done=0;
				break; //no need to check everyone else
			}
		}

		/* Sanity check */
		if(done && !deq_isEmpty(globalQueue)) {
			ss_error("WQM: Something evil happened.  We are terminating but I still have work!", 13);
		}
	} /* END: while (!done) */

	if (DEBUG & 2) printf("Queue Manager: We are done.  Letting everyone know.\n");

	/* This is a sanity test to make sure our prioritization above works.  If this testany were to
	   return true, the cancels below would error out. */
	MPI_Testany(2*size, request, &who, &flag, &request_status);
	if (flag) {
		ss_error("WQM: Attempted to terminate with inbound work!", 13);
	}

	/* Cancel the outstanding MPI_Irecvs */
	for (i = 0; i < size; i++) {
		MPI_Cancel(&req_make_global[i]);
		MPI_Cancel(&req_work_request[i]);
	}

	/* Send a message to everyone that no work exists; everyone should be waiting on an MPI_Recv here */
	work_response_send_count = 0;
	for(i=0; i < size; i++) {
		MPI_Isend(NULL, 0, MPI_BYTE, i, MPI_RESPONDWORK_TAG, MPI_COMM_WORLD, &req_response[i]);
		work_response_send_count++;
	}

	MPI_Waitall(work_response_send_count, req_response, wait_all_status);

	ss_setState(s, SS_IDLE);
}
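
The core shape of the manager loop above is: keep one MPI_Irecv outstanding per peer, block in MPI_Waitany for whichever completes first, service that slot, and immediately re-post the receive. A minimal, self-contained sketch of that pattern follows; the tag value and payloads are placeholders rather than anything from the project above, and it needs at least two ranks:

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define WORK_TAG 42  /* placeholder tag, not taken from the code above */

int main(int argc, char **argv)
{
	int rank, size;
	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	if (rank == 0 && size > 1) {
		/* Manager: one outstanding receive per worker. */
		int nworkers = size - 1;
		MPI_Request *reqs = malloc(nworkers * sizeof(MPI_Request));
		long *bufs = malloc(nworkers * sizeof(long));
		int i, who;
		for (i = 0; i < nworkers; i++)
			MPI_Irecv(&bufs[i], 1, MPI_LONG, i + 1, WORK_TAG,
					MPI_COMM_WORLD, &reqs[i]);
		for (i = 0; i < nworkers; i++) {
			MPI_Waitany(nworkers, reqs, &who, MPI_STATUS_IGNORE);
			printf("manager: worker %d sent %ld\n", who + 1, bufs[who]);
			/* A long-running manager would re-post the MPI_Irecv for
			   slot `who` here, exactly as the code above does. */
		}
		free(reqs);
		free(bufs);
	} else if (rank > 0) {
		long payload = rank * 100;  /* arbitrary demo payload */
		MPI_Send(&payload, 1, MPI_LONG, 0, WORK_TAG, MPI_COMM_WORLD);
	}

	MPI_Finalize();
	return 0;
}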
Code example #20
File: MPIComm.c Project: jberthold/ghc
rtsBool MP_send(PEId node, OpCode tag, StgWord8 *data, uint32_t length) {
    /* MPI normally uses blocking send operations (MPI_*send). When
     * using nonblocking operations (MPI_I*send), dataspace must remain
     * untouched until the message has been delivered (MPI_Wait)!
     *
     * We copy the data to be sent into the mpiMsgBuffer and call MPI_Isend.
     * We can reuse slots in the buffer where messages are already delivered.
     * The requests array stores a request for each send operation which
     * can be tested by MPI_Testany for delivered messages.
     * MP_send should return false to indicate a send failure (the
     * message buffer has 0 free slots).
     *
     */
    int sendIndex;
    int hasFreeSpace;
    //MPI_Status* status;

    StgPtr sendPos; // used for pointer arithmetic, based on the assumption
    // that sizeof(void*) == sizeof(StgWord) (see includes/stg/Types.h)

    ASSERT(node > 0 && node <= nPEs);


    IF_PAR_DEBUG(mpcomm,
                 debugBelch("MPI sending message to PE %u "
                            "(tag %d (%s), datasize %u)\n",
                            node, tag, getOpName(tag), length));
    // adjust node no.
    node--;
    // case: every slot in the buffer has been used
    if (msgCount == maxMsgs) {
        // looking for free space in buffer
        IF_PAR_DEBUG(mpcomm,
                     debugBelch("looking for free space in buffer\n"));
        MPI_Testany(msgCount, requests, &sendIndex, &hasFreeSpace,
                    MPI_STATUS_IGNORE);
        // if (status->MPI_ERROR)
        //   barf("a send operation returned an error with code %d"
        //        "and sendIndex is %d and hasFreeSpace %d",
        //        status->MPI_ERROR,  sendIndex,  hasFreeSpace);
    }
    // case: some slots in the buffer are still unused
    else {
        hasFreeSpace = 1;
        sendIndex = msgCount++;
    }
    // send the message
    if (!hasFreeSpace) {
        IF_PAR_DEBUG(mpcomm,
                     debugBelch("MPI CANCELED sending message to PE %u "
                                "(tag %d (%s), datasize %u)\n",
                                node, tag, getOpName(tag), length));
        return rtsFalse;
    }
    // calculate the offset in mpiMsgBuffer
    // using pointer arithmetic and the void* size (see includes/stg/Types.h)
    sendPos = ((StgPtr)mpiMsgBuffer) + sendIndex * DATASPACEWORDS;
    memcpy((void*)sendPos, data, length);

    if (ISSYSCODE(tag)) {
        // case: system message (workaround: send it on both communicators,
        // because there is no receive on two communicators.)
        MPI_Isend(&pingMessage, 1, MPI_INT, node, tag,
                  sysComm, &sysRequest);
    }
    MPI_Isend(sendPos, length, MPI_BYTE, node, tag,
              MPI_COMM_WORLD, &(requests[sendIndex]));
    IF_PAR_DEBUG(mpcomm,
                 debugBelch("Done sending message to PE %u\n", node+1));
    return rtsTrue;
}
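
The comment block at the top of MP_send describes the pool discipline that the MPI_Testany call enforces: a slot of mpiMsgBuffer may only be reused once its send request has completed. Below is a stripped-down helper sketch of that discipline, with an arbitrary pool size and raw byte payloads instead of the GHC runtime's types; it illustrates the pattern and is not the runtime's actual code:

#include <mpi.h>
#include <string.h>

#define SLOTS    8    /* arbitrary pool size, not the runtime's value */
#define SLOT_LEN 256  /* arbitrary slot size */

static char        pool[SLOTS][SLOT_LEN];
static MPI_Request reqs[SLOTS];
static int         used = 0;  /* slots handed out so far */

/* Copy `data` into a free slot and start a nonblocking send.
   Returns 0 when every slot is still in flight (caller retries later). */
static int pool_send(const void *data, int len, int dest, int tag)
{
    int slot, flag;
    if (used < SLOTS) {
        slot = used++;              /* a fresh slot is still available */
    } else {
        /* Pool exhausted: ask MPI whether any earlier send finished. */
        MPI_Testany(SLOTS, reqs, &slot, &flag, MPI_STATUS_IGNORE);
        if (!flag || slot == MPI_UNDEFINED)
            return 0;               /* nothing delivered yet */
    }
    memcpy(pool[slot], data, len);  /* data must survive until delivery */
    MPI_Isend(pool[slot], len, MPI_BYTE, dest, tag,
              MPI_COMM_WORLD, &reqs[slot]);
    return 1;
}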
Code example #21
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  MPI_Comm comm = MPI_COMM_WORLD;
  char processor_name[128];
  int namelen = 128;
  int bbuf[(BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES];
  int buf[BUF_SIZE * 2 * NUM_SEND_TYPES];
  int i, j, k, at_size, send_t_number, index, outcount, total, flag;
  int num_errors, error_count, indices[2 * NUM_SEND_TYPES];
  MPI_Request aReq[2 * NUM_SEND_TYPES];
  MPI_Status aStatus[2 * NUM_SEND_TYPES];

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Buffer_attach (bbuf, sizeof(int) * 
		     (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES);

  if (rank == 0) {
    /* set up persistent sends... */
    send_t_number = NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES;

    MPI_Send_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Send_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Bsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Bsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);


    send_t_number++;

    MPI_Rsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Rsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Ssend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Ssend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);
  }

  for (k = 0; k < (NUM_COMPLETION_MECHANISMS * 2); k++) {
    if (rank == 0) {
      /* initialize all of the send buffers */
      for (j = 0; j < NUM_SEND_TYPES; j++) {
	for (i = 0; i < BUF_SIZE; i++) {
	  buf[2 * j * BUF_SIZE + i] = i;
	  buf[((2 * j + 1) * BUF_SIZE) + i] = BUF_SIZE - 1 - i;
	}
      }
    }
    else if (rank == 1) {
      /* zero out all of the receive buffers */
      bzero (buf, sizeof(int) * BUF_SIZE * 2 * NUM_SEND_TYPES);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) {
      /* set up transient sends... */
      send_t_number = 0;
    
      MPI_Isend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Isend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		 BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		 comm, &aReq[send_t_number * 2 + 1]);

      send_t_number++;
      
      MPI_Ibsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		  1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Ibsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		  BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		  comm, &aReq[send_t_number * 2 + 1]);

      send_t_number++;

      /* Barrier to ensure receives are posted for rsends... */
      MPI_Barrier(MPI_COMM_WORLD);

      MPI_Irsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		  1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Irsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		  BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		  comm, &aReq[send_t_number * 2 + 1]);

      send_t_number++;

      MPI_Issend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		  1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Issend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		  BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		  comm, &aReq[send_t_number * 2 + 1]);

      /* just to be paranoid */
      send_t_number++;
      assert (send_t_number == NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES);

      /* start the persistent sends... */
      if (k % 2) {
	MPI_Startall (NUM_PERSISTENT_SEND_TYPES * 2, &aReq[2 * send_t_number]);
      }
      else {
	for (j = 0; j < NUM_PERSISTENT_SEND_TYPES * 2; j++) {
	  MPI_Start (&aReq[2 * send_t_number + j]);
	}
      }
    
      /* NOTE: Changing the send buffer of a Bsend is NOT an error... */
      for (j = 0; j < NUM_SEND_TYPES; j++) {
	/* muck the buffers */
	buf[j * 2 * BUF_SIZE + (BUF_SIZE >> 1)] = BUF_SIZE;
      }

      printf ("USER MSG: 6 change send buffer errors in iteration #%d:\n", k);

      /* complete the sends */
      switch (k/2) {
      case 0:
	/* use MPI_Wait */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  MPI_Wait (&aReq[j], &aStatus[j]);
	}
	break;
	
      case 1:
	/* use MPI_Waitall */
	MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus);
	break;

      case 2:
	/* use MPI_Waitany */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus);
	}

	break;
	
      case 3:
	/* use MPI_Waitsome */
	total = 0;
	while (total < NUM_SEND_TYPES * 2) {
	  MPI_Waitsome (NUM_SEND_TYPES * 2, aReq, &outcount, indices, aStatus);

	  total += outcount;
	}

	break;

      case 4:
	/* use MPI_Test */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  flag = 0;

	  while (!flag) {
	    MPI_Test (&aReq[j], &flag, &aStatus[j]);
	  }
	}

	break;
	
      case 5:
	/* use MPI_Testall */
	flag = 0;
	while (!flag) {
	  MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus);
	}

	break;

      case 6:
	/* use MPI_Testany */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  flag = 0;
	  while (!flag) {
	    MPI_Testany (NUM_SEND_TYPES * 2, aReq, &index, &flag, aStatus);
	  }
	}

	break;
	
      case 7:
	/* use MPI_Testsome */
	total = 0;
	while (total < NUM_SEND_TYPES * 2) {
	  outcount = 0;

	  while (!outcount) {
	    MPI_Testsome (NUM_SEND_TYPES * 2, aReq, 
			  &outcount, indices, aStatus);
	  }

	  total += outcount;
	}

	break;

      default:
	assert (0);
	break;
      }
    }
    else if (rank == 1) {
Code example #22
File: main.c Project: RSE-Cambridge/IPM
int main( int argc, char* argv[] )
{
  int myrank, nprocs;
  int val, val2;
  int idx, idx2[2];
  int flag;


  MPI_Request req;
  MPI_Request req2[2];
  MPI_Status stat;

  MPI_Init( &argc, &argv );

  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
  MPI_Comm_size( MPI_COMM_WORLD, &nprocs );

  if( nprocs<2 ) {
    fprintf(stderr, "Need at least 2 procs to run this program\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
    return 1;
  }

  /* MPI_STATUS_IGNORE in MPI_Recv */
  switch(myrank) {
  case 0:
    MPI_Send( &val, 1, MPI_INTEGER, 1, 33, MPI_COMM_WORLD);
    break;

  case 1:
    MPI_Recv( &val, 1, MPI_INTEGER, 0, 33, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
    break;
  }

  /* MPI_STATUS_IGNORE in MPI_Wait, MPI_Test */
  switch(myrank) {
  case 0:
    MPI_Isend( &val, 1, MPI_INTEGER, 1, 34, MPI_COMM_WORLD, &req);
    MPI_Test( &req, &flag, MPI_STATUS_IGNORE );
    MPI_Wait( &req, MPI_STATUS_IGNORE );

    break;

  case 1:
    MPI_Recv( &val, 1, MPI_INTEGER, 0, 34, MPI_COMM_WORLD, &stat );
    break;
  }

  /* MPI_STATUS_IGNORE in MPI_Waitany, MPI_Testany */
  switch(myrank) {
  case 0:
    MPI_Isend( &val,  1, MPI_INTEGER, 1, 35, MPI_COMM_WORLD, &(req2[0]));
    MPI_Isend( &val2, 1, MPI_INTEGER, 1, 36, MPI_COMM_WORLD, &(req2[1]));
    MPI_Testany( 2, req2, &idx, &flag, MPI_STATUS_IGNORE );
    MPI_Waitany( 2, req2, &idx, MPI_STATUS_IGNORE );
    break;

  case 1:
    MPI_Recv( &val,  1, MPI_INTEGER, 0, 35, MPI_COMM_WORLD, &stat );
    MPI_Recv( &val2, 1, MPI_INTEGER, 0, 36, MPI_COMM_WORLD, &stat );
    break;
  }

  /* MPI_STATUSES_IGNORE in MPI_Waitall, MPI_Testall */
  switch(myrank) {
  case 0:
    MPI_Isend( &val,  1, MPI_INTEGER, 1, 35, MPI_COMM_WORLD, &(req2[0]));
    MPI_Isend( &val2, 1, MPI_INTEGER, 1, 36, MPI_COMM_WORLD, &(req2[1]));
    MPI_Testall( 2, req2, &flag, MPI_STATUSES_IGNORE );
    MPI_Waitall( 2, req2, MPI_STATUSES_IGNORE );
    break;

  case 1:
    MPI_Recv( &val,  1, MPI_INTEGER, 0, 35, MPI_COMM_WORLD, &stat );
    MPI_Recv( &val2, 1, MPI_INTEGER, 0, 36, MPI_COMM_WORLD, &stat );
    break;
  }

  /* MPI_STATUSES_IGNORE in MPI_Waitsome */
  switch(myrank) {
  case 0:
    MPI_Isend( &val,  1, MPI_INTEGER, 1, 35, MPI_COMM_WORLD, &(req2[0]));
    MPI_Isend( &val2, 1, MPI_INTEGER, 1, 36, MPI_COMM_WORLD, &(req2[1]));
    MPI_Testsome( 2, req2, &idx, idx2, MPI_STATUSES_IGNORE );
    MPI_Waitsome( 2, req2, &idx, idx2, MPI_STATUSES_IGNORE );
    break;

  case 1:
    MPI_Recv( &val,  1, MPI_INTEGER, 0, 35, MPI_COMM_WORLD, &stat );
    MPI_Recv( &val2, 1, MPI_INTEGER, 0, 36, MPI_COMM_WORLD, &stat );
    break;
  }

  MPI_Barrier(MPI_COMM_WORLD);
  fprintf(stderr, "%5d: DONE\n", myrank);

  MPI_Finalize();
}
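
MPI_STATUS_IGNORE and MPI_STATUSES_IGNORE, as exercised above, are only safe when nothing in the status will be read afterwards. When the caller needs, say, the element count or the source rank, a real MPI_Status must be supplied. A small sketch of that case, with arbitrary tag and payload, again requiring two ranks:

#include <mpi.h>
#include <stdio.h>

int main( int argc, char* argv[] )
{
  int myrank;
  MPI_Init( &argc, &argv );
  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );

  if( myrank==0 ) {
    int vals[3] = { 1, 2, 3 };  /* arbitrary payload */
    MPI_Send( vals, 3, MPI_INT, 1, 50, MPI_COMM_WORLD );
  } else if( myrank==1 ) {
    int vals[3], idx, flag = 0, count;
    MPI_Request req;
    MPI_Status stat;  /* needed: we read the count and source below */
    MPI_Irecv( vals, 3, MPI_INT, MPI_ANY_SOURCE, 50, MPI_COMM_WORLD, &req );
    while( !flag )
      MPI_Testany( 1, &req, &idx, &flag, &stat );
    MPI_Get_count( &stat, MPI_INT, &count );
    printf( "got %d ints from rank %d\n", count, stat.MPI_SOURCE );
  }

  MPI_Finalize();
  return 0;
}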
Code example #23
File: cancel2.c Project: Shurakai/SimGrid
int main( int argc, char **argv )
{
    MPI_Request r1;
    int         size, rank;
    int         err = 0;
    int         partner, buf[10], flag, idx, index;
    MPI_Status  status;

    MPI_Init( &argc, &argv );

    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    
    if (size < 2) {
	printf( "Cancel test requires at least 2 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* 
     * Here is the test.  First, we ensure an unsatisfied Irecv:
     *       process 0             process size-1
     *       Sendrecv              Sendrecv
     *       Irecv                    ----
     *       Cancel                   ----
     *       Sendrecv              Sendrecv
     * Next, we confirm receipt before canceling
     *       Irecv                 Send
     *       Sendrecv              Sendrecv
     *       Cancel
     */
    if (rank == 0) {
	partner = size - 1;
	/* Cancel succeeds for wait/waitall */
	MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Wait( &r1, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++; 
	    printf( "Cancel of a receive failed where it should succeed (Wait).\n" );
	}

	MPI_Request_free( &r1 );

	/* Cancel fails for test/testall */
	buf[0] = -1;
	MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Test( &r1, &flag, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a receive succeeded where it shouldn't (Test).\n" );
	    if (buf[0] != -1) {
		printf( "Receive buffer changed even though cancel succeeded! (Test).\n" );
	    }
	}
	MPI_Request_free( &r1 );

	/* Cancel succeeds for waitany */
	MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Waitany( 1, &r1, &idx, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++;
	    printf( "Cancel of a receive failed where it should succeed (Waitany).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel fails for testany */
        buf[0] = -1;
	MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Testany( 1, &r1, &idx, &flag, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a receive succeeded where it shouldn't (Testany).\n" );
	    if (buf[0] != -1) {
		printf( "Receive buffer changed even though cancel succeeded! (Testany).\n" );
	    }
	}
	MPI_Request_free( &r1 );

	/* Cancel succeeds for waitsome */
	MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Waitsome( 1, &r1, &idx, &index, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++;
	    printf( "Cancel of a receive failed where it should succeed (Waitsome).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel fails for testsome */
        buf[0] = -1;
	MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Testsome( 1, &r1, &idx, &index, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a receive succeeded where it shouldn't (Testsome).\n" );
	    if (buf[0] != -1) {
		printf( "Receive buffer changed even though cancel succeeded! (Testsome).\n" );
	    }
	}
	MPI_Request_free( &r1 );

	if (err) {
	    printf( "Test failed with %d errors.\n", err );
	}
	else {
	    printf( " No Errors\n" );
	}
    }

    else if (rank == size - 1) {
	partner = 0;
	/* Cancel succeeds for wait/waitall */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for test/testall */
	buf[0] = 3;
	MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	/* Cancel succeeds for waitany */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for testany */
	MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	/* Cancel succeeds for waitsome */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for testsome */
	MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

    /* 
       Next test - check that a cancel of a receive request from
       MPI_PROC_NULL succeeds (there is some suspicion that some
       systems can't handle this; see also the MPI_REQUEST_NULL
       note below).
     */
    /* A null request is an error. (null objects are errors unless otherwise
       allowed)
    r1 = MPI_REQUEST_NULL;
    MPI_Cancel( &r1 );
    */
	MPI_Recv_init( buf, 10, MPI_INT, MPI_PROC_NULL, 0, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Request_free( &r1 );    /* Must complete cancel.  We know that it 
				       won't complete, so we don't need to do
				       anything else */
    }

    MPI_Finalize();
    return 0;
}
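
The sequence this test exercises over and over, namely cancel a request, complete it through some member of the wait/test family, then interrogate the status with MPI_Test_cancelled, can be distilled into a few lines. A minimal sketch that runs on a single rank (the tag is arbitrary; a conforming implementation should report the receive as cancelled, since no matching send can ever arrive):

#include <mpi.h>
#include <stdio.h>

int main( int argc, char **argv )
{
    int         buf, flag;
    MPI_Request r;
    MPI_Status  status;

    MPI_Init( &argc, &argv );

    /* Post a receive that nothing will ever match (tag 99 is arbitrary). */
    MPI_Irecv( &buf, 1, MPI_INT, MPI_ANY_SOURCE, 99, MPI_COMM_WORLD, &r );
    MPI_Cancel( &r );
    MPI_Wait( &r, &status );   /* a cancelled request must still be completed */
    MPI_Test_cancelled( &status, &flag );
    printf( "receive cancelled: %s\n", flag ? "yes" : "no" );

    MPI_Finalize();
    return 0;
}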