Example #1
File: req.c Project: Katetc/cime
FC_FUNC(mpi_testsome, MPI_TESTSOME)
         (int * incount, int * array_of_requests, int * outcount,
          int * array_of_indices, int * array_of_statuses, int * ierr)
{
  *ierr = MPI_Testsome(*incount, array_of_requests, outcount,
		       array_of_indices, mpi_c_statuses(array_of_statuses));
}
Example #2
File: greq_test.c Project: NexMirror/MPICH
int main(int argc, char *argv[])
{
    int provided;
    MPI_Request request;
    int flag;
    int outcount = -1;
    int indices[1] = { -1 };
    MPI_Status status;
    char *env;

    env = getenv("MPITEST_VERBOSE");
    if (env) {
        if (*env != '0')
            verbose = 1;
    }

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided != MPI_THREAD_MULTIPLE) {
        printf("This test requires MPI_THREAD_MULTIPLE\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    IF_VERBOSE(("Post Init ...\n"));

    MPI_Grequest_start(query_fn, free_fn, cancel_fn, NULL, &request);
    grequest = request; /* copy the handle */
    MTest_Start_thread(do_work, &grequest);
    IF_VERBOSE(("Testing ...\n"));
    flag = 0;
    while (!flag) {
        MPI_Test(&request, &flag, &status);
    }
    MTest_Join_threads();

    MPI_Grequest_start(query_fn, free_fn, cancel_fn, NULL, &request);
    grequest = request; /* copy the handle */
    MTest_Start_thread(do_work, &grequest);
    IF_VERBOSE(("Testing ...\n"));
    outcount = 0;
    while (!outcount) {
        MPI_Testsome(1, &request, &outcount, indices, &status);
    }
    MTest_Join_threads();

    MPI_Grequest_start(query_fn, free_fn, cancel_fn, NULL, &request);
    grequest = request; /* copy the handle */
    MTest_Start_thread(do_work, &grequest);
    IF_VERBOSE(("Testing ...\n"));
    flag = 0;
    while (!flag) {
        MPI_Testall(1, &request, &flag, &status);
    }
    MTest_Join_threads();

    IF_VERBOSE(("Goodbye !!!\n"));
    MTest_Finalize(0);
    MPI_Finalize();
    return 0;
}
Example #3
File: req.c Project: Katetc/cime
int MPI_Waitsome(int incount, MPI_Request *array_of_requests, int *outcount,
                 int *array_of_indices, MPI_Status *array_of_statuses)
{
  MPI_Testsome(incount, array_of_requests, outcount,
               array_of_indices, array_of_statuses);

  if (*outcount == 0)   /* check the value, not the pointer: no completed requests means deadlock here */
  {
    fprintf(stderr,"Waitsome: No requests complete, deadlock\n");
    abort();
  }

  return MPI_SUCCESS;
}
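Example #3 is a serial stub that polls once and aborts if nothing has completed; real codes usually keep calling MPI_Testsome until every request has finished. A minimal sketch of that drain loop, assuming the caller has already posted n nonblocking requests (the helper name drain_requests is made up for illustration):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

/* Poll with MPI_Testsome until every request in reqs[0..n-1] has completed.
 * Completed non-persistent requests are set to MPI_REQUEST_NULL by MPI, so
 * later calls simply skip them; MPI_UNDEFINED means nothing is active. */
static void drain_requests(int n, MPI_Request reqs[])
{
    int *indices = malloc(n * sizeof(int));
    MPI_Status *statuses = malloc(n * sizeof(MPI_Status));
    int remaining = n;

    while (remaining > 0) {
        int outcount;
        MPI_Testsome(n, reqs, &outcount, indices, statuses);
        if (outcount == MPI_UNDEFINED)
            break;                        /* no active requests left */
        for (int i = 0; i < outcount; ++i)
            printf("request %d completed\n", indices[i]);
        remaining -= outcount;
    }
    free(indices);
    free(statuses);
}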
Example #4
File: testsomef.c Project: agrimaldi/pmap
FORT_DLL_SPEC void FORT_CALL mpi_testsome_ ( MPI_Fint *v1, MPI_Fint v2[], MPI_Fint *v3, MPI_Fint v4[], MPI_Fint v5[], MPI_Fint *ierr ){

#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif

    if (v5 == MPI_F_STATUSES_IGNORE) { v5 = (MPI_Fint *)MPI_STATUSES_IGNORE; }
    *ierr = MPI_Testsome( (int)*v1, (MPI_Request *)(v2),  v3, v4, (MPI_Status *)v5 );

    {int li;
     for (li=0; li<*v3; li++) {
        if (v4[li] >= 0) v4[li] += 1;
     }
    }
}
Example #5
void AsyncAcks::cleanup() {
	//if no requests, then nothing to do
	if (next_slot_ == 0)
		return;

	//check for satisfied requests
	int outcount;
	int array_of_indices[next_slot_];
	SIPMPIUtils::check_err(
			MPI_Testsome(next_slot_, posted_async_, &outcount, array_of_indices,
					MPI_STATUSES_IGNORE));

//	std::cout << " in cleanup, " << outcount << " acks released" << std::endl << std::flush;
	remove_completed_requests(outcount, array_of_indices);
}
Example #6
void mpi_process_group::poll_requests(int block) const
{
  int size = impl_->requests.size();
  if (size==0)
    return;
  std::vector<MPI_Status> statuses(size);
  std::vector<int> indices(size);
  
  while (true) {
    MPI_Testsome(impl_->requests.size(),&impl_->requests[0],
       &size,&indices[0],&statuses[0]);
    if (size==0)
      return; // no message waiting

    // remove handled requests before we get the chance to be recursively called
    if (size) {
      std::vector<MPI_Request> active_requests;
      std::size_t i=0;
      int j=0;
      for (;i< impl_->requests.size() && j< size; ++i) {
        if (int(i)==indices[j])
          // release the dealt-with request 
          ++j;
        else // copy and keep the request
          active_requests.push_back(impl_->requests[i]);
      }    
      while (i < impl_->requests.size())
        active_requests.push_back(impl_->requests[i++]);
      impl_->requests.swap(active_requests);
    }

    optional<std::pair<int, int> > result;
    for (int i=0;i < size; ++i) {
      std::pair<int, int> decoded = decode_tag(statuses[i].MPI_TAG);
      block_type* block = impl_->blocks[decoded.first];
      
      BOOST_ASSERT (decoded.second < static_cast<int>(block->triggers.size()) && block->triggers[decoded.second]);
        // We have a trigger for this message; use it
      trigger_receive_context old_context = impl_->trigger_context;
      impl_->trigger_context = trc_irecv_out_of_band;
      block->triggers[decoded.second]->receive(*this, statuses[i].MPI_SOURCE, 
            decoded.second, impl_->trigger_context, decoded.first);
      impl_->trigger_context = old_context;
    }
  }
}
Example #7
outbox_t* comm_remove_completed(outbox_t* outbox)
{
  assert(NULL != outbox);
#ifdef WITH_MPI
  const int size = requests_list_size(outbox->requests);
  if (size > 0) {
    int count = 0;
    int* completed = xcalloc(sizeof(int), size);
    MPI_Status* statuses = xmalloc(sizeof(MPI_Status)*size);
    // check if some test messages have arrived...
    MPI_Testsome(size, outbox->requests->storage,
                 &count, completed, statuses);
    // ...and copy incomplete requests into a new outbox
    outbox_t* new_outbox = outbox_new(size - count);
    for (int n = 0; n < size; ++n) {
      bool copy = true;
      // do not copy if index is in array of indices returned by MPI_Testsome()
      for (int m = 0; m < count; ++m) {
        if (completed[m] == n) {
          copy = false;
          break;
        };
      }; // for (int m = ...)
      if (copy) {
        MPI_Request* req_p = requests_list_extend1(&(new_outbox->requests));
        *req_p = outbox->requests->storage[n];
        row_t* row = rows_list_extend1(&(new_outbox->rows));
        row->data = outbox->rows->storage[n].data;
        row->kind = outbox->rows->storage[n].kind;
      }
      else { // free rows corresponding to completed requests
        if (ROW_SPARSE == outbox->rows->storage[n].kind)
          sparse_row_free((sparse_row_t*) outbox->rows->storage[n].data);
        else
          dense_row_free((dense_row_t*) outbox->rows->storage[n].data);
      };
    }; // for (int n = ...)
    outbox_free(outbox);
    // finally, free local resources and return
    free(completed);
    free(statuses);
    return new_outbox;
  };
#endif // WITH_MPI
  return outbox;
}
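Examples #6 and #7 both rebuild their request containers by skipping the indices that MPI_Testsome reported as completed. Because MPI_Testsome deallocates completed non-persistent requests and sets those entries to MPI_REQUEST_NULL, a plain C array can instead be compacted in place; this small helper (compact_requests is a hypothetical name) sketches that alternative:

#include <mpi.h>

/* Drop the MPI_REQUEST_NULL entries left behind by MPI_Testsome/MPI_Waitsome
 * and return the number of requests still pending. */
static int compact_requests(int n, MPI_Request reqs[])
{
    int kept = 0;
    for (int i = 0; i < n; ++i) {
        if (reqs[i] != MPI_REQUEST_NULL)
            reqs[kept++] = reqs[i];
    }
    return kept;
}

Note that Example #7 also has to keep its rows list in step with the requests, which is why it copies the survivors into a fresh outbox instead of compacting in place.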
Example #8
JNIEXPORT jobjectArray JNICALL Java_mpi_Request_testSomeStatus(
        JNIEnv *env, jclass clazz, jlongArray requests)
{
    int incount = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    MPI_Status *statuses = (MPI_Status*)calloc(incount, sizeof(MPI_Status));
    int *indices = (int*)calloc(incount, sizeof(int));
    int outcount;
    int rc = MPI_Testsome(incount, cReq, &outcount, indices, statuses);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    jobjectArray jStatuses = newStatusesIndices(env, statuses, indices, outcount);
    free(statuses);
    free(indices);
    return jStatuses;
}
Example #9
JNIEXPORT jintArray JNICALL Java_mpi_Request_testSome(
        JNIEnv *env, jclass clazz, jlongArray requests)
{
    int incount = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    int *indices = (int*)calloc(incount, sizeof(int));
    int outcount;
    int rc = MPI_Testsome(incount, cReq, &outcount, indices, MPI_STATUSES_IGNORE);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    jintArray jindices = NULL;

    if(outcount != MPI_UNDEFINED)
    {
        jindices = (*env)->NewIntArray(env, outcount);
        setIndices(env, jindices, indices, outcount);
    }

    free(indices);
    return jindices;
}
Example #10
int main( int argc, char *argv[])
{
    int myid, numprocs;

    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&myid);

    printf("Hello from processor %i of %i\n", myid, numprocs);

    int size = 0;
    int num_done = 0;
    MPI_Status* stat = 0;
    MPI_Request* req = 0;
    int* done_indices = 0;

    MPI_Testsome( size, req, &num_done, done_indices, stat);

    printf("Finalizing on processor %i of %i\n", myid, numprocs);

    MPI_Finalize();

    return 0;
}
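Example #10 exercises the degenerate call with an empty request list. Per the MPI standard, when the list contains no active requests MPI_Testsome returns immediately with outcount set to MPI_UNDEFINED rather than 0, so num_done above should not be read as a count. A minimal sketch of that check, mirroring the example's zero-length arguments (check_empty_testsome is a made-up name):

#include <mpi.h>

/* With an empty (or fully inactive) request list, MPI_Testsome is expected
 * to return immediately with outcount = MPI_UNDEFINED. */
static void check_empty_testsome(void)
{
    int outcount;
    MPI_Testsome(0, NULL, &outcount, NULL, MPI_STATUSES_IGNORE);
    if (outcount == MPI_UNDEFINED) {
        /* nothing active: not an error, just nothing to report */
    }
}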
Example #11
static void test_pair (void)
{
  int prev, next, count, tag, index, i, outcount, indices[2];
  int rank, size, flag, ierr, reqcount;
  double send_buf[TEST_SIZE], recv_buf[TEST_SIZE];
  double buffered_send_buf[TEST_SIZE * 2 + MPI_BSEND_OVERHEAD]; /* factor of two is based on guessing - only dynamic allocation would be safe */
  void *buffer;
  MPI_Status statuses[2];
  MPI_Status status;
  MPI_Request requests[2];
  MPI_Comm dupcom, intercom;
#ifdef V_T

  struct _VT_FuncFrameHandle {
      char *name;
      int func;
      int frame;
  };
  typedef struct _VT_FuncFrameHandle VT_FuncFrameHandle_t;

  VT_FuncFrameHandle_t normal_sends,
      buffered_sends,
      buffered_persistent_sends,
      ready_sends,
      sync_sends,
      nblock_sends,
      nblock_rsends,
      nblock_ssends,
      pers_sends,
      pers_rsends,
      pers_ssends,
      sendrecv,
      sendrecv_repl,
      intercomm;

  int classid;
  VT_classdef( "Application:test_pair", &classid );


#define VT_REGION_DEF( _name, _nameframe, _class ) \
        (_nameframe).name=_name; \
        VT_funcdef( (_nameframe).name, _class, &((_nameframe).func) );
#define VT_BEGIN_REGION( _nameframe ) \
        LOCDEF(); \
        VT_begin( (_nameframe).func )
#define VT_END_REGION( _nameframe ) \
        LOCDEF(); VT_end( (_nameframe).func )
#else
#define VT_REGION_DEF( _name, _nameframe, _class )
#define VT_BEGIN_REGION( _nameframe )
#define VT_END_REGION( _nameframe )

#endif




  ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  ierr = MPI_Comm_size(MPI_COMM_WORLD, &size);
  if ( size < 2 ) {
      if ( rank == 0 ) {
	  printf("Program needs to be run on at least 2 processes.\n");
      }
      ierr = MPI_Abort( MPI_COMM_WORLD, 66 );
  }
  ierr = MPI_Comm_dup(MPI_COMM_WORLD, &dupcom);

  if ( rank >= 2 ) {
      /*      printf( "%d Calling finalize.\n", rank ); */
      ierr = MPI_Finalize( );
      exit(0);
  }

  next = rank + 1;
  if (next >= 2)
    next = 0;

  prev = rank - 1;
  if (prev < 0)
    prev = 1;

  VT_REGION_DEF( "Normal_Sends", normal_sends, classid );
  VT_REGION_DEF( "Buffered_Sends", buffered_sends, classid );
  VT_REGION_DEF( "Buffered_Persistent_Sends", buffered_persistent_sends, classid );
  VT_REGION_DEF( "Ready_Sends", ready_sends, classid );
  VT_REGION_DEF( "Sync_Sends", sync_sends, classid );
  VT_REGION_DEF( "nblock_Sends", nblock_sends, classid );
  VT_REGION_DEF( "nblock_RSends", nblock_rsends, classid );
  VT_REGION_DEF( "nblock_SSends", nblock_ssends, classid );
  VT_REGION_DEF( "Pers_Sends", pers_sends, classid );
  VT_REGION_DEF( "Pers_RSends", pers_rsends, classid );
  VT_REGION_DEF( "Pers_SSends", pers_ssends, classid );
  VT_REGION_DEF( "SendRecv", sendrecv, classid );
  VT_REGION_DEF( "SendRevc_Repl", sendrecv_repl, classid );
  VT_REGION_DEF( "InterComm", intercomm, classid );



/*
 * Normal sends
 */

  VT_BEGIN_REGION( normal_sends );

  if (rank == 0)
    printf ("Send\n");

  tag = 0x100;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Send(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( normal_sends );


/*
 * Buffered sends
 */

  VT_BEGIN_REGION( buffered_sends );

  if (rank == 0)
    printf ("Buffered Send\n");

  tag = 138;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf));
    MPI_Bsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Buffer_detach(&buffer, &size);
    if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) {
        printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf));
        MPI_Abort(MPI_COMM_WORLD, 201);
    }
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( buffered_sends );


/*
 * Buffered persistent sends
 */

  VT_BEGIN_REGION( buffered_persistent_sends );

  if (rank == 0)
    printf ("Buffered Persistent Send\n");

  tag = 238;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf));
    MPI_Bsend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests);
    MPI_Start(requests);
    MPI_Wait(requests, statuses);
    MPI_Request_free(requests);
    MPI_Buffer_detach(&buffer, &size);
    if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) {
        printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf));
        MPI_Abort(MPI_COMM_WORLD, 201);
    }
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( buffered_persistent_sends );


/*
 * Ready sends.  Note that we must ensure that the receive is posted
 * before the rsend; this requires using Irecv.
 */


  VT_BEGIN_REGION( ready_sends );

  if (rank == 0)
    printf ("Rsend\n");

  tag = 1456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Recv(MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD, &status);
    MPI_Rsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Probe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status);
    if (status.MPI_SOURCE != prev)
      printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE);

    if (status.MPI_TAG != tag)
      printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG);

    MPI_Get_count(&status, MPI_DOUBLE, &i);
    if (i != count)
      printf ("Incorrect count, expected %d, got %d\n",count,i);

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "rsend and recv");
  }
  else {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    MPI_Send( MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD);
    MPI_Wait(requests, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "rsend and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( ready_sends );

/*
 * Synchronous sends
 */

  VT_BEGIN_REGION( sync_sends );

  if (rank == 0)
    printf ("Ssend\n");

  tag = 1789;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status);
    if (flag)
      printf ("Iprobe succeeded! source %d, tag %d\n",status.MPI_SOURCE,
                                                      status.MPI_TAG);

    MPI_Ssend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

    while (!flag)
      MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status);

    if (status.MPI_SOURCE != prev)
      printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE);

    if (status.MPI_TAG != tag)
      printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG);

    MPI_Get_count(&status, MPI_DOUBLE, &i);

    if (i != count)
      printf ("Incorrect count, expected %d, got %d\n",count,i);

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Ssend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( sync_sends );

/*
 * Nonblocking normal sends
 */

  VT_BEGIN_REGION( nblock_sends );

  if (rank == 0)
    printf ("Isend\n");

  tag = 2123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Isend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
              (requests+1));
    MPI_Waitall(2, requests, statuses);
    rq_check( requests, 2, "isend and irecv" );

    msg_check(recv_buf,prev,tag,count,statuses, TEST_SIZE,"isend and irecv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"isend and irecv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Isend(recv_buf, count, MPI_DOUBLE, next, tag,MPI_COMM_WORLD,
              (requests));
    MPI_Wait((requests), &status);
    rq_check(requests, 1, "isend (and recv)");
  }



  VT_END_REGION( nblock_sends );

/*
 * Nonblocking ready sends
 */


  VT_BEGIN_REGION( nblock_rsends );

  if (rank == 0)
    printf ("Irsend\n");

  tag = 2456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0,
                  MPI_BOTTOM, 0, MPI_INT, next, 0,
                  dupcom, &status);
    MPI_Irsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
               (requests+1));
    reqcount = 0;
    while (reqcount != 2) {
      MPI_Waitany( 2, requests, &index, statuses);
      if( index == 0 ) {
	  memcpy( &status, statuses, sizeof(status) );
      }
      reqcount++;
    }

    rq_check( requests, 1, "irsend and irecv");
    msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"irsend and irecv");
  }
  else {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0,
                  MPI_BOTTOM, 0, MPI_INT, next, 0,
                  dupcom, &status);
    flag = 0;
    while (!flag)
      MPI_Test(requests, &flag, &status);

    rq_check( requests, 1, "irsend and irecv (test)");
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "irsend and irecv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Irsend(recv_buf, count, MPI_DOUBLE, next, tag,
               MPI_COMM_WORLD, requests);
    MPI_Waitall(1, requests, statuses);
    rq_check( requests, 1, "irsend and irecv");
  }

  VT_END_REGION( nblock_rsends );

/*
 * Nonblocking synchronous sends
 */

  VT_BEGIN_REGION( nblock_ssends );

  if (rank == 0)
    printf ("Issend\n");

  tag = 2789;
  count = TEST_SIZE / 3;
  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests );
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Issend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
               (requests+1));
    flag = 0;
    while (!flag)
      MPI_Testall(2, requests, &flag, statuses);

    rq_check( requests, 2, "issend and irecv (testall)");
    msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, 
               "issend and recv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "issend and recv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Issend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,requests);

    flag = 0;
    while (!flag)
      MPI_Testany(1, requests, &index, &flag, statuses);

    rq_check( requests, 1, "issend and recv (testany)");
  }


  VT_END_REGION( nblock_ssends );


/*
 * Persistent normal sends
 */

  VT_BEGIN_REGION( pers_sends );

  if (rank == 0)
    printf ("Send_init\n");

  tag = 3123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Send_init(send_buf, count, MPI_DOUBLE, next, tag,
                MPI_COMM_WORLD, requests);
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
                MPI_COMM_WORLD, (requests+1));

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Startall(2, requests);
    MPI_Waitall(2, requests, statuses);
    msg_check( recv_buf, prev, tag, count, (statuses+1),
               TEST_SIZE, "persistent send/recv");
  }
  else {
    MPI_Start((requests+1));
    MPI_Wait((requests+1), &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "persistent send/recv");
    init_test_data(send_buf,TEST_SIZE,1);


    MPI_Start(requests);
    MPI_Wait(requests, &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_sends );

/*
 * Persistent ready sends
 */

  VT_BEGIN_REGION( pers_rsends );

  if (rank == 0)
    printf ("Rsend_init\n");

  tag = 3456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Rsend_init(send_buf, count, MPI_DOUBLE, next, tag,
                  MPI_COMM_WORLD, requests);
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, (requests+1));

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0); MPI_Barrier( MPI_COMM_WORLD );
    MPI_Startall(2, requests);
    reqcount = 0;
    while (reqcount != 2) {
      MPI_Waitsome(2, requests, &outcount, indices, statuses);
      for (i=0; i<outcount; i++) {
        if (indices[i] == 1) {
          msg_check( recv_buf, prev, tag, count, (statuses+i),
                     TEST_SIZE, "waitsome");
        }
	reqcount++;
      }
    }
  }
  else {
    MPI_Start((requests+1)); MPI_Barrier( MPI_COMM_WORLD );
    flag = 0;
    while (!flag)
      MPI_Test((requests+1), &flag, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "test");

    init_test_data(send_buf,TEST_SIZE,1);

 
    MPI_Start(requests);
    MPI_Wait(requests, &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_rsends );


/*
 * Persistent synchronous sends
 */


  VT_BEGIN_REGION( pers_ssends );

  if (rank == 0)
    printf ("Ssend_init\n");

  tag = 3789;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Ssend_init(send_buf, count, MPI_DOUBLE, next, tag,
                 MPI_COMM_WORLD, (requests+1));
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, requests);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Startall(2, requests);

    reqcount = 0;
    while (reqcount != 2) {
      MPI_Testsome(2, requests, &outcount, indices, statuses);
      for (i=0; i<outcount; i++) {
        if (indices[i] == 0) {
          msg_check( recv_buf, prev, tag, count, (statuses+i),
                     TEST_SIZE, "testsome");
        }
	reqcount++;
      }
    }
  }
  else {
    MPI_Start(requests);
    flag = 0;
    while (!flag)
      MPI_Testany(1, requests, &index, &flag, statuses);

    msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, "testany" );

    init_test_data(send_buf,TEST_SIZE,1);


     MPI_Start((requests+1));
     MPI_Wait((requests+1), &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_ssends );


/*
 * Send/receive.
 */


  VT_BEGIN_REGION( sendrecv );

  if (rank == 0)
    printf ("Sendrecv\n");

  tag = 4123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Sendrecv(send_buf, count, MPI_DOUBLE, next, tag,
                 recv_buf, count, MPI_DOUBLE, prev, tag,
                 MPI_COMM_WORLD, &status );

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "sendrecv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
             MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "recv/send"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }


  VT_END_REGION( sendrecv );

#ifdef V_T
  VT_flush();
#endif


/*
 * Send/receive replace.
 */

  VT_BEGIN_REGION( sendrecv_repl );

  if (rank == 0)
    printf ("Sendrecv_replace\n");

  tag = 4456;
  count = TEST_SIZE / 3;

  if (rank == 0) {
      init_test_data(recv_buf, TEST_SIZE,0);
    for (i=count; i< TEST_SIZE; i++)
      recv_buf[i] = 0.0;

    MPI_Sendrecv_replace(recv_buf, count, MPI_DOUBLE,
                         next, tag, prev, tag, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "sendrecvreplace");
  }
  else {
    clear_test_data(recv_buf,TEST_SIZE);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
             MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "recv/send for replace"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( sendrecv_repl );


/*
 * Send/Receive via inter-communicator
 */

  VT_BEGIN_REGION( intercomm );

  MPI_Intercomm_create(MPI_COMM_SELF, 0, MPI_COMM_WORLD, next, 1, &intercom);

  if (rank == 0)
    printf ("Send via inter-communicator\n");

  tag = 4018;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Send(send_buf, count, MPI_DOUBLE, 0, tag, intercom);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, intercom, &status);
    msg_check(recv_buf, 0, tag, count, &status, TEST_SIZE, "send and recv via inter-communicator");
  }
  else if (rank == 1) {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             intercom, &status);
    msg_check( recv_buf, 0, tag, count, &status, TEST_SIZE,"send and recv via inter-communicator");
    init_test_data(recv_buf,TEST_SIZE,0);
    MPI_Send(recv_buf, count, MPI_DOUBLE, 0, tag, intercom);

  }

  VT_END_REGION( intercomm );



  MPI_Comm_free(&intercom);
  MPI_Comm_free(&dupcom);
} 
Example #12
int main(int argc, char **argv)
{
    int errs = 0;
    MPI_Status status, *status_array = 0;
    int count = 0, flag, idx, rc, errlen, *indices=0, outcnt;
    MPI_Request *reqs = 0;
    char errmsg[MPI_MAX_ERROR_STRING];

    MTest_Init(&argc, &argv);

    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    rc = MPI_Testall( count, reqs, &flag, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Testall returned failure: %s\n", errmsg );
	errs ++;
    }
    else if (!flag) {
	printf( "MPI_Testall( 0, ... ) did not return a true flag\n") ;
	errs++;
    }

    rc = MPI_Waitall( count, reqs, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Waitall returned failure: %s\n", errmsg );
	errs ++;
    }

    rc = MPI_Testany( count, reqs, &idx, &flag, &status );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Testany returned failure: %s\n", errmsg );
	errs ++;
    }
    else if (!flag) {
	printf( "MPI_Testany( 0, ... ) did not return a true flag\n") ;
	errs++;
    }

    rc = MPI_Waitany( count, reqs, &idx, &status );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Waitany returned failure: %s\n", errmsg );
	errs ++;
    }

    rc = MPI_Testsome( count, reqs, &outcnt, indices, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Testsome returned failure: %s\n", errmsg );
	errs ++;
    }

    rc = MPI_Waitsome( count, reqs, &outcnt, indices, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Waitsome returned failure: %s\n", errmsg );
	errs ++;
    }
    
    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #13
File: match.c Project: nuskarthik/cs3210
void fieldAction(int rank, int teamPos[2][5][2], int teamSkill[2][5][3]) {
    int ballPos[2] = {64, 32};
    int sendbuf[SIZE_PLAYER_RECV];
    int recvbuf[SIZE_PLAYER_SEND*NUM_PLAYERS];
    int teamId = rank - 10;
    int otherFieldRank = 21 - rank;
    int i, j;
    int round = 1;
    int teamPoints[NUM_TEAMS] = {0, 0};
    int scorePost[2][2] = {{128, 32}, {0, 32}};
    int side = 0;
    do {
    memset(recvbuf, 0, SIZE_PLAYER_SEND*NUM_PLAYERS*sizeof(int));
    sendbuf[0] = ballPos[0];
    sendbuf[1] = ballPos[1];

    for (i = 2; i < 12; i+=2) {
        // Put pos of all players in the same team
        sendbuf[i] = teamPos[teamId][i/2 - 1][0];
        sendbuf[i+1] = teamPos[teamId][i/2 - 1][1];
    }
    // Send to one team
    for (i = 0; i < 5; i++) {
        MPI_Request tempReq;
        MPI_Isend(sendbuf, SIZE_PLAYER_RECV, MPI_INT, 5*teamId + i, TAG_ROUND_START + round, MPI_COMM_WORLD, &tempReq);
        MPI_Request_free(&tempReq);
    }
    MPI_Request playerReqs[NUM_PLAYERS];
    MPI_Request otherField;

    for (i = 0; i < NUM_PLAYERS; i++) {
        MPI_Irecv(recvbuf + i*SIZE_PLAYER_SEND, SIZE_PLAYER_SEND, MPI_INT, i, TAG_PLAYER_SEND + round,
            MPI_COMM_WORLD, playerReqs + i);
    }

    int playersReceived[1] = {0};
    int playersOtherReceived[1] = {0};
    int flag = 0;
    MPI_Irecv(playersOtherReceived, 1, MPI_INT, 11 - teamId, TAG_FIELD_STAT + round, MPI_COMM_WORLD, &otherField);

    int outcount = 0;
    int outIndices[NUM_PLAYERS];
    int playersReceivedArray[NUM_PLAYERS];
    for (i = 0; i < NUM_PLAYERS; i++) {
        playersReceivedArray[i] = 0;
    }
    while (true) {
        MPI_Testsome(NUM_PLAYERS, playerReqs, &outcount, outIndices, MPI_STATUSES_IGNORE);
        if (outcount == MPI_UNDEFINED || outcount > 0) {

            if (outcount == MPI_UNDEFINED) {
                outcount = 10 - playersReceived[0];
                for (i = 0; i < 10; i++) {
                    outIndices[i] = i;
                }
            }
            for (i = 0; i < outcount; i++) {
                playersReceivedArray[outIndices[i]] = 1;
            }

            playersReceived[0] += outcount;
            if (rank == F1) {
                MPI_Request tempReq;
                MPI_Isend(playersReceived, 1, MPI_INT, F0, TAG_FIELD_STAT + round, MPI_COMM_WORLD, &tempReq);
                MPI_Request_free(&tempReq);
                if (playersReceived[0] == NUM_PLAYERS) {
                    MPI_Wait(&otherField, MPI_STATUS_IGNORE);
                }
            } else if (playersReceived[0] + playersOtherReceived[0] == NUM_PLAYERS) {
                MPI_Request tempReq;
                MPI_Isend(playersReceived, 1, MPI_INT, F1, TAG_FIELD_STAT + round, MPI_COMM_WORLD, &tempReq);
                MPI_Request_free(&tempReq);
                break;
            }
        }
        MPI_Test(&otherField, &flag, MPI_STATUS_IGNORE);
        if (flag) {
            if (rank == F1) {
                break;
            }
            if (playersReceived[0] + playersOtherReceived[0] == NUM_PLAYERS) {
                MPI_Request tempReq;
                MPI_Isend(playersReceived, 1, MPI_INT, F1, TAG_FIELD_STAT + round, MPI_COMM_WORLD, &tempReq);
                MPI_Request_free(&tempReq);
                break;
            }
            MPI_Irecv(playersOtherReceived, 1, MPI_INT, F1, TAG_FIELD_STAT + round, MPI_COMM_WORLD, &otherField);
        }
    }
    // Cancel all extra team recv requests
    for (i = 0; i < NUM_PLAYERS; i++) {
        if (playerReqs[i] != MPI_REQUEST_NULL) {
            MPI_Request_free(&playerReqs[i]);
        }
    }
    int numReached = 0;
    int reachedRanks[NUM_PLAYERS];
    int maxBallChallenge = 1;
    for (i = 0; i < NUM_PLAYERS; i++) {
        if (playersReceivedArray[i]) {
           recvbuf[i*SIZE_PLAYER_SEND + INDEX_RANK] = FLAG_UPDATED;
           if (recvbuf[i*SIZE_PLAYER_SEND + INDEX_NEWX] == ballPos[0]
                && recvbuf[i*SIZE_PLAYER_SEND + INDEX_NEWY] == ballPos[1]) {
               int chall = recvbuf[i*SIZE_PLAYER_SEND + INDEX_CHALLENGE];
               if (chall > maxBallChallenge) {
                   maxBallChallenge = chall;
                   numReached = 0;
               }
               reachedRanks[numReached++] = i;
           }
        }
    }
    int winnerRank = -1;
    if (numReached > 0) {
        winnerRank = reachedRanks[rand() % numReached];
        int newBallPos[2];
        newBallPos[0] = recvbuf[winnerRank*SIZE_PLAYER_SEND + INDEX_BALLX];
        newBallPos[1] = recvbuf[winnerRank*SIZE_PLAYER_SEND + INDEX_BALLY];
        int d = abs(recvbuf[winnerRank*SIZE_PLAYER_SEND + INDEX_NEWX] - newBallPos[0]) +
                abs(recvbuf[winnerRank*SIZE_PLAYER_SEND + INDEX_NEWY] - newBallPos[1]);
        int winnerShootSkill = teamSkill[winnerRank/5][winnerRank%5][2];
        int prob = calcProb(winnerShootSkill, d);

        int hit = -1;
        if (prob > 0) {
            hit = rand() % (100 / prob);
        }

        ballPos[0] = newBallPos[0];
        ballPos[1] = newBallPos[1];
        if (hit != 0) {
            // Random 8
            int x8 = rand() % 8 + 1;
            int y8 = rand() % 8 + 1;
            if (rand() % 2) x8 = -x8;
            if (rand() % 2) y8 = -y8;
            ballPos[0] = recvbuf[winnerRank*SIZE_PLAYER_SEND + INDEX_BALLX] + x8;
            ballPos[1] = recvbuf[winnerRank*SIZE_PLAYER_SEND + INDEX_BALLY] + y8;
            if (ballPos[0] < 0 || ballPos[1] < 0) {
                ballPos[0] = 0;
                ballPos[1] = 0;
            } else if (ballPos[0] > 128 || ballPos[1] > 64) {
                ballPos[0] = 128;
                ballPos[1] = 64;
            }
        } else {
            if (ballPos[0] == scorePost[winnerRank/5][0] && ballPos[1] == scorePost[winnerRank/5][1]) {
                ballPos[0] = 64;
                ballPos[1] = 32;
                teamPoints[winnerRank/5] += (d > 24) ? 3:2;
            }
        }
    }
    int adminDetails[SIZE_FIELD_ADMIN] = {winnerRank, ballPos[0], ballPos[1], teamPoints[0], teamPoints[1]};
    int recvbuf2[NUM_PLAYERS*SIZE_PLAYER_SEND];
    int adminDetails2[SIZE_FIELD_ADMIN];
    memset(recvbuf2, 0, NUM_PLAYERS*SIZE_PLAYER_SEND*sizeof(int));

    MPI_Request temp, temp2, roundOverRequest;
    MPI_Isend(adminDetails, SIZE_FIELD_ADMIN, MPI_INT, otherFieldRank, TAG_FIELD_SYNC_ADMIN + round, MPI_COMM_WORLD, &temp);
    MPI_Request_free(&temp);
    MPI_Isend(recvbuf, NUM_PLAYERS*SIZE_PLAYER_SEND, MPI_INT, otherFieldRank, TAG_FIELD_SYNC_PLAYER + round, MPI_COMM_WORLD, &temp2);
    MPI_Request_free(&temp2);

    MPI_Recv(adminDetails2, SIZE_FIELD_ADMIN, MPI_INT, otherFieldRank, TAG_FIELD_SYNC_ADMIN + round, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Recv(recvbuf2, NUM_PLAYERS*SIZE_PLAYER_SEND, MPI_INT, otherFieldRank, TAG_FIELD_SYNC_PLAYER + round, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    if (adminDetails2[INDEX_WINNER_RANK] != -1) {
       winnerRank = adminDetails2[INDEX_WINNER_RANK];
       ballPos[0] = adminDetails2[1];
       ballPos[1] = adminDetails2[2];
       teamPoints[0] = adminDetails2[3];
       teamPoints[1] = adminDetails2[4];
    }

    for (i = 0; i < NUM_PLAYERS; i++) {
        if (recvbuf2[i*SIZE_PLAYER_SEND + INDEX_RANK] == FLAG_UPDATED) {
            for (j = 1; j < SIZE_PLAYER_SEND; j++) {
                recvbuf[i*SIZE_PLAYER_SEND + j] = recvbuf2[i*SIZE_PLAYER_SEND + j];
            }
        }
    }
    if (rank == F1) {
        int tempVal[1];
        int iTeamId, iPlayerId;
        for (i = 0; i < NUM_PLAYERS; ++i) {
            iTeamId = i/5;
            iPlayerId = i%5;
            teamPos[iTeamId][iPlayerId][0] = recvbuf[i*SIZE_PLAYER_SEND + INDEX_NEWX];
            teamPos[iTeamId][iPlayerId][1] = recvbuf[i*SIZE_PLAYER_SEND + INDEX_NEWY];
        }
        MPI_Irecv(tempVal, 1, MPI_INT, F0, TAG_FIELD_ROUND_OVER + round, MPI_COMM_WORLD, &roundOverRequest);
        MPI_Wait(&roundOverRequest, MPI_STATUS_IGNORE);
        round++;
        continue;
    }
    // Printing function
    printf("%d\n", round);
    printf("%d %d\n", teamPoints[0], teamPoints[1]);
    printf("%d %d\n", ballPos[0], ballPos[1]); //New pos
    int newX, newY, iTeamId, iPlayerId;
    for (i = 0; i < NUM_PLAYERS; ++i) {
        iTeamId = i/5;
        iPlayerId = i%5;
        printf("%d ", i); // Player number
        printf("%d %d ", teamPos[iTeamId][iPlayerId][0], teamPos[iTeamId][iPlayerId][1]);
        teamPos[iTeamId][iPlayerId][0] = recvbuf[i*SIZE_PLAYER_SEND + INDEX_NEWX];
        teamPos[iTeamId][iPlayerId][1] = recvbuf[i*SIZE_PLAYER_SEND + INDEX_NEWY];
        printf("%d %d ", teamPos[iTeamId][iPlayerId][0], teamPos[iTeamId][iPlayerId][1]);
        printf("%d ", recvbuf[i*SIZE_PLAYER_SEND + INDEX_CHALLENGE] > -1);
        printf("%d ", winnerRank == i);
        printf("%d ", recvbuf[i*SIZE_PLAYER_SEND + INDEX_CHALLENGE]);
        if (winnerRank == i) {
            printf("%d ", recvbuf[i*SIZE_PLAYER_SEND + INDEX_BALLX]);
            printf("%d", recvbuf[i*SIZE_PLAYER_SEND + INDEX_BALLY]);
        } else {
            printf("-1 -1");
        }
        printf("\n");
    }
    if (round == HALF_ROUNDS) {
        scorePost[0][0] = 0;
        scorePost[0][1] = 32;
        scorePost[1][0] = 128;
        scorePost[1][1] = 32;
    }
        int tempVal[1] = {1};
        MPI_Send(tempVal, 1, MPI_INT, F1, TAG_FIELD_ROUND_OVER + round, MPI_COMM_WORLD);
        round++;
    } while(round <= 2*HALF_ROUNDS);
}
Example #14
int MPIRecvDataBuffer::checkRequests(bool waitForAll) {

	if (mpi_buffer.empty()) return 0;

	if (!non_blocking) return 0;  // blocking comm.  should not have any in MPI queue.

	// get all the requests together
	int active = 0;
	for (std::tr1::unordered_map<MPI_Request*, DataBuffer::DataType>::iterator iter = mpi_buffer.begin();
			iter != mpi_buffer.end(); ++iter) {
		reqs[active] = *(iter->first);
		reqptrs[active] = iter->first;
		++active;
	}

	int completed = 0;
	if (waitForAll) {
		MPI_Waitall(active, reqs, MPI_STATUSES_IGNORE);
		completed = active;
	} else {
		MPI_Testsome(active, reqs, &completed, completedreqs, MPI_STATUSES_IGNORE);
	}

	long long t2 = ::cci::common::event::timestampInUS();
	long long t1 = -1;

	int size = 0;
	MPI_Request* reqptr = NULL;

	if (completed == MPI_UNDEFINED) {
		cci::common::Debug::print("ERROR: testing completion received a complete count of MPI_UNDEFINED\n");
	} else if (completed == 0) {
		// cci::common::Debug::print("no mpi requests completed\n");
	} else {
		//cci::common::Debug::print("MPI Recv Buffer active = %d, number completed = %d, total = %ld\n", active, completed, mpi_buffer.size());

		char len[21];  // max length of uint64 is 20 digits

		for (int i = 0; i < completed; ++i) {
			if (waitForAll) {
				reqptr = reqptrs[i];
			} else {
				reqptr = reqptrs[completedreqs[i]];
			}
			//printf("recv MPI error status: %d\n", stati[i].MPI_ERROR);
			size = mpi_buffer[reqptr].first;

			buffer.push(mpi_buffer[reqptr]);
			mpi_buffer.erase(reqptr);

			t1 = mpi_req_starttimes[reqptr];
			mpi_req_starttimes.erase(reqptr);

			free(reqptr);

			memset(len, 0, 21);
			sprintf(len, "%d", size);
			if (this->logsession != NULL) this->logsession->log(cci::common::event(0, std::string("MPI NB RECV"), t1, t2, std::string(len), ::cci::common::event::NETWORK_IO_NB));
		}
		//cci::common::Debug::print("MPI Recv Buffer new size: %ld\n", mpi_buffer.size());

		debug_complete_count += completed;
	}

//	cci::common::Debug::print("MPIRecvDataBuffer: popMPI called.  %d load\n", mpi_buffer.size());

	return completed;
}
Example #15
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  MPI_Comm comm = MPI_COMM_WORLD;
  char processor_name[128];
  int namelen = 128;
  int buf[BUF_SIZE * 2];
  int i, j, k, index, outcount, flag;
  int indices[2];
  MPI_Request aReq[2];
  MPI_Status aStatus[2];

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  if (rank == 0) {
    /* set up persistent sends... */
    MPI_Send_init (&buf[0], BUF_SIZE, MPI_INT, 1, 0, comm, &aReq[0]);
    MPI_Send_init (&buf[BUF_SIZE], BUF_SIZE, MPI_INT, 1, 1, comm, &aReq[1]);

    /* initialize the send buffers */
    for (i = 0; i < BUF_SIZE; i++) {
      buf[i] = i;
      buf[BUF_SIZE + i] = BUF_SIZE - 1 - i;
    }
  }

  for (k = 0; k < 4; k++) {
    if (rank == 1) {
      /* zero out the receive buffers */
      bzero (buf, sizeof(int) * BUF_SIZE * 2);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) {
      /* start the persistent sends... */
      if (k % 2) {
	MPI_Startall (2, &aReq[0]);
      }
      else {
	for (j = 0; j < 2; j++) {
	  MPI_Start (&aReq[j]);
	}
      }

      /* complete the sends */
      if (k < 2) {
	/* use MPI_Testany */
	for (j = 0; j < 2; j++) {
	  flag = 0;
	  while (!flag) {
	    MPI_Testany (2, aReq, &index, &flag, aStatus);
	  }
	}
      }
      else {
	/* use MPI_Testsome */
	j = 0;
	while (j < 2) {
	  outcount = 0;
	  while (!outcount) {
	    MPI_Testsome (2, aReq, &outcount, indices, aStatus);
	  }
	  j += outcount;
	}
      }
    }
    else if (rank == 1) {
      /* set up receives for all of the sends */
      for (j = 0; j < 2; j++) {
	MPI_Irecv (&buf[j * BUF_SIZE], BUF_SIZE,
		   MPI_INT, 0, j, comm, &aReq[j]);
      }
      /* complete all of the receives... */
      MPI_Waitall (2, aReq, aStatus);
    }
  }

  MPI_Barrier(MPI_COMM_WORLD);

  if (rank == 0) {
    /* free the persistent requests */
    for (i = 0 ; i < 2; i++) {
      MPI_Request_free (&aReq[i]);
    }
  }

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);
}
Example #16
int main( int argc, char *argv[] )
{
    int errs = 0;
    MPI_Comm comm;
    MPI_Request r[2];
    MPI_Status  s[2];
    int         indices[2], outcount;
    int errval, errclass;
    int b1[20], b2[20], rank, size, src, dest, i, j;

    MTest_Init( &argc, &argv );

    /* Create some receive requests.  tags 0-9 will succeed, tags 10-19 
       will be used for ERR_TRUNCATE (fewer than 20 messages will be used) */
    comm = MPI_COMM_WORLD;

    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

    src  = 1;
    dest = 0;
    if (rank == dest) {
	MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
	errval = MPI_Irecv( b1, 10, MPI_INT, src, 0, comm, &r[0] );
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Irecv\n" );
	}
	errval = MPI_Irecv( b2, 10, MPI_INT, src, 10, comm, &r[1] );
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Irecv\n" );
	}

        /* Wait for Irecvs to be posted before the sender calls send.  This
         * prevents the operation from completing and returning an error in the
         * Irecv. */
        errval = MPI_Recv(NULL, 0, MPI_INT, src, 100, comm, MPI_STATUS_IGNORE);
        if (errval) {
            errs++;
            MTestPrintError( errval );
            printf( "Error returned from Recv\n" );
        }

        /* Wait for sends to complete at the sender before proceeding */
        /* WARNING: This does not guarantee that the sends are ready to
         * complete at the receiver. */
	errval = MPI_Recv(NULL, 0, MPI_INT, src, 10, comm, MPI_STATUS_IGNORE);
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Recv\n" );
	}
	for (i=0; i<2; i++) {
	    s[i].MPI_ERROR = -1;
	}

        /* WARNING: The following assumes that Testsome will complete both
         * send/irecv pairs.  This is *not* guaranteed by the MPI standard. */
	errval = MPI_Testsome( 2, r, &outcount, indices, s );
	MPI_Error_class( errval, &errclass );
	if (errclass != MPI_ERR_IN_STATUS) {
	    errs++;
	    printf( "Did not get ERR_IN_STATUS in Testsome (outcount = %d, should equal 2); class returned was %d\n",
		    outcount, errclass );
	}
	else if (outcount != 2) {
	    errs++;
	    printf( "Test returned outcount = %d\n", outcount );
	}
	else {
	    /* Check for success */
	    for (i=0; i<outcount; i++) {
		j = i;
		/* Indices is the request index */
		if (s[j].MPI_TAG < 10 && s[j].MPI_ERROR != MPI_SUCCESS) {
		    errs++;
		    printf( "correct msg had error class %d\n", 
			    s[j].MPI_ERROR );
		}
		else if (s[j].MPI_TAG >= 10 && s[j].MPI_ERROR == MPI_SUCCESS) {
		    errs++;
		    printf( "truncated msg had MPI_SUCCESS\n" );
		}
	    }
	}

    }
    else if (rank == src) {
        /* Wait for Irecvs to be posted before the sender calls send */
        MPI_Ssend( NULL, 0, MPI_INT, dest, 100, comm );

	/* Send test messages, then send another message so that the test does
	   not start until we are sure that the sends have begun */
	MPI_Send( b1, 10, MPI_INT, dest, 0, comm );
	MPI_Send( b2, 11, MPI_INT, dest, 10, comm );

        /* Wait for sends to complete before proceeding to the testsome. */
	MPI_Ssend( NULL, 0, MPI_INT, dest, 10, comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
  
}
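Example #16 depends on MPI_ERR_IN_STATUS to tell the truncated receive apart from the successful one. A minimal sketch of that per-request error check after MPI_Testsome, assuming the communicator's error handler is MPI_ERRORS_RETURN as in the example (report_failed is a hypothetical helper name):

#include <mpi.h>
#include <stdio.h>

/* When MPI_Testsome returns MPI_ERR_IN_STATUS, each completed request's own
 * error code is stored in its MPI_Status entry; inspect them individually. */
static void report_failed(int n, MPI_Request reqs[], int indices[], MPI_Status statuses[])
{
    int outcount;
    int rc = MPI_Testsome(n, reqs, &outcount, indices, statuses);
    if (rc == MPI_ERR_IN_STATUS && outcount != MPI_UNDEFINED) {
        for (int i = 0; i < outcount; ++i) {
            if (statuses[i].MPI_ERROR != MPI_SUCCESS) {
                char msg[MPI_MAX_ERROR_STRING];
                int len;
                MPI_Error_string(statuses[i].MPI_ERROR, msg, &len);
                fprintf(stderr, "request %d failed: %s\n", indices[i], msg);
            }
        }
    }
}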
Example #17
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  MPI_Comm comm = MPI_COMM_WORLD;
  char processor_name[128];
  int namelen = 128;
  int bbuf[(BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES];
  int buf[BUF_SIZE * 2 * NUM_SEND_TYPES];
  int i, j, k, at_size, send_t_number, index, outcount, total, flag;
  int num_errors, error_count, indices[2 * NUM_SEND_TYPES];
  MPI_Request aReq[2 * NUM_SEND_TYPES];
  MPI_Status aStatus[2 * NUM_SEND_TYPES];

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Buffer_attach (bbuf, sizeof(int) * 
		     (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES);

  if (rank == 0) {
    /* set up persistent sends... */
    send_t_number = NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES;

    MPI_Send_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Send_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Bsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Bsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);


    send_t_number++;

    MPI_Rsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Rsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Ssend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Ssend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], 
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		    comm, &aReq[send_t_number * 2 + 1]);
  }

  for (k = 0; k < (NUM_COMPLETION_MECHANISMS * 2); k++) {
    if (rank == 0) {
      /* initialize all of the send buffers */
      for (j = 0; j < NUM_SEND_TYPES; j++) {
	for (i = 0; i < BUF_SIZE; i++) {
	  buf[2 * j * BUF_SIZE + i] = i;
	  buf[((2 * j + 1) * BUF_SIZE) + i] = BUF_SIZE - 1 - i;
	}
      }
    }
    else if (rank == 1) {
      /* zero out all of the receive buffers */
      bzero (buf, sizeof(int) * BUF_SIZE * 2 * NUM_SEND_TYPES);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) {
      /* set up transient sends... */
      send_t_number = 0;
    
      MPI_Isend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Isend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		 BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		 comm, &aReq[send_t_number * 2 + 1]);

      send_t_number++;
      
      MPI_Ibsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		  1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Ibsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		  BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		  comm, &aReq[send_t_number * 2 + 1]);

      send_t_number++;

      /* Barrier to ensure receives are posted for rsends... */
      MPI_Barrier(MPI_COMM_WORLD);

      MPI_Irsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		  1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Irsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		  BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		  comm, &aReq[send_t_number * 2 + 1]);

      send_t_number++;

      MPI_Issend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		  1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
      MPI_Issend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		  BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, 
		  comm, &aReq[send_t_number * 2 + 1]);

      /* just to be paranoid */
      send_t_number++;
      assert (send_t_number == NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES);

      /* start the persistent sends... */
      if (k % 2) {
	MPI_Startall (NUM_PERSISTENT_SEND_TYPES * 2, &aReq[2 * send_t_number]);
      }
      else {
	for (j = 0; j < NUM_PERSISTENT_SEND_TYPES * 2; j++) {
	  MPI_Start (&aReq[2 * send_t_number + j]);
	}
      }
    
      /* NOTE: Changing the send buffer of a Bsend is NOT an error... */
      for (j = 0; j < NUM_SEND_TYPES; j++) {
	/* muck the buffers */
	buf[j * 2 * BUF_SIZE + (BUF_SIZE >> 1)] = BUF_SIZE;
      }

      printf ("USER MSG: 6 change send buffer errors in iteration #%d:\n", k);

      /* complete the sends */
      switch (k/2) {
      case 0:
	/* use MPI_Wait */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  MPI_Wait (&aReq[j], &aStatus[j]);
	}
	break;
	
      case 1:
	/* use MPI_Waitall */
	MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus);
	break;

      case 2:
	/* use MPI_Waitany */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus);
	}

	break;
	
      case 3:
	/* use MPI_Waitsome */
	total = 0;
	while (total < NUM_SEND_TYPES * 2) {
	  MPI_Waitsome (NUM_SEND_TYPES * 2, aReq, &outcount, indices, aStatus);

	  total += outcount;
	}

	break;

      case 4:
	/* use MPI_Test */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  flag = 0;

	  while (!flag) {
	    MPI_Test (&aReq[j], &flag, &aStatus[j]);
	  }
	}

	break;
	
      case 5:
	/* use MPI_Testall */
	flag = 0;
	while (!flag) {
	  MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus);
	}

	break;

      case 6:
	/* use MPI_Testany */
	for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	  flag = 0;
	  while (!flag) {
	    MPI_Testany (NUM_SEND_TYPES * 2, aReq, &index, &flag, aStatus);
	  }
	}

	break;
	
      case 7:
	/* use MPI_Testsome */
	total = 0;
	while (total < NUM_SEND_TYPES * 2) {
	  outcount = 0;

	  while (!outcount) {
	    MPI_Testsome (NUM_SEND_TYPES * 2, aReq, 
			  &outcount, indices, aStatus);
	  }

	  total += outcount;
	}

	break;

      default:
	assert (0);
	break;
      }
    }
    else if (rank == 1) {
Example #18
File: cancel2.c Project: Shurakai/SimGrid
int main( int argc, char **argv )
{
    MPI_Request r1;
    int         size, rank;
    int         err = 0;
    int         partner, buf[10], flag, idx, index;
    MPI_Status  status;

    MPI_Init( &argc, &argv );

    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    
    if (size < 2) {
	printf( "Cancel test requires at least 2 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* 
     * Here is the test.  First, we ensure an unsatisfied Irecv:
     *       process 0             process size-1
     *       Sendrecv              Sendrecv
     *       Irecv                    ----
     *       Cancel                   ----
     *       Sendrecv              Sendrecv
     * Next, we confirm receipt before canceling
     *       Irecv                 Send
     *       Sendrecv              Sendrecv
     *       Cancel
     */
    if (rank == 0) {
	partner = size - 1;
	/* Cancel succeeds for wait/waitall */
	MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Wait( &r1, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++; 
	    printf( "Cancel of a receive failed where it should succeed (Wait).\n" );
	}

	MPI_Request_free( &r1 );

	/* Cancel fails for test/testall */
	buf[0] = -1;
	MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Test( &r1, &flag, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a receive succeeded where it shouldn't (Test).\n" );
	    if (buf[0] != -1) {
		printf( "Receive buffer changed even though cancel suceeded! (Test).\n" );
	    }
	}
	MPI_Request_free( &r1 );

	/* Cancel succeeds for waitany */
	MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Waitany( 1, &r1, &idx, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++;
	    printf( "Cancel of a receive failed where it should succeed (Waitany).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel fails for testany */
        buf[0] = -1;
	MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Testany( 1, &r1, &idx, &flag, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a receive succeeded where it shouldn't (Testany).\n" );
	    if (buf[0] != -1) {
		printf( "Receive buffer changed even though cancel suceeded! (Test).\n" );
	    }
	}
	MPI_Request_free( &r1 );

	/* Cancel succeeds for waitsome */
	MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Waitsome( 1, &r1, &idx, &index, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++;
	    printf( "Cancel of a receive failed where it should succeed (Waitsome).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel fails for testsome*/
        buf[0] = -1;
	MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Testsome( 1, &r1, &idx, &index, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a receive succeeded where it shouldn't (Testsome).\n" );
	    if (buf[0] != -1) {
		printf( "Receive buffer changed even though cancel suceeded! (Testsome).\n" );
	    }
	}
	MPI_Request_free( &r1 );

	if (err) {
	    printf( "Test failed with %d errors.\n", err );
	}
	else {
	    printf( " No Errors\n" );
	}
    }

    else if (rank == size - 1) {
	partner = 0;
	/* Cancel succeeds for wait/waitall */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for test/testall */
	buf[0] = 3;
	MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	/* Cancel succeeds for waitany */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for testany */
	MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	/* Cancel succeeds for waitsome */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for testsome */
	MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

    /* 
       Next test - check that a cancel for a receive request from
       MPI_PROC_NULL succeeds (there is some suspicion that some
       systems can't handle this - also, MPI_REQUEST_NULL)
     */
    /* A null request is an error. (null objects are errors unless otherwise
       allowed)
    r1 = MPI_REQUEST_NULL;
    MPI_Cancel( &r1 );
    */
	MPI_Recv_init( buf, 10, MPI_INT, MPI_PROC_NULL, 0, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Request_free( &r1 );    /* Must complete cancel.  We know that it 
				       won't complete, so we don't need to do
				       anything else */
    }

    MPI_Finalize();
    return 0;
}
Example #19
File: cancel3.c Project: MartinLidh/tddc78
int main( int argc, char **argv )
{
    MPI_Request r1;
    int         size, rank;
    int         err = 0;
    int         partner, buf[10], flag, idx, index;
    MPI_Status  status;

    MPI_Init( &argc, &argv );

    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    
    if (size < 2) {
	printf( "Cancel test requires at least 2 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* 
     * Here is the test.  First, we ensure an unsatisfied Irecv:
     *       process 0             process size-1
     *       Sendrecv              Sendrecv
     *       Irecv                    ----
     *       Cancel                   ----
     *       Sendrecv              Sendrecv
     * Next, we confirm receipt before canceling
     *       Irecv                 Send
     *       Sendrecv              Sendrecv
     *       Cancel
     */
    if (rank == 0) {
	partner = size - 1;
	/* Cancel succeeds for wait/waitall */
	MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Wait( &r1, &status );
	MPI_Test_cancelled( &status, &flag ); 
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++; 
	    printf( "Cancel of a send failed where it should succeed (Wait).\n" );
	}
	MPI_Request_free( &r1 ); 

	/* Cancel fails for test/testall */
	buf[0] = 3;
	MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Test( &r1, &flag, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a send succeeded where it shouldn't (Test).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel succeeds for waitany */
	MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Waitany( 1, &r1, &idx, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++;
	    printf( "Cancel of a send failed where it should succeed (Waitany).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel fails for testany */
        buf[0] = 3;
	MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Testany( 1, &r1, &idx, &flag, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a send succeeded where it shouldn't (Testany).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel succeeds for waitsome */
	MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Waitsome( 1, &r1, &idx, &index, &status );
	MPI_Test_cancelled( &status, &flag );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++;
	    printf( "Cancel of a send failed where it should succeed (Waitsome).\n" );
	}
	MPI_Request_free( &r1 );

	/* Cancel fails for testsome*/
        buf[0] = 3;
	MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
	MPI_Start( &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Cancel( &r1 );
	MPI_Testsome( 1, &r1, &idx, &index, &status );
	MPI_Test_cancelled( &status, &flag );
	if (flag) {
	    err++;
	    printf( "Cancel of a send succeeded where it shouldn't (Testsome).\n" );
	}
	MPI_Request_free( &r1 );

	if (err) {
	    printf( "Test failed with %d errors.\n", err );
	}
	else {
	    printf( "Test passed\n" );
	}
    }
    else if (rank == size - 1) {
	partner = 0;
	/* Cancel succeeds for wait/waitall */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	/* Cancel fails for test/testall */
	buf[0] = -1;
	MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	if (buf[0] == -1) {
	    printf( "Receive buffer did not change even though cancel should not have suceeded! (Test).\n" );
	    }

	/* Cancel succeeds for waitany */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for testany */
	buf[0] = -1;
	MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (buf[0] == -1) {
	    printf( "Receive buffer did not change even though cancel should not have suceeded! (Testany).\n" );
	    }

	/* Cancel succeeds for waitsome */
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	/* Cancel fails for testsome */
	buf[0] = -1;
	MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );

	if (buf[0] == -1) {
	    printf( "Receive buffer did not change even though cancel should not have suceeded! (Test).\n" );
	    }

    }

    MPI_Finalize();
    return 0;
}
Example #20
File: MPI-api.c Project: 8l/rose
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag;
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
Example #21
/**
 * @brief Calls MPI_Testsome on the provided queue, to check for finished operations.
 *
 * @param[in] q queue to check
 * @param[in] me pointer to the PE
 * @param[in] finish pointer to function that will perform the appropriate send/recv
 * finish functionality
 *
 * @return 0 if MPI_Testsome did not return any finished operations, 1 otherwise.
 */
static int
test_q(
       struct act_q *q,
       tw_pe *me,
       void (*finish)(tw_pe *, tw_event *, char *))
{
  int ready, i, n;

  if (!q->cur)
    return 0;

  if (MPI_Testsome(
		   q->cur,
		   q->req_list,
		   &ready,
		   q->idx_list,
		   q->status_list) != MPI_SUCCESS) {
    tw_error(
	     TW_LOC,
	     "MPI_testsome failed with %u items in %s",
	     q->cur,
	     q->name);
  }

  if (1 > ready)
    return 0;

  for (i = 0; i < ready; i++)
    {
      tw_event *e;

      n = q->idx_list[i];
      e = q->event_list[n];
      q->event_list[n] = NULL;

      finish(me, e, NULL);
    }

  /* Collapse the lists to remove any holes we left. */
  for (i = 0, n = 0; (unsigned int)i < q->cur; i++)
  {
    if (q->event_list[i])
    {
      if (i != n)
      {
	// swap the event pointers
	  q->event_list[n] = q->event_list[i];

	// copy the request handles
	  memcpy(
	      &q->req_list[n],
	      &q->req_list[i],
	      sizeof(q->req_list[0]));

      } // endif (i != n)
      n++;
    } // endif (q->event_list[i])
  }
  q->cur -= ready;

  return 1;
}
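
A minimal sketch of the same drain-and-compact idea as test_q() above, reduced to plain MPI calls. The names drain_pool, NREQ and slot_id are hypothetical, not part of the example: completed entries come back as MPI_REQUEST_NULL, are reported through the indices array, and the surviving requests are packed to the front so the next MPI_Testsome only scans active slots.

#include <mpi.h>
#include <stdio.h>

#define NREQ 8   /* hypothetical pool size */

/* Returns 1 if anything completed, 0 otherwise.  *nactive is the number of
 * live requests at the front of reqs[]; slot_id[] identifies each slot. */
static int drain_pool(int *nactive, MPI_Request reqs[], int slot_id[])
{
    int outcount, indices[NREQ];
    int i, n;

    if (*nactive == 0)
        return 0;

    MPI_Testsome(*nactive, reqs, &outcount, indices, MPI_STATUSES_IGNORE);
    if (outcount == MPI_UNDEFINED || outcount == 0)
        return 0;

    for (i = 0; i < outcount; i++)              /* service each completion */
        printf("slot %d finished\n", slot_id[indices[i]]);

    /* Completed non-persistent requests are now MPI_REQUEST_NULL; compact the
     * arrays to close the holes, as the loop in test_q() does. */
    for (i = 0, n = 0; i < *nactive; i++) {
        if (reqs[i] != MPI_REQUEST_NULL) {
            reqs[n]    = reqs[i];
            slot_id[n] = slot_id[i];
            n++;
        }
    }
    *nactive = n;
    return 1;
}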
Example #22
File: nonblocking3.c Project: R7R8/simgrid
static void complete_something_somehow(unsigned int rndnum, int numreqs, MPI_Request reqs[], int *outcount, int indices[])
{
    int i, idx, flag;

#define COMPLETION_CASES (8)
    switch (rand_range(rndnum, 0, COMPLETION_CASES)) {
        case 0:
            MPI_Waitall(numreqs, reqs, MPI_STATUSES_IGNORE);
            *outcount = numreqs;
            for (i = 0; i < numreqs; ++i) {
                indices[i] = i;
            }
            break;

        case 1:
            MPI_Testsome(numreqs, reqs, outcount, indices, MPI_STATUSES_IGNORE);
            if (*outcount == MPI_UNDEFINED) {
                *outcount = 0;
            }
            break;

        case 2:
            MPI_Waitsome(numreqs, reqs, outcount, indices, MPI_STATUSES_IGNORE);
            if (*outcount == MPI_UNDEFINED) {
                *outcount = 0;
            }
            break;

        case 3:
            MPI_Waitany(numreqs, reqs, &idx, MPI_STATUS_IGNORE);
            if (idx == MPI_UNDEFINED) {
                *outcount = 0;
            }
            else {
                *outcount = 1;
                indices[0] = idx;
            }
            break;

        case 4:
            MPI_Testany(numreqs, reqs, &idx, &flag, MPI_STATUS_IGNORE);
            if (idx == MPI_UNDEFINED) {
                *outcount = 0;
            }
            else {
                *outcount = 1;
                indices[0] = idx;
            }
            break;

        case 5:
            MPI_Testall(numreqs, reqs, &flag, MPI_STATUSES_IGNORE);
            if (flag) {
                *outcount = numreqs;
                for (i = 0; i < numreqs; ++i) {
                    indices[i] = i;
                }
            }
            else {
                *outcount = 0;
            }
            break;

        case 6:
            /* select a new random index and wait on it */
            rndnum = gen_prn(rndnum);
            idx = rand_range(rndnum, 0, numreqs);
            MPI_Wait(&reqs[idx], MPI_STATUS_IGNORE);
            *outcount = 1;
            indices[0] = idx;
            break;

        case 7:
            /* select a new random index and wait on it */
            rndnum = gen_prn(rndnum);
            idx = rand_range(rndnum, 0, numreqs);
            MPI_Test(&reqs[idx], &flag, MPI_STATUS_IGNORE);
            *outcount = (flag ? 1 : 0);
            indices[0] = idx;
            break;

        default:
            assert(0);
            break;
    }
#undef COMPLETION_CASES
}
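
One way a caller might drive the helper above until every request has finished, sketched with hypothetical names (complete_all, MAX_REQS, seed); gen_prn() is the test's own PRNG. Because the Waitall/Testall branches keep reporting indices of requests that have already gone to MPI_REQUEST_NULL, the sketch tracks per-slot completion instead of just summing outcount.

#define MAX_REQS 32   /* hypothetical upper bound on numreqs */

static void complete_all(unsigned int seed, int numreqs, MPI_Request reqs[])
{
    int indices[MAX_REQS];
    int done[MAX_REQS] = { 0 };
    int outcount, ndone = 0, i;
    unsigned int rndnum = seed;

    while (ndone < numreqs) {
        rndnum = gen_prn(rndnum);   /* same PRNG the test uses */
        complete_something_somehow(rndnum, numreqs, reqs, &outcount, indices);
        for (i = 0; i < outcount; i++) {
            if (!done[indices[i]]) {   /* an index may be reported repeatedly */
                done[indices[i]] = 1;
                ndone++;
            }
        }
    }
}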
Example #23
/*! 
 * \brief Complete the message propagation process
 * \param[in] node address of CommQueue node
 * \return Return Code
 *
 * This routine is to be executed during the ::MB_COMM_OLD_PROPAGATION stage.
 * 
 * Steps:
 * -# if node->pending_in != 0, check receives using MPI_Testsome().
 *    For each completed comm:
 *  -# Decrement node->pending_in
 *  -# Check buffer header if delayed_filtering is set
 *   - If set, run each message in buffer through node->board->filter
 *     before adding to local board
 *   - If not set, add all messages in buffer to local board
 *  -# Free node->inbuf[i]
 * 
 * -# if node->pending_out != 0, check sends using MPI_Testsome().
 *    For each completed comm:
 *  -# Decrement node->pending_out
 *  -# if node->flag_shareOutbuf == ::MB_FALSE, free node->outbuf[i]
 *  -# if node->flag_shareOutbuf == ::MB_TRUE, free node->outbuf[0] if
 *     node->pending_out == 0
 *
 * -# Check if comms completed?
 *  - if node->pending_in == 0 and node->pending_out == 0
 *   -# free node->incount
 *   -# free node->inbuf
 *   -# free node->outbuf
 *   -# free node->sendreq
 *   -# free node->recvreq
 *   -# Capture node->board->syncLock
 *   -# set node->board->syncCompleted = ::MB_TRUE
 *   -# Release node->board->syncLock
 *   -# Signal node->board->syncCond
 *   -# set node->stage = ::MB_COMM_END
 *   -# return ::MB_SUCCESS_2
 *  - else
 *   -# return ::MB_SUCCESS
 * 
 * 
 * 
 * Post:
 * - if node->pending_in == 0 and node->pending_out == 0
 *  - node->incount == \c NULL
 *  - node->inbuf == \c NULL
 *  - node->outbuf == \c NULL
 *  - node->sendreq == \c NULL
 *  - node->recvreq == \c NULL
 *  - return code == MB_SUCCESS_2
 *  - node->board->syncCompleted == ::MB_TRUE
 *  - node->stage == ::MB_COMM_END
 */ 
int MBI_CommRoutine_OLD_CompletePropagation(struct MBIt_commqueue *node) {
    
    int i, m, p, rc;
    int completed;
    int filter_required;
    void *ptr_new, *msg;
    char *header_byte, *bufptr;

    assert(node->stage == MB_COMM_OLD_PROPAGATION);
    assert(node->outcount == NULL);
    assert(node->incount  != NULL);
    assert(node->sendreq  != NULL);
    assert(node->recvreq  != NULL);
    assert(node->inbuf    != NULL);
    assert(node->outbuf   != NULL);
    assert(node->board    != NULL);
    
    /* ---------- check for completed sends -------------- */
    if (node->pending_out > 0)
    {
        /* check if any of the sends completed */
        rc = MPI_Testsome(MBI_CommSize, node->sendreq, &completed,
                MBI_comm_indices, MPI_STATUSES_IGNORE);
        assert(rc == MPI_SUCCESS);
        if (rc != MPI_SUCCESS) return MB_ERR_MPI;
        
        if (completed > 0)
        {
            /* decrement counter */
            node->pending_out -= completed;

            if (node->flag_shareOutbuf == MB_FALSE)
            {
                assert(node->flag_fdrFallback == MB_FALSE);
                
                /* free buffer of completed sends */
                for (p = 0; p < completed; p++) 
                {
                    i = MBI_comm_indices[p];
                    assert(i != MBI_CommRank);
                    assert(node->outbuf[i] != NULL);
                    assert(node->sendreq[i] == MPI_REQUEST_NULL);
                    
                    free(node->outbuf[i]);
                    node->outbuf[i] = NULL;
                    
                    P_INFO("COMM: (Board %d) send to P%d completed", (int)node->mb, i);
                }
            }
            else if (node->pending_out == 0) /* outbuf shared */
            {
                assert(node->outbuf[0] != NULL);
                assert(node->flag_fdrFallback == MB_TRUE || 
                        node->board->filter == (MBIt_filterfunc)NULL);
                
                /* free shared buffer */
                free(node->outbuf[0]);
                node->outbuf[0] = NULL;
                
                P_INFO("COMM: (Board %d) all sends completed", (int)node->mb);
            }
        }
    }
    
    /* ---------- check for completed receives -------------- */
    if (node->pending_in > 0)
    {
        /* check if any of the receives completed */
        rc = MPI_Testsome(MBI_CommSize, node->recvreq, &completed,
                MBI_comm_indices, MPI_STATUSES_IGNORE);
        assert(rc == MPI_SUCCESS);
        if (rc != MPI_SUCCESS) return MB_ERR_MPI;
        
        if (completed > 0)
        {
            /* decrement counter */
            node->pending_in -= completed;
            
            /* for each completed receive, load messages and clear buffer */
            for (p = 0; p < completed; p++)
            {
                /* which receive completed? */
                i = MBI_comm_indices[p];
                
                assert(node->inbuf[i] != NULL);
                assert(node->recvreq[i] == MPI_REQUEST_NULL); 
                
                /* get reference to header byte */
                header_byte = (char*)(node->inbuf[i]);
                
                /* get flag indicating if filter should be run */
                filter_required = BIT_IS_SET(*header_byte, MBI_COMM_HEADERBYTE_FDR);                 
                
                P_INFO("COMM: (Board %d) receive from P%d completed", (int)node->mb, i);
                #ifdef _EXTRA_INFO
                if (filter_required)
                {
                    P_INFO("COMM: (Board %d) performing delayed filtering on messages", 
                            (int)node->mb);
                }
                #endif
                
                /* location of message buffer is after header (of size 1 byte) */
                bufptr = (char*)(node->inbuf[i]) + 1;
                
                /* for each received message */
                for (m = 0; m < node->incount[i]; m++)
                {
                    /* get pointer to message in buffer */
                    msg = (void*)(bufptr + (node->board->data->elem_size * m));
                    
                    /* do we need to run msg thru filter before storing? */
                    if (filter_required)
                    {
                        assert(node->board->filter != (MBIt_filterfunc)NULL);
                        if (0 == (*node->board->filter)(msg, MBI_CommRank))
                           continue; /* we don't want this message */
                    }
                    
                    /* add new node to local board */
                    rc = pl_newnode(node->board->data, &ptr_new);
                    assert(rc == PL_SUCCESS);
                    /* copy message into node */
                    memcpy(ptr_new, msg, node->board->data->elem_size);

                }
                
                /* we can now free the buffer */
                free(node->inbuf[i]);
                node->inbuf[i] = NULL;

            }
        }
        
        #ifdef _EXTRA_INFO
        if (node->pending_in == 0)
        {
            P_INFO("COMM: (Board %d) all receives completed", (int)node->mb);
        }
        #endif
    }
    
    /* ------------ if all comms completed, clean up and end ------------ */
    
    if (node->pending_in == 0 && node->pending_out == 0)
    {
        /* free up memory */
        free(node->incount);  node->incount = NULL;
        free(node->inbuf);    node->inbuf = NULL;
        free(node->outbuf);   node->outbuf = NULL;
        free(node->sendreq);  node->sendreq = NULL;
        free(node->recvreq);  node->recvreq = NULL;
        
        /* move cursor */
        node->board->synced_cursor = node->board->data->count_current;
        
        /* mark sync as completed */
        node->board->syncCompleted = MB_TRUE;
        
        /* move to end state and indicate that we're done */
        P_INFO("COMM: (Board %d) sync process completed", node->mb);
        node->stage = MB_COMM_END;
        return MB_SUCCESS_2; /* node can be removed from queue */
    }
    else
    {
        /* there are still pending comms. No state change  */
        return MB_SUCCESS;
    }
}
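
The send-side idiom above, stripped to its essentials: a fixed per-peer request array, one MPI_Testsome call per visit (no spin loop), a pending counter decremented by the number of completions, and per-peer buffers freed by index. The names progress_sends, NPEERS and peer_buf are hypothetical stand-ins, not MBI API.

#include <mpi.h>
#include <stdlib.h>

#define NPEERS 8   /* hypothetical communicator size */

/* Returns the number of sends still pending, or -1 on MPI error. */
static int progress_sends(int *pending, MPI_Request reqs[NPEERS],
                          void *peer_buf[NPEERS])
{
    int completed, idx[NPEERS], p, i;

    if (*pending == 0)
        return 0;

    if (MPI_Testsome(NPEERS, reqs, &completed, idx,
                     MPI_STATUSES_IGNORE) != MPI_SUCCESS)
        return -1;

    if (completed == MPI_UNDEFINED || completed == 0)
        return *pending;            /* no progress on this visit */

    *pending -= completed;
    for (p = 0; p < completed; p++) {
        i = idx[p];                 /* which peer's send finished */
        free(peer_buf[i]);          /* release that peer's buffer */
        peer_buf[i] = NULL;
    }
    return *pending;
}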
Example #24
File: main.c Project: RSE-Cambridge/IPM
int main( int argc, char* argv[] )
{
  int myrank, nprocs;
  int val, val2;
  int idx, idx2[2];
  int flag;


  MPI_Request req;
  MPI_Request req2[2];
  MPI_Status stat;

  MPI_Init( &argc, &argv );

  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
  MPI_Comm_size( MPI_COMM_WORLD, &nprocs );

  if( nprocs<2 ) {
    fprintf(stderr, "Need at least 2 procs to run this program\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
    return 1;
  }

  /* MPI_STATUS_IGNORE in MPI_Recv */
  switch(myrank) {
  case 0:
    MPI_Send( &val, 1, MPI_INTEGER, 1, 33, MPI_COMM_WORLD);
    break;

  case 1:
    MPI_Recv( &val, 1, MPI_INTEGER, 0, 33, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
    break;
  }

  /* MPI_STATUS_IGNORE in MPI_Wait, MPI_Test */
  switch(myrank) {
  case 0:
    MPI_Isend( &val, 1, MPI_INTEGER, 1, 34, MPI_COMM_WORLD, &req);
    MPI_Test( &req, &flag, MPI_STATUS_IGNORE );
    MPI_Wait( &req, MPI_STATUS_IGNORE );

    break;

  case 1:
    MPI_Recv( &val, 1, MPI_INTEGER, 0, 34, MPI_COMM_WORLD, &stat );
    break;
  }

  /* MPI_STATUS_IGNORE in MPI_Waitany, MPI_Testany */
  switch(myrank) {
  case 0:
    MPI_Isend( &val,  1, MPI_INTEGER, 1, 35, MPI_COMM_WORLD, &(req2[0]));
    MPI_Isend( &val2, 1, MPI_INTEGER, 1, 36, MPI_COMM_WORLD, &(req2[1]));
    MPI_Testany( 2, req2, &idx, &flag, MPI_STATUS_IGNORE );
    MPI_Waitany( 2, req2, &idx, MPI_STATUS_IGNORE );
    break;

  case 1:
    MPI_Recv( &val,  1, MPI_INTEGER, 0, 35, MPI_COMM_WORLD, &stat );
    MPI_Recv( &val2, 1, MPI_INTEGER, 0, 36, MPI_COMM_WORLD, &stat );
    break;
  }

  /* MPI_STATUSES_IGNORE in MPI_Waitall, MPI_Testall */
  switch(myrank) {
  case 0:
    MPI_Isend( &val,  1, MPI_INTEGER, 1, 35, MPI_COMM_WORLD, &(req2[0]));
    MPI_Isend( &val2, 1, MPI_INTEGER, 1, 36, MPI_COMM_WORLD, &(req2[1]));
    MPI_Testall( 2, req2, &flag, MPI_STATUSES_IGNORE );
    MPI_Waitall( 2, req2, MPI_STATUSES_IGNORE );
    break;

  case 1:
    MPI_Recv( &val,  1, MPI_INTEGER, 0, 35, MPI_COMM_WORLD, &stat );
    MPI_Recv( &val2, 1, MPI_INTEGER, 0, 36, MPI_COMM_WORLD, &stat );
    break;
  }

  /* MPI_STATUSES_IGNORE in MPI_Waitsome */
  switch(myrank) {
  case 0:
    MPI_Isend( &val,  1, MPI_INTEGER, 1, 35, MPI_COMM_WORLD, &(req2[0]));
    MPI_Isend( &val2, 1, MPI_INTEGER, 1, 36, MPI_COMM_WORLD, &(req2[1]));
    MPI_Testsome( 2, req2, &idx, idx2, MPI_STATUSES_IGNORE );
    MPI_Waitsome( 2, req2, &idx, idx2, MPI_STATUSES_IGNORE );
    break;

  case 1:
    MPI_Recv( &val,  1, MPI_INTEGER, 0, 35, MPI_COMM_WORLD, &stat );
    MPI_Recv( &val2, 1, MPI_INTEGER, 0, 36, MPI_COMM_WORLD, &stat );
    break;
  }




  MPI_Barrier(MPI_COMM_WORLD);
  fprintf(stderr, "%5d: DONE\n", myrank);

  MPI_Finalize();
  return 0;
}
Example #25
File: ddi_server.c Project: ryanolson/ddi
/* -------------------------------------------------------------------- *\
   DDI_Server()
   ============
   
   Called by DDI processes that specialize to become data servers.
\* -------------------------------------------------------------------- */
   void DDI_Server() {
   
   /* --------------- *\
      Local Variables
   \* --------------- */
      int from;
      char ack=57;
      char server=1;
      DDI_Patch *msg;
      DDI_Patch patch;
      size_t counter_value = 0;
      const DDI_Comm *comm = (const DDI_Comm *) Comm_find(DDI_COMM_WORLD);
 

    # ifdef CRAY_MPI
      int i;
      int nfinished =  0;
      int last      = -1;
      int size      = sizeof(DDI_Patch);
      index_ds = (int *) Malloc(comm->np*sizeof(int));
      p = (DDI_Patch *) Malloc(comm->np*sizeof(DDI_Patch));
      s = (MPI_Status *) Malloc(comm->np*sizeof(MPI_Status));
      r = (MPI_Request *) Malloc(comm->np*sizeof(MPI_Request));

   /* ----------------------------------------------------------- *\
      Post IRecvs for remote data requests from all the processes
   \* ----------------------------------------------------------- */
      DEBUG_OUT(LVL2,(stdout,"%s: (DS) Posting MPI_IRecvs for data requests.\n",DDI_Id()))
      for(i=0; i<comm->np; i++) {
         MPI_Irecv(&p[i],size,MPI_BYTE,i,37,comm->world_comm,&r[i]);
      }
      NRequests = comm->np;
    # endif

      DEBUG_OUT(LVL2,(stdout,"%s: (DS) Starting DDI data server.\n",DDI_Id()))

   /* -------------------- *\
      DDI Data Server Loop
   \* -------------------- */
      do {
 
       # ifdef CRAY_MPI
         MPI_Testsome(NRequests,r,&nfinished,index_ds,s); 
         for(i=0; i<nfinished; i++) {
            msg = &p[index_ds[i]];
            from  = s[i].MPI_SOURCE;
       # else
         DDI_Recv_request(&patch,&from);
         msg = (DDI_Patch *) &patch;
       # endif
   
         switch(msg->oper) {

           case DDI_DEBUGFLAG:
              DebugOutput(msg->handle);
              break;

   
           case DDI_MEMORY:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_MEMORY request.\n",DDI_Id()))
              DDI_Memory_server(msg->size);
              Comm_send(&ack,1,from,comm);
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) DDI_MEMORY requested completed.\n",DDI_Id()))
              break;
   
   
           case DDI_CREATE:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_CREATE[%i] request.\n",DDI_Id(),msg->handle))
              DDI_Index_create(msg);
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) Array[%i] created successfully.\n",DDI_Id(),msg->handle))
              break;
   
   
           case DDI_DESTROY:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_DESTROY[%i] request.\n",DDI_Id(),msg->handle))
              DDI_Index_destroy(msg); 
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) Array[%i] destroyed successfully.\n",DDI_Id(),msg->handle))
              break;
   
   
           case DDI_ZERO:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_ZERO request from %i.\n",DDI_Id(),from))
              DDI_Array_zero(msg->handle);
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) Finished DDI_ZERO request from %i.\n",DDI_Id(),from))
              break;
   
   
           case DDI_GET:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_GET request from %i.\n",DDI_Id(),from))
              DDI_Get_server(msg,from);
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) Finished DDI_GET request from %i.\n",DDI_Id(),from))
              break;
   
           
           case DDI_PUT:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_PUT request from %i.\n",DDI_Id(),from))
              DDI_Put_server(msg,from);
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) Finished DDI_PUT request from %i.\n",DDI_Id(),from))
              break;
   
   
           case DDI_ACC:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_ACC request from %i.\n",DDI_Id(),from))
              DDI_Acc_server(msg,from);
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) Finished DDI_ACC request from %i.\n",DDI_Id(),from))
              break;
              
              
           case DDI_GETACC:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_GETACC request from %i.\n",DDI_Id(),from))
              DDI_GetAcc_server(msg,from);
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) Finished DDI_GETACC request from %i.\n",DDI_Id(),from))
              break;
              
              
           case DDI_DLBRESET:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_DLBRESET request from %i.\n",DDI_Id(),from))
              DDI_DLBReset_local();
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) Finished DDI_DLBRESET request from %i.\n",DDI_Id(),from))
              break;
   
    
           case DDI_DLBNEXT:
              DEBUG_OUT(LVL2,(stdout,"%s: (DS) Received DDI_DLBNEXT request from %i.\n",DDI_Id(),from))
              DDI_DLBNext_local(&counter_value);
              Comm_send(&counter_value,sizeof(size_t),from,comm);
              DEBUG_OUT(LVL3,(stdout,"%s: (DS) Finished DDI_DLBNEXT request from %i.\n",DDI_Id(),from))
              break;


           case DDI_GDLBRESET:
              DDI_GDLBReset_local();
              break;
      
      
           case DDI_GDLBNEXT: 
              DDI_GDLBNext_local(&counter_value);
              DDI_Send(&counter_value,sizeof(size_t),from);
              break;

   
           case DDI_QUIT: /* Quit server loop, synchronize, and exit */
             DEBUG_OUT(LVL3,(stdout,"%s: (DS) Received DDI_QUIT request\n",DDI_Id()))
          /* if(me == np) DB_Close(); */
             DDI_Memory_finalize(); 
             Comm_send(&ack,1,from,comm);
             server=0;
             break;

   
          /* --------------------------------------------- *\
             Clean-up distributed-memory & check for leaks
          \* --------------------------------------------- */
/*
           case DB_CREATE_ENTRY:
             DEBUG_OUT(LVL3,(stdout,"%s: Recieved DB_CREATE_ENTRY request.\n",DDI_Id()))
             if(me != np) {
                fprintf(stdout,"%s: recieved DB request but is not master data server.\n",DDI_Id());
                Fatal_error(911);
             }
             DB_Create_server(&msg,from);
             DEBUG_OUT(LVL3,(stdout,"%s: Finished DB_CREATE_ENTRY request.\n",DDI_Id()))
             break;

           case DB_READ_ENTRY:
             DEBUG_OUT(LVL3,(stdout,"%s: Recieved DB_READ_ENTRY request.\n",DDI_Id()))
             if(me != np) {
                fprintf(stdout,"%s: recieved DB request but is not master data server.\n",DDI_Id());
                Fatal_error(911);
             }
             DB_Read_server(&msg,from);
             DEBUG_OUT(LVL3,(stdout,"%s: Finished DB_READ_ENTRY request.\n",DDI_Id()))
             break;

           case DB_WRITE_ENTRY:
             DEBUG_OUT(LVL3,(stdout,"%s: Recieved DB_WRITE_ENTRY request.\n",DDI_Id()))
             if(me != np) {
                fprintf(stdout,"%s: recieved DB request but is not master data server.\n",DDI_Id());
                Fatal_error(911);
             }
             DB_Write_server(&msg,from);
             DEBUG_OUT(LVL3,(stdout,"%s: Finished DB_WRITE_ENTRY request.\n",DDI_Id()))
             break;
*/
         }

     # ifdef CRAY_MPI
       /* ----------------------------------------------------- *\
          Repost the asynchronous IRecv for remote data requests
       \* ----------------------------------------------------- */
          MPI_Irecv(&p[index_ds[i]],size,MPI_BYTE,from,37,comm->world_comm,&r[index_ds[i]]);
       }
     # endif

     } while(server);


   /* -------------------------------------------------------------------------- *\
      If using MPI and not TCP sockets -- cancel/release the persistent receives
   \* -------------------------------------------------------------------------- */
    # if defined DDI_MPI && !defined DDI_SOC

      /* Working on this bit */

    # endif


   /* ------------------------------- *\
      Finalize communication and exit
   \* ------------------------------- */
      DDI_Finalize();
      exit(0);
   }
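
The CRAY_MPI branch above boils down to a request-pool server: one MPI_Irecv per peer, MPI_Testsome to find serviceable slots, a handler per completion, and an immediate repost of each slot. A hedged, self-contained version of just that skeleton (serve() and the handler callback are hypothetical, not DDI API; tag 37 is the value the example posts its receives with):

#include <mpi.h>

#define TAG 37

static void serve(MPI_Comm comm, int np, int msg[], MPI_Request req[],
                  int idx[], MPI_Status st[],
                  void (*handle)(int payload, int source, int *running))
{
    int i, k, nfinished, running = 1;

    for (i = 0; i < np; i++)        /* post one receive slot per peer */
        MPI_Irecv(&msg[i], 1, MPI_INT, i, TAG, comm, &req[i]);

    while (running) {
        MPI_Testsome(np, req, &nfinished, idx, st);
        if (nfinished == MPI_UNDEFINED)
            break;                  /* no active requests left */
        for (k = 0; k < nfinished; k++) {
            i = idx[k];
            handle(msg[i], st[k].MPI_SOURCE, &running);
            /* repost the slot so this peer can send another request */
            MPI_Irecv(&msg[i], 1, MPI_INT, i, TAG, comm, &req[i]);
        }
    }
    /* Note: on exit, np receives remain posted; a real server would cancel
     * or otherwise complete them before MPI_Finalize. */
}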