FC_FUNC(mpi_waitsome, MPI_WAITSOME)
    (int *incount, int *array_of_requests, int *outcount,
     int *array_of_indices, int *array_of_statuses, int *ierr)
{
    *ierr = MPI_Waitsome(*incount, array_of_requests, outcount,
                         array_of_indices, mpi_c_statuses(array_of_statuses));
}
int
SAMRAI_MPI::Waitsome(
   int incount,
   Request* array_of_requests,
   int* outcount,
   int* array_of_indices,
   Status* array_of_statuses)
{
#ifndef HAVE_MPI
   NULL_USE(incount);
   NULL_USE(array_of_requests);
   NULL_USE(outcount);
   NULL_USE(array_of_indices);
   NULL_USE(array_of_statuses);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Waitsome is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Waitsome(incount, array_of_requests, outcount,
                          array_of_indices, array_of_statuses);
   }
#endif
   return rval;
}
SEXP spmd_waitsome(SEXP R_count){
    int countn = INTEGER(R_count)[0];
    SEXP R_indices;

    /* Element 0 of the returned vector holds outcount; elements 1..countn
       hold the indices of the completed requests. */
    PROTECT(R_indices = allocVector(INTSXP, countn + 1));
    spmd_errhandler(
        MPI_Waitsome(countn, request, &INTEGER(R_indices)[0],
            &INTEGER(R_indices)[1], status));
    UNPROTECT(1);
    return(R_indices);
} /* End of spmd_waitsome(). */
int main(int argc, char *argv[])
{
    int provided;
    MPI_Request request;
    int outcount = -1;
    int indices[1] = { -1 };
    MPI_Status status;
    char *env;

    env = getenv("MPITEST_VERBOSE");
    if (env) {
        if (*env != '0')
            verbose = 1;
    }

    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided != MPI_THREAD_MULTIPLE) {
        printf("This test requires MPI_THREAD_MULTIPLE\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    IF_VERBOSE(("Post Init ...\n"));

    /* Complete a generalized request with MPI_Wait. */
    MPI_Grequest_start(query_fn, free_fn, cancel_fn, NULL, &request);
    grequest = request;         /* copy the handle */
    MTest_Start_thread(do_work, &grequest);
    IF_VERBOSE(("Waiting ...\n"));
    MPI_Wait(&request, &status);
    MTest_Join_threads();

    /* Complete a generalized request with MPI_Waitsome. */
    MPI_Grequest_start(query_fn, free_fn, cancel_fn, NULL, &request);
    grequest = request;         /* copy the handle */
    MTest_Start_thread(do_work, &grequest);
    IF_VERBOSE(("Waiting ...\n"));
    MPI_Waitsome(1, &request, &outcount, indices, &status);
    MTest_Join_threads();

    /* Complete a generalized request with MPI_Waitall. */
    MPI_Grequest_start(query_fn, free_fn, cancel_fn, NULL, &request);
    grequest = request;         /* copy the handle */
    MTest_Start_thread(do_work, &grequest);
    IF_VERBOSE(("Waiting ...\n"));
    MPI_Waitall(1, &request, &status);
    MTest_Join_threads();

    IF_VERBOSE(("Goodbye !!!\n"));
    MTest_Finalize(0);
    MPI_Finalize();
    return 0;
}
FORT_DLL_SPEC void FORT_CALL mpi_waitsome_ ( MPI_Fint *v1, MPI_Fint v2[], MPI_Fint *v3, MPI_Fint v4[], MPI_Fint v5[], MPI_Fint *ierr ){
#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif
    if (v5 == MPI_F_STATUSES_IGNORE) { v5 = (MPI_Fint *)MPI_STATUSES_IGNORE; }

    *ierr = MPI_Waitsome( (int)*v1, (MPI_Request *)(v2), v3, v4, (MPI_Status *)v5 );

    /* Convert the returned 0-based C indices to 1-based Fortran indices;
       negative values (e.g. MPI_UNDEFINED) are left unchanged. */
    {int li;
     for (li=0; li<*v3; li++) {
        if (v4[li] >= 0) v4[li] += 1;
     }
    }
}
void AsyncAcks::wait_and_cleanup() {
    //if no requests, then nothing to do
    if (next_slot_ == 0)
        return;

    //check for satisfied requests
    int outcount;
    int array_of_indices[next_slot_];
    SIPMPIUtils::check_err(
            MPI_Waitsome(next_slot_, posted_async_, &outcount, array_of_indices,
                    MPI_STATUSES_IGNORE));
//  std::cout << " in wait_and_cleanup, "<< outcount << " acks released" << std::endl << std::flush;
    remove_completed_requests(outcount, array_of_indices);
}
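The compaction helper remove_completed_requests() is not shown above. A minimal sketch of one way such a step could look, assuming the requests are non-persistent so MPI_Waitsome has already set the completed slots to MPI_REQUEST_NULL; the function name and signature are illustrative only, not the SIP implementation:

/* Hypothetical sketch: compact a dense request array after MPI_Waitsome.
 * Completed (non-persistent) requests were set to MPI_REQUEST_NULL by MPI,
 * so removing the NULL handles is enough; the returned indices are not
 * needed for the compaction itself. */
#include <mpi.h>

static int compact_requests(MPI_Request reqs[], int nreqs,
                            int outcount, const int indices[])
{
    (void)outcount;
    (void)indices;
    int kept = 0;
    for (int i = 0; i < nreqs; ++i) {
        if (reqs[i] != MPI_REQUEST_NULL) {
            reqs[kept++] = reqs[i];   /* keep the still-pending requests */
        }
    }
    return kept;   /* new count of pending requests */
}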
JNIEXPORT jobjectArray JNICALL Java_mpi_Request_waitSomeStatus(
        JNIEnv *env, jclass clazz, jlongArray requests)
{
    int incount = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    MPI_Status *statuses = (MPI_Status*)calloc(incount, sizeof(MPI_Status));
    int *indices = (int*)calloc(incount, sizeof(int));
    int outcount;
    int rc = MPI_Waitsome(incount, cReq, &outcount, indices, statuses);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    jobjectArray jStatuses = newStatusesIndices(env, statuses, indices, outcount);
    free(statuses);
    free(indices);
    return jStatuses;
}
JNIEXPORT jintArray JNICALL Java_mpi_Request_waitSome(
        JNIEnv *env, jclass clazz, jlongArray requests)
{
    int incount = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    int *indices = (int*)calloc(incount, sizeof(int));
    int outcount;
    int rc = MPI_Waitsome(incount, cReq, &outcount, indices, MPI_STATUSES_IGNORE);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    jintArray jindices = NULL;

    if(outcount != MPI_UNDEFINED)
    {
        jindices = (*env)->NewIntArray(env, outcount);
        setIndices(env, jindices, indices, outcount);
    }

    free(indices);
    return jindices;
}
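The null return above relies on the MPI_UNDEFINED contract of MPI_Waitsome: when the request array contains no active handles, the call returns immediately with outcount set to MPI_UNDEFINED. A small self-contained illustration in plain C (not part of the Open MPI Java bindings):

/* All requests are MPI_REQUEST_NULL, so MPI_Waitsome returns at once
 * with outcount == MPI_UNDEFINED instead of blocking. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Request reqs[4] = { MPI_REQUEST_NULL, MPI_REQUEST_NULL,
                            MPI_REQUEST_NULL, MPI_REQUEST_NULL };
    int indices[4];
    int outcount;

    MPI_Init(&argc, &argv);
    MPI_Waitsome(4, reqs, &outcount, indices, MPI_STATUSES_IGNORE);
    if (outcount == MPI_UNDEFINED) {
        printf("no active requests: outcount == MPI_UNDEFINED\n");
    }
    MPI_Finalize();
    return 0;
}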
int main (int argc, char **argv) { int nprocs = -1; int rank = -1; MPI_Comm comm = MPI_COMM_WORLD; char processor_name[128]; int namelen = 128; int bbuf[(BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES]; int buf[BUF_SIZE * 2 * NUM_SEND_TYPES]; int i, j, k, at_size, send_t_number, index, outcount, total, flag; int num_errors, error_count, indices[2 * NUM_SEND_TYPES]; MPI_Request aReq[2 * NUM_SEND_TYPES]; MPI_Status aStatus[2 * NUM_SEND_TYPES]; /* init */ MPI_Init (&argc, &argv); MPI_Comm_size (comm, &nprocs); MPI_Comm_rank (comm, &rank); MPI_Get_processor_name (processor_name, &namelen); printf ("(%d) is alive on %s\n", rank, processor_name); fflush (stdout); MPI_Buffer_attach (bbuf, sizeof(int) * (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES); if (rank == 0) { /* set up persistent sends... */ send_t_number = NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES; MPI_Send_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Send_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Bsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Bsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Rsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Rsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Ssend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Ssend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); } for (k = 0; k < (NUM_COMPLETION_MECHANISMS * 2); k++) { if (rank == 0) { /* initialize all of the send buffers */ for (j = 0; j < NUM_SEND_TYPES; j++) { for (i = 0; i < BUF_SIZE; i++) { buf[2 * j * BUF_SIZE + i] = i; buf[((2 * j + 1) * BUF_SIZE) + i] = BUF_SIZE - 1 - i; } } } else if (rank == 1) { /* zero out all of the receive buffers */ bzero (buf, sizeof(int) * BUF_SIZE * 2 * NUM_SEND_TYPES); } MPI_Barrier(MPI_COMM_WORLD); if (rank == 0) { /* set up transient sends... */ send_t_number = 0; MPI_Isend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Isend (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Ibsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Ibsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; /* Barrier to ensure receives are posted for rsends... 
*/ MPI_Barrier(MPI_COMM_WORLD); MPI_Irsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Irsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Issend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Issend (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); /* just to be paranoid */ send_t_number++; assert (send_t_number == NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES); /* start the persistent sends... */ if (k % 2) { MPI_Startall (NUM_PERSISTENT_SEND_TYPES * 2, &aReq[2 * send_t_number]); } else { for (j = 0; j < NUM_PERSISTENT_SEND_TYPES * 2; j++) { MPI_Start (&aReq[2 * send_t_number + j]); } } /* NOTE: Changing the send buffer of a Bsend is NOT an error... */ for (j = 0; j < NUM_SEND_TYPES; j++) { /* muck the buffers */ buf[j * 2 * BUF_SIZE + (BUF_SIZE >> 1)] = BUF_SIZE; } printf ("USER MSG: 6 change send buffer errors in iteration #%d:\n", k); /* complete the sends */ switch (k/2) { case 0: /* use MPI_Wait */ for (j = 0; j < NUM_SEND_TYPES * 2; j++) { MPI_Wait (&aReq[j], &aStatus[j]); } break; case 1: /* use MPI_Waitall */ MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus); break; case 2: /* use MPI_Waitany */ for (j = 0; j < NUM_SEND_TYPES * 2; j++) { MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus); } break; case 3: /* use MPI_Waitsome */ total = 0; while (total < NUM_SEND_TYPES * 2) { MPI_Waitsome (NUM_SEND_TYPES * 2, aReq, &outcount, indices, aStatus); total += outcount; } break; case 4: /* use MPI_Test */ for (j = 0; j < NUM_SEND_TYPES * 2; j++) { flag = 0; while (!flag) { MPI_Test (&aReq[j], &flag, &aStatus[j]); } } break; case 5: /* use MPI_Testall */ flag = 0; while (!flag) { MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus); } break; case 6: /* use MPI_Testany */ for (j = 0; j < NUM_SEND_TYPES * 2; j++) { flag = 0; while (!flag) { MPI_Testany (NUM_SEND_TYPES * 2, aReq, &index, &flag, aStatus); } } break; case 7: /* use MPI_Testsome */ total = 0; while (total < NUM_SEND_TYPES * 2) { outcount = 0; while (!outcount) { MPI_Testsome (NUM_SEND_TYPES * 2, aReq, &outcount, indices, aStatus); } total += outcount; } break; default: assert (0); break; } } else if (rank == 1) {
static void complete_something_somehow(unsigned int rndnum, int numreqs, MPI_Request reqs[],
                                       int *outcount, int indices[])
{
    int i, idx, flag;

#define COMPLETION_CASES (8)
    switch (rand_range(rndnum, 0, COMPLETION_CASES)) {
        case 0:
            MPI_Waitall(numreqs, reqs, MPI_STATUSES_IGNORE);
            *outcount = numreqs;
            for (i = 0; i < numreqs; ++i) {
                indices[i] = i;
            }
            break;

        case 1:
            /* the array routines take MPI_STATUSES_IGNORE, not MPI_STATUS_IGNORE */
            MPI_Testsome(numreqs, reqs, outcount, indices, MPI_STATUSES_IGNORE);
            if (*outcount == MPI_UNDEFINED) {
                *outcount = 0;
            }
            break;

        case 2:
            MPI_Waitsome(numreqs, reqs, outcount, indices, MPI_STATUSES_IGNORE);
            if (*outcount == MPI_UNDEFINED) {
                *outcount = 0;
            }
            break;

        case 3:
            MPI_Waitany(numreqs, reqs, &idx, MPI_STATUS_IGNORE);
            if (idx == MPI_UNDEFINED) {
                *outcount = 0;
            }
            else {
                *outcount = 1;
                indices[0] = idx;
            }
            break;

        case 4:
            MPI_Testany(numreqs, reqs, &idx, &flag, MPI_STATUS_IGNORE);
            if (idx == MPI_UNDEFINED) {
                *outcount = 0;
            }
            else {
                *outcount = 1;
                indices[0] = idx;
            }
            break;

        case 5:
            MPI_Testall(numreqs, reqs, &flag, MPI_STATUSES_IGNORE);
            if (flag) {
                *outcount = numreqs;
                for (i = 0; i < numreqs; ++i) {
                    indices[i] = i;
                }
            }
            else {
                *outcount = 0;
            }
            break;

        case 6:
            /* select a new random index and wait on it */
            rndnum = gen_prn(rndnum);
            idx = rand_range(rndnum, 0, numreqs);
            MPI_Wait(&reqs[idx], MPI_STATUS_IGNORE);
            *outcount = 1;
            indices[0] = idx;
            break;

        case 7:
            /* select a new random index and test it */
            rndnum = gen_prn(rndnum);
            idx = rand_range(rndnum, 0, numreqs);
            MPI_Test(&reqs[idx], &flag, MPI_STATUS_IGNORE);
            *outcount = (flag ? 1 : 0);
            indices[0] = idx;
            break;

        default:
            assert(0);
            break;
    }
#undef COMPLETION_CASES
}
int main(int argc, char **argv) { int errs = 0; MPI_Status status, *status_array = 0; int count = 0, flag, idx, rc, errlen, *indices=0, outcnt; MPI_Request *reqs = 0; char errmsg[MPI_MAX_ERROR_STRING]; MTest_Init(&argc, &argv); MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN ); rc = MPI_Testall( count, reqs, &flag, status_array ); if (rc != MPI_SUCCESS) { MPI_Error_string( rc, errmsg, &errlen ); printf( "MPI_Testall returned failure: %s\n", errmsg ); errs ++; } else if (!flag) { printf( "MPI_Testall( 0, ... ) did not return a true flag\n") ; errs++; } rc = MPI_Waitall( count, reqs, status_array ); if (rc != MPI_SUCCESS) { MPI_Error_string( rc, errmsg, &errlen ); printf( "MPI_Waitall returned failure: %s\n", errmsg ); errs ++; } rc = MPI_Testany( count, reqs, &idx, &flag, &status ); if (rc != MPI_SUCCESS) { MPI_Error_string( rc, errmsg, &errlen ); printf( "MPI_Testany returned failure: %s\n", errmsg ); errs ++; } else if (!flag) { printf( "MPI_Testany( 0, ... ) did not return a true flag\n") ; errs++; } rc = MPI_Waitany( count, reqs, &idx, &status ); if (rc != MPI_SUCCESS) { MPI_Error_string( rc, errmsg, &errlen ); printf( "MPI_Waitany returned failure: %s\n", errmsg ); errs ++; } rc = MPI_Testsome( count, reqs, &outcnt, indices, status_array ); if (rc != MPI_SUCCESS) { MPI_Error_string( rc, errmsg, &errlen ); printf( "MPI_Testsome returned failure: %s\n", errmsg ); errs ++; } rc = MPI_Waitsome( count, reqs, &outcnt, indices, status_array ); if (rc != MPI_SUCCESS) { MPI_Error_string( rc, errmsg, &errlen ); printf( "MPI_Waitsome returned failure: %s\n", errmsg ); errs ++; } MTest_Finalize( errs ); MPI_Finalize(); return 0; }
int main( int argc, char *argv[] ) { int errs = 0; MPI_Comm comm; MPI_Request r[2]; MPI_Status s[2]; int indices[2], outcount; int errval, errclass; int b1[20], b2[20], rank, size, src, dest, i, j; MTest_Init( &argc, &argv ); /* Create some receive requests. tags 0-9 will succeed, tags 10-19 will be used for ERR_TRUNCATE (fewer than 20 messages will be used) */ comm = MPI_COMM_WORLD; MPI_Comm_rank( comm, &rank ); MPI_Comm_size( comm, &size ); src = 1; dest = 0; if (rank == dest) { MPI_Errhandler_set( comm, MPI_ERRORS_RETURN ); errval = MPI_Irecv( b1, 10, MPI_INT, src, 0, comm, &r[0] ); if (errval) { errs++; MTestPrintError( errval ); printf( "Error returned from Irecv\n" ); } errval = MPI_Irecv( b2, 10, MPI_INT, src, 10, comm, &r[1] ); if (errval) { errs++; MTestPrintError( errval ); printf( "Error returned from Irecv\n" ); } /* synchronize */ errval = MPI_Recv(NULL, 0, MPI_INT, src, 10, comm, MPI_STATUS_IGNORE); if (errval) { errs++; MTestPrintError( errval ); printf( "Error returned from Recv\n" ); } for (i=0; i<2; i++) { s[i].MPI_ERROR = -1; } errval = MPI_Waitsome( 2, r, &outcount, indices, s ); MPI_Error_class( errval, &errclass ); if (errclass != MPI_ERR_IN_STATUS) { errs++; printf( "Did not get ERR_IN_STATUS in Waitsome. Got %d.\n", errval ); } else if (outcount != 2) { errs++; printf( "Wait returned outcount = %d\n", outcount ); } else { /* Check for success */ for (i=0; i<outcount; i++) { j = i; /* Indices is the request index */ if (s[j].MPI_TAG < 10 && s[j].MPI_ERROR != MPI_SUCCESS) { errs++; printf( "correct msg had error class %d\n", s[j].MPI_ERROR ); } else if (s[j].MPI_TAG >= 10 && s[j].MPI_ERROR == MPI_SUCCESS) { errs++; printf( "truncated msg had MPI_SUCCESS\n" ); } } } } else if (rank == src) { /* Send test messages, then send another message so that the test does not start until we are sure that the sends have begun */ MPI_Send( b1, 10, MPI_INT, dest, 0, comm ); MPI_Send( b2, 11, MPI_INT, dest, 10, comm ); /* synchronize */ MPI_Ssend( NULL, 0, MPI_INT, dest, 10, comm ); } MTest_Finalize( errs ); MPI_Finalize(); return 0; }
int main( int argc, char **argv ) { MPI_Request r1; int size, rank; int err = 0; int partner, buf[10], flag, idx, index; MPI_Status status; MPI_Init( &argc, &argv ); MPI_Comm_size( MPI_COMM_WORLD, &size ); MPI_Comm_rank( MPI_COMM_WORLD, &rank ); if (size < 2) { printf( "Cancel test requires at least 2 processes\n" ); MPI_Abort( MPI_COMM_WORLD, 1 ); } /* * Here is the test. First, we ensure an unsatisfied Irecv: * process 0 process size-1 * Sendrecv Sendrecv * Irecv ---- * Cancel ---- * Sendrecv Sendrecv * Next, we confirm receipt before canceling * Irecv Send * Sendrecv Sendrecv * Cancel */ if (rank == 0) { partner = size - 1; /* Cancel succeeds for wait/waitall */ MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Start( &r1 ); MPI_Cancel( &r1 ); MPI_Wait( &r1, &status ); MPI_Test_cancelled( &status, &flag ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); if (!flag) { err++; printf( "Cancel of a receive failed where it should succeed (Wait).\n" ); } MPI_Request_free( &r1 ); /* Cancel fails for test/testall */ buf[0] = -1; MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); MPI_Start( &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Cancel( &r1 ); MPI_Test( &r1, &flag, &status ); MPI_Test_cancelled( &status, &flag ); if (flag) { err++; printf( "Cancel of a receive succeeded where it shouldn't (Test).\n" ); if (buf[0] != -1) { printf( "Receive buffer changed even though cancel suceeded! (Test).\n" ); } } MPI_Request_free( &r1 ); /* Cancel succeeds for waitany */ MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Start( &r1 ); MPI_Cancel( &r1 ); MPI_Waitany( 1, &r1, &idx, &status ); MPI_Test_cancelled( &status, &flag ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); if (!flag) { err++; printf( "Cancel of a receive failed where it should succeed (Waitany).\n" ); } MPI_Request_free( &r1 ); /* Cancel fails for testany */ buf[0] = -1; MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); MPI_Start( &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Cancel( &r1 ); MPI_Testany( 1, &r1, &idx, &flag, &status ); MPI_Test_cancelled( &status, &flag ); if (flag) { err++; printf( "Cancel of a receive succeeded where it shouldn't (Testany).\n" ); if (buf[0] != -1) { printf( "Receive buffer changed even though cancel suceeded! 
(Test).\n" ); } } MPI_Request_free( &r1 ); /* Cancel succeeds for waitsome */ MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Start( &r1 ); MPI_Cancel( &r1 ); MPI_Waitsome( 1, &r1, &idx, &index, &status ); MPI_Test_cancelled( &status, &flag ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); if (!flag) { err++; printf( "Cancel of a receive failed where it should succeed (Waitsome).\n" ); } MPI_Request_free( &r1 ); /* Cancel fails for testsome*/ buf[0] = -1; MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); MPI_Start( &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Cancel( &r1 ); MPI_Testsome( 1, &r1, &idx, &index, &status ); MPI_Test_cancelled( &status, &flag ); if (flag) { err++; printf( "Cancel of a receive succeeded where it shouldn't (Testsome).\n" ); if (buf[0] != -1) { printf( "Receive buffer changed even though cancel suceeded! (Testsome).\n" ); } } MPI_Request_free( &r1 ); if (err) { printf( "Test failed with %d errors.\n", err ); } else { printf( " No Errors\n" ); } } else if (rank == size - 1) { partner = 0; /* Cancel succeeds for wait/waitall */ MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); /* Cancel fails for test/testall */ buf[0] = 3; MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); /* Cancel succeeds for waitany */ MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); /* Cancel fails for testany */ MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); /* Cancel succeeds for waitsome */ MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); /* Cancel fails for waitsome */ MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); /* Next test - check that a cancel for a request receive from MPI_PROC_NULL succeeds (there is some suspicion that some systems can't handle this - also, MPI_REQUEST_NULL */ /* A null request is an error. (null objects are errors unless otherwise allowed) r1 = MPI_REQUEST_NULL; MPI_Cancel( &r1 ); */ MPI_Recv_init( buf, 10, MPI_INT, MPI_PROC_NULL, 0, MPI_COMM_WORLD, &r1 ); MPI_Start( &r1 ); MPI_Cancel( &r1 ); MPI_Request_free( &r1 ); /* Must complete cancel. We know that it won't complete, so we don't need to do anything else */ } MPI_Finalize(); return 0; }
// **************************************************************************** // Method: avtGTCFileFormat::CommunicateData // // Purpose: // Communicate the data to the owning processors via non blocking send/recv. // // Arguments: // dim Dimension of the variable // shareMatrix Matrix with share data. // array Array of dynamic arrays for storing bined data. // var Input data variables. // ids IDs for the variables. // myVarsPtr Upon exit, this points to the next location in vars where data // can be stored. // myIdsPtr Upon exit, this points to the next location in vars where data // can be stored. // // Programmer: Dave Pugmire // Creation: Mon Dec 3 16:17:45 EST 2007 // // Modifications: // // Dave Pugmire, Thu Dec 20 16:23:48 EST 2007 // Use MPI_Waitsome instead of MPI_Waitall. Process things as they become ready. // // Jeremy Meredith, Thu Aug 7 13:58:44 EDT 2008 // MPI_Request does not have a well-specified type by the MPI spec. // I removed its use from within a printf, since it was debugging code. // // **************************************************************************** void avtGTCFileFormat::CommunicateData( int dim, int *shareMatrix, parallelBuffer **array, float **myVarsPtr, float **myIdsPtr ) { int err; // Everyone knows what to expect now. Do the send/recvs via nonblocking send/recv. std::vector<MPI_Request> requests; std::vector<int> requestRank; int numSends = 0; for ( int i = 0; i < nProcs; i++ ) { MPI_Request req; int sz = (dim+1) * array[i]->Size(); if ( sz == 0 ) continue; err = MPI_Isend( array[i]->Get(0), sz, MPI_FLOAT, i, rank, VISIT_MPI_COMM, &req ); if ( err != MPI_SUCCESS ) EXCEPTION1(InvalidDBTypeException, "GTC Reader: MPI_Isend() failure." ); char str[512]; sprintf( str, "%d: sending to %d [%d]\n", rank, i, sz ); debug5 << str; requests.push_back( req ); requestRank.push_back( i ); numSends++; } //Do the recvs. float **bufs = new float*[nProcs]; float *myVars = *myVarsPtr, *myIds = *myIdsPtr; int numRecvs = 0; for ( int i = 0; i < nProcs; i++ ) { int sz = (dim+1) * shareMatrix[i*nProcs + rank]; bufs[i] = NULL; if ( i == rank || sz == 0 ) continue; bufs[i] = new float[sz]; MPI_Request req; err = MPI_Irecv( bufs[i], sz, MPI_FLOAT, i, i, VISIT_MPI_COMM, &req ); if ( err != MPI_SUCCESS ) EXCEPTION1(InvalidDBTypeException, "GTC Reader: MPI_Irecv() failure." ); char str[512]; sprintf( str, "%d: receiving from %d [%d]\n", rank, i, sz ); debug5 << str; requests.push_back( req ); requestRank.push_back( i ); numRecvs++; } // Process the send/recvs as they complete. int numRequests = numSends+numRecvs; if ( numRequests > 0 ) { int num, nTotalReq = numRequests, *idxArray = new int[numRequests]; MPI_Status *statusArray = new MPI_Status[numRequests]; while ( numRequests > 0 ) { err = MPI_Waitsome( nTotalReq, &requests[0], &num, idxArray, statusArray ); if ( err != MPI_SUCCESS || num == MPI_UNDEFINED ) EXCEPTION1(InvalidDBTypeException, "GTC Reader: MPI_Waitany() failure." ); debug5 << "Waitsome=: " << num << endl; for ( int i = 0; i < num; i++ ) { int idx = idxArray[i]; if ( idx < numSends ) { //Nothing to do for send. } else { int src = requestRank[idx]; int cnt = shareMatrix[src*nProcs + rank]; //Copy ID, var. float *bufPtr = bufs[src]; for ( int j = 0; j < cnt; j++ ) { *myIds++ = *bufPtr++; // ID for ( int k = 0; k < dim; k++ ) *myVars++ = *bufPtr++; } delete [] bufs[src]; bufs[src] = NULL; } requests[idx] = MPI_REQUEST_NULL; } numRequests -= num; } delete [] idxArray; delete [] statusArray; } //Cleanup. delete [] bufs; *myIdsPtr = myIds; *myVarsPtr = myVars; }
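The comment block above describes processing data as it becomes ready with MPI_Waitsome rather than blocking on MPI_Waitall. A minimal sketch of that drain loop, stripped of the VisIt/GTC data structures (the processing step is a placeholder):

/* Drain a set of outstanding nonblocking requests with MPI_Waitsome,
 * handling each completion as it arrives. */
#include <mpi.h>
#include <stdlib.h>

static void drain_requests(MPI_Request *reqs, int nreqs)
{
    int remaining = nreqs;
    int *indices = (int *)malloc(nreqs * sizeof(int));
    MPI_Status *statuses = (MPI_Status *)malloc(nreqs * sizeof(MPI_Status));

    while (remaining > 0) {
        int ndone;
        MPI_Waitsome(nreqs, reqs, &ndone, indices, statuses);
        if (ndone == MPI_UNDEFINED)
            break;                       /* nothing left to wait for */
        for (int i = 0; i < ndone; ++i) {
            int idx = indices[i];
            /* process completion idx here (e.g. unpack a receive buffer);
             * MPI has already set reqs[idx] to MPI_REQUEST_NULL. */
            (void)idx;
        }
        remaining -= ndone;
    }
    free(indices);
    free(statuses);
}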
/* * schedule thread will manage the xml file participate work, if there is an idle slave, we transfer * the xml file part we just get from pre-scan thread to the idle slave. * */ void* schedule(void* c_arg){ #ifdef MASTER_TIME_TEST struct timeval tim; gettimeofday(&tim,NULL); double start = tim.tv_sec + (tim.tv_usec/1000000.0); #endif MPI_Request request[3]; int indices[slaves_num],num_completed; int j; for(j=0;j<slaves_num;j++) request[j] = MPI_REQUEST_NULL; while(1){ if(!multi_read_over() || !task_queue_is_empty()){ parse_task* task = get_task(); #ifndef READ_TEST tag_info* host_tags_info = task->info; int tag_num = task->tag_num; // change the struct Text and tag_info to bytes for transferring char* bytes_of_tags_info = (char*)host_tags_info; int dest = -1; while( dest <= 0){ int i; for(i=1;i<=slaves_num;i++){ if (idle_node[i]== 1) { idle_node[i] = 0; dest = i; break; } } if(dest <= 0){ for(i=1;i<=slaves_num;i++) MPI_Irecv(&idle_node[i],1,MPI_INT,i,MSG_IDLE,MPI_COMM_WORLD,&request[i-1]); MPI_Waitsome(slaves_num,request,&num_completed,indices,MPI_STATUSES_IGNORE); } } idle_node[dest] = 0;//indicates this slave is busy now // send partial file and tag information if(bytes_of_tags_info == NULL) printf("ERROR : tags info is null \n"); MPI_Ssend(bytes_of_tags_info,tag_num*sizeof(tag_info),MPI_CHAR,dest,MSG_SEND_TAG_INFO,MPI_COMM_WORLD); free(task); // printf("%d,%d\n",multi_read_over(),task_queue_is_empty()); #endif }else{ #ifdef MASTER_TIME_TEST gettimeofday(&tim,NULL); double stop = tim.tv_sec + (tim.tv_usec/1000000.0); double dur = stop - start; printf(" master time report\n%ld--(%f,%f,%f)\n",task_counter,start,stop,dur); #endif #ifdef QUERY_TEST printf(" please input a tag name: \n"); char s[31]; scanf("%30s",s); int sum_count = 0; if(strlen(s)!=0){ array* condition = (array*)malloc(sizeof(array)); array_init(condition); parse_query(s,condition); array** results = (array*)malloc(sizeof(array*)*(condition->size)); for(j=0;j<condition->size;j++){ char* s = ((char**)(condition->data))[j]; results[j] = query(s); } printf("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\n"); } #endif #ifndef QUERY_TEST int i; char end; for(i=1;i<=slaves_num;i++) MPI_Send(&end,0,MPI_CHAR,i,MSG_EXIT,MPI_COMM_WORLD); return NULL; #endif } } printf("master exit\n"); return NULL; }
static PetscErrorCode VecAssemblyEnd_MPI_BTS(Vec X) { Vec_MPI *x = (Vec_MPI*)X->data; PetscInt bs = X->map->bs; PetscMPIInt npending,*some_indices,r; MPI_Status *some_statuses; PetscScalar *xarray; PetscErrorCode ierr; VecAssemblyFrame *frame; PetscFunctionBegin; if (X->stash.donotstash) { X->stash.insertmode = NOT_SET_VALUES; X->bstash.insertmode = NOT_SET_VALUES; PetscFunctionReturn(0); } ierr = VecGetArray(X,&xarray);CHKERRQ(ierr); ierr = PetscSegBufferExtractInPlace(x->segrecvframe,&frame);CHKERRQ(ierr); ierr = PetscMalloc2(4*x->nrecvranks,&some_indices,x->use_status?4*x->nrecvranks:0,&some_statuses);CHKERRQ(ierr); for (r=0,npending=0; r<x->nrecvranks; r++) npending += frame[r].pendings + frame[r].pendingb; while (npending>0) { PetscMPIInt ndone,ii; /* Filling MPI_Status fields requires some resources from the MPI library. We skip it on the first assembly, or * when VEC_SUBSET_OFF_PROC_ENTRIES has not been set, because we could exchange exact sizes in the initial * rendezvous. When the rendezvous is elided, however, we use MPI_Status to get actual message lengths, so that * subsequent assembly can set a proper subset of the values. */ ierr = MPI_Waitsome(4*x->nrecvranks,x->recvreqs,&ndone,some_indices,x->use_status?some_statuses:MPI_STATUSES_IGNORE);CHKERRQ(ierr); for (ii=0; ii<ndone; ii++) { PetscInt i = some_indices[ii]/4,j,k; InsertMode imode = (InsertMode)x->recvhdr[i].insertmode; PetscInt *recvint; PetscScalar *recvscalar; PetscBool intmsg = (PetscBool)(some_indices[ii]%2 == 0); PetscBool blockmsg = (PetscBool)((some_indices[ii]%4)/2 == 1); npending--; if (!blockmsg) { /* Scalar stash */ PetscMPIInt count; if (--frame[i].pendings > 0) continue; if (x->use_status) { ierr = MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);CHKERRQ(ierr); } else count = x->recvhdr[i].count; for (j=0,recvint=frame[i].ints,recvscalar=frame[i].scalars; j<count; j++,recvint++) { PetscInt loc = *recvint - X->map->rstart; if (*recvint < X->map->rstart || X->map->rend <= *recvint) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Received vector entry %D out of local range [%D,%D)]",*recvint,X->map->rstart,X->map->rend); switch (imode) { case ADD_VALUES: xarray[loc] += *recvscalar++; break; case INSERT_VALUES: xarray[loc] = *recvscalar++; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode); } } } else { /* Block stash */ PetscMPIInt count; if (--frame[i].pendingb > 0) continue; if (x->use_status) { ierr = MPI_Get_count(&some_statuses[ii],intmsg ? 
MPIU_INT : MPIU_SCALAR,&count);CHKERRQ(ierr); if (!intmsg) count /= bs; /* Convert from number of scalars to number of blocks */ } else count = x->recvhdr[i].bcount; for (j=0,recvint=frame[i].intb,recvscalar=frame[i].scalarb; j<count; j++,recvint++) { PetscInt loc = (*recvint)*bs - X->map->rstart; switch (imode) { case ADD_VALUES: for (k=loc; k<loc+bs; k++) xarray[k] += *recvscalar++; break; case INSERT_VALUES: for (k=loc; k<loc+bs; k++) xarray[k] = *recvscalar++; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode); } } } } } ierr = VecRestoreArray(X,&xarray);CHKERRQ(ierr); ierr = MPI_Waitall(4*x->nsendranks,x->sendreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr); ierr = PetscFree2(some_indices,some_statuses);CHKERRQ(ierr); if (x->assembly_subset) { void *dummy; /* reset segbuffers */ ierr = PetscSegBufferExtractInPlace(x->segrecvint,&dummy);CHKERRQ(ierr); ierr = PetscSegBufferExtractInPlace(x->segrecvscalar,&dummy);CHKERRQ(ierr); } else { ierr = VecAssemblyReset_MPI(X);CHKERRQ(ierr); } X->stash.insertmode = NOT_SET_VALUES; X->bstash.insertmode = NOT_SET_VALUES; ierr = VecStashScatterEnd_Private(&X->stash);CHKERRQ(ierr); ierr = VecStashScatterEnd_Private(&X->bstash);CHKERRQ(ierr); PetscFunctionReturn(0); }
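The use_status branch above recovers actual message lengths from the statuses filled in by MPI_Waitsome. A minimal standalone sketch of that technique, without the PETSc stash machinery; ranks, tags, and the buffer size are placeholders:

/* Receive messages of unknown length: post oversized MPI_Irecv's, complete
 * them with MPI_Waitsome, and query each actual count via MPI_Get_count. */
#include <mpi.h>

#define NRECV  4
#define MAXLEN 1024

static void recv_unknown_lengths(MPI_Comm comm)
{
    int buf[NRECV][MAXLEN];
    MPI_Request reqs[NRECV];
    MPI_Status statuses[NRECV];
    int indices[NRECV];

    for (int r = 0; r < NRECV; ++r)
        MPI_Irecv(buf[r], MAXLEN, MPI_INT, MPI_ANY_SOURCE, 0, comm, &reqs[r]);

    int remaining = NRECV;
    while (remaining > 0) {
        int ndone;
        MPI_Waitsome(NRECV, reqs, &ndone, indices, statuses);
        for (int i = 0; i < ndone; ++i) {
            int count;
            MPI_Get_count(&statuses[i], MPI_INT, &count);
            /* buf[indices[i]][0 .. count-1] now holds the received data */
            (void)count;
        }
        remaining -= ndone;
    }
}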
static void test_pair (void) { int prev, next, count, tag, index, i, outcount, indices[2]; int rank, size, flag, ierr, reqcount; double send_buf[TEST_SIZE], recv_buf[TEST_SIZE]; double buffered_send_buf[TEST_SIZE * 2 + MPI_BSEND_OVERHEAD]; /* factor of two is based on guessing - only dynamic allocation would be safe */ void *buffer; MPI_Status statuses[2]; MPI_Status status; MPI_Request requests[2]; MPI_Comm dupcom, intercom; #ifdef V_T struct _VT_FuncFrameHandle { char *name; int func; int frame; }; typedef struct _VT_FuncFrameHandle VT_FuncFrameHandle_t; VT_FuncFrameHandle_t normal_sends, buffered_sends, buffered_persistent_sends, ready_sends, sync_sends, nblock_sends, nblock_rsends, nblock_ssends, pers_sends, pers_rsends, pers_ssends, sendrecv, sendrecv_repl, intercomm; int classid; VT_classdef( "Application:test_pair", &classid ); #define VT_REGION_DEF( _name, _nameframe, _class ) \ (_nameframe).name=_name; \ VT_funcdef( (_nameframe).name, _class, &((_nameframe).func) ); #define VT_BEGIN_REGION( _nameframe ) \ LOCDEF(); \ VT_begin( (_nameframe).func ) #define VT_END_REGION( _nameframe ) \ LOCDEF(); VT_end( (_nameframe).func ) #else #define VT_REGION_DEF( _name, _nameframe, _class ) #define VT_BEGIN_REGION( _nameframe ) #define VT_END_REGION( _nameframe ) #endif ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); ierr = MPI_Comm_size(MPI_COMM_WORLD, &size); if ( size < 2 ) { if ( rank == 0 ) { printf("Program needs to be run on at least 2 processes.\n"); } ierr = MPI_Abort( MPI_COMM_WORLD, 66 ); } ierr = MPI_Comm_dup(MPI_COMM_WORLD, &dupcom); if ( rank >= 2 ) { /* printf( "%d Calling finalize.\n", rank ); */ ierr = MPI_Finalize( ); exit(0); } next = rank + 1; if (next >= 2) next = 0; prev = rank - 1; if (prev < 0) prev = 1; VT_REGION_DEF( "Normal_Sends", normal_sends, classid ); VT_REGION_DEF( "Buffered_Sends", buffered_sends, classid ); VT_REGION_DEF( "Buffered_Persistent_Sends", buffered_persistent_sends, classid ); VT_REGION_DEF( "Ready_Sends", ready_sends, classid ); VT_REGION_DEF( "Sync_Sends", sync_sends, classid ); VT_REGION_DEF( "nblock_Sends", nblock_sends, classid ); VT_REGION_DEF( "nblock_RSends", nblock_rsends, classid ); VT_REGION_DEF( "nblock_SSends", nblock_ssends, classid ); VT_REGION_DEF( "Pers_Sends", pers_sends, classid ); VT_REGION_DEF( "Pers_RSends", pers_rsends, classid ); VT_REGION_DEF( "Pers_SSends", pers_ssends, classid ); VT_REGION_DEF( "SendRecv", sendrecv, classid ); VT_REGION_DEF( "SendRevc_Repl", sendrecv_repl, classid ); VT_REGION_DEF( "InterComm", intercomm, classid ); /* * Normal sends */ VT_BEGIN_REGION( normal_sends ); if (rank == 0) printf ("Send\n"); tag = 0x100; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); LOCDEF(); MPI_Send(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv"); } else { LOCDEF(); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( normal_sends ); /* * Buffered sends */ VT_BEGIN_REGION( buffered_sends ); if (rank == 0) printf ("Buffered Send\n"); tag = 138; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { 
init_test_data(send_buf,TEST_SIZE,0); LOCDEF(); MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf)); MPI_Bsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); MPI_Buffer_detach(&buffer, &size); if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) { printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf)); MPI_Abort(MPI_COMM_WORLD, 201); } MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv"); } else { LOCDEF(); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( buffered_sends ); /* * Buffered sends */ VT_BEGIN_REGION( buffered_persistent_sends ); if (rank == 0) printf ("Buffered Persistent Send\n"); tag = 238; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); LOCDEF(); MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf)); MPI_Bsend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests); MPI_Start(requests); MPI_Wait(requests, statuses); MPI_Request_free(requests); MPI_Buffer_detach(&buffer, &size); if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) { printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf)); MPI_Abort(MPI_COMM_WORLD, 201); } MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv"); } else { LOCDEF(); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( buffered_persistent_sends ); /* * Ready sends. Note that we must insure that the receive is posted * before the rsend; this requires using Irecv. 
*/ VT_BEGIN_REGION( ready_sends ); if (rank == 0) printf ("Rsend\n"); tag = 1456; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Recv(MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD, &status); MPI_Rsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); MPI_Probe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status); if (status.MPI_SOURCE != prev) printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE); if (status.MPI_TAG != tag) printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG); MPI_Get_count(&status, MPI_DOUBLE, &i); if (i != count) printf ("Incorrect count, expected %d, got %d\n",count,i); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "rsend and recv"); } else { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); MPI_Send( MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD); MPI_Wait(requests, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "rsend and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( ready_sends ); /* * Synchronous sends */ VT_BEGIN_REGION( sync_sends ); if (rank == 0) printf ("Ssend\n"); tag = 1789; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status); if (flag) printf ("Iprobe succeeded! source %d, tag %d\n",status.MPI_SOURCE, status.MPI_TAG); MPI_Ssend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); while (!flag) MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status); if (status.MPI_SOURCE != prev) printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE); if (status.MPI_TAG != tag) printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG); MPI_Get_count(&status, MPI_DOUBLE, &i); if (i != count) printf ("Incorrect count, expected %d, got %d\n",count,i); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv"); } else { MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Ssend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( sync_sends ); /* * Nonblocking normal sends */ VT_BEGIN_REGION( nblock_sends ); if (rank == 0) printf ("Isend\n"); tag = 2123; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); init_test_data(send_buf,TEST_SIZE,0); MPI_Isend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, (requests+1)); MPI_Waitall(2, requests, statuses); rq_check( requests, 2, "isend and irecv" ); msg_check(recv_buf,prev,tag,count,statuses, TEST_SIZE,"isend and irecv"); } else { MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"isend and irecv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Isend(recv_buf, count, MPI_DOUBLE, next, tag,MPI_COMM_WORLD, (requests)); MPI_Wait((requests), &status); rq_check(requests, 1, "isend (and 
recv)"); } VT_END_REGION( nblock_sends ); /* * Nonblocking ready sends */ VT_BEGIN_REGION( nblock_rsends ); if (rank == 0) printf ("Irsend\n"); tag = 2456; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); init_test_data(send_buf,TEST_SIZE,0); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0, MPI_BOTTOM, 0, MPI_INT, next, 0, dupcom, &status); MPI_Irsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, (requests+1)); reqcount = 0; while (reqcount != 2) { MPI_Waitany( 2, requests, &index, statuses); if( index == 0 ) { memcpy( &status, statuses, sizeof(status) ); } reqcount++; } rq_check( requests, 1, "irsend and irecv"); msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"irsend and irecv"); } else { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0, MPI_BOTTOM, 0, MPI_INT, next, 0, dupcom, &status); flag = 0; while (!flag) MPI_Test(requests, &flag, &status); rq_check( requests, 1, "irsend and irecv (test)"); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "irsend and irecv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Irsend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests); MPI_Waitall(1, requests, statuses); rq_check( requests, 1, "irsend and irecv"); } VT_END_REGION( nblock_rsends ); /* * Nonblocking synchronous sends */ VT_BEGIN_REGION( nblock_ssends ); if (rank == 0) printf ("Issend\n"); tag = 2789; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests ); init_test_data(send_buf,TEST_SIZE,0); MPI_Issend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, (requests+1)); flag = 0; while (!flag) MPI_Testall(2, requests, &flag, statuses); rq_check( requests, 2, "issend and irecv (testall)"); msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, "issend and recv"); } else { MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "issend and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Issend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,requests); flag = 0; while (!flag) MPI_Testany(1, requests, &index, &flag, statuses); rq_check( requests, 1, "issend and recv (testany)"); } VT_END_REGION( nblock_ssends ); /* * Persistent normal sends */ VT_BEGIN_REGION( pers_sends ); if (rank == 0) printf ("Send_init\n"); tag = 3123; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); MPI_Send_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests); MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, (requests+1)); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Startall(2, requests); MPI_Waitall(2, requests, statuses); msg_check( recv_buf, prev, tag, count, (statuses+1), TEST_SIZE, "persistent send/recv"); } else { MPI_Start((requests+1)); MPI_Wait((requests+1), &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "persistent send/recv"); init_test_data(send_buf,TEST_SIZE,1); MPI_Start(requests); MPI_Wait(requests, &status); } MPI_Request_free(requests); MPI_Request_free((requests+1)); VT_END_REGION( pers_sends ); /* * Persistent ready sends */ VT_BEGIN_REGION( pers_rsends ); if (rank == 0) printf ("Rsend_init\n"); tag = 3456; 
count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); MPI_Rsend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests); MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, (requests+1)); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Barrier( MPI_COMM_WORLD ); MPI_Startall(2, requests); reqcount = 0; while (reqcount != 2) { MPI_Waitsome(2, requests, &outcount, indices, statuses); for (i=0; i<outcount; i++) { if (indices[i] == 1) { msg_check( recv_buf, prev, tag, count, (statuses+i), TEST_SIZE, "waitsome"); } reqcount++; } } } else { MPI_Start((requests+1)); MPI_Barrier( MPI_COMM_WORLD ); flag = 0; while (!flag) MPI_Test((requests+1), &flag, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "test"); init_test_data(send_buf,TEST_SIZE,1); MPI_Start(requests); MPI_Wait(requests, &status); } MPI_Request_free(requests); MPI_Request_free((requests+1)); VT_END_REGION( pers_rsends ); /* * Persistent synchronous sends */ VT_BEGIN_REGION( pers_ssends ); if (rank == 0) printf ("Ssend_init\n"); tag = 3789; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); MPI_Ssend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, (requests+1)); MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Startall(2, requests); reqcount = 0; while (reqcount != 2) { MPI_Testsome(2, requests, &outcount, indices, statuses); for (i=0; i<outcount; i++) { if (indices[i] == 0) { msg_check( recv_buf, prev, tag, count, (statuses+i), TEST_SIZE, "testsome"); } reqcount++; } } } else { MPI_Start(requests); flag = 0; while (!flag) MPI_Testany(1, requests, &index, &flag, statuses); msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, "testany" ); init_test_data(send_buf,TEST_SIZE,1); MPI_Start((requests+1)); MPI_Wait((requests+1), &status); } MPI_Request_free(requests); MPI_Request_free((requests+1)); VT_END_REGION( pers_ssends ); /* * Send/receive. */ VT_BEGIN_REGION( sendrecv ); if (rank == 0) printf ("Sendrecv\n"); tag = 4123; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Sendrecv(send_buf, count, MPI_DOUBLE, next, tag, recv_buf, count, MPI_DOUBLE, prev, tag, MPI_COMM_WORLD, &status ); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "sendrecv"); } else { MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "recv/send"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( sendrecv ); #ifdef V_T VT_flush(); #endif /* * Send/receive replace. 
*/ VT_BEGIN_REGION( sendrecv_repl ); if (rank == 0) printf ("Sendrecv_replace\n"); tag = 4456; count = TEST_SIZE / 3; if (rank == 0) { init_test_data(recv_buf, TEST_SIZE,0); for (i=count; i< TEST_SIZE; i++) recv_buf[i] = 0.0; MPI_Sendrecv_replace(recv_buf, count, MPI_DOUBLE, next, tag, prev, tag, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "sendrecvreplace"); } else { clear_test_data(recv_buf,TEST_SIZE); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "recv/send for replace"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( sendrecv_repl ); /* * Send/Receive via inter-communicator */ VT_BEGIN_REGION( intercomm ); MPI_Intercomm_create(MPI_COMM_SELF, 0, MPI_COMM_WORLD, next, 1, &intercom); if (rank == 0) printf ("Send via inter-communicator\n"); tag = 4018; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); LOCDEF(); MPI_Send(send_buf, count, MPI_DOUBLE, 0, tag, intercom); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, intercom, &status); msg_check(recv_buf, 0, tag, count, &status, TEST_SIZE, "send and recv via inter-communicator"); } else if (rank == 1) { LOCDEF(); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, intercom, &status); msg_check( recv_buf, 0, tag, count, &status, TEST_SIZE,"send and recv via inter-communicator"); init_test_data(recv_buf,TEST_SIZE,0); MPI_Send(recv_buf, count, MPI_DOUBLE, 0, tag, intercom); } VT_END_REGION( normal_sends ); MPI_Comm_free(&intercom); MPI_Comm_free(&dupcom); }
int main( int argc, char **argv ) { MPI_Request r1; int size, rank; int err = 0; int partner, buf[10], flag, idx, index; MPI_Status status; MPI_Init( &argc, &argv ); MPI_Comm_size( MPI_COMM_WORLD, &size ); MPI_Comm_rank( MPI_COMM_WORLD, &rank ); if (size < 2) { printf( "Cancel test requires at least 2 processes\n" ); MPI_Abort( MPI_COMM_WORLD, 1 ); } /* * Here is the test. First, we ensure an unsatisfied Irecv: * process 0 process size-1 * Sendrecv Sendrecv * Irecv ---- * Cancel ---- * Sendrecv Sendrecv * Next, we confirm receipt before canceling * Irecv Send * Sendrecv Sendrecv * Cancel */ if (rank == 0) { partner = size - 1; /* Cancel succeeds for wait/waitall */ MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Start( &r1 ); MPI_Cancel( &r1 ); MPI_Wait( &r1, &status ); MPI_Test_cancelled( &status, &flag ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); if (!flag) { err++; printf( "Cancel of a send failed where it should succeed (Wait).\n" ); } MPI_Request_free( &r1 ); /* Cancel fails for test/testall */ buf[0] = 3; MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); MPI_Start( &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Cancel( &r1 ); MPI_Test( &r1, &flag, &status ); MPI_Test_cancelled( &status, &flag ); if (flag) { err++; printf( "Cancel of a send succeeded where it shouldn't (Test).\n" ); } MPI_Request_free( &r1 ); /* Cancel succeeds for waitany */ MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Start( &r1 ); MPI_Cancel( &r1 ); MPI_Waitany( 1, &r1, &idx, &status ); MPI_Test_cancelled( &status, &flag ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); if (!flag) { err++; printf( "Cancel of a send failed where it should succeed (Waitany).\n" ); } MPI_Request_free( &r1 ); /* Cancel fails for testany */ buf[0] = 3; MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); MPI_Start( &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Cancel( &r1 ); MPI_Testany( 1, &r1, &idx, &flag, &status ); MPI_Test_cancelled( &status, &flag ); if (flag) { err++; printf( "Cancel of a send succeeded where it shouldn't (Testany).\n" ); } MPI_Request_free( &r1 ); /* Cancel succeeds for waitsome */ MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Start( &r1 ); MPI_Cancel( &r1 ); MPI_Waitsome( 1, &r1, &idx, &index, &status ); MPI_Test_cancelled( &status, &flag ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); if (!flag) { err++; printf( "Cancel of a send failed where it should succeed (Waitsome).\n" ); } MPI_Request_free( &r1 ); /* Cancel fails for testsome*/ buf[0] = 3; MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); MPI_Start( &r1 ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Cancel( &r1 ); MPI_Testsome( 1, &r1, &idx, &index, &status ); 
MPI_Test_cancelled( &status, &flag ); if (flag) { err++; printf( "Cancel of a send succeeded where it shouldn't (Testsome).\n" ); } MPI_Request_free( &r1 ); if (err) { printf( "Test failed with %d errors.\n", err ); } else { printf( "Test passed\n" ); } } else if (rank == size - 1) { partner = 0; /* Cancel succeeds for wait/waitall */ MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); /* Cancel fails for test/testall */ buf[0] = -1; MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); if (buf[0] == -1) { printf( "Receive buffer did not change even though cancel should not have suceeded! (Test).\n" ); } /* Cancel succeeds for waitany */ MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); /* Cancel fails for testany */ buf[0] = -1; MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); if (buf[0] == -1) { printf( "Receive buffer did not change even though cancel should not have suceeded! (Testany).\n" ); } /* Cancel succeeds for waitsome */ MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); /* Cancel fails for testsome */ buf[0] = -1; MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status ); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_BOTTOM, 0, MPI_INT, partner, 1, MPI_COMM_WORLD, &status ); if (buf[0] == -1) { printf( "Receive buffer did not change even though cancel should not have suceeded! (Test).\n" ); } } MPI_Finalize(); return 0; }
void declareBindings (void) { /* === Point-to-point === */ void* buf; int count; MPI_Datatype datatype; int dest; int tag; MPI_Comm comm; MPI_Send (buf, count, datatype, dest, tag, comm); // L12 int source; MPI_Status status; MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15 MPI_Get_count (&status, datatype, &count); MPI_Bsend (buf, count, datatype, dest, tag, comm); MPI_Ssend (buf, count, datatype, dest, tag, comm); MPI_Rsend (buf, count, datatype, dest, tag, comm); void* buffer; int size; MPI_Buffer_attach (buffer, size); // L22 MPI_Buffer_detach (buffer, &size); MPI_Request request; MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25 MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request); MPI_Issend (buf, count, datatype, dest, tag, comm, &request); MPI_Irsend (buf, count, datatype, dest, tag, comm, &request); MPI_Irecv (buf, count, datatype, source, tag, comm, &request); MPI_Wait (&request, &status); int flag; MPI_Test (&request, &flag, &status); // L32 MPI_Request_free (&request); MPI_Request* array_of_requests; int index; MPI_Waitany (count, array_of_requests, &index, &status); // L36 MPI_Testany (count, array_of_requests, &index, &flag, &status); MPI_Status* array_of_statuses; MPI_Waitall (count, array_of_requests, array_of_statuses); // L39 MPI_Testall (count, array_of_requests, &flag, array_of_statuses); int incount; int outcount; int* array_of_indices; MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices, array_of_statuses); // L44--45 MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices, array_of_statuses); // L46--47 MPI_Iprobe (source, tag, comm, &flag, &status); // L48 MPI_Probe (source, tag, comm, &status); MPI_Cancel (&request); MPI_Test_cancelled (&status, &flag); MPI_Send_init (buf, count, datatype, dest, tag, comm, &request); MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request); MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request); MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request); MPI_Recv_init (buf, count, datatype, source, tag, comm, &request); MPI_Start (&request); MPI_Startall (count, array_of_requests); void* sendbuf; int sendcount; MPI_Datatype sendtype; int sendtag; void* recvbuf; int recvcount; MPI_Datatype recvtype; MPI_Datatype recvtag; MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, recvcount, recvtype, source, recvtag, comm, &status); // L67--69 MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag, comm, &status); // L70--71 MPI_Datatype oldtype; MPI_Datatype newtype; MPI_Type_contiguous (count, oldtype, &newtype); // L74 int blocklength; { int stride; MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78 } { MPI_Aint stride; MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82 } int* array_of_blocklengths; { int* array_of_displacements; MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements, oldtype, &newtype); // L87--88 } { MPI_Aint* array_of_displacements; MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements, oldtype, &newtype); // L92--93 MPI_Datatype* array_of_types; MPI_Type_struct (count, array_of_blocklengths, array_of_displacements, array_of_types, &newtype); // L95--96 } void* location; MPI_Aint address; MPI_Address (location, &address); // L100 MPI_Aint extent; MPI_Type_extent (datatype, &extent); // L102 MPI_Type_size (datatype, &size); MPI_Aint displacement; MPI_Type_lb (datatype, &displacement); // L105 MPI_Type_ub 
(datatype, &displacement); MPI_Type_commit (&datatype); MPI_Type_free (&datatype); MPI_Get_elements (&status, datatype, &count); void* inbuf; void* outbuf; int outsize; int position; MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114 int insize; MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype, comm); // L116--117 MPI_Pack_size (incount, datatype, comm, &size); /* === Collectives === */ MPI_Barrier (comm); // L121 int root; MPI_Bcast (buffer, count, datatype, root, comm); // L123 MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm); // L124--125 int* recvcounts; int* displs; MPI_Gatherv (sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm); // L128--130 MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm); // L131--132 int* sendcounts; MPI_Scatterv (sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm); // L134--135 MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); // L136--137 MPI_Allgatherv (sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm); // L138--140 MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); // L141--142 int* sdispls; int* rdispls; MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, rdispls, recvtype, comm); // L145--147 MPI_Op op; MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149 #if 0 MPI_User_function function; int commute; MPI_Op_create (function, commute, &op); // L153 #endif MPI_Op_free (&op); // L155 MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm); MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm); MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm); /* === Groups, contexts, and communicators === */ MPI_Group group; MPI_Group_size (group, &size); // L162 int rank; MPI_Group_rank (group, &rank); // L164 MPI_Group group1; int n; int* ranks1; MPI_Group group2; int* ranks2; MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170 int result; MPI_Group_compare (group1, group2, &result); // L172 MPI_Group newgroup; MPI_Group_union (group1, group2, &newgroup); // L174 MPI_Group_intersection (group1, group2, &newgroup); MPI_Group_difference (group1, group2, &newgroup); int* ranks; MPI_Group_incl (group, n, ranks, &newgroup); // L178 MPI_Group_excl (group, n, ranks, &newgroup); extern int ranges[][3]; MPI_Group_range_incl (group, n, ranges, &newgroup); // L181 MPI_Group_range_excl (group, n, ranges, &newgroup); MPI_Group_free (&group); MPI_Comm_size (comm, &size); MPI_Comm_rank (comm, &rank); MPI_Comm comm1; MPI_Comm comm2; MPI_Comm_compare (comm1, comm2, &result); MPI_Comm newcomm; MPI_Comm_dup (comm, &newcomm); MPI_Comm_create (comm, group, &newcomm); int color; int key; MPI_Comm_split (comm, color, key, &newcomm); // L194 MPI_Comm_free (&comm); MPI_Comm_test_inter (comm, &flag); MPI_Comm_remote_size (comm, &size); MPI_Comm_remote_group (comm, &group); MPI_Comm local_comm; int local_leader; MPI_Comm peer_comm; int remote_leader; MPI_Comm newintercomm; MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag, &newintercomm); // L204--205 MPI_Comm intercomm; MPI_Comm newintracomm; int high; MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209 int keyval; #if 0 MPI_Copy_function copy_fn; MPI_Delete_function delete_fn; void* extra_state; MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215 
#endif MPI_Keyval_free (&keyval); // L217 void* attribute_val; MPI_Attr_put (comm, keyval, attribute_val); // L219 MPI_Attr_get (comm, keyval, attribute_val, &flag); MPI_Attr_delete (comm, keyval); /* === Environmental inquiry === */ char* name; int resultlen; MPI_Get_processor_name (name, &resultlen); // L226 MPI_Errhandler errhandler; #if 0 MPI_Handler_function function; MPI_Errhandler_create (function, &errhandler); // L230 #endif MPI_Errhandler_set (comm, errhandler); // L232 MPI_Errhandler_get (comm, &errhandler); MPI_Errhandler_free (&errhandler); int errorcode; char* string; MPI_Error_string (errorcode, string, &resultlen); // L237 int errorclass; MPI_Error_class (errorcode, &errorclass); // L239 MPI_Wtime (); MPI_Wtick (); int argc; char** argv; MPI_Init (&argc, &argv); // L244 MPI_Finalize (); MPI_Initialized (&flag); MPI_Abort (comm, errorcode); }
//--------------------------------------------------------------------------- // // tests count-receive messages and posts payload-receive messages // for those counts that arrived // // wf: wait_factor for nonblocking communication [0.0-1.0] // RecvItemDtype: pointer to user-supplied function // that takes a pointer to a counts message and // creates an MPI datatype for the payload message // void Neighborhoods::TestMessages(float wf, MPI_Datatype* (*RecvItemDtype)(int *)) { int npr; // number of received points from each process list<ct_t>::iterator ct_it; // request list iterators list<pl_t>::iterator pl_it; // request list iterators int p; // process number char *rcv_p = NULL; // one payload-receive int i, j, k; MPI_Request *reqs; // pending requests int *arr; // indices of requests that arrived MPI_Status *stats; // statuses for arrivals int narr; // number of requests that arrived MPI_Status stat; int tot_narr = 0; // total number of count-receive messages arrived this round int nreqs; // number of requests if (!assign->GetStaticMode()) // override wf for dynamic repartitioning wf = 1.0; int min_arr = (int)(wf * pps.size()); // wait for this number of // counts-receives // to arrive in this round if (recv_cts.size() > 0) { reqs = new MPI_Request[recv_cts.size()]; arr = new int[recv_cts.size()]; stats = new MPI_Status[recv_cts.size()]; // wait for enough items in count-receive list to arrive while (tot_narr < min_arr) { nreqs = 0; for (ct_it = recv_cts.begin(); ct_it != recv_cts.end(); ct_it++) { if (!ct_it->done) reqs[nreqs++] = ct_it->req; } if (nreqs) { MPI_Waitsome(nreqs, reqs, &narr, arr, stats); // post payload-receive for counts that arrived ct_it = recv_cts.begin(); j = 0; for (i = 0; i < narr; i++) { while (ct_it->done || j < arr[i]) { if (!ct_it->done) j++; ct_it++; } ct_it->done = true; ct_it->tag = stats[i].MPI_TAG; // count number of items expected npr = 0; for (k = 0; k < (ct_it->c)[0]; k++) { npr += (ct_it->c)[k * (nhdr + 2) + 2]; } // post payload-receive if (npr > 0) { // at least one point is expected p = ct_it->proc; MPI_Datatype *itype = RecvItemDtype(&(ct_it->c)[0]); MPI_Datatype *mtype = RecvMsgDtype(&(ct_it->c)[0], rcv_p, itype); MPI_Recv(rcv_p, 1, *mtype, p, ct_it->tag + 1, comm, &stat); pl_t pt; // one payload-receive message pt.req = 0; pt.done = true; pt.proc = p; pt.tag = ct_it->tag + 1; // matching tag for payload-receive pt.p = rcv_p; MPI_Aint lb, extent; MPI_Type_get_extent(*itype, &lb, &extent); pt.item_size = extent; recv_pts.push_back(pt); MPI_Type_free(mtype); MPI_Type_free(itype); delete mtype; delete itype; } // if npr > 0 ct_it++; j++; } // for i < narr } // if nreqs tot_narr += narr; } // tot_narr < min_arr delete[] reqs; delete[] arr; delete[] stats; } // recv_cts.size() > 0 }
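The wait-factor idea in TestMessages above, where only a fraction of the outstanding count-receives has to arrive before the round ends, maps directly onto a small MPI_Waitsome loop. The sketch below is a simplified stand-alone version of that pattern; nreq, reqs, wf, and the handle_completed callback are hypothetical stand-ins for the request bookkeeping and per-message processing done by the real class.

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical per-completion hook, standing in for the payload-receive
   posting done by TestMessages. */
extern void handle_completed(int index, MPI_Status *status);

/* Wait until at least wf * nreq of the requests in reqs[] have completed,
   processing each completion as it is reported by MPI_Waitsome. */
void drain_fraction(int nreq, MPI_Request reqs[], float wf)
{
    int *indices = (int *)malloc(nreq * sizeof(int));
    MPI_Status *stats = (MPI_Status *)malloc(nreq * sizeof(MPI_Status));
    int done = 0;
    int min_done = (int)(wf * nreq);

    while (done < min_done) {
        int narr;
        MPI_Waitsome(nreq, reqs, &narr, indices, stats);
        if (narr == MPI_UNDEFINED)      /* no active requests remain */
            break;
        for (int i = 0; i < narr; i++)
            handle_completed(indices[i], &stats[i]);
        done += narr;
    }
    free(indices);
    free(stats);
}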
int main (int argc, char **argv) { int nprocs = -1; int rank = -1; MPI_Comm comm = MPI_COMM_WORLD; char processor_name[128]; int namelen = 128; int buf[BUF_SIZE * 2]; int i, j, k, index, outcount, flag; int indices[2]; MPI_Request aReq[2]; MPI_Status aStatus[2]; /* init */ MPI_Init (&argc, &argv); MPI_Comm_size (comm, &nprocs); MPI_Comm_rank (comm, &rank); MPI_Get_processor_name (processor_name, &namelen); printf ("(%d) is alive on %s\n", rank, processor_name); fflush (stdout); if (rank == 0) { /* set up persistent sends... */ MPI_Send_init (&buf[0], BUF_SIZE, MPI_INT, 1, 0, comm, &aReq[0]); MPI_Send_init (&buf[BUF_SIZE], BUF_SIZE, MPI_INT, 1, 1, comm, &aReq[1]); /* initialize the send buffers */ for (i = 0; i < BUF_SIZE; i++) { buf[i] = i; buf[BUF_SIZE + i] = BUF_SIZE - 1 - i; } } for (k = 0; k < 4; k++) { if (rank == 1) { /* zero out the receive buffers */ bzero (buf, sizeof(int) * BUF_SIZE * 2); } MPI_Barrier(MPI_COMM_WORLD); if (rank == 0) { /* start the persistent sends... */ if (k % 2) { MPI_Startall (2, &aReq[0]); } else { for (j = 0; j < 2; j++) { MPI_Start (&aReq[j]); } } /* complete the sends */ if (k < 2) { /* use MPI_Waitany */ for (j = 0; j < 2; j++) MPI_Waitany (2, aReq, &index, aStatus); } else { /* use MPI_Waitsome */ j = 0; while (j < 2) { MPI_Waitsome (2, aReq, &outcount, indices, aStatus); j += outcount; } } } else if (rank == 1) { /* set up receives for all of the sends */ for (j = 0; j < 2; j++) { MPI_Irecv (&buf[j * BUF_SIZE], BUF_SIZE, MPI_INT, 0, j, comm, &aReq[j]); } /* complete all of the receives... */ MPI_Waitall (2, aReq, aStatus); } } MPI_Barrier(MPI_COMM_WORLD); if (rank == 0) { /* free the persistent requests */ for (i = 0; i < 2; i++) { MPI_Request_free (&aReq[i]); } } MPI_Finalize (); printf ("(%d) Finished normally\n", rank); return 0; }
int main(int argc, char *argv[]) { int myid, numprocs; int x; MPI_Init (&argc, &argv); MPI_Comm_size (MPI_COMM_WORLD, &numprocs); MPI_Comm_rank (MPI_COMM_WORLD, &myid); if (myid == 0){ fprintf(stdout,"Please enter the value of x\n"); scanf("%d",&x); printf("the value of x is %d\n", x); } if (numprocs == 1) { /* trivial single CPU case */ int i, result = 0; for (i=1; i<=x; i++) result += i; printf("one process and the final result is %d\n", result); } else if(myid == 0){ MPI_Request* reqs = (MPI_Request*)malloc((numprocs-1)*sizeof(MPI_Request)); MPI_Status* stat=(MPI_Status*)malloc((numprocs-1)*sizeof(MPI_Status)); int* indices = (int*)malloc((numprocs-1)*sizeof(int)); int* buf = (int*)malloc((numprocs-1)*sizeof(int)); int i, j, t, count, numRunningProcs, wkrid; int result = 0; for(i = 0; i < numprocs-1; i++) { /* send the problem size x to workers */ MPI_Send(&x, 1, MPI_INT, i+1, 10, MPI_COMM_WORLD); /* post a nonblocking receive for this worker's partial result */ MPI_Irecv(&buf[i], 1, MPI_INT, i+1, MPI_ANY_TAG, MPI_COMM_WORLD, &reqs[i]); } numRunningProcs = numprocs - 1; /* loop until partial results from all workers have been received */ while (numRunningProcs){ /* wait for one or more partial results from workers */ MPI_Waitsome(numprocs-1, reqs, &count, indices, stat); numRunningProcs -= count; for(i = 0; i < count; i++){ wkrid = stat[i].MPI_SOURCE; t = stat[i].MPI_TAG; j = indices[i]; result += buf[j]; printf("partial result %d received from worker id = %d with tag = %d\n", buf[j], wkrid, t); } } printf("The final result is %d\n", result); free(buf); free(reqs); free(indices); free(stat); } else{ /* worker processes */ MPI_Status status; int x, i, strt, fnsh; int result = 0; /* receive the problem size x from the master */ MPI_Recv(&x, 1, MPI_INT, 0, 10, MPI_COMM_WORLD, &status); strt = (myid-1) * x / (numprocs-1) + 1; fnsh = myid * x / (numprocs-1); for (i=strt; i<=fnsh; i++) result += i; /* send the result to master */ MPI_Send(&result, 1, MPI_INT, 0, myid, MPI_COMM_WORLD); } MPI_Finalize(); return 0; }
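The master loop above tracks termination by decrementing numRunningProcs. An equivalent way to end the loop is to rely on MPI_Waitsome itself: once every request in the array has completed (and has therefore been set to MPI_REQUEST_NULL), it returns outcount == MPI_UNDEFINED. The sketch below is a hedged variant of the collection loop written as a helper function; the array names mirror those used by the master above, but the function itself is hypothetical.

#include <mpi.h>

/* Sum the workers' partial results as they arrive, stopping when
   MPI_Waitsome reports MPI_UNDEFINED (no active requests left). */
static int collect_results(int nworkers, MPI_Request *reqs, int *buf,
                           int *indices, MPI_Status *stats)
{
    int result = 0;
    for (;;) {
        int count;
        MPI_Waitsome(nworkers, reqs, &count, indices, stats);
        if (count == MPI_UNDEFINED)     /* every request already completed */
            break;
        for (int i = 0; i < count; i++)
            result += buf[indices[i]];
    }
    return result;
}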
int main( int argc, char* argv[] ) { int myrank, nprocs; int val, val2; int idx, idx2[2]; int flag; MPI_Request req; MPI_Request req2[2]; MPI_Status stat; MPI_Init( &argc, &argv ); MPI_Comm_rank( MPI_COMM_WORLD, &myrank ); MPI_Comm_size( MPI_COMM_WORLD, &nprocs ); if( nprocs<2 ) { fprintf(stderr, "Need at least 2 procs to run this program\n"); MPI_Abort(MPI_COMM_WORLD, 1); return 1; } /* MPI_STATUS_IGNORE in MPI_Recv */ switch(myrank) { case 0: MPI_Send( &val, 1, MPI_INT, 1, 33, MPI_COMM_WORLD); break; case 1: MPI_Recv( &val, 1, MPI_INT, 0, 33, MPI_COMM_WORLD, MPI_STATUS_IGNORE ); break; } /* MPI_STATUS_IGNORE in MPI_Wait, MPI_Test */ switch(myrank) { case 0: MPI_Isend( &val, 1, MPI_INT, 1, 34, MPI_COMM_WORLD, &req); MPI_Test( &req, &flag, MPI_STATUS_IGNORE ); MPI_Wait( &req, MPI_STATUS_IGNORE ); break; case 1: MPI_Recv( &val, 1, MPI_INT, 0, 34, MPI_COMM_WORLD, &stat ); break; } /* MPI_STATUS_IGNORE in MPI_Waitany, MPI_Testany */ switch(myrank) { case 0: MPI_Isend( &val, 1, MPI_INT, 1, 35, MPI_COMM_WORLD, &(req2[0])); MPI_Isend( &val2, 1, MPI_INT, 1, 36, MPI_COMM_WORLD, &(req2[1])); MPI_Testany( 2, req2, &idx, &flag, MPI_STATUS_IGNORE ); MPI_Waitany( 2, req2, &idx, MPI_STATUS_IGNORE ); break; case 1: MPI_Recv( &val, 1, MPI_INT, 0, 35, MPI_COMM_WORLD, &stat ); MPI_Recv( &val2, 1, MPI_INT, 0, 36, MPI_COMM_WORLD, &stat ); break; } /* MPI_STATUSES_IGNORE in MPI_Waitall, MPI_Testall */ switch(myrank) { case 0: MPI_Isend( &val, 1, MPI_INT, 1, 35, MPI_COMM_WORLD, &(req2[0])); MPI_Isend( &val2, 1, MPI_INT, 1, 36, MPI_COMM_WORLD, &(req2[1])); MPI_Testall( 2, req2, &flag, MPI_STATUSES_IGNORE ); MPI_Waitall( 2, req2, MPI_STATUSES_IGNORE ); break; case 1: MPI_Recv( &val, 1, MPI_INT, 0, 35, MPI_COMM_WORLD, &stat ); MPI_Recv( &val2, 1, MPI_INT, 0, 36, MPI_COMM_WORLD, &stat ); break; } /* MPI_STATUSES_IGNORE in MPI_Waitsome, MPI_Testsome */ switch(myrank) { case 0: MPI_Isend( &val, 1, MPI_INT, 1, 35, MPI_COMM_WORLD, &(req2[0])); MPI_Isend( &val2, 1, MPI_INT, 1, 36, MPI_COMM_WORLD, &(req2[1])); MPI_Testsome( 2, req2, &idx, idx2, MPI_STATUSES_IGNORE ); MPI_Waitsome( 2, req2, &idx, idx2, MPI_STATUSES_IGNORE ); break; case 1: MPI_Recv( &val, 1, MPI_INT, 0, 35, MPI_COMM_WORLD, &stat ); MPI_Recv( &val2, 1, MPI_INT, 0, 36, MPI_COMM_WORLD, &stat ); break; } MPI_Barrier(MPI_COMM_WORLD); fprintf(stderr, "%5d: DONE\n", myrank); MPI_Finalize(); return 0; }
FORTRAN_API void FORT_CALL mpi_waitsome_( MPI_Fint *incount, MPI_Fint array_of_requests[], MPI_Fint *outcount, MPI_Fint array_of_indices[], MPI_Fint array_of_statuses[][MPI_STATUS_SIZE], MPI_Fint *__ierr ) { int i,j,found; int loutcount; int *l_indices = 0; int local_l_indices[MPIR_USE_LOCAL_ARRAY]; MPI_Request *lrequest = 0; MPI_Request local_lrequest[MPIR_USE_LOCAL_ARRAY]; MPI_Status * c_status = 0; MPI_Status local_c_status[MPIR_USE_LOCAL_ARRAY]; if ((int)*incount > 0) { if ((int)*incount > MPIR_USE_LOCAL_ARRAY) { MPIR_FALLOC(lrequest,(MPI_Request*)MALLOC(sizeof(MPI_Request)* (int)*incount), MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITSOME" ); MPIR_FALLOC(l_indices,(int*)MALLOC(sizeof(int)* (int)*incount), MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITSOME" ); MPIR_FALLOC(c_status,(MPI_Status*)MALLOC(sizeof(MPI_Status)* (int)*incount), MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITSOME" ); } else { lrequest = local_lrequest; l_indices = local_l_indices; c_status = local_c_status; } for (i=0; i<(int)*incount; i++) lrequest[i] = MPI_Request_f2c( array_of_requests[i] ); *__ierr = MPI_Waitsome((int)*incount,lrequest,&loutcount,l_indices, c_status); /* Write the request handles back to Fortran: completed requests (which may now be MPI_REQUEST_NULL, or still allocated if persistent) are converted at their original positions, and requests that did not complete are copied back unchanged */ for (i=0; i<(int)*incount; i++) { if ( i < loutcount) { if (l_indices[i] >= 0) { array_of_requests[l_indices[i]] = MPI_Request_c2f( lrequest[l_indices[i]] ); } } else { found = 0; j = 0; while ( (!found) && (j<loutcount) ) { if (l_indices[j++] == i) found = 1; } if (!found) array_of_requests[i] = MPI_Request_c2f( lrequest[i] ); } } } else *__ierr = MPI_Waitsome( (int)*incount, (MPI_Request *)0, &loutcount, l_indices, c_status ); if (*__ierr != MPI_SUCCESS) return; for (i=0; i<loutcount; i++) { MPI_Status_c2f( &c_status[i], &(array_of_statuses[i][0]) ); if (l_indices[i] >= 0) array_of_indices[i] = l_indices[i] + 1; } *outcount = (MPI_Fint)loutcount; if ((int)*incount > MPIR_USE_LOCAL_ARRAY) { FREE( l_indices ); FREE( lrequest ); FREE( c_status ); } }