/*
 * Error-injection driver: argv[1] selects a deliberately invalid MPI usage
 * so that correctness-checking tools (MUST/MBI-style) can be exercised.
 * Every mode below contains an intentional defect (buffer overrun,
 * overlapping request, or use-after-free) -- do NOT "fix" them.
 * NOTE(review): `buffer` holds 127 ints but several transfers use count
 * 128, and the blocking receives pass MPI_STATUSES_IGNORE where
 * MPI_STATUS_IGNORE is the single-status constant -- presumably both are
 * part of the provoked errors; confirm against the test suite's intent.
 */
int main(int argc, char **argv) {
    int rank, touch; /* `touch` is declared but never used */
    if (argc != 2) {
        fprintf(stderr, "Invalid arg\n");
        return -1;
    }
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int *buffer = (int*) malloc(127 * sizeof(int)); /* one int short of 128 */
    MPI_Request r; /* unused here; shadowed by block-local `r` below */
    if (!strcmp(argv[1], "recv")) {
        /* intentional: receives 128 ints into a 127-int buffer */
        MPI_Recv(buffer, 128, MPI_INT, 0, 10, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
    }
    if (!strcmp(argv[1], "send")) {
        /* intentional: sends 128 ints from a 127-int buffer */
        MPI_Send(buffer, 128, MPI_INT, 0, 10, MPI_COMM_WORLD);
    }
    if (!strcmp(argv[1], "recv-lock")) {
        MPI_Request r;
        /* intentional: pending Irecv on `buffer` overlaps the blocking Recv */
        MPI_Irecv(buffer, 10, MPI_INT, 0, 10, MPI_COMM_WORLD, &r);
        MPI_Recv(buffer, 128, MPI_INT, 0, 10, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
    }
    if (!strcmp(argv[1], "send-lock")) {
        MPI_Request r;
        /* intentional: pending Irecv on `buffer` overlaps the blocking Send */
        MPI_Irecv(buffer, 10, MPI_INT, 0, 10, MPI_COMM_WORLD, &r);
        MPI_Send(buffer, 128, MPI_INT, 0, 10, MPI_COMM_WORLD);
    }
    if (!strcmp(argv[1], "persistent-recv")) {
        MPI_Request r;
        MPI_Recv_init(buffer, 10, MPI_INT, 0, 10, MPI_COMM_WORLD, &r);
        free(buffer); /* intentional: request then started on freed memory */
        MPI_Start(&r);
    }
    if (!strcmp(argv[1], "persistent-send")) {
        MPI_Request r;
        MPI_Send_init(buffer, 10, MPI_INT, 0, 10, MPI_COMM_WORLD, &r);
        free(buffer); /* intentional: request then started on freed memory */
        MPI_Start(&r);
    }
    return 0; /* note: exits without MPI_Finalize -- presumably intentional */
}
/*
 * Regression test: a persistent receive from MPI_PROC_NULL, once started
 * and waited on, must complete immediately with status.MPI_SOURCE set to
 * MPI_PROC_NULL (not MPI_ANY_SOURCE).  Prints "OK" on success, otherwise
 * prints the unexpected source value.
 * NOTE(review): the persistent request is never freed with
 * MPI_Request_free before MPI_Finalize -- harmless for a one-shot test,
 * but confirm the target implementation tolerates it.
 */
int main (int argc, char *argv[])
{
    MPI_Status status;
    MPI_Request req;
    int ierr;

    MPI_Init(&argc, &argv);
    /* zero-length receive from the null process; always completes */
    ierr = MPI_Recv_init(NULL, 0, MPI_INT, MPI_PROC_NULL, MPI_ANY_TAG,
                         MPI_COMM_WORLD, &req);
    if (ierr != MPI_SUCCESS) MPI_Abort(MPI_COMM_WORLD, 1);
    ierr = MPI_Start(&req);
    if (ierr != MPI_SUCCESS) MPI_Abort(MPI_COMM_WORLD, 2);
    ierr = MPI_Wait(&req, &status);
    if (ierr != MPI_SUCCESS) MPI_Abort(MPI_COMM_WORLD, 3);

    if (MPI_PROC_NULL != status.MPI_SOURCE) {
        /* distinguish the common failure mode (ANY_SOURCE left in place) */
        if (MPI_ANY_SOURCE == status.MPI_SOURCE) {
            printf("got MPI_ANY_SOURCE=%d instead of MPI_PROC_NULL=%d\n",
                   status.MPI_SOURCE, MPI_PROC_NULL);
        } else {
            printf("got %d instead of MPI_PROC_NULL=%d\n",
                   status.MPI_SOURCE, MPI_PROC_NULL);
        }
    } else {
        printf("OK\n");
    }
    MPI_Finalize();
    return 0;
}
/*
 * opal_progress callback: drains completed persistent OOB receives from
 * the head of memheap_oob.req_list.  For each completed request the
 * payload is copied into a temporary buffer, the persistent receive is
 * restarted (and re-appended to the list) BEFORE the message is handed
 * to do_recv(), so a reply triggered by do_recv cannot deadlock on a
 * missing posted receive.
 *
 * Returns the number of requests processed during this invocation
 * (0 when the head request has not completed).
 * NOTE(review): the trailing `return 1;` is unreachable -- every exit
 * path is inside the while(1) loop.
 */
static int oshmem_mkey_recv_cb(void)
{
    MPI_Status status;
    int flag;
    int n;
    int rc;
    opal_buffer_t *msg;
    int32_t size;
    void *tmp_buf;
    oob_comm_request_t *r;

    n = 0;
    r = (oob_comm_request_t *)opal_list_get_first(&memheap_oob.req_list);
    assert(r);
    while (1) {
        my_MPI_Test(&r->recv_req, &flag, &status);
        if (OPAL_LIKELY(0 == flag)) {
            /* head request still pending: nothing more to drain */
            return n;
        }
        MPI_Get_count(&status, MPI_BYTE, &size);
        MEMHEAP_VERBOSE(5, "OOB request from PE: %d, size %d", status.MPI_SOURCE, size);
        n++;
        opal_list_remove_first(&memheap_oob.req_list);

        /* to avoid deadlock we must start request
         * before processing it. Data are copied to
         * the tmp buffer */
        tmp_buf = malloc(size);
        if (NULL == tmp_buf) {
            MEMHEAP_ERROR("not enough memory");
            ORTE_ERROR_LOG(0);
            return n;
        }
        memcpy(tmp_buf, (void*)&r->buf, size);
        msg = OBJ_NEW(opal_buffer_t);
        if (NULL == msg) {
            MEMHEAP_ERROR("not enough memory");
            ORTE_ERROR_LOG(0);
            return n;
        }
        /* opal_dss.load takes ownership of tmp_buf inside msg */
        opal_dss.load(msg, (void*)tmp_buf, size);

        /* re-arm the persistent receive before dispatching the message */
        rc = MPI_Start(&r->recv_req);
        if (MPI_SUCCESS != rc) {
            MEMHEAP_ERROR("Failed to post recv request %d", rc);
            ORTE_ERROR_LOG(rc);
            return n;
        }
        opal_list_append(&memheap_oob.req_list, &r->super);

        do_recv(status.MPI_SOURCE, msg);
        OBJ_RELEASE(msg);

        r = (oob_comm_request_t *)opal_list_get_first(&memheap_oob.req_list);
        assert(r);
    }
    return 1; /* unreachable */
}
/*
 * JNI bridge for mpi.Prequest.start(): activates the persistent request
 * whose C handle was passed from Java, raising a Java exception if MPI
 * reports an error, and returns the (possibly updated) handle value.
 */
JNIEXPORT jlong JNICALL Java_mpi_Prequest_start(
        JNIEnv *env, jobject jthis, jlong jRequest)
{
    MPI_Request req = (MPI_Request)jRequest;
    int status = MPI_Start(&req);
    ompi_java_exceptionCheck(env, status);
    /* MPI_Start may rewrite the handle; hand the new value back to Java */
    return (jlong)req;
}
/*
 * Start a previously initialized persistent communication request.
 *
 * @param request  opaque pointer to an MPI_Request created by one of the
 *                 *_init routines; started in place.
 *
 * On failure the process is terminated via comm_exit(1).
 */
void comm_start(void *request)
{
    int rc = MPI_Start((MPI_Request *)request);
    if (rc != MPI_SUCCESS) {
        /* Bug fix: the message previously blamed MPI_Test, which is not
         * the call that failed here. */
        printf("ERROR: MPI_Start failed\n");
        comm_exit(1);
    }
}
/*
 * MTest program for persistent buffered sends: every rank performs ten
 * MPI_Bsend_init / MPI_Start / MPI_Wait / MPI_Request_free cycles
 * (tags 27..36) targeting rank 0; rank 0 receives from every rank and
 * verifies tag, source, and payload contents.
 * Note: filling `a` AFTER MPI_Bsend_init but before MPI_Start is legal --
 * a persistent request reads the buffer only once started.
 */
int main( int argc, char *argv[] )
{
    MPI_Status status;
    MPI_Request request;
    int a[10], b[10];
    int buf[BUFSIZE], *bptr, bl, i, j, rank, size;
    int errs = 0;

    MTest_Init( 0, 0 );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Buffer_attach( buf, BUFSIZE );
    for (j=0; j<10; j++) {
        MPI_Bsend_init( a, 10, MPI_INT, 0, 27+j, MPI_COMM_WORLD, &request );
        for (i=0; i<10; i++) {
            /* payload encodes (sender rank, iteration, element index) */
            a[i] = (rank + 10 * j) * size + i;
        }
        MPI_Start( &request );
        MPI_Wait( &request, &status );
        MPI_Request_free( &request );
    }
    if (rank == 0) {
        for (i=0; i<size; i++) {
            for (j=0; j<10; j++) {
                int k;
                /* poison status fields so we can tell MPI really set them */
                status.MPI_TAG = -10;
                status.MPI_SOURCE = -20;
                MPI_Recv( b, 10, MPI_INT, i, 27+j, MPI_COMM_WORLD, &status );
                if (status.MPI_TAG != 27+j) {
                    errs++;
                    printf( "Wrong tag = %d\n", status.MPI_TAG );
                }
                if (status.MPI_SOURCE != i) {
                    errs++;
                    printf( "Wrong source = %d\n", status.MPI_SOURCE );
                }
                for (k=0; k<10; k++) {
                    if (b[k] != (i + 10 * j) * size + k) {
                        errs++;
                        printf( "received b[%d] = %d from %d tag %d\n", k, b[k], i, 27+j );
                    }
                }
            }
        }
    }
    MPI_Buffer_detach( &bptr, &bl );
    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
/*
 * Activate the MPI request(s) held by a QMP message handle.
 * Multi-message handles start their whole request array at once;
 * single-message handles start the lone request.  Always reports success.
 */
QMP_status_t QMP_start_mpi (QMP_msghandle_t mh)
{
  if (mh->type == MH_multiple)
    MPI_Startall(mh->num, mh->request_array);
  else
    MPI_Start(&mh->request);

  return QMP_SUCCESS;
}
/**
 * @brief Responds with no_work to pending work requests.
 *
 * Answers any pending work requests in case a rank is blocking,
 * waiting for a response, then frees the persistent request objects
 * for all peers.
 *
 * @param sptr library state (rank, size, persistent request arrays).
 */
static void CIRCLE_cleanup_mpi_messages(CIRCLE_state_st* sptr)
{
    int i = 0;
    int j = 0;

    /* TODO: this is O(N^2)... need a better way at large scale */
    /* Make sure that all pending work requests are answered. */
    for(j = 0; j < sptr->size; j++) {
        for(i = 0; i < sptr->size; i++) {
            if(i != sptr->rank) {
                sptr->request_flag[i] = 0;
                if(MPI_Test(&sptr->mpi_state_st->request_request[i], \
                            &sptr->request_flag[i], \
                            &sptr->mpi_state_st->request_status[i]) \
                        != MPI_SUCCESS) {
                    MPI_Abort(*sptr->mpi_state_st->work_comm, \
                              LIBCIRCLE_MPI_ERROR);
                }
                if(sptr->request_flag[i]) {
                    /* re-arm the persistent receive, then tell the
                     * requester there is no work left */
                    MPI_Start(&sptr->mpi_state_st->request_request[i]);
                    CIRCLE_send_no_work(i);
                }
            }
        }
    }

    /* free off persistent requests */
    for(i = 0; i < sptr->size; i++) {
        if(i != sptr->rank) {
            MPI_Request_free(&sptr->mpi_state_st->request_request[i]);
        }
    }

    return;
}
/*
 * Initialize the memheap out-of-band channel: construct the lock,
 * condition variable, and request list, pre-post MEMHEAP_RECV_REQS_MAX
 * persistent wildcard receives (tag 0, MPI_ANY_SOURCE), and register
 * oshmem_mkey_recv_cb with opal_progress to drain them.
 *
 * @param map  memheap map stored in the module-level `memheap_map`.
 * @return OSHMEM_SUCCESS, or the MPI error code of a failed
 *         MPI_Recv_init / MPI_Start (earlier requests are left posted).
 */
int memheap_oob_init(mca_memheap_map_t *map)
{
    int rc = OSHMEM_SUCCESS;
    int i;
    oob_comm_request_t *r;

    memheap_map = map;

    OBJ_CONSTRUCT(&memheap_oob.lck, opal_mutex_t);
    OBJ_CONSTRUCT(&memheap_oob.cond, opal_condition_t);
    OBJ_CONSTRUCT(&memheap_oob.req_list, opal_list_t);

    for (i = 0; i < MEMHEAP_RECV_REQS_MAX; i++) {
        r = &memheap_oob.req_pool[i];
        rc = MPI_Recv_init(r->buf, sizeof(r->buf), MPI_BYTE,
                           MPI_ANY_SOURCE, 0, oshmem_comm_world,
                           &r->recv_req);
        if (MPI_SUCCESS != rc) {
            MEMHEAP_ERROR("Failed to created recv request %d", rc);
            return rc;
        }
        rc = MPI_Start(&r->recv_req);
        if (MPI_SUCCESS != rc) {
            MEMHEAP_ERROR("Failed to post recv request %d", rc);
            return rc;
        }
        opal_list_append(&memheap_oob.req_list, &r->super);
    }

    opal_progress_register(oshmem_mkey_recv_cb);

    return rc;
}
/*
 * Fortran binding for MPI_Start: translate the Fortran request handle to
 * a C handle, start the request, and write the (possibly changed) handle
 * and the return code back through the Fortran arguments.
 */
EXPORT_MPI_API void FORTRAN_API mpi_start_( MPI_Fint *request, MPI_Fint *__ierr )
{
    MPI_Request creq = MPI_Request_f2c(*request);
    *__ierr = MPI_Start(&creq);
    *request = MPI_Request_c2f(creq);
}
/* Activate the persistent request stored in a message handle.
 * MPI_CHECK is a project macro wrapping the call -- presumably it aborts
 * or reports on a non-MPI_SUCCESS return; confirm in its definition. */
void comm_start(MsgHandle *mh) { MPI_CHECK( MPI_Start(&(mh->request)) ); }
int main (int argc, char **argv) { int nprocs = -1; int rank = -1; MPI_Comm comm = MPI_COMM_WORLD; char processor_name[128]; int namelen = 128; int bbuf[(BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES]; int buf[BUF_SIZE * 2 * NUM_SEND_TYPES]; int i, j, k, at_size, send_t_number, index, outcount, total, flag; int num_errors, error_count, indices[2 * NUM_SEND_TYPES]; MPI_Request aReq[2 * NUM_SEND_TYPES]; MPI_Status aStatus[2 * NUM_SEND_TYPES]; /* init */ MPI_Init (&argc, &argv); MPI_Comm_size (comm, &nprocs); MPI_Comm_rank (comm, &rank); MPI_Get_processor_name (processor_name, &namelen); printf ("(%d) is alive on %s\n", rank, processor_name); fflush (stdout); MPI_Buffer_attach (bbuf, sizeof(int) * (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES); if (rank == 0) { /* set up persistent sends... */ send_t_number = NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES; MPI_Send_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Send_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Bsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Bsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Rsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Rsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Ssend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Ssend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); } for (k 
= 0; k < (NUM_COMPLETION_MECHANISMS * 2); k++) { if (rank == 0) { /* initialize all of the send buffers */ for (j = 0; j < NUM_SEND_TYPES; j++) { for (i = 0; i < BUF_SIZE; i++) { buf[2 * j * BUF_SIZE + i] = i; buf[((2 * j + 1) * BUF_SIZE) + i] = BUF_SIZE - 1 - i; } } } else if (rank == 1) { /* zero out all of the receive buffers */ bzero (buf, sizeof(int) * BUF_SIZE * 2 * NUM_SEND_TYPES); } MPI_Barrier(MPI_COMM_WORLD); if (rank == 0) { /* set up transient sends... */ send_t_number = 0; MPI_Isend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Isend (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Ibsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Ibsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; /* Barrier to ensure receives are posted for rsends... */ MPI_Barrier(MPI_COMM_WORLD); MPI_Irsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Irsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); send_t_number++; MPI_Issend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2, comm, &aReq[send_t_number * 2]); MPI_Issend (&buf[(send_t_number * 2 + 1) * BUF_SIZE], BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1, comm, &aReq[send_t_number * 2 + 1]); /* just to be paranoid */ send_t_number++; assert (send_t_number == NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES); /* start the persistent sends... 
*/ if (k % 2) { MPI_Startall (NUM_PERSISTENT_SEND_TYPES * 2, &aReq[2 * send_t_number]); } else { for (j = 0; j < NUM_PERSISTENT_SEND_TYPES * 2; j++) { MPI_Start (&aReq[2 * send_t_number + j]); } } /* NOTE: Changing the send buffer of a Bsend is NOT an error... */ for (j = 0; j < NUM_SEND_TYPES; j++) { /* muck the buffers */ buf[j * 2 * BUF_SIZE + (BUF_SIZE >> 1)] = BUF_SIZE; } printf ("USER MSG: 6 change send buffer errors in iteration #%d:\n", k); /* complete the sends */ switch (k/2) { case 0: /* use MPI_Wait */ for (j = 0; j < NUM_SEND_TYPES * 2; j++) { MPI_Wait (&aReq[j], &aStatus[j]); } break; case 1: /* use MPI_Waitall */ MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus); break; case 2: /* use MPI_Waitany */ for (j = 0; j < NUM_SEND_TYPES * 2; j++) { MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus); } break; case 3: /* use MPI_Waitsome */ total = 0; while (total < NUM_SEND_TYPES * 2) { MPI_Waitsome (NUM_SEND_TYPES * 2, aReq, &outcount, indices, aStatus); total += outcount; } break; case 4: /* use MPI_Test */ for (j = 0; j < NUM_SEND_TYPES * 2; j++) { flag = 0; while (!flag) { MPI_Test (&aReq[j], &flag, &aStatus[j]); } } break; case 5: /* use MPI_Testall */ flag = 0; while (!flag) { MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus); } break; case 6: /* use MPI_Testany */ for (j = 0; j < NUM_SEND_TYPES * 2; j++) { flag = 0; while (!flag) { MPI_Testany (NUM_SEND_TYPES * 2, aReq, &index, &flag, aStatus); } } break; case 7: /* use MPI_Testsome */ total = 0; while (total < NUM_SEND_TYPES * 2) { outcount = 0; while (!outcount) { MPI_Testsome (NUM_SEND_TYPES * 2, aReq, &outcount, indices, aStatus); } total += outcount; } break; default: assert (0); break; } } else if (rank == 1) {
/*
 * Two-rank persistent-send test: rank 0 creates two persistent sends
 * (tags 0 and 1) to rank 1, then over four iterations alternates
 * MPI_Startall with individual MPI_Start calls and completes them with
 * MPI_Waitany (iterations 0-1) or MPI_Waitsome (iterations 2-3); rank 1
 * posts matching Irecvs and Waitall each iteration.
 * NOTE(review): `comm` is declared `int`, not MPI_Comm -- this compiles
 * only where MPI_Comm is an integer handle (e.g. MPICH); confirm the
 * intended target.  main also ends without a return statement.
 */
int main (int argc, char **argv)
{
    int nprocs = -1;
    int rank = -1;
    int comm = MPI_COMM_WORLD;
    char processor_name[128];
    int namelen = 128;
    int buf[BUF_SIZE * 2];
    int i, j, k, index, outcount, flag;
    int indices[2];
    MPI_Request aReq[2];
    MPI_Status aStatus[2];

    /* init */
    MPI_Init (&argc, &argv);
    MPI_Comm_size (comm, &nprocs);
    MPI_Comm_rank (comm, &rank);
    MPI_Get_processor_name (processor_name, &namelen);
    printf ("(%d) is alive on %s\n", rank, processor_name);
    fflush (stdout);

    if (rank == 0) {
        /* set up persistent sends... */
        MPI_Send_init (&buf[0], BUF_SIZE, MPI_INT, 1, 0, comm, &aReq[0]);
        MPI_Send_init (&buf[BUF_SIZE], BUF_SIZE, MPI_INT, 1, 1, comm, &aReq[1]);
        /* initialize the send buffers */
        for (i = 0; i < BUF_SIZE; i++) {
            buf[i] = i;
            buf[BUF_SIZE + i] = BUF_SIZE - 1 - i;
        }
    }

    for (k = 0; k < 4; k++) {
        if (rank == 1) {
            /* zero out the receive buffers */
            bzero (buf, sizeof(int) * BUF_SIZE * 2);
        }
        MPI_Barrier(MPI_COMM_WORLD);
        if (rank == 0) {
            /* start the persistent sends... */
            if (k % 2) {
                MPI_Startall (2, &aReq[0]);
            } else {
                for (j = 0; j < 2; j++) {
                    MPI_Start (&aReq[j]);
                }
            }
            /* complete the sends */
            if (k < 2) {
                /* use MPI_Waitany */
                for (j = 0; j < 2; j++)
                    MPI_Waitany (2, aReq, &index, aStatus);
            } else {
                /* use MPI_Waitsome */
                j = 0;
                while (j < 2) {
                    MPI_Waitsome (2, aReq, &outcount, indices, aStatus);
                    j += outcount;
                }
            }
        } else if (rank == 1) {
            /* set up receives for all of the sends */
            for (j = 0; j < 2; j++) {
                MPI_Irecv (&buf[j * BUF_SIZE], BUF_SIZE, MPI_INT, 0, j, comm, &aReq[j]);
            }
            /* complete all of the receives... */
            MPI_Waitall (2, aReq, aStatus);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) {
        /* free the persistent requests */
        for (i = 0 ; i < 2; i++) {
            MPI_Request_free (&aReq[i]);
        }
    }

    MPI_Finalize ();
    printf ("(%d) Finished normally\n", rank);
}
/*
 * Persistent-send fan-in test: every rank creates one persistent send of
 * 10 ints to rank 0; rank 0 first posts `size` Irecvs (one per rank,
 * including itself), then all ranks Start/Wait their send, and rank 0
 * completes the receives with Waitall.
 * NOTE(review): all of rank 0's Irecvs share the same `rbuf` -- the
 * receive buffers overlap, which is presumably deliberate for this
 * request-matching test; the payloads themselves are never checked.
 */
int main(int argc, char *argv[])
{
    MPI_Request r;
    MPI_Status s;
    // int flag;
    int buf[10];
    int rbuf[10];
    int tag = 27;
    int dest = 0;
    int rank, size;

    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    /* Create a persistent send request */
    // every rank prepares the send to rank 0
    MPI_Send_init( buf, 10, MPI_INT, dest, tag, MPI_COMM_WORLD, &r );

    /* Use that request */
    if (rank == 0) {
        // allocate an array of `size` requests for the irecvs
        MPI_Request *rr = (MPI_Request *)malloc(size * sizeof(MPI_Request));
        for (int i=0; i<size; i++) {
            // rank 0 will receive from every rank
            MPI_Irecv( rbuf, 10, MPI_INT, i, tag, MPI_COMM_WORLD, &rr[i] );
        }
        // rank 0 will send to itself
        MPI_Start( &r );
        // rank 0 sends to itself
        MPI_Wait( &r, &s );
        // rank 0 receives from every rank
        MPI_Waitall( size, rr, MPI_STATUSES_IGNORE );
        free(rr);
    }
    else {
        // non-zero ranks will send to rank 0
        MPI_Start( &r );
        // non-zero ranks send to rank 0
        MPI_Wait( &r, &s );
    }
    MPI_Request_free( &r );

    /* An alternative variant (persistent receive on rank 0 driven by
     * repeated MPI_Start/MPI_Wait, with plain MPI_Send on the other
     * ranks) was left commented out by the author; removed here for
     * clarity -- see version control for the original text. */

    MPI_Finalize();
    return 0;
}
/*
 * MPI_Wait interposer (profiling-layer override): if the request belongs
 * to the modeled world communicator it is (re)started via MPI_Start
 * before delegating to the real implementation through PMPI_Wait.
 * NOTE(review): dereferencing the request handle to reach
 * req_mpi_object.comm ties this to one MPI implementation's internal
 * request layout, and the start-before-wait semantics presumably come
 * from the MPISPEC verification harness -- confirm against its driver
 * before reusing this elsewhere.
 */
int MPI_Wait(MPI_Request *request, MPI_Status *status)
{
    if ((*request)->req_mpi_object.comm == MPISPEC_COMM_WORLD) {
        MPI_Start(request);
    }
    return PMPI_Wait(request, status);
}
/*
 * Compile-time coverage of the MPI-1 C bindings: one call per API entry
 * point, grouped by chapter, using locally declared (uninitialized)
 * arguments.  This function exists only to be compiled against mpi.h --
 * it must never be executed (every call would read indeterminate
 * values).  The trailing // Lnn comments are cross-references into the
 * original bindings listing.
 * NOTE(review): `recvtag` is declared MPI_Datatype where an int tag is
 * presumably meant -- harmless for a compile check where handles are
 * integer typedefs, but worth confirming.
 */
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
                array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
                array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  /* persistent-request constructors */
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  MPI_Datatype recvtag; /* see NOTE(review) above: likely meant to be int */
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
                recvbuf, recvcount, recvtype, source, recvtag,
                comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source,
                        recvtag, comm, &status); // L70--71
  /* derived datatypes */
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
                      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  /* pack/unpack */
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
              comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
              root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs,
               recvtype, root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount,
                recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount,
                 recvtype, comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype, recvbuf, recvcounts,
                  displs, recvtype, comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount,
                recvtype, comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype, recvbuf,
                 recvcounts, rdispls, recvtype, comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader,
                        tag, &newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
/*
 * MPI_Cancel test for persistent sends, run between rank 0 and the last
 * rank.  For each completion mechanism (Wait, Test, Waitany, Testany,
 * Waitsome, Testsome) rank 0 checks that cancelling a started persistent
 * send succeeds when the matching receive is never posted (Wait family)
 * and fails when the receiver has already matched the message (Test
 * family, where the partner posts a real MPI_Recv first).  Sendrecv
 * calls with zero-length payloads act as pairwise synchronization.
 * NOTE(review): "suceeded" in the partner-side messages is a typo kept
 * as-is -- it is runtime output some harness may grep for.
 */
int main( int argc, char **argv )
{
    MPI_Request r1;
    int size, rank;
    int err = 0;
    int partner, buf[10], flag, idx, index;
    MPI_Status status;

    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    if (size < 2) {
        printf( "Cancel test requires at least 2 processes\n" );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /*
     * Here is the test.  First, we ensure an unsatisfied Irecv:
     *       process 0             process size-1
     *       Sendrecv              Sendrecv
     *       Irecv                 ----
     *       Cancel                ----
     *       Sendrecv              Sendrecv
     * Next, we confirm receipt before canceling
     *       Irecv                 Send
     *       Sendrecv              Sendrecv
     *       Cancel
     */
    if (rank == 0) {
        partner = size - 1;
        /* Cancel succeeds for wait/waitall */
        MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Start( &r1 );
        MPI_Cancel( &r1 );
        MPI_Wait( &r1, &status );
        MPI_Test_cancelled( &status, &flag );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        if (!flag) {
            err++;
            printf( "Cancel of a send failed where it should succeed (Wait).\n" );
        }
        MPI_Request_free( &r1 );

        /* Cancel fails for test/testall */
        buf[0] = 3;
        MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
        MPI_Start( &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Cancel( &r1 );
        MPI_Test( &r1, &flag, &status );
        MPI_Test_cancelled( &status, &flag );
        if (flag) {
            err++;
            printf( "Cancel of a send succeeded where it shouldn't (Test).\n" );
        }
        MPI_Request_free( &r1 );

        /* Cancel succeeds for waitany */
        MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Start( &r1 );
        MPI_Cancel( &r1 );
        MPI_Waitany( 1, &r1, &idx, &status );
        MPI_Test_cancelled( &status, &flag );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        if (!flag) {
            err++;
            printf( "Cancel of a send failed where it should succeed (Waitany).\n" );
        }
        MPI_Request_free( &r1 );

        /* Cancel fails for testany */
        buf[0] = 3;
        MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
        MPI_Start( &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Cancel( &r1 );
        MPI_Testany( 1, &r1, &idx, &flag, &status );
        MPI_Test_cancelled( &status, &flag );
        if (flag) {
            err++;
            printf( "Cancel of a send succeeded where it shouldn't (Testany).\n" );
        }
        MPI_Request_free( &r1 );

        /* Cancel succeeds for waitsome */
        MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Start( &r1 );
        MPI_Cancel( &r1 );
        MPI_Waitsome( 1, &r1, &idx, &index, &status );
        MPI_Test_cancelled( &status, &flag );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        if (!flag) {
            err++;
            printf( "Cancel of a send failed where it should succeed (Waitsome).\n" );
        }
        MPI_Request_free( &r1 );

        /* Cancel fails for testsome*/
        buf[0] = 3;
        MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
        MPI_Start( &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Cancel( &r1 );
        MPI_Testsome( 1, &r1, &idx, &index, &status );
        MPI_Test_cancelled( &status, &flag );
        if (flag) {
            err++;
            printf( "Cancel of a send succeeded where it shouldn't (Testsome).\n" );
        }
        MPI_Request_free( &r1 );

        if (err) {
            printf( "Test failed with %d errors.\n", err );
        }
        else {
            printf( "Test passed\n" );
        }
    }
    else if (rank == size - 1) {
        partner = 0;
        /* Cancel succeeds for wait/waitall */
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        /* Cancel fails for test/testall: receive first so the send
         * matches before rank 0 cancels it */
        buf[0] = -1;
        MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        if (buf[0] == -1) {
            printf( "Receive buffer did not change even though cancel should not have suceeded! (Test).\n" );
        }
        /* Cancel succeeds for waitany */
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        /* Cancel fails for testany */
        buf[0] = -1;
        MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        if (buf[0] == -1) {
            printf( "Receive buffer did not change even though cancel should not have suceeded! (Testany).\n" );
        }
        /* Cancel succeeds for waitsome */
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        /* Cancel fails for testsome */
        buf[0] = -1;
        MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        if (buf[0] == -1) {
            printf( "Receive buffer did not change even though cancel should not have suceeded! (Test).\n" );
        }
    }

    MPI_Finalize();
    return 0;
}
/*
 * Smoke test for basic MPI functionality.  Exercises, in order:
 * point-to-point (Send/Recv, Sendrecv, Sendrecv_replace, Isend/Irecv,
 * persistent Send_init/Recv_init) and collectives (Bcast, Gather,
 * Gatherv, Scatter, Alltoall).  Rank 0 prints a banner before each
 * sub-test and "OK" after it; data mismatches print "???" diagnostics.
 * Requires at least NUMTASKS processes (NUMTASKS defined elsewhere in
 * this file; the fixed buffer sizes below assume NUMTASKS <= 10 —
 * TODO confirm against the definition).
 */
int main (int argc, char *argv[])
{
    int numtasks, rank, len, rc;
    char hostname[MPI_MAX_PROCESSOR_NAME];
    int buffer[10];                            /* small pt2pt payload            */
    int buffer2[20];                           /* larger pt2pt / scatter payload */
    int buffer3[NUMTASKS*10];                  /* gather target (10 ints/rank)   */
    int buffer4[NUMTASKS*10 + (NUMTASKS-1)];   /* gatherv target with gaps       */
    int displs[NUMTASKS];
    int recvcounts[NUMTASKS];
    int i, mpi_errno;
    int rank__;                                /* expected source rank while verifying gather */
    MPI_Status status;
    MPI_Request request;

    rc = MPI_Init(&argc,&argv);
    if (rc != MPI_SUCCESS) {
        printf("Error starting MPI program. Termination.\n");
        MPI_Abort(MPI_COMM_WORLD, rc);
    }
    MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Get_processor_name(hostname, &len);

    /* The rank-specific tests below address ranks up to NUMTASKS-1. */
    if (numtasks < NUMTASKS) {
        if (!rank) printf("I need at least %d tasks!!!\n",NUMTASKS);
        MPI_Finalize();
        return -1;
    }

    /*
     * PT2PT
     */
    if (!rank) printf("Testing MPI_Send and MPI_Recv between 0 and 1... ");

    /* Sending a buffer of 10 integers to process 1 */
    if (!rank) { // producer (rank == 0)
        for (i=0; i < 10; i++)
            buffer[i] = i;
        mpi_errno = MPI_Send(buffer,10,MPI_INT,1,1,MPI_COMM_WORLD);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Send!\n");
    }
    if (rank == 1) { // consumer (rank == 1)
        for (i=0; i < 10; i++)
            buffer[i] = -1;   /* poison so a missing message is visible */
        mpi_errno = MPI_Recv(buffer,10,MPI_INT,0,1,MPI_COMM_WORLD,&status);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Recv!\n");
        for (i=0; i < 10; i++) {
            if (buffer[i] != i)
                printf("??? buffer[%d]=%d\n",i,buffer[i]);
        }
    }
    /* mpi_errno only read on rank 0 (short-circuit), where it was set. */
    if (!rank && mpi_errno == MPI_SUCCESS) {
        printf("OK\n");
        printf("Testing MPI_Sendrecv. Send between 0 and 1. Recv between 2 and 0... ");
    }

    if (!rank) { // (rank == 0)
        for (i=0; i < 10; i++)   // producer
            buffer[i] = i;
        for (i=0; i < 20; i++)   // consumer
            buffer2[i] = -1;
        /* Combined: send 10 ints to rank 1 (tag 2), receive 20 from rank 2 (tag 3). */
        mpi_errno = MPI_Sendrecv(buffer,10,MPI_INT,1,2, buffer2,20,MPI_INT,2,3,MPI_COMM_WORLD,&status);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Sendrecv!\n");
        for (i=0; i < 20; i++) {
            if (buffer2[i] != 20 - i)
                printf("??? buffer2[%d]=%d\n",i,buffer2[i]);
        }
    }
    if (rank == 1) { // (rank == 1)
        for (i=0; i < 10; i++)   // consumer
            buffer[i] = -1;
        mpi_errno = MPI_Recv(buffer,10,MPI_INT,0,2,MPI_COMM_WORLD,&status);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Recv!\n");
        for (i=0; i < 10; i++) {
            if (buffer[i] != i)
                printf("??? buffer[%d]=%d\n",i,buffer[i]);
        }
    }
    if (rank == 2) { // (rank == 2)
        for (i=0; i < 20; i++)   // producer
            buffer2[i] = 20 - i;
        mpi_errno = MPI_Send(buffer2,20,MPI_INT,0,3,MPI_COMM_WORLD);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Send!\n");
    }
    if (!rank && mpi_errno == MPI_SUCCESS) {
        printf("OK\n");
    }

    if (!rank){
        printf("Testing MPI_Sendrecv_replace. Send between 0 and 1. Recv between 2 and 0... ");
    }
    if (!rank) { // (rank == 0)
        for (i=0; i < 10; i++)   // producer (and consumer!)
            buffer[i] = i;
        /* Same buffer is sent to rank 1 (tag 4) and overwritten by rank 2's data (tag 5). */
        mpi_errno = MPI_Sendrecv_replace(buffer,10,MPI_INT,1,4,2,5,MPI_COMM_WORLD,&status);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Sendrecv!\n");
        for (i=0; i < 10; i++) {
            if (buffer[i] != 10 - i)
                printf("??? buffer[%d]=%d\n",i,buffer[i]);
        }
    }
    if (rank == 1) { // (rank == 1)
        for (i=0; i < 10; i++)   // consumer
            buffer[i] = -1;
        mpi_errno = MPI_Recv(buffer,10,MPI_INT,0,4,MPI_COMM_WORLD,&status);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Recv!\n");
        for (i=0; i < 10; i++) {
            if (buffer[i] != i)
                printf("??? buffer[%d]=%d\n",i,buffer[i]);
        }
    }
    if (rank == 2) { // (rank == 2)
        for (i=0; i < 10; i++)   // producer
            buffer[i] = 10 - i;
        mpi_errno = MPI_Send(buffer,10,MPI_INT,0,5,MPI_COMM_WORLD);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Send!\n");
    }
    if (!rank && mpi_errno == MPI_SUCCESS) {
        printf("OK\n");
    }

    if (!rank) {
        printf("Testing MPI_Irecv and MPI_Isend. Send between 0 and 1... ");
    }
    if (rank == 1) { // producer
        for (i=0; i < 10; i++)
            buffer[i] = i;
        mpi_errno = MPI_Isend(buffer,10,MPI_INT,0,6,MPI_COMM_WORLD,&request);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Isend!\n");
        sleep(2);   /* delay completion so the receiver really waits */
        mpi_errno = MPI_Wait(&request,&status);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Wait!\n");
    }
    if (rank == 0) { // consumer
        for (i=0; i < 10; i++)
            buffer[i] = -1;
        mpi_errno = MPI_Irecv(buffer,10,MPI_INT,1,6,MPI_COMM_WORLD,&request);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Irecv!\n");
        mpi_errno = MPI_Wait(&request,&status);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Wait!\n");
        for (i=0; i < 10; i++) {
            if (buffer[i] != i)
                printf("??? buffer[%d]=%d\n",i,buffer[i]);
        }
    }
    if (!rank && mpi_errno == MPI_SUCCESS) printf("OK\n");

    if (!rank) {
        printf("Testing MPI_Send_init and MPI_Recv_init. 0 --> 3... ");
    }
    if (rank == 0) { // producer
        for (i=0; i < 10; i++)
            buffer[i] = i;
        /* Persistent send: init once, Start/Wait, then free the request. */
        mpi_errno = MPI_Send_init(buffer,10,MPI_INT, 3,7,MPI_COMM_WORLD, &request);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Send_init!\n");
        //sleep(3);
        mpi_errno = MPI_Start(&request);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Start!\n");
        mpi_errno = MPI_Wait(&request,&status);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Wait!\n");
        mpi_errno = MPI_Request_free(&request);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Request_free!\n");
    }
    if (rank == 3) { // Consumer
        for (i=0; i < 10; i++)
            buffer[i] = -1;
        mpi_errno = MPI_Recv_init(buffer,10,MPI_INT, 0,7,MPI_COMM_WORLD, &request);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Recv_init!\n");
        sleep(7);   /* start the receive late: exercises the unexpected-message path */
        mpi_errno = MPI_Start(&request);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Start!\n");
        mpi_errno = MPI_Wait(&request,&status);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Wait!\n");
        mpi_errno = MPI_Request_free(&request);
        if (mpi_errno != MPI_SUCCESS)
            printf("Something went wrong in the MPI_Request_free!\n");
    }
    if (!rank && mpi_errno == MPI_SUCCESS) printf("OK\n");

    /*
     * COLL
     */
    if (!rank) printf("Testing MPI_Bcast. 4 to all processes... ");
    for(i=0; i < 10; i++) {
        if (rank == 4) { // producer
            buffer[i] = (int) pow((double)2,(double)i); // 2^i
        } else {         // consumer
            buffer[i] = -1;
        }
    }
    mpi_errno = MPI_Bcast(buffer,10,MPI_INT,4,MPI_COMM_WORLD);
    if (mpi_errno != MPI_SUCCESS)
        printf("Something went wrong in the MPI_Bcast!\n");
    if (rank != 4) {
        for (i=0; i < 10; i++)
            if (buffer[i] != (int) pow((double)2,(double)i)) // 2^i
                printf("??? buffer[%d]=%d\n",i,buffer[i]);
    }
    if (!rank && mpi_errno == MPI_SUCCESS) printf("OK\n");

    if (!rank) printf("Testing MPI_Gather. All to 5... ");
    if (rank == 5) // consumer
        for (i=0; i < NUMTASKS*10; i++)
            buffer3[i] = -1;
    // producer: every rank contributes 10 ints stamped with its rank
    for (i=0; i < 10; i++)
        buffer[i] = (10*rank) + i;
    mpi_errno = MPI_Gather(buffer,10,MPI_INT, buffer3,10,MPI_INT,5,MPI_COMM_WORLD);
    if (mpi_errno != MPI_SUCCESS)
        printf("Something went wrong in the MPI_Gather!\n");
    if (rank == 5) {
        /* rank__ tracks which source rank each 10-int chunk came from */
        rank__ = -1;
        for (i=0; i < NUMTASKS*10; i++) {
            if (i % 10 == 0)
                rank__++;
            if (buffer3[i] != (10*rank__) + (i % 10))
                printf("??? buffer3[%d]=%d vs %d\n",i,buffer3[i],(10*rank__)+(i % 10));
        }
    }
    if (!rank && mpi_errno == MPI_SUCCESS) printf("OK\n");

    if (!rank) printf("Testing MPI_Gatherv. All to 5... ");
    if (rank == 5) { // consumer
        for (i=0; i < NUMTASKS*10 + (NUMTASKS-1); i++)
            buffer4[i] = -1;
        /* one-int gap between each rank's 10-int chunk */
        for (i=0; i < NUMTASKS; i++)
            displs[i] = (10*i) + i;
        for (i=0; i < NUMTASKS; i++)
            recvcounts[i] = 10;
    }
    // producer
    for (i=0; i < 10; i++)
        buffer[i] = (10*rank) + (10 - i);
    mpi_errno = MPI_Gatherv(buffer,10,MPI_INT, buffer4,recvcounts,displs, MPI_INT,5,MPI_COMM_WORLD);
    if (mpi_errno != MPI_SUCCESS)
        printf("Something went wrong in the MPI_Gatherv!\n");
    /*
    if (rank == 5) {
        for (i=0; i < NUMTASKS*10 + (NUMTASKS-1); i++)
            printf("buffer4[%d]=%d\n",i,buffer4[i]);
    }
    */
    if (!rank && mpi_errno == MPI_SUCCESS) printf("OK\n");

    if (!rank) printf("Testing MPI_Scatter. 6 to all... ");
    if (rank == 6) {// producer
        for (i=0; i < NUMTASKS; i++)
            buffer[i] = i;   /* NOTE(review): assumes NUMTASKS <= 10 (buffer size) */
    }
    // consumer: each rank receives exactly one int == its rank
    buffer2[0] = -1;
    mpi_errno = MPI_Scatter(buffer,1,MPI_INT, buffer2,1,MPI_INT,6,MPI_COMM_WORLD);
    if (mpi_errno != MPI_SUCCESS)
        printf("Something went wrong in the MPI_Scatter!\n");
    if (buffer2[0] != rank)
        printf("??? result=%d vs %d\n",buffer2[0],rank);
    if (!rank && mpi_errno==MPI_SUCCESS) printf("OK\n");

    if (!rank) printf("Testing MPI_Alltoall. all to all... ");
    for (i=0; i < NUMTASKS; i++) { // to send
        buffer[i] = i;
    }
    for (i=0; i < NUMTASKS; i++) { // to recv
        buffer2[i] = -1;
    }
    mpi_errno = MPI_Alltoall(buffer,1,MPI_INT,buffer2,1,MPI_INT,MPI_COMM_WORLD);
    if (mpi_errno != MPI_SUCCESS)
        printf("Something went wrong in the MPI_Alltoall\n");
    // all processes should have an array of size NUMTASKS with its
    // rank repeated all over. For example, for process 3:
    // buffer2[] = [3, 3, 3, ..., 3]
    for (i=0; i < NUMTASKS; i++)
        if (buffer2[i] != rank)
            printf("??? buffer2[%d]=%d (for tasks %d)\n",i,buffer2[i],rank);
    if (!rank && mpi_errno==MPI_SUCCESS) printf("OK\n");

    MPI_Finalize();
    return 0;
}
/*
 * Blocking send: implemented as a persistent-style request
 * (MPI_REQUEST_init with REQ_SEND_X_SYNC) that is started immediately.
 *
 * buf/count/datatype describe the payload; dest/tag/comm address it.
 * Returns MPI_SUCCESS on success, MPI_ERR on any failure (bad rank,
 * request-init failure, or start failure).
 */
int MPI_Send ( INOUT void *buf, IN int count, IN MPI_Datatype datatype, IN int dest, IN int tag, IN MPI_Comm comm )
{
  int ret ;
  int to ;
  MPI_Request mpi_req ;

  /*
   * console info
   */
#if defined(__DEBUG__)
  L_STACK_MSG_Push("MPI_Send,%p,%i,%i,%i,%i,%p", buf, count, datatype, dest, tag, comm) ;
#endif

  /*
   * get some information
   */
  to = MPI_COMM_translate_rank(comm,dest) ;
  if (to < 0)
      goto LABEL_MPI_SEND_ERROR ;

  /*
   * send request
   */
  ret = MPI_REQUEST_init(&mpi_req, FALSE, REQ_SEND_X_SYNC, comm, tag, NR_SENDRECV, FILTER_DEFAULT, buf, datatype, count, NODE_node_id(), to) ;
  if (MPI_ERR == ret)
      goto LABEL_MPI_SEND_ERROR ;

  ret = MPI_Start(&mpi_req) ;
  if (MPI_ERR == ret)
      goto LABEL_MPI_SEND_ERROR ;
  else
      goto LABEL_MPI_SEND_OK ;

  /*
   * Return
   */
LABEL_MPI_SEND_OK :
#if defined(__DEBUG__)
  /* BUG FIX: the Pop traces referenced `source`, which is not declared in
     this function (the parameter is `dest`) — a compile error whenever
     __DEBUG__ is defined. */
  L_STACK_MSG_Pop ("MPI_Send,%p,%i,%i,%i,%i,%p,%p", buf, count, datatype, dest, tag, comm, mpi_req) ;
#endif
  return (MPI_SUCCESS) ;

LABEL_MPI_SEND_ERROR :
#if defined(__DEBUG__)
  L_STACK_MSG_Pop ("MPI_Send,%p,%i,%i,%i,%i,%p,%p", buf, count, datatype, dest, tag, comm, mpi_req) ;
#endif
  return (MPI_ERR) ;
}
int main (int argc, char **argv) { int nprocs = -1; int rank = -1; char processor_name[128]; int namelen = 128; int buf0[buf_size]; int buf1[buf_size]; MPI_Request aReq[2]; MPI_Status aStatus[2]; MPI_Status status; /* init */ MPI_Init (&argc, &argv); MPI_Comm_size (MPI_COMM_WORLD, &nprocs); MPI_Comm_rank (MPI_COMM_WORLD, &rank); MPI_Get_processor_name (processor_name, &namelen); printf ("(%d) is alive on %s\n", rank, processor_name); fflush (stdout); MPI_Barrier (MPI_COMM_WORLD); if (nprocs < 2) { printf ("not enough tasks\n"); } else { if (rank == 0) { memset (buf0, 0, buf_size); MPI_Send_init (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &aReq[0]); MPI_Recv_init (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &aReq[1]); MPI_Start (&aReq[0]); MPI_Start (&aReq[1]); MPI_Waitall (2, aReq, aStatus); memset (buf0, 1, buf_size); MPI_Startall (2, aReq); MPI_Waitall (2, aReq, aStatus); } else if (rank == 1) { memset (buf1, 1, buf_size); MPI_Recv_init (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &aReq[0]); MPI_Send_init (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &aReq[1]); MPI_Start (&aReq[0]); MPI_Start (&aReq[1]); MPI_Waitall (2, aReq, aStatus); memset (buf1, 0, buf_size); MPI_Startall (2, aReq); MPI_Waitall (2, aReq, aStatus); } } MPI_Barrier (MPI_COMM_WORLD); MPI_Request_free (&aReq[0]); MPI_Request_free (&aReq[1]); MPI_Finalize (); printf ("(%d) Finished normally\n", rank); }
int main(int argc, char **argv) { int numtasks, rank; int rank_dst, ping_side; // Initialise MPI MPI_Init(&argc,&argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &numtasks); if (numtasks != 2) { printf("Need 2 processes\n"); MPI_Abort(MPI_COMM_WORLD, 1); exit(1); } ping_side = !(rank & 1); rank_dst = ping_side?(rank | 1) : (rank & ~1); if (ping_side) { int x=42, y; MPI_Request send_request; MPI_Request recv_request; MPI_Send_init(&x, 1, MPI_INT, rank_dst, 1, MPI_COMM_WORLD, &send_request); MPI_Start(&send_request); MPI_Wait(&send_request, MPI_STATUS_IGNORE); MPI_Start(&send_request); MPI_Wait(&send_request, MPI_STATUS_IGNORE); MPI_Recv_init(&y, 1, MPI_INT, rank_dst, 1, MPI_COMM_WORLD, &recv_request); MPI_Start(&recv_request); MPI_Wait(&recv_request, MPI_STATUS_IGNORE); if (y == 42) printf("success\n"); else printf("failure\n"); MPI_Start(&recv_request); MPI_Wait(&recv_request, MPI_STATUS_IGNORE); if (y == 42) printf("success\n"); else printf("failure\n"); } else { int x, y; MPI_Recv(&x, 1, MPI_INT, rank_dst, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Recv(&y, 1, MPI_INT, rank_dst, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Send(&y, 1, MPI_INT, rank_dst, 1, MPI_COMM_WORLD); MPI_Send(&y, 1, MPI_INT, rank_dst, 1, MPI_COMM_WORLD); if (x == 42) printf("success\n"); else printf("failure\n"); if (y == 42) printf("success\n"); else printf("failure\n"); } MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); exit(0); }
int main (int argc, char *argv[]) { MPI_Request reqSR[4], reqRR[4], reqSF[4], reqRF[4]; MPI_Status statRR[4], statRF[4], statSR[4], statSF[4]; MPI_Comm cartcomm; int n_proc, nbrs[4], dims[2], periods[2]={1,1}, reorder=1; int landNS, landWE, err,i; float sumFox, sumRabb, nbrab, nbfox, model[2][3]; double time; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &n_proc); if(rank==0){ time= MPI_Wtime(); printf("N_proc:%d",n_proc); } /**************************************************** ********** CASO DE 1 PROCESSO ****************** ***************************************************/ if (n_proc==1) { echoSingle(); }else{ /**************************************************** **********+++ MULTI PROCESSOS ****************** ***************************************************/ int lado = sqrt(n_proc); dims[0] = lado; dims[1] = lado; if((lado * lado) != n_proc){ if(rank==0) printf("ERRO: Numero incorreto de processos\n"); MPI_Finalize(); exit(0); } MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, reorder, &cartcomm); MPI_Comm_rank(cartcomm, &rank); MPI_Cart_coords(cartcomm, rank, 2, coords); MPI_Cart_shift(cartcomm, 0, 1, &nbrs[UP], &nbrs[DOWN]); MPI_Cart_shift(cartcomm, 1, 1, &nbrs[LEFT], &nbrs[RIGHT]); //Actualizar offsets de cada processo landNS = offsetNS = NS_Size / lado; landWE = offsetWE = WE_Size / lado; if(coords[0] == (lado-1)){ offsetNS += NS_Size % lado; } if(coords[1] == (lado-1)){ offsetWE += WE_Size % lado; } //Buffers para envio e receção de dados float buf_sendFoxN[offsetWE],buf_sendFoxS[offsetWE],buf_sendFoxW[offsetNS],buf_sendFoxE[offsetNS]; float buf_recvFoxN[offsetWE],buf_recvFoxS[offsetWE],buf_recvFoxW[offsetNS],buf_recvFoxE[offsetNS]; float buf_sendRabbitN[offsetWE],buf_sendRabbitS[offsetWE],buf_sendRabbitW[offsetNS],buf_sendRabbitE[offsetNS]; float buf_recvRabbitN[offsetWE],buf_recvRabbitS[offsetWE],buf_recvRabbitW[offsetNS],buf_recvRabbitE[offsetNS]; float Rabbit[offsetNS+2][offsetWE+2]; float Fox[offsetNS+2][offsetWE+2]; /* The next two 
arrays are used in function Evolve() to compute * the next generation of rabbits and foxes. */ float TRabbit[offsetNS+2][offsetWE+2]; float TFox[offsetNS+2][offsetWE+2]; //Inicialização das comunicações //********* Raposas ************** //Enviar //Cima e baixo MPI_Send_init(&buf_sendFoxN[0], offsetWE, MPI_FLOAT, nbrs[UP], 0, cartcomm, &reqSF[UP]); MPI_Send_init(&buf_sendFoxS[0], offsetWE, MPI_FLOAT, nbrs[DOWN], 0, cartcomm, &reqSF[DOWN]); //Esquerda e direita MPI_Send_init(&buf_sendFoxW[0], offsetNS, MPI_FLOAT, nbrs[LEFT], 0, cartcomm, &reqSF[LEFT]); MPI_Send_init(&buf_sendFoxE[0], offsetNS, MPI_FLOAT, nbrs[RIGHT], 0, cartcomm, &reqSF[RIGHT]); //Receber //Cima e Baixo MPI_Recv_init(&buf_recvFoxS[0], offsetWE, MPI_FLOAT, nbrs[DOWN], 0, cartcomm, &reqRF[DOWN]); MPI_Recv_init(&buf_recvFoxN[0], offsetWE, MPI_FLOAT, nbrs[UP], 0, cartcomm, &reqRF[UP]); //Esquerda e direita MPI_Recv_init(&buf_recvFoxE[0], offsetNS, MPI_FLOAT, nbrs[RIGHT], 0, cartcomm, &reqRF[RIGHT]); MPI_Recv_init(&buf_recvFoxW[0], offsetNS, MPI_FLOAT, nbrs[LEFT], 0, cartcomm, &reqRF[LEFT]); //********* Coelhos *************** //Enviar //Cima e baixo MPI_Send_init(&buf_sendRabbitN[0], offsetWE, MPI_FLOAT, nbrs[UP], 0, cartcomm, &reqSR[UP]); MPI_Send_init(&buf_sendRabbitS[0], offsetWE, MPI_FLOAT, nbrs[DOWN], 0, cartcomm, &reqSR[DOWN]); //Esquerda e direita MPI_Send_init(&buf_sendRabbitW[0], offsetNS, MPI_FLOAT, nbrs[LEFT], 0, cartcomm, &reqSR[LEFT]); MPI_Send_init(&buf_sendRabbitE[0], offsetNS, MPI_FLOAT, nbrs[RIGHT], 0, cartcomm, &reqSR[RIGHT]); //Receber //Cima e Baixo MPI_Recv_init(&buf_recvRabbitS[0], offsetWE, MPI_FLOAT, nbrs[DOWN], 0, cartcomm, &reqRR[DOWN]); MPI_Recv_init(&buf_recvRabbitN[0], offsetWE, MPI_FLOAT, nbrs[UP], 0, cartcomm, &reqRR[UP]); //Esquerda e direita MPI_Recv_init(&buf_recvRabbitE[0], offsetNS, MPI_FLOAT, nbrs[RIGHT], 0, cartcomm, &reqRR[RIGHT]); MPI_Recv_init(&buf_recvRabbitW[0], offsetNS, MPI_FLOAT, nbrs[LEFT], 0, cartcomm, &reqRR [LEFT]); /* Initialise the problem. 
*/ err = SetLand(Rabbit,Fox,model,landNS, landWE); // Iterate. for( k=1; k<=NITER; k++) { /****************************************************** **** Começa comunicação de actualização ******** ******************************************************/ //************** Envios ***************/ //Raposas //Cima e baixo for(i=1; i <= offsetWE; i++) buf_sendFoxN[i-1] = Fox[1][i]; MPI_Start(&reqSF[UP]); for(i=1; i <= offsetWE; i++) buf_sendFoxS[i-1] = Fox[offsetNS][i]; MPI_Start(&reqSF[DOWN]); //Esquerda e direita for(i=1; i <= offsetNS; i++) buf_sendFoxW[i-1] = Fox[i][1]; MPI_Start(&reqSF[LEFT]); for(i=1; i <= offsetNS; i++) buf_sendFoxE[i-1] = Fox[i][offsetWE]; MPI_Start(&reqSF[RIGHT]); //Coelhos //Cima e baixo for(i=1; i <= offsetWE; i++) buf_sendRabbitN[i-1] = Rabbit[1][i]; MPI_Start(&reqSR[UP]); for(i=1; i <= offsetWE; i++) buf_sendRabbitS[i-1] = Rabbit[offsetNS][i]; MPI_Start(&reqSR[DOWN]); //Esquerda e direita for(i=1; i <= offsetNS; i++) buf_sendRabbitW[i-1] = Rabbit[i][1]; MPI_Start(&reqSR[LEFT]); for(i=1; i <= offsetNS; i++) buf_sendRabbitE[i-1] = Rabbit[i][offsetWE]; MPI_Start(&reqSR[RIGHT]); //************** Recepção ***************/ //Raposas //Cima e baixo MPI_Start(&reqRF[DOWN]); MPI_Start(&reqRF[UP]); //Esquerda e direita MPI_Start(&reqRF[RIGHT]); MPI_Start(&reqRF[LEFT]); //Coelhos //Cima e baixo MPI_Start(&reqRR[DOWN]); MPI_Start(&reqRR[UP]); //Esquerda e direita MPI_Start(&reqRR[RIGHT]); MPI_Start(&reqRR[LEFT]); //Esperar pelos Receives e aplicar alterações nos quadros //Raposas MPI_Waitall(4, reqRR , statRR); for(i=1; i <= offsetWE; i++) Fox[offsetNS+1][i] = buf_recvFoxS[i-1]; for(i=1; i <= offsetWE; i++) Fox[0][i] = buf_recvFoxN[i-1]; for(i=1; i <= offsetNS; i++) Fox[i][offsetWE+1] = buf_recvFoxE[i-1]; for(i=1; i <= offsetNS; i++) Fox[i][0] = buf_recvFoxW[i-1]; //Coelhos MPI_Waitall(4, reqRF, statRF); for(i=1; i <= offsetWE; i++) Rabbit[offsetNS+1][i] = buf_recvRabbitS[i-1]; for(i=1; i <= offsetWE; i++) Rabbit[0][i] = buf_recvRabbitN[i-1]; for(i=1; i 
<= offsetNS; i++) Rabbit[i][offsetWE+1] = buf_recvRabbitE[i-1]; for(i=1; i <= offsetNS; i++) Rabbit[i][0] = buf_recvRabbitW[i-1]; /****************************************************** **** Termina comunicação de actualização ******** ******************************************************/ err = Evolve(Rabbit,Fox,TRabbit,TFox,model); if( !(k%PERIOD) ) { err = GetPopulation(Rabbit,&nbrab); err = GetPopulation(Fox,&nbfox); MPI_Reduce(&nbrab, &sumRabb, 1, MPI_FLOAT, MPI_SUM, 0, cartcomm); MPI_Reduce(&nbfox, &sumFox, 1, MPI_FLOAT, MPI_SUM, 0, cartcomm); //if(rank==0) // printf("Year %d: %.0f rabbits and %.0f foxes\n", k, sumRabb, sumFox); } //Esperar que os Sends estejam concluidos para ter a certeza que que já podemos mexer nos buffers //(Não creio de que 100% obrigatório) MPI_Waitall(4, reqSR , statSR); MPI_Waitall(4, reqSF , statSF); } if(rank==0) printf("Year %d: %.0f rabbits and %.0f foxes\n", k, sumRabb, sumFox); } if(rank==0) printf("Time: %f\n",MPI_Wtime()-time); MPI_Finalize(); return 0; }
/*
 * Driver for the message-transfer tests: waits on a null request, runs
 * test_pair(), optionally aborts early (VT_ABORT_BEFORE_FINALIZE), then
 * exercises persistent communication with MPI_PROC_NULL, real persistent
 * reuse between rank pairs, a send to MPI_PROC_NULL, and a cancelled
 * receive.  Returns the MPI_Finalize() result.
 * NOTE(review): the rank^1 exchanges assume an even process count —
 * with an odd count the top rank has no partner; confirm launch config.
 */
int main (int argc, char *argv[])
{
    int ierr;
    int rank;
    static char buffer[80];
    MPI_Request req = MPI_REQUEST_NULL;
    MPI_Status status, status2;
#ifdef V_T
    double ts;
    int messageframe;
#endif

    ierr = MPI_Init(&argc,&argv);
#ifdef V_T
    ts = VT_timestamp();
#endif

    /* this used to be buggy... */
    /* waiting on MPI_REQUEST_NULL must return immediately with an empty status */
    MPI_Wait( &req, &status );

    ierr = MPI_Barrier(MPI_COMM_WORLD);

    test_pair();

    MPI_Comm_rank ( MPI_COMM_WORLD, &rank );

    /* optional early-abort paths used to test tracing shutdown:
       value < 2 -> MPI_Abort; otherwise rank 0 segfaults deliberately
       while the others block in a barrier */
    if ( getenv ("VT_ABORT_BEFORE_FINALIZE") ) {
        if ( atoi ( getenv ("VT_ABORT_BEFORE_FINALIZE") ) < 2 )
            MPI_Abort( MPI_COMM_WORLD, 10 );
        if ( !rank ) {
            *((char *)NULL) = 0;   /* intentional crash on rank 0 */
        } else {
            MPI_Barrier ( MPI_COMM_WORLD );
        }
    }

    /* test some other aspects of message transfer:
       persistent send with MPI_PROC_NULL (must complete immediately) */
    MPI_Send_init( &ierr, 1, MPI_INT, MPI_PROC_NULL, 100, MPI_COMM_WORLD, &req );
    MPI_Start( &req );
    MPI_Wait( &req, &status );
    MPI_Start( &req );
    MPI_Wait( &req, &status );
    MPI_Request_free( &req );

    /* persistent receive with MPI_PROC_NULL */
    MPI_Recv_init( &ierr, 1, MPI_INT, MPI_PROC_NULL, 100, MPI_COMM_WORLD, &req );
    MPI_Start( &req );
    MPI_Wait( &req, &status );
    MPI_Start( &req );
    MPI_Wait( &req, &status );
    MPI_Request_free( &req );

    /* real reuse of persistent communication: odd ranks receive from,
       even ranks send to, their partner (rank^1), twice per request */
    if( rank & 1 ) {
        MPI_Recv_init( buffer, sizeof(buffer), MPI_CHAR, rank^1, 101, MPI_COMM_WORLD, &req );
    } else {
        MPI_Send_init( buffer, sizeof(buffer), MPI_CHAR, rank^1, 101, MPI_COMM_WORLD, &req );
    }
    MPI_Start( &req );
    MPI_Wait( &req, &status );
    MPI_Start( &req );
    MPI_Wait( &req, &status );
    MPI_Request_free( &req );

    /* send to MPI_PROC_NULL (no matching receive needed) */
    MPI_Send( buffer, sizeof(buffer), MPI_CHAR, MPI_PROC_NULL, 103, MPI_COMM_WORLD );

    /* cancelled receive: no matching send is ever posted on tag 105,
       so the cancel should succeed and the Wait complete */
    MPI_Irecv( buffer, sizeof(buffer), MPI_CHAR, rank^1, 105, MPI_COMM_WORLD, &req );
    MPI_Cancel( &req );
    MPI_Wait( &req, &status2 );

#ifdef V_T
    printf( "Time: %f\n", VT_timestamp()-ts );
#endif

    ierr = MPI_Finalize();
    return ierr;
}
/*
 * Fortran binding shim for MPI_Start: translates the Fortran integer
 * request handle into its C MPI_Request object via find_request() and
 * starts it, returning the MPI error code through *ierr (Fortran
 * error-argument convention).
 */
void mpi_start_(int* request, int* ierr)
{
    MPI_Request c_request;

    /* Look up the C request backing the Fortran handle.
       NOTE(review): MPI_Start does not replace the handle of a
       persistent request, so the local copy need not be stored back —
       confirm find_request returns by value intentionally. */
    c_request = find_request(*request);
    *ierr = MPI_Start(&c_request);
}
static void test_pair (void) { int prev, next, count, tag, index, i, outcount, indices[2]; int rank, size, flag, ierr, reqcount; double send_buf[TEST_SIZE], recv_buf[TEST_SIZE]; double buffered_send_buf[TEST_SIZE * 2 + MPI_BSEND_OVERHEAD]; /* factor of two is based on guessing - only dynamic allocation would be safe */ void *buffer; MPI_Status statuses[2]; MPI_Status status; MPI_Request requests[2]; MPI_Comm dupcom, intercom; #ifdef V_T struct _VT_FuncFrameHandle { char *name; int func; int frame; }; typedef struct _VT_FuncFrameHandle VT_FuncFrameHandle_t; VT_FuncFrameHandle_t normal_sends, buffered_sends, buffered_persistent_sends, ready_sends, sync_sends, nblock_sends, nblock_rsends, nblock_ssends, pers_sends, pers_rsends, pers_ssends, sendrecv, sendrecv_repl, intercomm; int classid; VT_classdef( "Application:test_pair", &classid ); #define VT_REGION_DEF( _name, _nameframe, _class ) \ (_nameframe).name=_name; \ VT_funcdef( (_nameframe).name, _class, &((_nameframe).func) ); #define VT_BEGIN_REGION( _nameframe ) \ LOCDEF(); \ VT_begin( (_nameframe).func ) #define VT_END_REGION( _nameframe ) \ LOCDEF(); VT_end( (_nameframe).func ) #else #define VT_REGION_DEF( _name, _nameframe, _class ) #define VT_BEGIN_REGION( _nameframe ) #define VT_END_REGION( _nameframe ) #endif ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); ierr = MPI_Comm_size(MPI_COMM_WORLD, &size); if ( size < 2 ) { if ( rank == 0 ) { printf("Program needs to be run on at least 2 processes.\n"); } ierr = MPI_Abort( MPI_COMM_WORLD, 66 ); } ierr = MPI_Comm_dup(MPI_COMM_WORLD, &dupcom); if ( rank >= 2 ) { /* printf( "%d Calling finalize.\n", rank ); */ ierr = MPI_Finalize( ); exit(0); } next = rank + 1; if (next >= 2) next = 0; prev = rank - 1; if (prev < 0) prev = 1; VT_REGION_DEF( "Normal_Sends", normal_sends, classid ); VT_REGION_DEF( "Buffered_Sends", buffered_sends, classid ); VT_REGION_DEF( "Buffered_Persistent_Sends", buffered_persistent_sends, classid ); VT_REGION_DEF( "Ready_Sends", ready_sends, classid 
); VT_REGION_DEF( "Sync_Sends", sync_sends, classid ); VT_REGION_DEF( "nblock_Sends", nblock_sends, classid ); VT_REGION_DEF( "nblock_RSends", nblock_rsends, classid ); VT_REGION_DEF( "nblock_SSends", nblock_ssends, classid ); VT_REGION_DEF( "Pers_Sends", pers_sends, classid ); VT_REGION_DEF( "Pers_RSends", pers_rsends, classid ); VT_REGION_DEF( "Pers_SSends", pers_ssends, classid ); VT_REGION_DEF( "SendRecv", sendrecv, classid ); VT_REGION_DEF( "SendRevc_Repl", sendrecv_repl, classid ); VT_REGION_DEF( "InterComm", intercomm, classid ); /* * Normal sends */ VT_BEGIN_REGION( normal_sends ); if (rank == 0) printf ("Send\n"); tag = 0x100; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); LOCDEF(); MPI_Send(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv"); } else { LOCDEF(); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( normal_sends ); /* * Buffered sends */ VT_BEGIN_REGION( buffered_sends ); if (rank == 0) printf ("Buffered Send\n"); tag = 138; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); LOCDEF(); MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf)); MPI_Bsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); MPI_Buffer_detach(&buffer, &size); if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) { printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf)); MPI_Abort(MPI_COMM_WORLD, 201); 
} MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv"); } else { LOCDEF(); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( buffered_sends ); /* * Buffered sends */ VT_BEGIN_REGION( buffered_persistent_sends ); if (rank == 0) printf ("Buffered Persistent Send\n"); tag = 238; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); LOCDEF(); MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf)); MPI_Bsend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests); MPI_Start(requests); MPI_Wait(requests, statuses); MPI_Request_free(requests); MPI_Buffer_detach(&buffer, &size); if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) { printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf)); MPI_Abort(MPI_COMM_WORLD, 201); } MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv"); } else { LOCDEF(); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( buffered_persistent_sends ); /* * Ready sends. Note that we must insure that the receive is posted * before the rsend; this requires using Irecv. 
*/ VT_BEGIN_REGION( ready_sends ); if (rank == 0) printf ("Rsend\n"); tag = 1456; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Recv(MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD, &status); MPI_Rsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); MPI_Probe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status); if (status.MPI_SOURCE != prev) printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE); if (status.MPI_TAG != tag) printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG); MPI_Get_count(&status, MPI_DOUBLE, &i); if (i != count) printf ("Incorrect count, expected %d, got %d\n",count,i); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "rsend and recv"); } else { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); MPI_Send( MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD); MPI_Wait(requests, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "rsend and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( ready_sends ); /* * Synchronous sends */ VT_BEGIN_REGION( sync_sends ); if (rank == 0) printf ("Ssend\n"); tag = 1789; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status); if (flag) printf ("Iprobe succeeded! 
source %d, tag %d\n",status.MPI_SOURCE, status.MPI_TAG); MPI_Ssend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); while (!flag) MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status); if (status.MPI_SOURCE != prev) printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE); if (status.MPI_TAG != tag) printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG); MPI_Get_count(&status, MPI_DOUBLE, &i); if (i != count) printf ("Incorrect count, expected %d, got %d\n",count,i); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv"); } else { MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Ssend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( sync_sends ); /* * Nonblocking normal sends */ VT_BEGIN_REGION( nblock_sends ); if (rank == 0) printf ("Isend\n"); tag = 2123; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); init_test_data(send_buf,TEST_SIZE,0); MPI_Isend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, (requests+1)); MPI_Waitall(2, requests, statuses); rq_check( requests, 2, "isend and irecv" ); msg_check(recv_buf,prev,tag,count,statuses, TEST_SIZE,"isend and irecv"); } else { MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"isend and irecv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Isend(recv_buf, count, MPI_DOUBLE, next, tag,MPI_COMM_WORLD, (requests)); MPI_Wait((requests), &status); rq_check(requests, 1, "isend (and recv)"); } VT_END_REGION( nblock_sends ); /* * Nonblocking ready 
sends */ VT_BEGIN_REGION( nblock_rsends ); if (rank == 0) printf ("Irsend\n"); tag = 2456; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); init_test_data(send_buf,TEST_SIZE,0); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0, MPI_BOTTOM, 0, MPI_INT, next, 0, dupcom, &status); MPI_Irsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, (requests+1)); reqcount = 0; while (reqcount != 2) { MPI_Waitany( 2, requests, &index, statuses); if( index == 0 ) { memcpy( &status, statuses, sizeof(status) ); } reqcount++; } rq_check( requests, 1, "irsend and irecv"); msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"irsend and irecv"); } else { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0, MPI_BOTTOM, 0, MPI_INT, next, 0, dupcom, &status); flag = 0; while (!flag) MPI_Test(requests, &flag, &status); rq_check( requests, 1, "irsend and irecv (test)"); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "irsend and irecv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Irsend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests); MPI_Waitall(1, requests, statuses); rq_check( requests, 1, "irsend and irecv"); } VT_END_REGION( nblock_rsends ); /* * Nonblocking synchronous sends */ VT_BEGIN_REGION( nblock_ssends ); if (rank == 0) printf ("Issend\n"); tag = 2789; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests ); init_test_data(send_buf,TEST_SIZE,0); MPI_Issend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, (requests+1)); flag = 0; while (!flag) MPI_Testall(2, requests, &flag, statuses); rq_check( requests, 2, "issend and irecv (testall)"); msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, "issend 
and recv"); } else { MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "issend and recv"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Issend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,requests); flag = 0; while (!flag) MPI_Testany(1, requests, &index, &flag, statuses); rq_check( requests, 1, "issend and recv (testany)"); } VT_END_REGION( nblock_ssends ); /* * Persistent normal sends */ VT_BEGIN_REGION( pers_sends ); if (rank == 0) printf ("Send_init\n"); tag = 3123; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); MPI_Send_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests); MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, (requests+1)); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Startall(2, requests); MPI_Waitall(2, requests, statuses); msg_check( recv_buf, prev, tag, count, (statuses+1), TEST_SIZE, "persistent send/recv"); } else { MPI_Start((requests+1)); MPI_Wait((requests+1), &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "persistent send/recv"); init_test_data(send_buf,TEST_SIZE,1); MPI_Start(requests); MPI_Wait(requests, &status); } MPI_Request_free(requests); MPI_Request_free((requests+1)); VT_END_REGION( pers_sends ); /* * Persistent ready sends */ VT_BEGIN_REGION( pers_rsends ); if (rank == 0) printf ("Rsend_init\n"); tag = 3456; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); MPI_Rsend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests); MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, (requests+1)); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Barrier( MPI_COMM_WORLD ); MPI_Startall(2, requests); reqcount = 0; while (reqcount != 2) { MPI_Waitsome(2, requests, &outcount, indices, statuses); for (i=0; i<outcount; i++) { if (indices[i] == 1) { 
msg_check( recv_buf, prev, tag, count, (statuses+i), TEST_SIZE, "waitsome"); } reqcount++; } } } else { MPI_Start((requests+1)); MPI_Barrier( MPI_COMM_WORLD ); flag = 0; while (!flag) MPI_Test((requests+1), &flag, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "test"); init_test_data(send_buf,TEST_SIZE,1); MPI_Start(requests); MPI_Wait(requests, &status); } MPI_Request_free(requests); MPI_Request_free((requests+1)); VT_END_REGION( pers_rsends ); /* * Persistent synchronous sends */ VT_BEGIN_REGION( pers_ssends ); if (rank == 0) printf ("Ssend_init\n"); tag = 3789; count = TEST_SIZE / 3; clear_test_data(recv_buf,TEST_SIZE); MPI_Ssend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, (requests+1)); MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, requests); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Startall(2, requests); reqcount = 0; while (reqcount != 2) { MPI_Testsome(2, requests, &outcount, indices, statuses); for (i=0; i<outcount; i++) { if (indices[i] == 0) { msg_check( recv_buf, prev, tag, count, (statuses+i), TEST_SIZE, "testsome"); } reqcount++; } } } else { MPI_Start(requests); flag = 0; while (!flag) MPI_Testany(1, requests, &index, &flag, statuses); msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, "testany" ); init_test_data(send_buf,TEST_SIZE,1); MPI_Start((requests+1)); MPI_Wait((requests+1), &status); } MPI_Request_free(requests); MPI_Request_free((requests+1)); VT_END_REGION( pers_ssends ); /* * Send/receive. 
*/ VT_BEGIN_REGION( sendrecv ); if (rank == 0) printf ("Sendrecv\n"); tag = 4123; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); MPI_Sendrecv(send_buf, count, MPI_DOUBLE, next, tag, recv_buf, count, MPI_DOUBLE, prev, tag, MPI_COMM_WORLD, &status ); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "sendrecv"); } else { MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "recv/send"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( sendrecv ); #ifdef V_T VT_flush(); #endif /* * Send/receive replace. */ VT_BEGIN_REGION( sendrecv_repl ); if (rank == 0) printf ("Sendrecv_replace\n"); tag = 4456; count = TEST_SIZE / 3; if (rank == 0) { init_test_data(recv_buf, TEST_SIZE,0); for (i=count; i< TEST_SIZE; i++) recv_buf[i] = 0.0; MPI_Sendrecv_replace(recv_buf, count, MPI_DOUBLE, next, tag, prev, tag, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "sendrecvreplace"); } else { clear_test_data(recv_buf,TEST_SIZE); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status); msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "recv/send for replace"); init_test_data(recv_buf,TEST_SIZE,1); MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD); } VT_END_REGION( sendrecv_repl ); /* * Send/Receive via inter-communicator */ VT_BEGIN_REGION( intercomm ); MPI_Intercomm_create(MPI_COMM_SELF, 0, MPI_COMM_WORLD, next, 1, &intercom); if (rank == 0) printf ("Send via inter-communicator\n"); tag = 4018; count = TEST_SIZE / 5; clear_test_data(recv_buf,TEST_SIZE); if (rank == 0) { init_test_data(send_buf,TEST_SIZE,0); LOCDEF(); MPI_Send(send_buf, count, MPI_DOUBLE, 0, tag, intercom); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, 
intercom, &status); msg_check(recv_buf, 0, tag, count, &status, TEST_SIZE, "send and recv via inter-communicator"); } else if (rank == 1) { LOCDEF(); MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG, intercom, &status); msg_check( recv_buf, 0, tag, count, &status, TEST_SIZE,"send and recv via inter-communicator"); init_test_data(recv_buf,TEST_SIZE,0); MPI_Send(recv_buf, count, MPI_DOUBLE, 0, tag, intercom); } VT_END_REGION( normal_sends ); MPI_Comm_free(&intercom); MPI_Comm_free(&dupcom); }
int main(int argc, char * argv[]) { // initialize MPI int i, rank, size, rec; int arr[100]; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); // populate a sample array, // i.e. simulated input if(rank == 0) { printf("\r\n\r\nHello from r00t. Starting\r\n\r\n"); } else { int i; for(i=0;i<=100;i++) { srand(time(NULL) * rank * i); arr[i] = rand() % 100; } } MPI_Request sreq, rreq; MPI_Status status; if(rank > 0) { int source, dest; if(rank == 1) { source = size - 1; } else { source = rank - 1; } if(rank == size - 1) { dest = 1; } else { dest = rank + 1; } // create a persistent send and a persistent recieve request MPI_Send_init(&arr[rank], 1, MPI_INT, dest, NULL, MPI_COMM_WORLD, &sreq); MPI_Recv_init(&rec, 1, MPI_INT, source, NULL, MPI_COMM_WORLD, &rreq); // once created we can use them over and over again... for(i=0; i<100; i++) { MPI_Start(&rreq); MPI_Start(&sreq); MPI_Wait(&rreq, &status); printf("My rank is %d and I received %d from %d\n", rank, rec, source); MPI_Wait(&sreq, &status); } MPI_Cancel(&rreq); MPI_Cancel(&sreq); } }
/*
 * MPI_Cancel semantics test between rank 0 and rank size-1.
 *
 * For each completion call family the test checks:
 *  - cancel SUCCEEDS when the receive is unsatisfied: the peer posts no
 *    matching send, so Cancel before Wait/Waitany/Waitsome must report
 *    the request as cancelled;
 *  - cancel FAILS when the message has already arrived: the peer sends
 *    (tag 2) before the Sendrecv barrier, so Cancel after the barrier
 *    must NOT take effect and Test/Testany/Testsome must complete the
 *    receive normally.
 *
 * Every MPI_Sendrecv on MPI_BOTTOM with tag 1 is a zero-byte handshake
 * that keeps the two ranks in lock-step; each one on rank 0 pairs with a
 * specific one on rank size-1 — do not reorder.
 */
int main( int argc, char **argv )
{
    MPI_Request r1;
    int size, rank;
    int err = 0;                       /* count of failed sub-tests */
    int partner, buf[10], flag, idx, index;
    MPI_Status status;

    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    if (size < 2) {
        printf( "Cancel test requires at least 2 processes\n" );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /*
     * Here is the test. First, we ensure an unsatisfied Irecv:
     *       process 0             process size-1
     *       Sendrecv              Sendrecv
     *       Irecv                 ----
     *       Cancel                ----
     *       Sendrecv              Sendrecv
     * Next, we confirm receipt before canceling
     *       Irecv                 Send
     *       Sendrecv              Sendrecv
     *       Cancel
     */
    if (rank == 0) {
        partner = size - 1;

        /* Cancel succeeds for wait/waitall: no matching send ever posted
         * for tag 0, so the cancelled receive must complete as cancelled. */
        MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Start( &r1 );
        MPI_Cancel( &r1 );
        MPI_Wait( &r1, &status );
        MPI_Test_cancelled( &status, &flag );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        if (!flag) {
            err++;
            printf( "Cancel of a receive failed where it should succeed (Wait).\n" );
        }
        MPI_Request_free( &r1 );

        /* Cancel fails for test/testall: the partner's tag-2 send arrives
         * before the barrier, so the cancel must not take effect. */
        buf[0] = -1;      /* sentinel: overwritten only if data is received */
        MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
        MPI_Start( &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Cancel( &r1 );
        MPI_Test( &r1, &flag, &status );
        MPI_Test_cancelled( &status, &flag );
        if (flag) {
            err++;
            printf( "Cancel of a receive succeeded where it shouldn't (Test).\n" );
            if (buf[0] != -1) {
                printf( "Receive buffer changed even though cancel suceeded! (Test).\n" );
            }
        }
        MPI_Request_free( &r1 );

        /* Cancel succeeds for waitany: same unsatisfied-receive pattern,
         * completed via MPI_Waitany instead of MPI_Wait. */
        MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Start( &r1 );
        MPI_Cancel( &r1 );
        MPI_Waitany( 1, &r1, &idx, &status );
        MPI_Test_cancelled( &status, &flag );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        if (!flag) {
            err++;
            printf( "Cancel of a receive failed where it should succeed (Waitany).\n" );
        }
        MPI_Request_free( &r1 );

        /* Cancel fails for testany: message already delivered (tag 2). */
        buf[0] = -1;
        MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
        MPI_Start( &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Cancel( &r1 );
        MPI_Testany( 1, &r1, &idx, &flag, &status );
        MPI_Test_cancelled( &status, &flag );
        if (flag) {
            err++;
            printf( "Cancel of a receive succeeded where it shouldn't (Testany).\n" );
            if (buf[0] != -1) {
                printf( "Receive buffer changed even though cancel suceeded! (Test).\n" );
            }
        }
        MPI_Request_free( &r1 );

        /* Cancel succeeds for waitsome: unsatisfied receive again. */
        MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Start( &r1 );
        MPI_Cancel( &r1 );
        MPI_Waitsome( 1, &r1, &idx, &index, &status );
        MPI_Test_cancelled( &status, &flag );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        if (!flag) {
            err++;
            printf( "Cancel of a receive failed where it should succeed (Waitsome).\n" );
        }
        MPI_Request_free( &r1 );

        /* Cancel fails for testsome: message already delivered (tag 2). */
        buf[0] = -1;
        MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 );
        MPI_Start( &r1 );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Cancel( &r1 );
        MPI_Testsome( 1, &r1, &idx, &index, &status );
        MPI_Test_cancelled( &status, &flag );
        if (flag) {
            err++;
            printf( "Cancel of a receive succeeded where it shouldn't (Testsome).\n" );
            if (buf[0] != -1) {
                printf( "Receive buffer changed even though cancel suceeded! (Testsome).\n" );
            }
        }
        MPI_Request_free( &r1 );

        if (err) {
            printf( "Test failed with %d errors.\n", err );
        }
        else {
            printf( " No Errors\n" );
        }
    }
    else if (rank == size - 1) {
        /* Partner side: each Sendrecv below pairs in order with one on
         * rank 0; the tag-2 sends satisfy the "cancel must fail" cases. */
        partner = 0;

        /* Cancel succeeds for wait/waitall */
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );

        /* Cancel fails for test/testall */
        buf[0] = 3;
        MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );

        /* Cancel succeeds for waitany */
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );

        /* Cancel fails for testany */
        MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );

        /* Cancel succeeds for waitsome */
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );

        /* Cancel fails for testsome */
        MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD );
        MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_BOTTOM, 0, MPI_INT, partner, 1,
                      MPI_COMM_WORLD, &status );

        /*
         * Next test - check that a cancel for a request receive from
         * MPI_PROC_NULL succeeds (there is some suspicion that some
         * systems can't handle this - also, MPI_REQUEST_NULL
         */
        /* A null request is an error.
           (null objects are errors unless otherwise allowed)
        r1 = MPI_REQUEST_NULL;
        MPI_Cancel( &r1 );
        */
        MPI_Recv_init( buf, 10, MPI_INT, MPI_PROC_NULL, 0, MPI_COMM_WORLD, &r1 );
        MPI_Start( &r1 );
        MPI_Cancel( &r1 );
        MPI_Request_free( &r1 );
        /* Must complete cancel.  We know that it won't complete, so we
           don't need to do anything else */
    }
    MPI_Finalize();
    return 0;
}