int main(int argc, char **argv) { char *buf; char err[200]; int i, iam; MPI_Init(&argc, &argv); Test_Init_No_File(); MPI_Barrier(MPI_COMM_WORLD); buf = (char *)malloc(32 * 1024); MPI_Comm_rank(MPI_COMM_WORLD, &iam); for(i = 1; i <= 32; i++) { if (iam == 0) { *buf = i; #ifdef VERBOSE printf("Broadcasting %d bytes\n", i * 64); #endif } MPI_Bcast(buf, i * 64, MPI_BYTE, 0, MPI_COMM_WORLD); if (*buf != i) { sprintf(err, "Broadcast of %d bytes", i * 64); Test_Failed(err); } /* gsync(); */ MPI_Barrier(MPI_COMM_WORLD); } Test_Waitforall(); Test_Global_Summary(); MPI_Finalize(); free(buf); return 0; }
int main( int argc, char **argv )
{
    char *buf;
    int rank, size, i;
    MPI_Request req[10];
    MPI_Status stat[10];
    MPI_Status status;

    buf = (char *)malloc(32*1024);
    MPI_Init(&argc, &argv);
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    /* The fixed-size request/status arrays only hold 10 entries */
    if (size > 10) {
        free(buf);
        MPI_Finalize();
        return 1;
    }
    if (rank == 0) {
        for ( i = 1; i < size; i++ )
            MPI_Isend(buf, 1024, MPI_BYTE, i, 0, MPI_COMM_WORLD, &req[i]);
        MPI_Waitall(size-1, &req[1], &stat[1]); /* Core dumps here! */
    }
    else
        MPI_Recv(buf, 1024, MPI_BYTE, 0, 0, MPI_COMM_WORLD, &status);
    Test_Waitforall( );
    free(buf);
    MPI_Finalize();
    return 0;
}
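/* Sketch (not part of the test above): the same root-to-all exchange with a
 * zero-based, heap-allocated request array instead of the &req[1] offset used
 * in the test.  Assumes an MPI-2 implementation for MPI_STATUSES_IGNORE and
 * MPI_STATUS_IGNORE; all names here are illustrative. */
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size, i;
    char buf[1024] = {0};
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (rank == 0) {
        /* one request per destination, indexed from 0 */
        MPI_Request *req = (MPI_Request *)malloc((size - 1) * sizeof(MPI_Request));
        for (i = 1; i < size; i++)
            MPI_Isend(buf, 1024, MPI_BYTE, i, 0, MPI_COMM_WORLD, &req[i - 1]);
        MPI_Waitall(size - 1, req, MPI_STATUSES_IGNORE);
        free(req);
    }
    else {
        MPI_Recv(buf, 1024, MPI_BYTE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    MPI_Finalize();
    return 0;
}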
int main( int argc, char **argv ) { MPI_Init( &argc, &argv ); test_communicators(); Test_Waitforall( ); MPI_Finalize(); return 0; }
int main( int argc, char **argv ) { int rank, size, i; MPI_Group group1, group2, group3, groupall, groupunion, newgroup; MPI_Comm newcomm; int ranks1[100], ranks2[100], ranks3[100]; int nranks1=0, nranks2=0, nranks3=0; MPI_Init( &argc, &argv ); MPI_Barrier( MPI_COMM_WORLD ); MPI_Comm_rank( MPI_COMM_WORLD, &rank ); MPI_Comm_size( MPI_COMM_WORLD, &size ); MPI_Comm_group( MPI_COMM_WORLD, &groupall ); /* Divide groups */ for (i=0; i<size; i++) if ( (i%3)==0 ) ranks1[nranks1++] = i; else if ( (i%3)==1 ) ranks2[nranks2++] = i; else ranks3[nranks3++] = i; MPI_Group_incl ( groupall, nranks1, ranks1, &group1 ); MPI_Group_incl ( groupall, nranks2, ranks2, &group2 ); MPI_Group_incl ( groupall, nranks3, ranks3, &group3 ); MPI_Group_difference ( groupall, group2, &groupunion ); MPI_Comm_create ( MPI_COMM_WORLD, group3, &newcomm ); newgroup = MPI_GROUP_NULL; if (newcomm != MPI_COMM_NULL) { /* If we don't belong to group3, this would fail */ MPI_Comm_group ( newcomm, &newgroup ); } /* Free the groups */ MPI_Group_free( &groupall ); MPI_Group_free( &group1 ); MPI_Group_free( &group2 ); MPI_Group_free( &group3 ); MPI_Group_free( &groupunion ); if (newgroup != MPI_GROUP_NULL) { MPI_Group_free( &newgroup ); } /* Free the communicator */ if (newcomm != MPI_COMM_NULL) MPI_Comm_free( &newcomm ); Test_Waitforall( ); MPI_Finalize(); return 0; }
int main( int argc, char **argv )
{
    int rank, size, i;
    int *table;
    int errors=0;
    MPI_Aint address;
    MPI_Datatype type, newtype;
    int lens;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    /* Make data table */
    table = (int *) calloc (size, sizeof(int));
    table[rank] = rank + 1;

    MPI_Barrier ( MPI_COMM_WORLD );
    /* Broadcast the data */
    for ( i=0; i<size; i++ )
        MPI_Bcast( &table[i], 1, MPI_INT, i, MPI_COMM_WORLD );

    /* See if we have the correct answers */
    for ( i=0; i<size; i++ )
        if (table[i] != i+1) errors++;

    MPI_Barrier ( MPI_COMM_WORLD );

    /* Try the same thing, but with a derived datatype.  A zero displacement
       is used, so the one-element struct type is equivalent to MPI_INT and
       the buffer argument is &table[i] rather than MPI_BOTTOM.  (See the
       sketch after this test for the MPI-2 replacements of MPI_Type_struct.) */
    for ( i=0; i<size; i++ )
        table[i] = 0;
    table[rank] = rank + 1;
    for ( i=0; i<size; i++ ) {
        address = 0;
        type    = MPI_INT;
        lens    = 1;
        MPI_Type_struct( 1, &lens, &address, &type, &newtype );
        MPI_Type_commit( &newtype );
        MPI_Bcast( &table[i], 1, newtype, i, MPI_COMM_WORLD );
        MPI_Type_free( &newtype );
    }

    /* See if we have the correct answers */
    for ( i=0; i<size; i++ )
        if (table[i] != i+1) errors++;

    MPI_Barrier ( MPI_COMM_WORLD );
    Test_Waitforall( );
    free( table );
    MPI_Finalize();
    if (errors) printf( "[%d] done with ERRORS!\n", rank );
    return errors;
}
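/* Sketch (not part of the test above): the MPI-2 replacements for the
 * deprecated MPI_Address / MPI_Type_struct calls.  Each process describes
 * table[i] by its absolute address and broadcasts MPI_BOTTOM with that
 * one-element struct type.  Illustrative only; no result checking. */
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size, i, *table;
    int blocklen = 1;
    MPI_Aint disp;
    MPI_Datatype type = MPI_INT, newtype;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    table = (int *)calloc(size, sizeof(int));
    table[rank] = rank + 1;

    for (i = 0; i < size; i++) {
        MPI_Get_address(&table[i], &disp);                             /* replaces MPI_Address     */
        MPI_Type_create_struct(1, &blocklen, &disp, &type, &newtype);  /* replaces MPI_Type_struct */
        MPI_Type_commit(&newtype);
        /* absolute displacement, so the buffer argument is MPI_BOTTOM */
        MPI_Bcast(MPI_BOTTOM, 1, newtype, i, MPI_COMM_WORLD);
        MPI_Type_free(&newtype);
    }

    free(table);
    MPI_Finalize();
    return 0;
}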
int main(int argc, char **argv) { int rank, size, i, j; int **table; int *row; int errors = 0; int *displs; int *send_counts; int recv_count; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); table = (int**) malloc(size * sizeof(int*)); table[0] = (int*) malloc(size * size * sizeof(int)); for(i = 1; i < size; i++) table[i] = table[i - 1] + size; row = (int*) malloc(size * sizeof(int)); displs = (int*) malloc(size * sizeof(int)); send_counts = (int*) malloc(size * sizeof(int)); recv_count = size; /* If I'm the root (process 0), then fill out the big table and setup * send_counts and displs arrays */ if (rank == 0) { for (i = 0; i < size; i++) { send_counts[i] = recv_count; displs[i] = i * size; for (j = 0; j < size; j++) table[i][j] = i + j; } } /* Scatter the big table to everybody's little table */ MPI_Scatterv(&table[0][0], send_counts, displs, MPI_INT, &row[0] , recv_count, MPI_INT, 0, MPI_COMM_WORLD); /* Now see if our row looks right */ for (i = 0; i < size; i++) { if (row[i] != i + rank) Test_Failed(NULL); } Test_Waitforall(); errors = Test_Global_Summary(); MPI_Finalize(); free(displs); free(row); free(table[0]); free(table); return errors; }
int main(int argc, char **argv) { int rank, size, i; MPI_Group groupall, groupunion, newgroup, group[GROUPS]; MPI_Comm newcomm; int ranks[GROUPS][100]; int nranks[GROUPS] = { 0, 0, 0 }; MPI_Init(&argc, &argv); MPI_Barrier(MPI_COMM_WORLD); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_group(MPI_COMM_WORLD, &groupall); /* Divide groups */ for (i = 0; i < size; i++) ranks[i % GROUPS][nranks[i % GROUPS]++] = i; for (i = 0; i < GROUPS; i++) MPI_Group_incl(groupall, nranks[i], ranks[i], &group[i]); MPI_Group_difference(groupall, group[1], &groupunion); MPI_Comm_create(MPI_COMM_WORLD, group[2], &newcomm); newgroup = MPI_GROUP_NULL; if (newcomm != MPI_COMM_NULL) { /* If we don't belong to group[2], this would fail */ MPI_Comm_group(newcomm, &newgroup); } /* Free the groups */ MPI_Group_free(&groupall); for (i = 0; i < GROUPS; i++) MPI_Group_free(&group[i]); MPI_Group_free(&groupunion); if (newgroup != MPI_GROUP_NULL) { MPI_Group_free(&newgroup); } /* Free the communicator */ if (newcomm != MPI_COMM_NULL) MPI_Comm_free(&newcomm); Test_Waitforall(); Test_Global_Summary(); MPI_Finalize(); return 0; }
int main( int argc, char **argv )
{
    int data, to, from, tag, maxlen, np, myid, src, dest;
    MPI_Status status;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &myid );
    MPI_Comm_size( MPI_COMM_WORLD, &np );

    /* dest writes out the received stats; for the output to be consistent
       (with the final check), it should be process 0 */
    if (argc > 1 && argv[1] && strcmp( "-alt", argv[1] ) == 0) {
        dest = np - 1;
        src  = 0;
    }
    else {
        src  = np - 1;
        dest = 0;
    }

    if (myid == src) {
        to   = dest;
        tag  = 2000;
        data = 0;
#ifdef VERBOSE
        printf( "About to send\n" );
#endif
        MPI_Send( &data, 1, MPI_INT, to, tag, MPI_COMM_WORLD );
    }
    if (myid == dest) {
        tag  = 2000;
        from = MPI_ANY_SOURCE;
        MPI_Probe( from, tag, MPI_COMM_WORLD, &status );
        MPI_Get_count( &status, MPI_INT, &maxlen );
        /* Here I'd normally allocate space; I'll just check that it is ok */
        if (maxlen > 1)
            printf( "Error; size = %d\n", maxlen );
#ifdef VERBOSE
        printf( "About to receive\n" );
#endif
        MPI_Recv( &data, 1, MPI_INT, status.MPI_SOURCE, status.MPI_TAG,
                  MPI_COMM_WORLD, &status );
    }
    MPI_Barrier( MPI_COMM_WORLD );
    Test_Waitforall( );
    MPI_Finalize();
    return 0;
}
int main( int argc, char **argv )
{
    int rank, size, i, j;
    int table[MAX_PROCESSES][MAX_PROCESSES];
    int row[MAX_PROCESSES];
    int errors=0;
    int participants;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    /* A maximum of MAX_PROCESSES processes can participate.  Note that the
       scatter below is still posted on MPI_COMM_WORLD, so if size exceeds
       MAX_PROCESSES the excluded ranks never enter the collective and the
       run will hang; a participants-only communicator (see the sketch after
       this test) avoids that. */
    if ( size > MAX_PROCESSES )
        participants = MAX_PROCESSES;
    else
        participants = size;

    if ( rank < participants ) {
        int send_count = MAX_PROCESSES;
        int recv_count = MAX_PROCESSES;

        /* If I'm the root (process 0), then fill out the big table */
        if (rank == 0)
            for ( i=0; i<participants; i++)
                for ( j=0; j<MAX_PROCESSES; j++ )
                    table[i][j] = i+j;

        /* Scatter the big table to everybody's little table */
        MPI_Scatter(&table[0][0], send_count, MPI_INT,
                    &row[0], recv_count, MPI_INT, 0, MPI_COMM_WORLD);

        /* Now see if our row looks right */
        for (i=0; i<MAX_PROCESSES; i++)
            if ( row[i] != i+rank ) errors++;
    }

    Test_Waitforall( );
    MPI_Finalize();
    if (errors) printf( "[%d] done with ERRORS(%d)!\n", rank, errors );
    return errors;
}
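/* Sketch (not part of the test above): restrict the scatter to the first
 * MAX_PROCESSES ranks with MPI_Comm_split, so every member of the
 * communicator used in the collective actually calls it and oversized runs
 * cannot hang.  The MAX_PROCESSES value of 10 is an assumption; the test's
 * real definition lives outside this excerpt. */
#include <mpi.h>

#define MAX_PROCESSES 10

int main(int argc, char **argv)
{
    int rank, size, participants, i, j;
    int table[MAX_PROCESSES][MAX_PROCESSES];
    int row[MAX_PROCESSES];
    MPI_Comm subcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    participants = (size > MAX_PROCESSES) ? MAX_PROCESSES : size;

    /* color 0 for participants, MPI_UNDEFINED (no new communicator) otherwise */
    MPI_Comm_split(MPI_COMM_WORLD, rank < participants ? 0 : MPI_UNDEFINED,
                   rank, &subcomm);

    if (subcomm != MPI_COMM_NULL) {
        if (rank == 0)
            for (i = 0; i < participants; i++)
                for (j = 0; j < MAX_PROCESSES; j++)
                    table[i][j] = i + j;
        MPI_Scatter(&table[0][0], MAX_PROCESSES, MPI_INT,
                    row, MAX_PROCESSES, MPI_INT, 0, subcomm);
        MPI_Comm_free(&subcomm);
    }
    MPI_Finalize();
    return 0;
}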
int main( int argc, char **argv) { char *buf; int i, iam; MPI_Init(&argc, &argv); MPI_Barrier(MPI_COMM_WORLD); buf = (char *)malloc(32*1024); MPI_Comm_rank(MPI_COMM_WORLD, &iam); for(i=1; i<=32; i++){ if (iam == 0){ *buf=i; printf("Broadcasting %d bytes\n", i*64); } MPI_Bcast(buf, i*64, MPI_BYTE, 0, MPI_COMM_WORLD); if (*buf != i) printf("Sanity check error on node %d\n", iam); /* gsync(); */ MPI_Barrier(MPI_COMM_WORLD); } Test_Waitforall( ); MPI_Finalize(); return 0; }
int main(int argc, char **argv)
{
    int rank, size, i, j;
    int table[MAX_PROCESSES][MAX_PROCESSES];
    int errors = 0;
    int block_size, begin_row, end_row, send_count, recv_count;

    DBM("calling MPI_Init\n");
    MPI_Init(&argc, &argv);
    DBM("calling MPI_Comm_rank\n");
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    DBM("calling MPI_Comm_size\n");
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    for (i = 0; i < MAX_PROCESSES; i++)
        for (j = 0; j < MAX_PROCESSES; j++)
            table[i][j] = -1;

    /* Determine what rows are my responsibility */
    block_size = MAX_PROCESSES / size;
    begin_row = rank * block_size;
    end_row = (rank + 1) * block_size;
    send_count = block_size * MAX_PROCESSES;
    recv_count = send_count;

    /* A maximum of MAX_PROCESSES processes can participate */
    if (size > MAX_PROCESSES) {
        fprintf(stderr, "Number of processes must be at most %d\n", MAX_PROCESSES);
        DBM("calling MPI_Abort\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Paint my rows my color */
    for (i = begin_row; i < end_row; i++)
        for (j = 0; j < MAX_PROCESSES; j++)
            table[i][j] = rank + 10;

#ifdef PRINTTABLE
    /* print table */
    printf("\n");
    for (i = 0; i < MAX_PROCESSES; i++) {
        for (j = 0; j < MAX_PROCESSES; j++)
            printf("%4d", table[i][j]);
        printf("\n");
    }
#endif

    DBM("starting allgather\n");
    /* Everybody gets the gathered table */
    MPI_Allgather(&table[begin_row][0], send_count, MPI_INT,
                  &table[0][0], recv_count, MPI_INT, MPI_COMM_WORLD);

    /* Everybody should have the same table now.
     * This test does not in any way guarantee there are no errors.
     * Print out the table or devise a smarter test to make sure it's correct. */
#ifdef PRINTTABLE
    /* print table */
    printf("\n");
    for (i = 0; i < MAX_PROCESSES; i++) {
        for (j = 0; j < MAX_PROCESSES; j++)
            printf("%4d", table[i][j]);
        printf("\n");
    }
#endif
    for (i = 0; i < MAX_PROCESSES; i++) {
        if (table[i][0] - table[i][MAX_PROCESSES - 1] != 0) {
            Test_Failed(NULL);
            printf("error at i=%d\n", i);
        }
    }

    Test_Waitforall();
    errors = Test_Global_Summary();
    MPI_Finalize();
    return errors;
}
int main(int argc, char *argv[]) { int rank, nprocs, A[SIZE2], B[SIZE2], i; MPI_Win win; MPI_Init(&argc,&argv); Test_Init_No_File(); MPI_Comm_size(MPI_COMM_WORLD,&nprocs); MPI_Comm_rank(MPI_COMM_WORLD,&rank); if (nprocs != 2) { printf("Run this program with 2 processes\n"); MPI_Abort(MPI_COMM_WORLD,1); } if (rank == 0) { for (i = 0; i < SIZE2; i++) A[i] = B[i] = i; MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win); for (i = 0; i < SIZE1; i++) { MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win); MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win); MPI_Win_unlock(1, win); } for (i = 0; i < SIZE1; i++) { MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win); MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win); MPI_Win_unlock(1, win); } MPI_Win_free(&win); for (i = 0; i < SIZE1; i++) if (B[i] != (-4) * (i + SIZE1)) { printf("Get Error: B[%d] is %d, should be %d\n", i, B[i], (-4) * (i + SIZE1)); Test_Failed(NULL); } } else { /* rank=1 */ for (i = 0; i < SIZE2; i++) B[i] = (-4) * i; MPI_Win_create(B, SIZE2 * sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win); MPI_Win_free(&win); for (i = 0; i < SIZE1; i++) { if (B[i] != i) { printf("Put Error: B[%d] is %d, should be %d\n", i, B[i], i); Test_Failed(NULL); } } } Test_Waitforall(); Test_Global_Summary(); MPI_Finalize(); return 0; }
int main( int argc, char **argv ) { int rank, size, ret, passed, i, *test_array; int stride, count, root; MPI_Datatype newtype; MPI_Comm comm = MPI_COMM_WORLD; /* Set up MPI */ MPI_Init(&argc, &argv); MPI_Comm_rank(comm, &rank); /* Setup the tests */ Test_Init("bcastvec", rank); /* Allow for additional communicators */ MPI_Comm_size(comm, &size); /* MPI_Comm_rank(comm, &rank); */ stride = (rank + 1); test_array = (int *)malloc(size*stride*sizeof(int)); /* Create the vector datatype EXCEPT for process 0 (vector of stride 1 is contiguous) */ if (rank > 0) { count = 1; MPI_Type_vector( size, 1, stride, MPI_INT, &newtype); MPI_Type_commit( &newtype ); } else { count = size; newtype = MPI_INT; } /* Perform the test. Each process in turn becomes the root. After each operation, check that nothing has gone wrong */ passed = 1; for (root = 0; root < size; root++) { /* Fill the array with -1 for unset, rank + i * size for set */ for (i=0; i<size*stride; i++) test_array[i] = -1; if (rank == root) for (i=0; i<size; i++) test_array[i*stride] = rank + i * size; MPI_Bcast( test_array, count, newtype, root, comm ); for (i=0; i<size; i++) { if (test_array[i*stride] != root + i * size) { passed = 0; } } } free(test_array); if (rank != 0) MPI_Type_free( &newtype ); if (!passed) Test_Failed("Simple Broadcast test with datatypes"); else { if (rank == 0) Test_Passed("Simple Broadcast test with datatypes"); } /* Close down the tests */ if (rank == 0) ret = Summarize_Test_Results(); else { ret = 0; } Test_Finalize(); /* Close down MPI */ Test_Waitforall( ); MPI_Finalize(); return ret; }
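/* Sketch (not part of the test above): why the root's contiguous
 * (count = size, MPI_INT) broadcast matches each receiver's strided vector
 * type.  Both type maps contain exactly `size` MPI_INT entries, so the type
 * signatures agree; only the extents differ.  The stride of 4 is arbitrary
 * and purely illustrative. */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, nprocs, size_bytes;
    MPI_Aint lb, extent;
    MPI_Datatype vec;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    MPI_Type_vector(nprocs, 1, 4, MPI_INT, &vec);
    MPI_Type_commit(&vec);
    MPI_Type_size(vec, &size_bytes);          /* nprocs * sizeof(int): the signature length */
    MPI_Type_get_extent(vec, &lb, &extent);   /* much larger: spans the stride */
    if (rank == 0)
        printf("vector type: size = %d bytes, extent = %ld bytes\n",
               size_bytes, (long)extent);

    MPI_Type_free(&vec);
    MPI_Finalize();
    return 0;
}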
int main( int argc, char **argv)
{
    int rank, size, i, recv_flag, ret, passed;
    MPI_Status Status;
    char message[17];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == 0) {
        Test_Init("barrier", rank);
        /* Receive the startup messages from each of the other clients */
        for (i = 0; i < size - 1; i++) {
            MPI_Recv(message, 17, MPI_CHAR, MPI_ANY_SOURCE, 2000,
                     MPI_COMM_WORLD, &Status);
        }

        /* Now use Iprobe to make sure no more messages arrive for a while */
        passed = 1;
        for (i = 0; i < WAIT_TIMES; i++) {
            recv_flag = 0;
            MPI_Iprobe(MPI_ANY_SOURCE, 2000, MPI_COMM_WORLD, &recv_flag, &Status);
            if (recv_flag)
                passed = 0;
        }
        if (passed)
            Test_Passed("Barrier Test 1");
        else
            Test_Failed("Barrier Test 1");

        /* Now go into the barrier myself */
        MPI_Barrier(MPI_COMM_WORLD);

        /* Collect the message each process sends once it is past the barrier */
        for (i = 0; i < size - 1; i++) {
            MPI_Recv(message, 13, MPI_CHAR, MPI_ANY_SOURCE, 2000,
                     MPI_COMM_WORLD, &Status);
        }

        /* Now use Iprobe to make sure no more messages arrive for a while */
        passed = 1;
        for (i = 0; i < WAIT_TIMES; i++) {
            recv_flag = 0;
            MPI_Iprobe(MPI_ANY_SOURCE, 2000, MPI_COMM_WORLD, &recv_flag, &Status);
            if (recv_flag)
                passed = 0;
        }
        if (passed)
            Test_Passed("Barrier Test 2");
        else
            Test_Failed("Barrier Test 2");

        Test_Waitforall( );
        ret = Summarize_Test_Results();
        Test_Finalize();
        MPI_Finalize();
        return ret;
    }
    else {
        MPI_Send((char*)"Entering Barrier", 17, MPI_CHAR, 0, 2000, MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Send((char*)"Past Barrier", 13, MPI_CHAR, 0, 2000, MPI_COMM_WORLD);
        Test_Waitforall( );
        MPI_Finalize();
        return 0;
    }
}
int main(int argc, char **argv) { int datasize; int *sbuf, *rbuf; int rank, size; int sendcount, *recvcounts, *displs; int i, j, k, *p; char errmsg[200]; int bufsize; MPI_Init(&argc, &argv); Test_Init_No_File(); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* is the buffersize specified? */ if (argc == 2) { datasize = atoi(argv[1]); if (datasize == 0) { fprintf(stderr, "Invalid data size!\n"); MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } } else { datasize = DATASIZE; } /* Create the buffers */ sbuf = (int *) malloc((rank + 1) * datasize * sizeof(int)); rbuf = (int *) malloc( (size * (size+1) / 2 * datasize + (size-1)) * sizeof(int) ); recvcounts = (int *) malloc(size * sizeof(int)); displs = (int *) malloc(size * sizeof(int)); if (!sbuf || !rbuf || !recvcounts || !displs) { fprintf(stderr, "Could not allocate buffers!\n"); MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } /* Load up the buffers */ for (i = 0; i < rank + 1; i++) { for (j = 0; j < datasize; j++) sbuf[i * datasize + j] = i + 100 * rank; } for (i = 0; i < size * (size + 1) / 2 * datasize + (size-1); i++) { rbuf[i] = -(i + 1); } /* Create the arguments to MPI_Allgatherv() */ sendcount = (rank + 1) * datasize; j = 0; for (i = 0; i < size; i++) { recvcounts[i] = (i + 1) * datasize; displs[i] = j; j += (i + 1) * datasize + 1; } MPI_Allgatherv(sbuf, sendcount, MPI_INT, rbuf, recvcounts, displs, MPI_INT, MPI_COMM_WORLD); /* Check rbuf */ p = rbuf; for (i = 0; i < size; i++) { for (j = 0; j < i + 1; j++) { for (k = 0; k < datasize; k++) { if (p[j * datasize + k] != j + 100 * i) { sprintf(errmsg, "[%d] got %d expected %d for %dth\n", rank, p[j * datasize + k], j + 100 * i, j * datasize + k); Test_Message( errmsg ); Test_Failed( NULL ); } } } p += (i + 1) * datasize + 1; } free(rbuf); free(sbuf); free(recvcounts); free(displs); Test_Waitforall(); Test_Global_Summary(); MPI_Finalize(); exit( EXIT_SUCCESS ); }
int main(int argc, char **argv) { int datasize; int *sbuf, *rbuf; int rank, size; int i, j, *p; char errmsg[200]; MPI_Init(&argc, &argv); Test_Init_No_File(); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* is the buffersize specified? */ if (argc == 2) { datasize = atoi(argv[1]); if (datasize <= 0) { fprintf(stderr, "Invalid data size!\n"); MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } } else { datasize = DATASIZE; } /* Create the buffers */ sbuf = (int *) malloc(datasize * sizeof(int)); rbuf = (int *) malloc(size * datasize * sizeof(int)); if (!sbuf || !rbuf) { fprintf(stderr, "Could not allocate buffers!\n"); MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } /* Load up the buffers */ for (i = 0; i < datasize; i++) { sbuf[i] = i + 100 * rank; } for (i = 0; i < size * datasize; i++) { rbuf[i] = -(i + 1); } MPI_Allgather(sbuf, datasize, MPI_INT, rbuf, datasize, MPI_INT, MPI_COMM_WORLD); /* Check rbuf */ for (i = 0; i < size; i++) { p = rbuf + i * datasize; for (j = 0; j < datasize; j++) { if (p[j] != j + 100 * i) { sprintf(errmsg, "[%d] got %d expected %d for %dth\n", rank, p[j], j + 100 * i, i * datasize + j); Test_Message( errmsg ); Test_Failed( NULL ); } } } free(rbuf); free(sbuf); Test_Waitforall(); Test_Global_Summary(); MPI_Finalize(); exit( EXIT_SUCCESS ); }
int main(int argc, char *argv[]) { int rank, destrank, nprocs, A[SIZE2], B[SIZE2], i; MPI_Group comm_group, group; MPI_Win win; MPI_Init(&argc,&argv); Test_Init_No_File(); MPI_Comm_size(MPI_COMM_WORLD,&nprocs); MPI_Comm_rank(MPI_COMM_WORLD,&rank); if (nprocs != 2) { printf("Run this program with 2 processes\n"); MPI_Abort(MPI_COMM_WORLD,1); } MPI_Comm_group(MPI_COMM_WORLD, &comm_group); if (rank == 0) { for (i=0; i<SIZE2; i++) A[i] = B[i] = i; MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win); destrank = 1; MPI_Group_incl(comm_group, 1, &destrank, &group); MPI_Win_start(group, 0, win); for (i=0; i<SIZE1; i++) MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win); for (i=0; i<SIZE1; i++) MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win); MPI_Win_complete(win); for (i=0; i<SIZE1; i++) if (B[i] != (-4)*(i+SIZE1)) { printf("Get Error: B[i] is %d, should be %d\n", B[i], (-4)*(i+SIZE1)); Test_Failed(NULL); } } else { /* rank=1 */ for (i=0; i<SIZE2; i++) B[i] = (-4)*i; MPI_Win_create(B, SIZE2*sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win); destrank = 0; MPI_Group_incl(comm_group, 1, &destrank, &group); MPI_Win_post(group, 0, win); MPI_Win_wait(win); for (i=0; i<SIZE1; i++) { if (B[i] != i) { printf("Put Error: B[i] is %d, should be %d\n", B[i], i); Test_Failed(NULL); } } } MPI_Group_free(&group); MPI_Group_free(&comm_group); MPI_Win_free(&win); Test_Waitforall(); Test_Global_Summary(); MPI_Finalize(); return 0; }
int main( int argc, char **argv ) { int rank, size, to, from, tag, count, i; int src, dest; int st_source, st_tag, st_count; int errcnt = 0; MPI_Request handle; MPI_Status status; double data[100]; MPI_Datatype rowtype; MPI_Init( &argc, &argv ); MPI_Comm_rank( MPI_COMM_WORLD, &rank ); MPI_Comm_size( MPI_COMM_WORLD, &size ); src = size - 1; dest = 0; /* dest = size - 1; src = 0; */ MPI_Type_vector( 10, 1, 10, MPI_DOUBLE, &rowtype ); MPI_Type_commit( &rowtype ); /* First test: send a row */ if (rank == src) { to = dest; count = 1; tag = 2001; for (i = 0; i < 100; i++) data[i] = i; /* Send a row */ MPI_Send( data, count, rowtype, to, tag, MPI_COMM_WORLD ); #ifdef SHOWMSG printf("%d sent", rank ); for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); #endif } if (rank == dest) { tag = MPI_ANY_TAG; count = 10; from = MPI_ANY_SOURCE; MPI_Recv(data, count, MPI_DOUBLE, from, tag, MPI_COMM_WORLD, &status ); st_source = status.MPI_SOURCE; st_tag = status.MPI_TAG; MPI_Get_count( &status, MPI_DOUBLE, &st_count ); if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { printf( "Status info: source = %d, tag = %d, count = %d\n", st_source, st_tag, st_count ); } #ifdef SHOWMSG printf( "%d received", rank); for (i = 0; i < 10; i++) printf(" %f",data[i]); printf("\n"); #endif for (i = 0; i < 10; i++) if (data[i] != 10*i) { errcnt++; fprintf( stderr, "[%d](rcv double) %d'th element = %f, should be %f\n", rank, i, data[i], 10.0*i ); } } /* Second test: receive a column into row */ if (rank == src) { to = dest; count = 10; tag = 2001; for (i = 0; i < 100; i++) data[i] = i; /* Send a row */ MPI_Send( data, count, MPI_DOUBLE, to, tag, MPI_COMM_WORLD ); #ifdef SHOWMSG printf("%d sent", rank ); for (i = 0; i < 10; i++) printf(" %f",data[i]);printf("\n"); #endif } if (rank == dest) { tag = MPI_ANY_TAG; count = 1; from = MPI_ANY_SOURCE; MPI_Recv(data, count, rowtype, from, tag, MPI_COMM_WORLD, &status ); st_source = status.MPI_SOURCE; st_tag = status.MPI_TAG; MPI_Get_count( &status, MPI_DOUBLE, &st_count ); if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { printf( "Status info: source = %d, tag = %d, count = %d\n", st_source, st_tag, st_count ); } #ifdef SHOWMSG printf( "%d received", rank); for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); #endif for (i = 0; i < 10; i++) if (data[i*10] != i) { errcnt++; fprintf( stderr, "[%d](rcv row) %d'th element = %f, should be %f\n", rank, i, data[i*10], 1.0*i ); } } /* Third test: send AND receive a row */ if (rank == src) { to = dest; count = 1; tag = 2001; for (i = 0; i < 100; i++) data[i] = i; /* Send a row */ MPI_Send( data, count, rowtype, to, tag, MPI_COMM_WORLD ); #ifdef SHOWMSG printf("%d sent", rank ); for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); #endif } if (rank == dest) { tag = MPI_ANY_TAG; count = 1; from = MPI_ANY_SOURCE; MPI_Recv(data, count, rowtype, from, tag, MPI_COMM_WORLD, &status ); st_source = status.MPI_SOURCE; st_tag = status.MPI_TAG; MPI_Get_count( &status, MPI_DOUBLE, &st_count ); if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { printf( "Status info: source = %d, tag = %d, count = %d\n", st_source, st_tag, st_count ); } #ifdef SHOWMSG printf( "%d received", rank); for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); #endif for (i = 0; i < 10; i++) if (data[i*10] != i*10) { errcnt++; fprintf( stderr, "[%d](rcv row-row) %d'th element = %f, should be %f\n", rank, i, data[i*10], 10.0*i ); } } /* Second Set of Tests: Use Isend and Irecv instead 
of Send and Recv */ /* First test: send a row */ if (rank == src) { to = dest; count = 1; tag = 2001; for (i = 0; i < 100; i++) data[i] = i; /* Send a row */ MPI_Isend( data, count, rowtype, to, tag, MPI_COMM_WORLD, &handle ); MPI_Wait( &handle, &status ); #ifdef SHOWMSG printf("%d sent", rank ); for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); #endif } if (rank == dest) { tag = MPI_ANY_TAG; count = 10; from = MPI_ANY_SOURCE; MPI_Irecv(data, count, MPI_DOUBLE, from, tag, MPI_COMM_WORLD, &handle ); MPI_Wait( &handle, &status ); st_source = status.MPI_SOURCE; st_tag = status.MPI_TAG; MPI_Get_count( &status, MPI_DOUBLE, &st_count ); if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { printf( "Status info: source = %d, tag = %d, count = %d\n", st_source, st_tag, st_count ); } #ifdef SHOWMSG printf( "%d received", rank); for (i = 0; i < 10; i++) printf(" %f",data[i]); printf("\n"); #endif for (i = 0; i < 10; i++) if (data[i] != 10*i) { errcnt++; fprintf( stderr, "[%d](ircv double) %d'th element = %f, should be %f\n", rank, i, data[i], 10.0*i ); } } /* Second test: receive a column into row */ if (rank == src) { to = dest; count = 10; tag = 2001; for (i = 0; i < 100; i++) data[i] = i; /* Send a row */ MPI_Isend( data, count, MPI_DOUBLE, to, tag, MPI_COMM_WORLD, &handle ); MPI_Wait( &handle, &status ); #ifdef SHOWMSG printf("%d sent", rank ); for (i = 0; i < 10; i++) printf(" %f",data[i]);printf("\n"); #endif } if (rank == dest) { tag = MPI_ANY_TAG; count = 1; from = MPI_ANY_SOURCE; MPI_Irecv(data, count, rowtype, from, tag, MPI_COMM_WORLD, &handle ); MPI_Wait( &handle, &status ); st_source = status.MPI_SOURCE; st_tag = status.MPI_TAG; MPI_Get_count( &status, MPI_DOUBLE, &st_count ); if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { printf( "Status info: source = %d, tag = %d, count = %d\n", st_source, st_tag, st_count ); } #ifdef SHOWMSG printf( "%d received", rank); for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); #endif for (i = 0; i < 10; i++) if (data[i*10] != i) { errcnt++; fprintf( stderr, "[%d](ircv row) %d'th element = %f, should be %f\n", rank, i, data[i*10], 1.0*i ); } } /* Third test: send AND receive a row */ if (rank == src) { to = dest; count = 1; tag = 2001; for (i = 0; i < 100; i++) data[i] = i; /* Send a row */ MPI_Isend( data, count, rowtype, to, tag, MPI_COMM_WORLD, &handle ); MPI_Wait( &handle, &status ); #ifdef SHOWMSG printf("%d sent", rank ); for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); #endif } if (rank == dest) { tag = MPI_ANY_TAG; count = 1; from = MPI_ANY_SOURCE; MPI_Irecv(data, count, rowtype, from, tag, MPI_COMM_WORLD, &handle ); MPI_Wait( &handle, &status ); st_source = status.MPI_SOURCE; st_tag = status.MPI_TAG; MPI_Get_count( &status, MPI_DOUBLE, &st_count ); if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { printf( "Status info: source = %d, tag = %d, count = %d\n", st_source, st_tag, st_count ); } #ifdef SHOWMSG printf( "%d received", rank); for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); #endif for (i = 0; i < 10; i++) if (data[i*10] != i*10) { errcnt++; fprintf( stderr, "[%d](ircv row-row) %d'th element = %f, should be %f\n", rank, i, data[i*10], 10.0*i ); } } i = errcnt; MPI_Allreduce( &i, &errcnt, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); if (errcnt > 0) { printf( "Found %d errors in the run \n", errcnt ); } MPI_Type_free( &rowtype ); Test_Waitforall( ); MPI_Finalize(); return 0; }
int main( int argc, char **argv ) { int rank, size, i; int errors=0; int dims[NUM_DIMS]; int periods[NUM_DIMS]; int coords[NUM_DIMS]; int new_coords[NUM_DIMS]; int reorder = 1; MPI_Comm comm_temp, comm_cart, new_comm; int topo_status; int ndims; int new_rank; int remain_dims[NUM_DIMS]; int newnewrank; MPI_Init( &argc, &argv ); MPI_Comm_rank( MPI_COMM_WORLD, &rank ); MPI_Comm_size( MPI_COMM_WORLD, &size ); /* Clear dims array and get dims for topology */ for(i=0;i<NUM_DIMS;i++) { dims[i] = 0; periods[i] = 0; } MPI_Dims_create ( size, NUM_DIMS, dims ); /* Make a new communicator with a topology */ MPI_Cart_create ( MPI_COMM_WORLD, 2, dims, periods, reorder, &comm_temp ); MPI_Comm_dup ( comm_temp, &comm_cart ); /* Determine the status of the new communicator */ MPI_Topo_test ( comm_cart, &topo_status ); if (topo_status != MPI_CART) { printf( "topo_status of duped comm is not MPI_CART\n" ); errors++; } /* How many dims do we have? */ MPI_Cartdim_get( comm_cart, &ndims ); if ( ndims != NUM_DIMS ) { printf( "Number of dims of duped comm (%d) should be %d\n", ndims, NUM_DIMS ); errors++; } /* Get the topology, does it agree with what we put in? */ for(i=0;i<NUM_DIMS;i++) { dims[i] = 0; periods[i] = 0; } MPI_Cart_get ( comm_cart, NUM_DIMS, dims, periods, coords ); /* Does the mapping from coords to rank work? */ MPI_Cart_rank ( comm_cart, coords, &new_rank ); if ( new_rank != rank ) { printf( "New rank of duped comm (%d) != old rank (%d)\n", new_rank, rank ); errors++; } /* Does the mapping from rank to coords work */ MPI_Cart_coords ( comm_cart, rank, NUM_DIMS, new_coords ); for (i=0;i<NUM_DIMS;i++) if ( coords[i] != new_coords[i] ) { printf( "Old coords[%d] of duped comm (%d) != new_coords (%d)\n", i, coords[i], new_coords[i] ); errors++; } /* Let's shift in each dimension and see how it works! */ /* Because it's late and I'm tired, I'm not making this */ /* automatically test itself. */ for (i=0;i<NUM_DIMS;i++) { int source, dest; MPI_Cart_shift(comm_cart, i, 1, &source, &dest); #ifdef VERBOSE printf ("[%d] Shifting %d in the %d dimension\n",rank,1,i); printf ("[%d] source = %d dest = %d\n",rank,source,dest); #endif } /* Subdivide */ remain_dims[0] = 0; for (i=1; i<NUM_DIMS; i++) remain_dims[i] = 1; MPI_Cart_sub ( comm_cart, remain_dims, &new_comm ); /* Determine the status of the new communicator */ MPI_Topo_test ( new_comm, &topo_status ); if (topo_status != MPI_CART) { printf( "topo_status of cartsub comm is not MPI_CART\n" ); errors++; } /* How many dims do we have? */ MPI_Cartdim_get( new_comm, &ndims ); if ( ndims != NUM_DIMS-1 ) { printf( "Number of dims of cartsub comm (%d) should be %d\n", ndims, NUM_DIMS-1 ); errors++; } /* Get the topology, does it agree with what we put in? */ for(i=0;i<NUM_DIMS-1;i++) { dims[i] = 0; periods[i] = 0; } MPI_Cart_get ( new_comm, ndims, dims, periods, coords ); /* Does the mapping from coords to rank work? 
*/ MPI_Comm_rank ( new_comm, &newnewrank ); MPI_Cart_rank ( new_comm, coords, &new_rank ); if ( new_rank != newnewrank ) { printf( "New rank of cartsub comm (%d) != old rank (%d)\n", new_rank, newnewrank ); errors++; } /* Does the mapping from rank to coords work */ MPI_Cart_coords ( new_comm, new_rank, NUM_DIMS -1, new_coords ); for (i=0;i<NUM_DIMS-1;i++) if ( coords[i] != new_coords[i] ) { printf( "Old coords[%d] of cartsub comm (%d) != new_coords (%d)\n", i, coords[i], new_coords[i] ); errors++; } /* We're at the end */ MPI_Comm_free( &new_comm ); MPI_Comm_free( &comm_temp ); MPI_Comm_free( &comm_cart ); Test_Waitforall( ); if (errors) printf( "[%d] done with %d ERRORS!\n", rank,errors ); MPI_Finalize(); return 0; }
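/* Sketch (not part of the test above): a self-check for the MPI_Cart_shift
 * section, which currently only prints source and dest.  Each process
 * forwards its own rank to dest with MPI_Sendrecv and verifies that the
 * value arriving from source equals source.  Assumes the same kind of 2-D
 * grid; illustrative only. */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size, i;
    int dims[2] = {0, 0}, periods[2] = {0, 0};
    MPI_Comm cart;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Dims_create(size, 2, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart);
    MPI_Comm_rank(cart, &rank);

    for (i = 0; i < 2; i++) {
        int source, dest, got = -1;
        MPI_Cart_shift(cart, i, 1, &source, &dest);
        MPI_Sendrecv(&rank, 1, MPI_INT, dest, 0,
                     &got, 1, MPI_INT, source, 0, cart, MPI_STATUS_IGNORE);
        if (source != MPI_PROC_NULL && got != source)
            printf("[%d] shift check failed in dim %d: got %d, expected %d\n",
                   rank, i, got, source);
    }

    MPI_Comm_free(&cart);
    MPI_Finalize();
    return 0;
}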
int main( int argc, char **argv )
{
    int data, to, from, tag, maxlen, np, myid, flag, dest, src;
    MPI_Status status, status1;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &myid );
    MPI_Comm_size( MPI_COMM_WORLD, &np );

    /* dest writes out the received stats; for the output to be consistent
       (with the final check), it should be process 0 */
    if (argc > 1 && argv[1] && strcmp( "-alt", argv[1] ) == 0) {
        dest = np - 1;
        src  = 0;
    }
    else {
        src  = np - 1;
        dest = 0;
    }

    if (myid == src) {
        to   = dest;
        tag  = 2000;
        data = 0;
#ifdef VERBOSE
        printf( "About to send\n" );
#endif
        MPI_Send( &data, 1, MPI_INT, to, tag, MPI_COMM_WORLD );
        tag = 2001;
#ifdef VERBOSE
        printf( "About to send 'done'\n" );
#endif
        MPI_Send( &data, 1, MPI_INT, to, tag, MPI_COMM_WORLD );
    }
    else if (myid == dest) {
        /* Server loop; only the destination serves, other ranks fall through
           to the barrier */
        while (1) {
            tag  = MPI_ANY_TAG;
            from = MPI_ANY_SOURCE;
            /* Should really use MPI_Probe, but functionally this will work
               (it is less efficient, however); see the sketch after this test */
            do {
                MPI_Iprobe( from, tag, MPI_COMM_WORLD, &flag, &status );
            } while (!flag);
            if (status.MPI_TAG == 2001) {
                printf( "Received terminate message\n" );
                /* Actually need to receive it ... */
                MPI_Recv( &data, 1, MPI_INT, status.MPI_SOURCE, status.MPI_TAG,
                          MPI_COMM_WORLD, &status1 );
                break;
            }
            if (status.MPI_TAG == 2000) {
                MPI_Get_count( &status, MPI_INT, &maxlen );
                if (maxlen > 1)
                    printf( "Error; size = %d\n", maxlen );
                printf( "About to receive\n" );
                MPI_Recv( &data, 1, MPI_INT, status.MPI_SOURCE, status.MPI_TAG,
                          MPI_COMM_WORLD, &status1 );
            }
        }
    }
    MPI_Barrier( MPI_COMM_WORLD );
    Test_Waitforall( );
    MPI_Finalize();
    return 0;
}
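/* Sketch (not part of the test above): the blocking-probe variant that the
 * server-loop comment alludes to, using MPI_Probe instead of spinning on
 * MPI_Iprobe.  The tag convention mirrors the test (2000 = data,
 * 2001 = terminate); everything else is illustrative.  Needs at least two
 * processes. */
#include <stdio.h>
#include <mpi.h>

static void serve(void)
{
    MPI_Status status;
    int data, count;
    for (;;) {
        MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_INT, &count);
        MPI_Recv(&data, count, MPI_INT, status.MPI_SOURCE, status.MPI_TAG,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (status.MPI_TAG == 2001)   /* terminate message */
            break;
    }
}

int main(int argc, char **argv)
{
    int rank, payload = 0;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0) {
        serve();
    }
    else if (rank == 1) {
        MPI_Send(&payload, 1, MPI_INT, 0, 2000, MPI_COMM_WORLD);
        MPI_Send(&payload, 1, MPI_INT, 0, 2001, MPI_COMM_WORLD);
    }
    MPI_Finalize();
    return 0;
}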
int main(int argc, char **argv)
{
    int rank, size, i, j;
    int table[MAX_PROCESSES][MAX_PROCESSES];
    int errors = 0;
    int displs[MAX_PROCESSES];
    int recv_counts[MAX_PROCESSES];
    int block_size, begin_row, end_row, send_count;

    /* Clear the whole table */
    for (i = 0; i < MAX_PROCESSES; i++)
        for (j = 0; j < MAX_PROCESSES; j++)
            table[i][j] = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Determine what rows are my responsibility */
    block_size = MAX_PROCESSES / size;
    begin_row = rank * block_size;
    end_row = (rank + 1) * block_size;
    send_count = block_size * MAX_PROCESSES;

    /* A maximum of MAX_PROCESSES processes can participate */
    if (size > MAX_PROCESSES) {
        fprintf(stderr, "Number of processes must be at most %d\n", MAX_PROCESSES);
        fflush(stderr);
        DBM("calling MPI_Abort\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Fill in the displacements and recv_counts */
    for (i = 0; i < size; i++) {
        displs[i] = i * block_size * MAX_PROCESSES;
        recv_counts[i] = send_count;
    }

    /* Paint my rows my color */
    for (i = begin_row; i < end_row; i++)
        for (j = 0; j < MAX_PROCESSES; j++)
            table[i][j] = rank + 10;

    /* Everybody gets the gathered data */
    MPI_Allgatherv(&table[begin_row][0], send_count, MPI_INT,
                   &table[0][0], recv_counts, displs, MPI_INT, MPI_COMM_WORLD);

    /* Everybody should have the same table now.
     *
     * The entries are:
     *   table[i][j] = i / block_size + 10;
     */
    for (i = 0; i < size; i++)
        if (table[i][0] - table[i][MAX_PROCESSES - 1] != 0)
            Test_Failed(NULL);
    for (i = 0; i < size; i++)
        for (j = 0; j < MAX_PROCESSES; j++)
            if (table[i][j] != i / block_size + 10)
                Test_Failed(NULL);

    errors = Test_Global_Summary();
    if (errors) {
        /* Print out the table if there are any errors */
        for (i = 0; i < size; i++) {
            printf("\n");
            for (j = 0; j < MAX_PROCESSES; j++)
                printf(" %d", table[i][j]);
        }
        printf("\n");
    }

    Test_Waitforall();
    MPI_Finalize();
    return errors;
}
int main(int argc, char *argv[])
{
    int rank, nprocs, i;
    int *A, *B;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    Test_Init_No_File();
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (nprocs != 2) {
        printf("Run this program with 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    i = MPI_Alloc_mem(SIZE * sizeof(int), MPI_INFO_NULL, &A);
    if (i) {
        printf("Can't allocate memory in test program\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    i = MPI_Alloc_mem(SIZE * sizeof(int), MPI_INFO_NULL, &B);
    if (i) {
        printf("Can't allocate memory in test program\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 0) {
        for (i=0; i<SIZE; i++)
            A[i] = B[i] = i;
    }
    else {
        for (i=0; i<SIZE; i++) {
            A[i] = (-3)*i;
            B[i] = (-4)*i;
        }
    }

    MPI_Win_create(B, SIZE*sizeof(int), sizeof(int), MPI_INFO_NULL,
                   MPI_COMM_WORLD, &win);
    MPI_Win_fence(0, win);

    if (rank == 0) {
        for (i=0; i<SIZE-1; i++)
            MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
    }
    else {
        for (i=0; i<SIZE-1; i++)
            MPI_Get(A+i, 1, MPI_INT, 0, i, 1, MPI_INT, win);
        /* Deliberately outside the loop: accumulate the one remaining
           element (i == SIZE-1) into rank 0's window */
        MPI_Accumulate(A+i, 1, MPI_INT, 0, i, 1, MPI_INT, MPI_SUM, win);
    }

    MPI_Win_fence(0, win);

    if (rank == 1) {
        for (i=0; i<SIZE-1; i++) {
            if (A[i] != B[i]) {
                printf("Put/Get Error: A[%d]=%d, B[%d]=%d\n", i, A[i], i, B[i]);
                Test_Failed(NULL);
            }
        }
    }
    else {
        if (B[SIZE-1] != SIZE - 1 - 3*(SIZE-1)) {
            printf("Accumulate Error: B[SIZE-1] is %d, should be %d\n",
                   B[SIZE-1], SIZE - 1 - 3*(SIZE-1));
            Test_Failed(NULL);
        }
    }

    MPI_Win_free(&win);
    MPI_Free_mem(A);
    MPI_Free_mem(B);
    Test_Waitforall();
    Test_Global_Summary();
    MPI_Finalize();
    return 0;
}
/* Define VERBOSE to get printed output */ int main( int argc, char **argv ) { int rank, size, to, from, tag, count; int src, dest, waiter; int st_count; #ifdef VERBOSE int st_source, st_tag; #endif MPI_Status status; char data[100]; MPI_Request rq[2]; MPI_Status statuses[2]; MPI_Init( &argc, &argv ); MPI_Comm_rank( MPI_COMM_WORLD, &rank ); MPI_Comm_size( MPI_COMM_WORLD, &size ); /* src = size - 1; dest = 0; */ if (size < 2){ fprintf(stderr,"Needs at least two processes"); fflush(stderr); return 1; } src = 0; dest = size - 1; /* waiter = dest; */ /* Receiver delays, so msgs unexpected */ /* waiter = src; */ /* Sender delays, so recvs posted */ waiter = 10000; /* nobody waits */ if (rank == src) { if (waiter == src) #ifdef WIN32 Sleep(10000); #else sleep(10); #endif to = dest; tag = 2001; sprintf(data,"First message, type 2001"); count = strlen(data) + 1; MPI_Isend( data, count, MPI_CHAR, to, tag, MPI_COMM_WORLD, &rq[0] ); #ifdef VERBOSE printf("%d sent :%s:\n", rank, data ); #endif tag = 2002; sprintf(data,"Second message, type 2002"); count = strlen(data) + 1; MPI_Isend( data, count, MPI_CHAR, to, tag, MPI_COMM_WORLD, &rq[1] ); MPI_Waitall( 2, rq, statuses ); #ifdef VERBOSE printf("%d sent :%s:\n", rank, data ); #endif } else if (rank == dest) { if (waiter == dest) #ifdef WIN32 Sleep(10000); #else sleep(10); #endif from = MPI_ANY_SOURCE; count = 100; tag = 2002; MPI_Recv(data, count, MPI_CHAR, from, tag, MPI_COMM_WORLD, &status ); MPI_Get_count( &status, MPI_CHAR, &st_count ); if (st_count != strlen("Second message, type 2002") + 1) { printf( "Received wrong length!\n" ); } #ifdef VERBOSE st_source = status.MPI_SOURCE; st_tag = status.MPI_TAG; printf( "Status info: source = %d, tag = %d, count = %d\n", st_source, st_tag, st_count ); printf( "%d received :%s:\n", rank, data); #endif tag = 2001; MPI_Recv(data, count, MPI_CHAR, from, tag, MPI_COMM_WORLD, &status ); MPI_Get_count( &status, MPI_CHAR, &st_count ); if (st_count != strlen("First message, type 2001") + 1) { printf( "Received wrong length!\n" ); } #ifdef VERBOSE st_source = status.MPI_SOURCE; st_tag = status.MPI_TAG;\ printf( "Status info: source = %d, tag = %d, count = %d\n", st_source, st_tag, st_count ); printf( "%d received :%s:\n", rank, data); #endif } #ifdef VERBOSE printf( "Process %d exiting\n", rank ); #endif Test_Waitforall( ); MPI_Finalize(); return 0; }
int main( int argc, char **argv )
{
    int rank, size, i;
    int data;
    int errors=0;
    int result = -100;
    int correct_result;
    MPI_Op op_assoc, op_addem;
    MPI_Comm comm;

    MPI_Init( &argc, &argv );
    MPI_Op_create( (MPI_User_function *)assoc, 0, &op_assoc );
    MPI_Op_create( (MPI_User_function *)addem, 1, &op_addem );

    /* Run this for a variety of communicator sizes */
    while ((comm = GetNextComm()) != MPI_COMM_NULL) {
        MPI_Comm_rank( comm, &rank );
        MPI_Comm_size( comm, &size );
        data = rank;
        correct_result = 0;
        for (i=0; i<=rank; i++)
            correct_result += i;

        MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, comm );
        if (result != correct_result) {
            fprintf( stderr, "[%d] Error summing ints with scan\n", rank );
            errors++;
        }
        MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, comm );
        if (result != correct_result) {
            fprintf( stderr, "[%d] Error summing ints with scan (2)\n", rank );
            errors++;
        }

        data = rank;
        result = -100;
        MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, comm );
        if (result != correct_result) {
            fprintf( stderr, "[%d] Error summing ints with scan (userop)\n", rank );
            errors++;
        }
        MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, comm );
        if (result != correct_result) {
            fprintf( stderr, "[%d] Error summing ints with scan (userop2)\n", rank );
            errors++;
        }

        /* Disabled: scan with the non-commutative operator
        result = -100;
        data = rank;
        MPI_Scan ( &data, &result, 1, MPI_INT, op_assoc, comm );
        if (result == BAD_ANSWER) {
            fprintf( stderr, "[%d] Error scanning with non-commutative op\n", rank );
            errors++;
        }
        */

        MPI_Comm_free( &comm );
    }

    MPI_Op_free( &op_assoc );
    MPI_Op_free( &op_addem );

    if (errors) {
        MPI_Comm_rank( MPI_COMM_WORLD, &rank );
        printf( "[%d] done with ERRORS(%d)!\n", rank, errors );
    }
    Test_Waitforall( );
    MPI_Finalize();
    return errors;
}
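/* Sketch (not part of the test above): the operator functions assoc and
 * addem are defined outside this excerpt.  This shows what a commutative
 * integer-sum operator with the MPI_User_function signature looks like and
 * how it plugs into MPI_Scan; my_addem is a hypothetical stand-in, not the
 * test's actual implementation. */
#include <stdio.h>
#include <mpi.h>

static void my_addem(void *invec, void *inoutvec, int *len, MPI_Datatype *dtype)
{
    int i, *in = (int *)invec, *inout = (int *)inoutvec;
    (void)dtype;                      /* this sketch assumes MPI_INT data */
    for (i = 0; i < *len; i++)
        inout[i] += in[i];            /* element-wise sum */
}

int main(int argc, char **argv)
{
    int rank, result;
    MPI_Op op;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Op_create(my_addem, 1 /* commutative */, &op);
    MPI_Scan(&rank, &result, 1, MPI_INT, op, MPI_COMM_WORLD);
    printf("[%d] prefix sum = %d\n", rank, result);
    MPI_Op_free(&op);
    MPI_Finalize();
    return 0;
}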
int main(int argc, char **argv) { int rank, size; int i, j, k; int **table, *buffer; int begin_row, end_row, send_count, *recv_counts, *displs; char errmsg[200]; MPI_Init( &argc, &argv ); Test_Init_No_File(); MPI_Comm_rank( MPI_COMM_WORLD, &rank ); MPI_Comm_size( MPI_COMM_WORLD, &size ); /* get buffer space and init table */ buffer = (int *) malloc( (size * BLOCKSIZE) * (size * BLOCKSIZE) * sizeof(int) ); table = (int **)malloc( (size * BLOCKSIZE) * sizeof(int *) ); recv_counts = (int *) malloc( size * sizeof(int) ); displs = (int *) malloc( size * sizeof(int) ); if( !buffer || !table || !recv_counts || !displs ) { fprintf( stderr, "Out of memory error!\n" ); MPI_Abort( MPI_COMM_WORLD, EXIT_FAILURE ); } for( i = 0; i < size * BLOCKSIZE; i++ ) table[i] = &(buffer[i*size*BLOCKSIZE]); /* Determine what rows are my responsibility */ begin_row = rank * BLOCKSIZE; end_row = (rank + 1) * BLOCKSIZE; send_count = BLOCKSIZE * size * BLOCKSIZE; for( i = 0; i < size; i++ ) { recv_counts[i] = BLOCKSIZE * size * BLOCKSIZE; displs[i] = i * BLOCKSIZE * size * BLOCKSIZE; } /* Paint my rows my color */ for( i = begin_row; i < end_row ; i++ ) for( j = 0; j < size * BLOCKSIZE; j++ ) table[i][j] = rank + 10; /* Gather everybody's result together - sort of like an */ /* inefficient allgather */ for (i = 0; i < size; i++) MPI_Gatherv(table[begin_row], send_count, MPI_INT, table[0], recv_counts, displs, MPI_INT, i, MPI_COMM_WORLD); /* Everybody should have the same table now. */ for( i = 0; i < size; i++ ) for( j = 0; j < BLOCKSIZE; j++ ) for( k = 0; k < size * BLOCKSIZE; k++ ) if( table[i*BLOCKSIZE+j][k] != i + 10 ) { sprintf(errmsg, "[%d] got %d expected %d for %dth entry in row %d\n", rank, table[i*BLOCKSIZE+j][k], i + 10, k, i*BLOCKSIZE + j); Test_Message( errmsg ); Test_Failed( NULL ); } Test_Waitforall(); Test_Global_Summary(); free( buffer ); free( table ); free( recv_counts ); free( displs ); MPI_Finalize(); exit( EXIT_SUCCESS ); }
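/* Sketch (not part of the test above): the single MPI_Allgatherv call that
 * the loop of MPI_Gatherv calls emulates.  MPI_IN_PLACE is used because each
 * process's contribution already sits at its own offset in the shared
 * buffer.  BLOCKSIZE is assumed to be 16 here; the test's real value is
 * defined outside this excerpt. */
#include <stdlib.h>
#include <mpi.h>

#define BLOCKSIZE 16

int main(int argc, char **argv)
{
    int rank, size, i, j;
    int *buffer, *recv_counts, *displs;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    buffer      = (int *)malloc((size * BLOCKSIZE) * (size * BLOCKSIZE) * sizeof(int));
    recv_counts = (int *)malloc(size * sizeof(int));
    displs      = (int *)malloc(size * sizeof(int));
    for (i = 0; i < size; i++) {
        recv_counts[i] = BLOCKSIZE * size * BLOCKSIZE;
        displs[i]      = i * BLOCKSIZE * size * BLOCKSIZE;
    }

    /* paint my block of rows, exactly as in the test */
    for (i = rank * BLOCKSIZE; i < (rank + 1) * BLOCKSIZE; i++)
        for (j = 0; j < size * BLOCKSIZE; j++)
            buffer[i * size * BLOCKSIZE + j] = rank + 10;

    /* one collective instead of `size` rooted gathers */
    MPI_Allgatherv(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
                   buffer, recv_counts, displs, MPI_INT, MPI_COMM_WORLD);

    free(buffer);
    free(recv_counts);
    free(displs);
    MPI_Finalize();
    return 0;
}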