int main(int argc, char *argv[])
{
    int ierr, i, size, rank;
    int cnt = 270000000;
    int stat_cnt = 0;
    MPI_Status status;
    long long *cols;
    int errs = 0;

    MTest_Init(&argc, &argv);

    /* need large memory */
    if (sizeof(void *) < 8) {
        MTest_Finalize(errs);
        return MTestReturnValue(errs);
    }

    ierr = MPI_Comm_size(MPI_COMM_WORLD, &size);
    ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (size != 3) {
        fprintf(stderr, "[%d] usage: mpiexec -n 3 %s\n", rank, argv[0]);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    cols = malloc(cnt * sizeof(long long));
    if (cols == NULL) {
        printf("malloc of >2GB array failed\n");
        errs++;
        MTest_Finalize(errs);
        return MTestReturnValue(errs);
    }

    if (rank == 0) {
        for (i = 0; i < cnt; i++)
            cols[i] = i;
        /* printf("[%d] sending...\n",rank); */
        ierr = MPI_Send(cols, cnt, MPI_LONG_LONG_INT, 1, 0, MPI_COMM_WORLD);
        ierr = MPI_Send(cols, cnt, MPI_LONG_LONG_INT, 2, 0, MPI_COMM_WORLD);
    } else {
        /* printf("[%d] receiving...\n",rank); */
        for (i = 0; i < cnt; i++)
            cols[i] = -1;
        ierr = MPI_Recv(cols, cnt, MPI_LONG_LONG_INT, 0, 0, MPI_COMM_WORLD, &status);
        ierr = MPI_Get_count(&status, MPI_LONG_LONG_INT, &stat_cnt);
        if (cnt != stat_cnt) {
            fprintf(stderr, "Output of MPI_Get_count (%d) does not match expected count (%d).\n",
                    stat_cnt, cnt);
            errs++;
        }
        for (i = 0; i < cnt; i++) {
            if (cols[i] != i) {
                /* printf("Rank %d, cols[i]=%lld, should be %d\n", rank, cols[i], i); */
                errs++;
            }
        }
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char *argv[])
{
    int i, k, wrank, errs = 0;
    int dims[MAX_DIMS];

    MTest_Init(0, 0);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);

    if (wrank == 0) {
        for (k = 0; tests[k].size > 0; k++) {
            zeroDims(tests[k].dim, dims);
            MPI_Dims_create(tests[k].size, tests[k].dim, dims);
            if (checkDims(&tests[k], dims)) {
                errs++;
                MTestPrintfMsg(1, "Test %d failed with mismatched output", k);
                if (errs < 10) {
                    fprintf(stderr, "%d in %dd: ", tests[k].size, tests[k].dim);
                    for (i = 0; i < tests[k].dim - 1; i++)
                        fprintf(stderr, "%d x ", dims[i]);
                    fprintf(stderr, "%d != %d", dims[tests[k].dim - 1], tests[k].orderedDecomp[0]);
                    for (i = 1; i < tests[k].dim; i++)
                        fprintf(stderr, " x %d", tests[k].orderedDecomp[i]);
                    fprintf(stderr, "\n");
                }
            }
        }
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return MTestReturnValue(errs);
}
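/* The test table and helpers used above are defined elsewhere in the
 * original file. A minimal sketch, assuming each entry carries a process
 * count, a dimension count, and the expected (non-increasing)
 * factorization; the type name and table values are illustrative only: */
#define MAX_DIMS 4

typedef struct {
    int size;                    /* number of processes to factor */
    int dim;                     /* number of Cartesian dimensions */
    int orderedDecomp[MAX_DIMS]; /* expected MPI_Dims_create output */
} DimsTestVal;

static DimsTestVal tests[] = {
    {6, 2, {3, 2}},
    {2, 2, {2, 1}},
    {0, 0, {0}}                  /* size == 0 terminates the list */
};

static void zeroDims(int dim, int dims[])
{
    int i;
    for (i = 0; i < dim; i++)
        dims[i] = 0;             /* MPI_Dims_create fills only zeroed entries */
}

static int checkDims(DimsTestVal * test, const int dims[])
{
    int i;
    for (i = 0; i < test->dim; i++)
        if (dims[i] != test->orderedDecomp[i])
            return 1;            /* mismatch */
    return 0;
}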
int main(int argc, char **argv)
{
    int rank, size, i;
    int data;
    int errors = 0;
    int result = -100;
    int correct_result;
    MPI_Op op;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    data = rank;
    MPI_Op_create((MPI_User_function *) addem, 1, &op);
    MPI_Reduce(&data, &result, 1, MPI_INT, op, 0, MPI_COMM_WORLD);
    MPI_Bcast(&result, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Op_free(&op);

    correct_result = 0;
    for (i = 0; i < size; i++)
        correct_result += i;
    if (result != correct_result)
        errors++;

    MTest_Finalize(errors);
    MPI_Finalize();
    return MTestReturnValue(errors);
}
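/* addem is the user combination function created above. Given that main
 * checks for the arithmetic sum of the ranks, a sketch of the expected
 * definition (the cast in MPI_Op_create matches this int signature): */
void addem(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
    int i;
    for (i = 0; i < *len; i++)
        inoutvec[i] += invec[i];        /* element-wise integer sum */
}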
int main(int argc, char **argv)
{
    int rank, nproc;
    int out_val, i, counter = 0;
    MPI_Win win;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Win_create(&counter, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    for (i = 0; i < NITER; i++) {
        MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
        MPI_Get_accumulate(&acc_val, 1, MPI_INT, &out_val, 1, MPI_INT,
                           rank, 0, 1, MPI_INT, MPI_SUM, win);
        MPI_Win_unlock(rank, win);

        if (out_val != acc_val * i) {
            errs++;
            printf("Error: got %d, expected %d at iter %d\n", out_val, acc_val * i, i);
            break;
        }
    }

    MPI_Win_free(&win);
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
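/* File-scope definitions assumed by the loop above (not shown in the
 * original); the constant values are plausible placeholders: */
#define NITER 100

static const int acc_val = 4;   /* amount added to the counter each pass */
static int errs = 0;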
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int j, count;
    char *ap;

    MTest_Init(&argc, &argv);

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    for (count = 1; count < 128000; count *= 2) {
        err = MPI_Alloc_mem(count, MPI_INFO_NULL, &ap);
        if (err) {
            int errclass;
            /* An error of MPI_ERR_NO_MEM is allowed */
            MPI_Error_class(err, &errclass);
            if (errclass != MPI_ERR_NO_MEM) {
                errs++;
                MTestPrintError(err);
            }
        } else {
            /* Access all of this memory */
            for (j = 0; j < count; j++) {
                ap[j] = (char) (j & 0x7f);
            }
            MPI_Free_mem(ap);
        }
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char **argv)
{
    int thread_args[NUM_THREADS];
    int i, provided;
    int errs = 0;

    MTest_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    check(provided == MPI_THREAD_MULTIPLE);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    for (i = 0; i < NUM_THREADS; i++) {
        MPI_Comm_dup(MPI_COMM_WORLD, &comms[i]);
    }

    for (i = 0; i < NUM_THREADS; i++) {
        thread_args[i] = i;
        MTest_Start_thread(test_iallred, (void *) &thread_args[i]);
    }

    errs = MTest_Join_threads();

    for (i = 0; i < NUM_THREADS; i++) {
        MPI_Comm_free(&comms[i]);
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
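/* Globals and thread body assumed by the test above. The names follow
 * their use in main; the bodies are a sketch built on the mpitest
 * threading helpers, not the original code: */
#define NUM_THREADS 4

static MPI_Comm comms[NUM_THREADS];
static int rank, size;

#define check(X_)                                       \
    do {                                                \
        if (!(X_)) {                                    \
            fprintf(stderr, "check failed: %s\n", #X_); \
            MPI_Abort(MPI_COMM_WORLD, 1);               \
        }                                               \
    } while (0)

static MTEST_THREAD_RETURN_TYPE test_iallred(void *arg)
{
    int id = *(int *) arg;
    int in = rank, out = -1;
    MPI_Request req;

    /* each thread issues a nonblocking allreduce on its own communicator */
    MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, comms[id], &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    return (MTEST_THREAD_RETURN_TYPE) 0;
}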
int main(int argc, char **argv)
{
    int err, errs = 0;

    MTest_Init(&argc, &argv);
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
     * change the error handler to errors return */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* perform some tests */
    err = blockindexed_contig_test();
    if (err && verbose)
        fprintf(stderr, "%d errors in blockindexed test.\n", err);
    errs += err;

    err = blockindexed_vector_test();
    if (err && verbose)
        fprintf(stderr, "%d errors in blockindexed vector test.\n", err);
    errs += err;

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
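/* parse_args and verbose are shared scaffolding in these datatype tests,
 * defined elsewhere; a minimal sketch (the -v flag is an assumption): */
static int verbose = 0;

static void parse_args(int argc, char **argv)
{
    int i;
    for (i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-v") == 0)
            verbose = 1;        /* enable per-test error reports */
    }
}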
int main(int argc, char *argv[])
{
    int buf[2];
    MPI_Win win;
    MPI_Errhandler newerr;
    int i;

    MTest_Init(&argc, &argv);

    /* Run this test multiple times to expose storage leaks (we found a leak
     * of error handlers with this test) */
    for (i = 0; i < 1000; i++) {
        calls = 0;
        MPI_Win_create(buf, 2 * sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
        mywin = win;

        MPI_Win_create_errhandler(eh, &newerr);
        MPI_Win_set_errhandler(win, newerr);
        MPI_Win_call_errhandler(win, MPI_ERR_OTHER);
        MPI_Errhandler_free(&newerr);
        if (calls != 1) {
            errs++;
            printf("Error handler not called\n");
        }
        MPI_Win_free(&win);
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
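/* File-scope state assumed by the loop above, plus a sketch of the
 * window error handler eh (the body is illustrative): */
static int calls = 0;
static int errs = 0;
static MPI_Win mywin;

static void eh(MPI_Win * win, int *err, ...)
{
    if (*win != mywin) {
        errs++;
        printf("eh called on unexpected window\n");
    }
    calls++;                    /* main checks this is exactly 1 per pass */
}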
int main(int argc, char *argv[])
{
    int errs = 0;
    MPI_Win win;
    int cnt, namelen;
    char name[MPI_MAX_OBJECT_NAME], nameout[MPI_MAX_OBJECT_NAME];

    MTest_Init(&argc, &argv);

    cnt = 0;
    while (MTestGetWin(&win, 1)) {
        if (win == MPI_WIN_NULL)
            continue;

        sprintf(name, "win-%d", cnt);
        cnt++;
        MPI_Win_set_name(win, name);
        nameout[0] = 0;
        MPI_Win_get_name(win, nameout, &namelen);
        if (strcmp(name, nameout)) {
            errs++;
            printf("Unexpected name, was %s but should be %s\n", nameout, name);
        }

        MTestFreeWin(&win);
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int dims[2];
    int periods[2];
    int size, rank;
    MPI_Comm comm;

    MTest_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Request a size x size grid; this asks for more processes than are
     * available whenever size > 1, so Cart_create must fail */
    dims[0] = size;
    dims[1] = size;
    periods[0] = 0;
    periods[1] = 0;
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    err = MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &comm);

    if (err == MPI_SUCCESS) {
        errs++;
        printf("Cart_create returned success when dims > size\n");
    } else if (comm != MPI_COMM_NULL) {
        errs++;
        printf("Expected a null comm from cart create\n");
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char **argv)
{
    int rank, size, i;
    int data;
    int errors = 0;
    int result = -100;
    int correct_result;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    data = rank;

    MPI_Reduce(&data, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    MPI_Bcast(&result, 1, MPI_INT, 0, MPI_COMM_WORLD);
    correct_result = 0;
    for (i = 0; i < size; i++)
        correct_result += i;
    if (result != correct_result)
        errors++;

    MPI_Reduce(&data, &result, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
    MPI_Bcast(&result, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (result != 0)
        errors++;

    MPI_Reduce(&data, &result, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
    MPI_Bcast(&result, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (result != (size - 1))
        errors++;

    MTest_Finalize(errors);
    MPI_Finalize();
    return MTestReturnValue(errors);
}
int main(int argc, char *argv[])
{
    MPI_Win win;
    int flag, tmp, rank;
    int base[1024], errs = 0;
    MPI_Request req;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Win_create(base, 1024 * sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    if (rank == 0) {
        /* Rank 0 holds the exclusive lock across both barriers, so the
         * other ranks' lock requests stay pending in the meantime */
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 0, 0, win);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Win_unlock(0, win);
    } else {
        MPI_Barrier(MPI_COMM_WORLD);
        /* MPI_Win_lock must not block while rank 0 holds the lock, and
         * MPI_Test on the request-based put must be legal in that state */
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 0, 0, win);
        MPI_Rput(&tmp, 1, MPI_INT, 0, 0, 1, MPI_INT, win, &req);
        MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Win_unlock(0, win);
    }

    MPI_Win_free(&win);
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char *argv[])
{
    MPI_Info i1, i2;
    int errs = 0;
    char value[64];
    int flag;

    MTest_Init(&argc, &argv);

    MPI_Info_create(&i1);
    MPI_Info_create(&i2);

    MPI_Info_set(i1, (char *) "key1", (char *) "value1");
    MPI_Info_set(i2, (char *) "key2", (char *) "value2");

    MPI_Info_get(i1, (char *) "key2", 64, value, &flag);
    if (flag) {
        printf("Found key2 in info1\n");
        errs++;
    }
    MPI_Info_get(i1, (char *) "key1", 64, value, &flag);
    if (!flag) {
        errs++;
        printf("Did not find key1 in info1\n");
    } else if (strcmp(value, "value1")) {
        errs++;
        printf("Found wrong value (%s), expected value1\n", value);
    }

    MPI_Info_free(&i1);
    MPI_Info_free(&i2);
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char *argv[])
{
    int rank, size;
    int chunk = 128;
    int i;
    int *sb;
    int *rb;
    int status;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    for (i = 1; i < argc; ++i) {
        if (argv[i][0] != '-')
            continue;
        switch (argv[i][1]) {
            case 'm':
                chunk = atoi(argv[++i]);
                break;
            default:
                fprintf(stderr, "Unrecognized argument %s\n", argv[i]);
                MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
    }

    sb = (int *) malloc(size * chunk * sizeof(int));
    if (!sb) {
        perror("can't allocate send buffer");
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    rb = (int *) malloc(size * chunk * sizeof(int));
    if (!rb) {
        perror("can't allocate recv buffer");
        free(sb);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    for (i = 0; i < size * chunk; ++i) {
        sb[i] = rank + 1;
        rb[i] = 0;
    }

    /* fputs("Before MPI_Alltoall\n",stdout); */

    /* This should really send MPI_CHAR, but since sb and rb were allocated
     * as chunk*size*sizeof(int), the buffers are large enough */
    status = MPI_Alltoall(sb, chunk, MPI_INT, rb, chunk, MPI_INT, MPI_COMM_WORLD);

    /* fputs("Before MPI_Allreduce\n",stdout); */

    MTest_Finalize(status);
    free(sb);
    free(rb);
    MPI_Finalize();
    return MTestReturnValue(status);
}
int main(int argc, char **argv)
{
    int errs;

    MTest_Init(&argc, &argv);
    errs = test_communicators();
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char **argv)
{
    int rank, size, i, j;
    int table[MAX_PROCESSES][MAX_PROCESSES];
    int errors = 0;
    int participants;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* A maximum of MAX_PROCESSES processes can participate */
    if (size > MAX_PROCESSES)
        participants = MAX_PROCESSES;
    else
        participants = size;
    if (MAX_PROCESSES % participants) {
        fprintf(stderr, "Number of processors must divide %d\n", MAX_PROCESSES);
        MPI_Abort(MPI_COMM_WORLD, 1);
        exit(1);
    }
    if (rank < participants) {
        /* Determine what rows are my responsibility */
        int block_size = MAX_PROCESSES / participants;
        int begin_row = rank * block_size;
        int end_row = (rank + 1) * block_size;
        int send_count = block_size * MAX_PROCESSES;
        int recv_count = send_count;

        /* Paint my rows my color */
        for (i = begin_row; i < end_row; i++)
            for (j = 0; j < MAX_PROCESSES; j++)
                table[i][j] = rank + 10;

        /* Gather everybody's result together - sort of like an
         * inefficient allgather */
        for (i = 0; i < participants; i++) {
            void *sendbuf = (i == rank ? MPI_IN_PLACE : &table[begin_row][0]);
            MPI_Gather(sendbuf, send_count, MPI_INT,
                       &table[0][0], recv_count, MPI_INT, i, MPI_COMM_WORLD);
        }

        /* Everybody should have the same table now.
         * This test does not in any way guarantee there are no errors;
         * print out a table or devise a smart test to make sure it's correct */
        for (i = 0; i < MAX_PROCESSES; i++) {
            if (table[i][0] - table[i][MAX_PROCESSES - 1] != 0)
                errors++;
        }
    }
    MTest_Finalize(errors);
    MPI_Finalize();
    return MTestReturnValue(errors);
}
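/* MAX_PROCESSES is defined elsewhere; a plausible value that satisfies
 * the divisibility check above: */
#define MAX_PROCESSES 10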
int main(int argc, char *argv[])
{
    MPI_Status status;
    MPI_Comm comm, scomm;
    int a[10], b[10];
    int buf[BUFSIZE], *bptr, bl, i, j, rank, size, color, errs = 0;

    MTest_Init(0, 0);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    color = rank % 2;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &scomm);
    MPI_Intercomm_create(scomm, 0, MPI_COMM_WORLD, 1 - color, 52, &comm);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_remote_size(comm, &size);
    MPI_Buffer_attach(buf, BUFSIZE);

    for (j = 0; j < 10; j++) {
        for (i = 0; i < 10; i++) {
            a[i] = (rank + 10 * j) * size + i;
        }
        MPI_Bsend(a, 10, MPI_INT, 0, 27 + j, comm);
    }
    if (rank == 0) {
        for (i = 0; i < size; i++) {
            for (j = 0; j < 10; j++) {
                int k;
                status.MPI_TAG = -10;
                status.MPI_SOURCE = -20;
                MPI_Recv(b, 10, MPI_INT, i, 27 + j, comm, &status);
                if (status.MPI_TAG != 27 + j) {
                    errs++;
                    printf("Wrong tag = %d\n", status.MPI_TAG);
                }
                if (status.MPI_SOURCE != i) {
                    errs++;
                    printf("Wrong source = %d\n", status.MPI_SOURCE);
                }
                for (k = 0; k < 10; k++) {
                    if (b[k] != (i + 10 * j) * size + k) {
                        errs++;
                        printf("received b[%d] = %d from %d tag %d\n", k, b[k], i, 27 + j);
                    }
                }
            }
        }
    }
    MPI_Buffer_detach(&bptr, &bl);

    MPI_Comm_free(&scomm);
    MPI_Comm_free(&comm);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
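/* BUFSIZE is defined elsewhere. The attached buffer must hold the ten
 * bsends of ten ints that each process posts before any are received;
 * a sketch that is provably large enough for that pattern: */
#define BUFSIZE (10 * (10 * sizeof(int) + MPI_BSEND_OVERHEAD))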
int main(int argc, char **argv)
{
    int rank, nproc, mpi_errno;
    int i, ncomm;
    int *ranks;
    /* This test expects communicator creation to exhaust a resource
     * eventually; errs starts at 1 and is cleared when MPI_Comm_create
     * returns the expected failure */
    int errs = 1;
    MPI_Comm *comm_hdls;
    MPI_Group world_group;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);
    ranks = malloc(sizeof(int) * nproc);
    ncomm = 0;

    for (i = 0; i < MAX_NCOMM; i++) {
        int incl = i % nproc;
        MPI_Group comm_group;

        /* Comms include ranks: 0; 1; 2; ...; 0; 1; ... */
        MPI_Group_incl(world_group, 1, &incl, &comm_group);

        /* Note: the comms we create all contain one rank from MPI_COMM_WORLD */
        mpi_errno = MPI_Comm_create(MPI_COMM_WORLD, comm_group, &comm_hdls[i]);

        if (mpi_errno == MPI_SUCCESS) {
            if (verbose)
                printf("%d: Created comm %d\n", rank, i);
            ncomm++;
        } else {
            if (verbose)
                printf("%d: Error creating comm %d\n", rank, i);
            MPI_Group_free(&comm_group);
            errs = 0;
            break;
        }

        MPI_Group_free(&comm_group);
    }

    /* Ranks outside a given group got MPI_COMM_NULL back, which must not
     * be passed to MPI_Comm_free */
    for (i = 0; i < ncomm; i++) {
        if (comm_hdls[i] != MPI_COMM_NULL)
            MPI_Comm_free(&comm_hdls[i]);
    }

    free(comm_hdls);
    free(ranks);
    MPI_Group_free(&world_group);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
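/* Constants assumed by the loop above (sketch): MAX_NCOMM should exceed
 * the number of context ids an implementation can allocate. */
#define MAX_NCOMM 100000

static int verbose = 0;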
int main(int argc, char **argv)
{
    double *vecout;
    MPI_Comm comm;
    int count, minsize = 2;
    int i, errs = 0;
    int rank, size;
    int *displs, *recvcounts;

    MTest_Init(&argc, &argv);

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        displs = (int *) malloc(size * sizeof(int));
        recvcounts = (int *) malloc(size * sizeof(int));

        for (count = 1; count < 9000; count = count * 2) {
            vecout = (double *) malloc(size * count * sizeof(double));

            for (i = 0; i < count; i++) {
                vecout[rank * count + i] = rank * count + i;
            }
            for (i = 0; i < size; i++) {
                recvcounts[i] = count;
                displs[i] = i * count;
            }
            /* With MPI_IN_PLACE, the send count and datatype are ignored,
             * so the (-1, MPI_DATATYPE_NULL) arguments are legal here */
            MPI_Allgatherv(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
                           vecout, recvcounts, displs, MPI_DOUBLE, comm);
            for (i = 0; i < count * size; i++) {
                if (vecout[i] != i) {
                    errs++;
                    if (errs < 10) {
                        fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
                    }
                }
            }
            free(vecout);
        }

        free(displs);
        free(recvcounts);
        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char **argv)
{
    int err, errs = 0;

    MTest_Init(&argc, &argv);
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
     * change the error handler to errors return */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* perform some tests */
    err = subarray_1d_c_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 1d subarray c test 1.\n", err);
    errs += err;

    err = subarray_1d_fortran_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 1d subarray fortran test 1.\n", err);
    errs += err;

    err = subarray_2d_c_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 2d subarray c test 1.\n", err);
    errs += err;

    err = subarray_2d_fortran_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 2d subarray fortran test 1.\n", err);
    errs += err;

    err = subarray_2d_c_test2();
    if (err && verbose)
        fprintf(stderr, "%d errors in 2d subarray c test 2.\n", err);
    errs += err;

    err = subarray_4d_c_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 4d subarray c test 1.\n", err);
    errs += err;

    err = subarray_4d_fortran_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 4d subarray fortran test 1.\n", err);
    errs += err;

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char **argv)
{
    MPI_Comm scomm;
    int errs = 0;

    MTest_Init(&argc, &argv);
    MPI_Comm_split(MPI_COMM_WORLD, 1, 0, &scomm);
    MPI_Comm_create_keyval(MPI_NULL_COPY_FN, delete_fn, &key, &errs);
    MPI_Comm_set_attr(scomm, key, a);
    MPI_Comm_free(&scomm);
    MPI_Comm_free_keyval(&key);
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
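/* File-scope pieces assumed above: the keyval, the attribute payload,
 * and the delete callback, which receives &errs as its extra_state.
 * A sketch; the validation in delete_fn is illustrative: */
static int key = MPI_KEYVAL_INVALID;
static int a[10];               /* attribute payload; contents unimportant */

static int delete_fn(MPI_Comm comm, int keyval, void *attr_val, void *extra_state)
{
    if (attr_val != (void *) a)
        ++*(int *) extra_state; /* count a mismatched attribute as an error */
    return MPI_SUCCESS;
}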
int main(int argc, char **argv)
{
    int rank, size, i, j;
    int table[MAX_PROCESSES][MAX_PROCESSES];
    int row[MAX_PROCESSES];
    int errors = 0;
    int participants;
    MPI_Comm comm;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    comm = MPI_COMM_WORLD;

    /* A maximum of MAX_PROCESSES processes can participate */
    if (size > MAX_PROCESSES) {
        participants = MAX_PROCESSES;
        MPI_Comm_split(MPI_COMM_WORLD, rank < MAX_PROCESSES, rank, &comm);
    } else {
        participants = size;
        MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    }
    if (rank < participants) {
        int send_count = MAX_PROCESSES;
        int recv_count = MAX_PROCESSES;

        /* If I'm the root (process 0), then fill out the big table */
        if (rank == 0)
            for (i = 0; i < participants; i++)
                for (j = 0; j < MAX_PROCESSES; j++)
                    table[i][j] = i + j;

        /* Scatter the big table to everybody's little table */
        MPI_Scatter(&table[0][0], send_count, MPI_INT,
                    &row[0], recv_count, MPI_INT, 0, comm);

        /* Now see if our row looks right */
        for (i = 0; i < MAX_PROCESSES; i++)
            if (row[i] != i + rank)
                errors++;
    }

    MPI_Comm_free(&comm);
    MTest_Finalize(errors);
    MPI_Finalize();
    return MTestReturnValue(errors);
}
int main(int argc, char **argv)
{
    int errcode, errclass, errs = 0;

    MTest_Init(&argc, &argv);
    MPI_Add_error_code(MPI_ERR_ARG, &errcode);
    MPI_Error_class(errcode, &errclass);
    if (errclass != MPI_ERR_ARG) {
        printf("ERROR: Got 0x%x, expected 0x%x\n", errclass, MPI_ERR_ARG);
        errs++;
    }
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char *argv[])
{
    int err, errs = 0;

    MTest_Init(&argc, &argv);
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
     * change the error handler to errors return */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    err = short_int_pack_test();
    errs += err;

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int main(int argc, char *argv[])
{
    int err;
    int buf[2];
    MPI_Win win;
    MPI_Comm comm;
    MPI_Errhandler newerr, olderr;

    MTEST_VG_MEM_INIT(buf, 2 * sizeof(int));
    MTest_Init(&argc, &argv);
    comm = MPI_COMM_WORLD;
    MPI_Win_create_errhandler(weh, &newerr);
    MPI_Win_create(buf, 2 * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
    mywin = win;

    MPI_Win_get_errhandler(win, &olderr);
    if (olderr != MPI_ERRORS_ARE_FATAL) {
        errs++;
        printf("Expected errors are fatal\n");
    }

    MPI_Win_set_errhandler(win, newerr);

    expected_err_class = MPI_ERR_RANK;
    err = MPI_Put(buf, 1, MPI_INT, -5, 0, 1, MPI_INT, win);
    if (calls != 1) {
        errs++;
        printf("newerr not called\n");
        calls = 1;
    }

    expected_err_class = MPI_ERR_OTHER;
    MPI_Win_call_errhandler(win, MPI_ERR_OTHER);
    if (calls != 2) {
        errs++;
        printf("newerr not called (2)\n");
    }

    MPI_Win_free(&win);
    MPI_Errhandler_free(&newerr);
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
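/* File-scope state assumed above, plus a sketch of the window error
 * handler weh; the body mirrors the checks implied by main: */
static int errs = 0;
static int calls = 0;
static int expected_err_class = MPI_SUCCESS;
static MPI_Win mywin;

static void weh(MPI_Win * win, int *err, ...)
{
    int errclass;

    MPI_Error_class(*err, &errclass);
    if (*win != mywin) {
        errs++;
        printf("weh called on unexpected window\n");
    }
    if (errclass != expected_err_class) {
        errs++;
        printf("Unexpected error class %d\n", errclass);
    }
    calls++;
}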
/* Finally, here's the main program */
int main(int argc, char *argv[])
{
    int n, stride, err, errs = 0;
    void *dest, *src;
    double avgTimeUser, avgTimeMPI;

    MTest_Init(&argc, &argv);

    if (getenv("MPITEST_VERBOSE"))
        verbose = 1;

    n = 30000;
    stride = 4;
    dest = (void *) malloc(n * sizeof(double));
    src = (void *) malloc(n * ((1 + stride) * sizeof(double)));
    /* Touch the source and destination arrays */
    memset(src, 0, n * (1 + stride) * sizeof(double));
    memset(dest, 0, n * sizeof(double));

    err = TestVecPackDouble(n, stride, &avgTimeUser, &avgTimeMPI, dest, src);
    errs += Report("VecPackDouble", "Pack", avgTimeMPI, avgTimeUser);

    err = TestVecUnPackDouble(n, stride, &avgTimeUser, &avgTimeMPI, src, dest);
    errs += Report("VecUnPackDouble", "Unpack", avgTimeMPI, avgTimeUser);

    err = TestIndexPackDouble(n, stride, &avgTimeUser, &avgTimeMPI, dest, src);
    errs += Report("VecIndexDouble", "Pack", avgTimeMPI, avgTimeUser);

    free(dest);
    free(src);

    dest = (void *) malloc(2 * n * sizeof(double));
    src = (void *) malloc((1 + n) * ((1 + stride) * sizeof(double)));
    memset(dest, 0, 2 * n * sizeof(double));
    memset(src, 0, (1 + n) * (1 + stride) * sizeof(double));
    err = TestVecPack2Double(n, stride, &avgTimeUser, &avgTimeMPI, dest, src);
    errs += Report("VecPack2Double", "Pack", avgTimeMPI, avgTimeUser);

    free(dest);
    free(src);

    fflush(stdout);
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
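/* verbose and Report are defined elsewhere. Report compares the MPI
 * datatype path against the hand-coded loop; a sketch, with the 10x
 * slowdown threshold an assumption of this sketch: */
static int verbose = 0;

static int Report(const char *name, const char *op, double timeMPI, double timeUser)
{
    if (verbose)
        printf("%s (%s): MPI %.2e s, user %.2e s\n", name, op, timeMPI, timeUser);
    if (timeMPI > 10 * timeUser) {
        printf("%s: MPI %s is more than 10x slower than user code\n", name, op);
        return 1;               /* counted as an error by main */
    }
    return 0;
}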
/*
 * This test attempts MPI_Recv with the source being a dead process. It should fail
 * and return an error. If we are testing sufficiently new MPICH, we look for the
 * MPIX_ERR_PROC_FAILED error code. These should be converted to look for the
 * standardized error code once it is finalized.
 */
int main(int argc, char **argv)
{
    int rank, size, err, errclass, toterrs = 0;
    char buf[10];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size < 2) {
        fprintf(stderr, "Must run with at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    if (rank == 0) {
        MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
        err = MPI_Recv(buf, 1, MPI_CHAR, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
        MPI_Error_class(err, &errclass);
        if (errclass == MPIX_ERR_PROC_FAILED) {
            printf(" No Errors\n");
            fflush(stdout);
        } else {
            fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                    errclass);
            toterrs++;
        }
#else
        if (err) {
            printf(" No Errors\n");
            fflush(stdout);
        } else {
            fprintf(stderr, "Program reported MPI_SUCCESS, but an error code was expected.\n");
            toterrs++;
        }
#endif
    }

    MPI_Finalize();
    return MTestReturnValue(toterrs);
}
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size;
    int minsize = 2, count;
    MPI_Comm comm;
    MPI_Op op;
    int *buf, i;

    MTest_Init(&argc, &argv);

    MPI_Op_create(mysum, 0, &op);

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        MPI_Comm_size(comm, &size);
        MPI_Comm_rank(comm, &rank);

        for (count = 1; count < 65000; count = count * 2) {
            /* Contiguous data */
            buf = (int *) malloc(count * sizeof(int));
            for (i = 0; i < count; i++)
                buf[i] = rank + i;
            MPI_Allreduce(MPI_IN_PLACE, buf, count, MPI_INT, op, comm);
            /* Check the results */
            for (i = 0; i < count; i++) {
                int result = i * size + (size * (size - 1)) / 2;
                if (buf[i] != result) {
                    errs++;
                    if (errs < 10) {
                        fprintf(stderr, "buf[%d] = %d expected %d\n", i, buf[i], result);
                    }
                }
            }
            free(buf);
        }
        MTestFreeComm(&comm);
    }

    MPI_Op_free(&op);
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
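/* mysum is the user op created above (with commute = 0); since main
 * checks for the plain sum over ranks, a sketch of the expected
 * definition, using the MPI_User_function signature directly: */
void mysum(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype)
{
    const int *cin = (const int *) cinPtr;
    int *cout = (int *) coutPtr;
    int i;

    for (i = 0; i < *count; i++)
        cout[i] += cin[i];      /* element-wise integer sum */
}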
int main(int argc, char *argv[])
{
    int mpi_errno;
    MPI_Datatype type, duptype;
    int rank;
    int errs = 0;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    foo_initialize();

    mpi_errno = MPI_Type_contiguous(2, MPI_INT, &type);
    mpi_errno = MPI_Type_set_attr(type, foo_keyval, NULL);
    mpi_errno = MPI_Type_dup(type, &duptype);

    my_func = "Free of type";
    mpi_errno = MPI_Type_free(&type);

    my_func = "free of duptype";
    mpi_errno = MPI_Type_free(&duptype);

    foo_finalize();

    if (rank == 0) {
        if (copy_called != 1) {
            printf("Copy called %d times; expected once\n", copy_called);
            errs++;
        }
        if (delete_called != 2) {
            printf("Delete called %d times; expected twice\n", delete_called);
            errs++;
        }
        fflush(stdout);
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
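/* The attribute machinery assumed above, as a sketch: foo_keyval carries
 * copy/delete callbacks whose invocation counts main verifies (one copy
 * from MPI_Type_dup, one delete per MPI_Type_free). The callback names
 * are assumptions: */
static int foo_keyval = MPI_KEYVAL_INVALID;
static int copy_called = 0;
static int delete_called = 0;
static const char *my_func = "";        /* call being tested, for messages */

static int foo_copy(MPI_Datatype type, int keyval, void *extra_state,
                    void *attr_in, void *attr_out, int *flag)
{
    copy_called++;
    *(void **) attr_out = attr_in;
    *flag = 1;                  /* propagate the attribute to the dup */
    return MPI_SUCCESS;
}

static int foo_delete(MPI_Datatype type, int keyval, void *attr_val, void *extra_state)
{
    delete_called++;
    return MPI_SUCCESS;
}

static void foo_initialize(void)
{
    MPI_Type_create_keyval(foo_copy, foo_delete, &foo_keyval, NULL);
}

static void foo_finalize(void)
{
    MPI_Type_free_keyval(&foo_keyval);
}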
int main(int argc, char *argv[])
{
    MPI_Comm comm;
    MPI_Datatype dtype;
    int count, *bufin, *bufout, size, i, minsize = 1, errs = 0;

    MTest_Init(&argc, &argv);

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL) {
            continue;
        }
        MPI_Comm_size(comm, &size);
        count = size * 2;
        bufin = (int *) malloc(count * sizeof(int));
        bufout = (int *) malloc(count * sizeof(int));
        if (!bufin || !bufout) {
            fprintf(stderr, "Unable to allocate space for buffers (%d)\n", count);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        for (i = 0; i < count; i++) {
            bufin[i] = i;
            bufout[i] = -1;
        }

        dtype = MPI_INT;
        MPI_Allreduce(bufin, bufout, count, dtype, MPI_SUM, comm);
        /* Check output */
        for (i = 0; i < count; i++) {
            if (bufout[i] != i * size) {
                fprintf(stderr, "Expected bufout[%d] = %d but found %d\n",
                        i, i * size, bufout[i]);
                errs++;
            }
        }
        free(bufin);
        free(bufout);
        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}