void mpi_file_set_errhandler_f(MPI_Fint *fh, MPI_Fint *errhandler, MPI_Fint *ierr)
{
    MPI_File       c_fh  = MPI_File_f2c(*fh);
    MPI_Errhandler c_err = MPI_Errhandler_f2c(*errhandler);

    *ierr = OMPI_INT_2_FINT(MPI_File_set_errhandler(c_fh, c_err));
}
int main( int argc, char *argv[] )
{
    int rank, errs = 0, rc;
    MPI_Errhandler ioerr_handler;
    MPI_Status status;
    MPI_File fh;
    char inbuf[80];

    /* user_handler increments handlerCalled when it fires; both are defined
       elsewhere in the test (see the sketch after this example). */
    MTest_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    /* Create a file to which to attach the handler */
    rc = MPI_File_open( MPI_COMM_WORLD, (char*)"test.txt",
                        MPI_MODE_CREATE | MPI_MODE_WRONLY | MPI_MODE_DELETE_ON_CLOSE,
                        MPI_INFO_NULL, &fh );
    if (rc) {
        errs++;
        printf( "Unable to open test.txt for writing\n" );
    }

    rc = MPI_File_create_errhandler( user_handler, &ioerr_handler );
    if (rc) {
        errs++;
        printf( "MPI_File_create_errhandler returned an error code: %d\n", rc );
    }

    rc = MPI_File_set_errhandler( fh, ioerr_handler );
    if (rc) {
        errs++;
        printf( "MPI_File_set_errhandler returned an error code: %d\n", rc );
    }

    /* Avoid leaking the errhandler; safe because handlers have
       reference-count semantics. */
    rc = MPI_Errhandler_free( &ioerr_handler );
    if (rc) {
        errs++;
        printf( "MPI_Errhandler_free returned an error code: %d\n", rc );
    }

    /* This should generate an error because the file mode is WRONLY */
    rc = MPI_File_read_at( fh, 0, inbuf, 80, MPI_BYTE, &status );
    if (handlerCalled != 1) {
        errs++;
        printf( "User-defined error handler was not called\n" );
    }

    rc = MPI_File_close( &fh );
    if (rc) {
        errs++;
        printf( "MPI_File_close returned an error code: %d\n", rc );
    }

    MTest_Finalize( errs );
    MPI_Finalize( );
    return 0;
}
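The test above relies on a user_handler callback and a handlerCalled flag that are defined elsewhere in its source file. A minimal sketch of what those definitions might look like (the names come from the test; the bodies are assumptions):

/* Assumed companion definitions for the test above; bodies are illustrative.
 * User-defined MPI file error handlers use the MPI_File_errhandler_function
 * signature: void fn(MPI_File *, int *, ...). */
static int handlerCalled = 0;

static void user_handler(MPI_File *fh, int *errcode, ...)
{
    if (*errcode != MPI_SUCCESS) {
        handlerCalled++;   /* record that the handler fired */
    }
}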
FORTRAN_API void FORT_CALL mpi_file_set_errhandler_(MPI_Fint *fh, MPI_Fint *err_handler, MPI_Fint *ierr)
{
    MPI_File fh_c;
    MPI_Errhandler err_handler_c;

    fh_c = MPI_File_f2c(*fh);
    err_handler_c = MPI_Errhandler_f2c(*err_handler);
    *ierr = MPI_File_set_errhandler(fh_c, err_handler_c);
}
void mpi_file_set_errhandler_f(MPI_Fint *fh, MPI_Fint *errhandler, MPI_Fint *ierr)
{
    MPI_File       c_fh  = MPI_File_f2c(*fh);
    MPI_Errhandler c_err = MPI_Errhandler_f2c(*errhandler);

    *ierr = OMPI_INT_2_FINT(MPI_File_set_errhandler(c_fh, c_err));

    /* On success, mark the handler as a Fortran callback, unless it is a
     * predefined handler. */
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr) &&
        OMPI_ERRHANDLER_TYPE_PREDEFINED != c_err->eh_mpi_object_type) {
        c_err->eh_fortran_function = true;
    }
}
raf_t MPI_Load_raf(char *name, MPI_Comm comm)
{
    raf_t raf = (raf_t)RTmalloc(sizeof(struct raf_struct_s));
    raf_init(raf, name);
    raf->blocksize = 65536;

    MPI_File f;
    MPI_Comm_size(comm, &(raf->workers));
    MPI_Comm_rank(comm, &(raf->rank));

    int e = MPI_File_open(comm, name, MPI_MODE_RDONLY, MPI_INFO_NULL, &f);
    if (e) {
        int i = 1024;
        char msg[1024];
        MPI_Error_string(e, msg, &i);
        Fatal(0, error, "err is %s\n", msg);
    }
    MPI_File_set_errhandler(f, MPI_ERRORS_ARE_FATAL);

    MPI_File_get_size(f, &(raf->size));
    if ((raf->size) % (raf->blocksize))
        Fatal(0, error, "file not multiple of block size");
    if (((raf->size) / (raf->blocksize)) % (raf->workers))
        Fatal(0, error, "block count not multiple of worker count");
    //Warning(info, "my share is %d", (raf->size)/(raf->workers));

    raf->data = RTmalloc((raf->size) / (raf->workers));
    if (1) {
        Warning(info, "using MPI_File_read_all");
        MPI_Datatype ftype;
        MPI_Type_vector((raf->size) / (raf->blocksize), (raf->blocksize),
                        (raf->blocksize) * (raf->workers), MPI_CHAR, &ftype);
        MPI_Type_commit(&ftype);
        MPI_File_set_view(f, (raf->blocksize) * (raf->rank), MPI_CHAR, ftype,
                          "native", MPI_INFO_NULL);
        MPI_File_read_all(f, raf->data, (raf->size) / (raf->workers), MPI_CHAR,
                          MPI_STATUS_IGNORE);
        MPI_File_close(&f);
        MPI_Type_free(&ftype);
    } else {
        Warning(info, "using MPI_File_read_at");
        int blockcount = ((raf->size) / (raf->blocksize)) / (raf->workers);
        for (int i = 0; i < blockcount; i++) {
            MPI_File_read_at(f, ((i * (raf->workers) + (raf->rank)) * (raf->blocksize)),
                             (raf->data) + (i * (raf->blocksize)), (raf->blocksize),
                             MPI_CHAR, MPI_STATUS_IGNORE);
        }
        MPI_File_close(&f);
    }

    raf->rq_tag = core_add(raf, request_service);
    raf->ack_tag = core_add(raf, receive_service);
    raf->shared.read = read_at;
    raf->shared.size = mpi_size;
    raf->shared.close = mpi_close;
    //Warning(info, "file loaded");
    return raf;
}
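The loader above checks the return code of MPI_File_open by hand and only afterwards installs MPI_ERRORS_ARE_FATAL on the opened file. An alternative, sketched below under the same variable names, is to install the handler on MPI_FILE_NULL first; per MPI semantics this changes the default file error handler, so a failing open also aborts instead of returning a code:

/* Sketch of an alternative: make MPI_ERRORS_ARE_FATAL the default for all
 * files, so MPI_File_open failures abort as well (names reused from the
 * loader above). */
MPI_File f;
MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
MPI_File_open(comm, name, MPI_MODE_RDONLY, MPI_INFO_NULL, &f);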
void Set_Errhand(struct comm_info* c_info)
{
#ifdef SET_ERRH
    /* MPI_Errhandler_set is the deprecated MPI-1 spelling of
     * MPI_Comm_set_errhandler. */
    if (c_info->communicator != MPI_COMM_NULL)
        MPI_Errhandler_set(c_info->communicator, c_info->ERR);
#ifdef EXT
    if (c_info->WIN != MPI_WIN_NULL)
        MPI_Win_set_errhandler(c_info->WIN, c_info->ERRW);
#endif
#ifdef MPIIO
    if (c_info->fh != MPI_FILE_NULL)
        MPI_File_set_errhandler(c_info->fh, c_info->ERRF);
#endif
#endif
}
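The handler objects consumed above (c_info->ERR, c_info->ERRW, c_info->ERRF) are assumed to have been created earlier in the benchmark. A hypothetical setup routine, with illustrative callback names that are not taken from the original code, could look like this:

/* Hypothetical creation of the handler objects used by Set_Errhand();
 * callback and function names here are assumptions for illustration. */
static void comm_err_cb(MPI_Comm *comm, int *errcode, ...) { /* report and abort */ }
#ifdef EXT
static void win_err_cb(MPI_Win *win, int *errcode, ...)    { /* report and abort */ }
#endif
#ifdef MPIIO
static void file_err_cb(MPI_File *fh, int *errcode, ...)   { /* report and abort */ }
#endif

void Init_Errhand(struct comm_info *c_info)
{
    MPI_Comm_create_errhandler(comm_err_cb, &c_info->ERR);
#ifdef EXT
    MPI_Win_create_errhandler(win_err_cb, &c_info->ERRW);
#endif
#ifdef MPIIO
    MPI_File_create_errhandler(file_err_cb, &c_info->ERRF);
#endif
}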
int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  setup_globals();

  /* Parse arguments. */
  int SCALE = 16;
  int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. degree */
  // if (argc >= 2) SCALE = atoi(argv[1]);
  // if (argc >= 3) edgefactor = atoi(argv[2]);
  char* name = argv[1];
  if (argc >= 3) SCALE = atoi(argv[2]);
  if (argc >= 4) edgefactor = atoi(argv[3]);
  // if (argc <= 1 || argc >= 4 || SCALE == 0 || edgefactor == 0) {
  //   if (rank == 0) {
  //     fprintf(stderr, "Usage: %s SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
  //   }
  if (argc <= 2 || argc >= 5 || SCALE == 0 || edgefactor == 0) {
    if (rank == 0) {
      fprintf(stderr, "Usage: %s filename SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
    }
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  uint64_t seed1 = 2, seed2 = 3;

  // const char* filename = getenv("TMPFILE");
  const char* filename = name;
  /* If filename is NULL, store data in memory */

  tuple_graph tg;
  tg.nglobaledges = (int64_t)(edgefactor) << SCALE;
  int64_t nglobalverts = (int64_t)(1) << SCALE;

  tg.data_in_file = (filename != NULL);

  if (tg.data_in_file) {
    printf("data in file \n");
    MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
    // MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile);
    MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile);
    MPI_File_set_size(tg.edgefile, tg.nglobaledges * sizeof(packed_edge));
    MPI_File_set_view(tg.edgefile, 0, packed_edge_mpi_type, packed_edge_mpi_type, "native", MPI_INFO_NULL);
    MPI_File_set_atomicity(tg.edgefile, 0);
  }

  /* Make the raw graph edges. */
  /* Get roots for BFS runs, plus maximum vertex with non-zero degree (used by
   * validator). */
  int num_bfs_roots = 64;
  int64_t* bfs_roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t));
  int64_t max_used_vertex = 0;

  double make_graph_start = MPI_Wtime();
  {
    /* Spread the two 64-bit numbers into five nonzero values in the correct
     * range. */
    uint_fast32_t seed[5];
    make_mrg_seed(seed1, seed2, seed);

    /* As the graph is being generated, also keep a bitmap of vertices with
     * incident edges. We keep a grid of processes, each row of which has a
     * separate copy of the bitmap (distributed among the processes in the
     * row), and then do an allreduce at the end. This scheme is used to avoid
     * non-local communication and reading the file separately just to find BFS
     * roots. */
    MPI_Offset nchunks_in_file = (tg.nglobaledges + FILE_CHUNKSIZE - 1) / FILE_CHUNKSIZE;
    int64_t bitmap_size_in_bytes = int64_min(BITMAPSIZE, (nglobalverts + CHAR_BIT - 1) / CHAR_BIT);
    if (bitmap_size_in_bytes * size * CHAR_BIT < nglobalverts) {
      bitmap_size_in_bytes = (nglobalverts + size * CHAR_BIT - 1) / (size * CHAR_BIT);
    }
    int ranks_per_row = ((nglobalverts + CHAR_BIT - 1) / CHAR_BIT + bitmap_size_in_bytes - 1) / bitmap_size_in_bytes;
    int nrows = size / ranks_per_row;
    int my_row = -1, my_col = -1;
    unsigned char* restrict has_edge = NULL;
    MPI_Comm cart_comm;
    {
      int dims[2] = {size / ranks_per_row, ranks_per_row};
      int periods[2] = {0, 0};
      MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
    }
    int in_generating_rectangle = 0;
    if (cart_comm != MPI_COMM_NULL) {
      in_generating_rectangle = 1;
      {
        int dims[2], periods[2], coords[2];
        MPI_Cart_get(cart_comm, 2, dims, periods, coords);
        my_row = coords[0];
        my_col = coords[1];
      }
      MPI_Comm this_col;
      MPI_Comm_split(cart_comm, my_col, my_row, &this_col);
      MPI_Comm_free(&cart_comm);
      has_edge = (unsigned char*)xMPI_Alloc_mem(bitmap_size_in_bytes);
      memset(has_edge, 0, bitmap_size_in_bytes);
      /* Every rank in a given row creates the same vertices (for updating the
       * bitmap); only one writes them to the file (or final memory buffer). */
      packed_edge* buf = (packed_edge*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge));
      MPI_Offset block_limit = (nchunks_in_file + nrows - 1) / nrows;
      // fprintf(stderr, "%d: nchunks_in_file = %" PRId64 ", block_limit = %" PRId64 " in grid of %d rows, %d cols\n", rank, (int64_t)nchunks_in_file, (int64_t)block_limit, nrows, ranks_per_row);
      if (tg.data_in_file) {
        tg.edgememory_size = 0;
        tg.edgememory = NULL;
      } else {
        int my_pos = my_row + my_col * nrows;
        int last_pos = (tg.nglobaledges % ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row) != 0) ?
                       (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row) : -1;
        int64_t edges_left = tg.nglobaledges % FILE_CHUNKSIZE;
        int64_t nedges = FILE_CHUNKSIZE * (tg.nglobaledges / ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row)) +
                         FILE_CHUNKSIZE * (my_pos < (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row)) +
                         (my_pos == last_pos ? edges_left : 0);
        /* fprintf(stderr, "%d: nedges = %" PRId64 " of %" PRId64 "\n", rank, (int64_t)nedges, (int64_t)tg.nglobaledges); */
        tg.edgememory_size = nedges;
        tg.edgememory = (packed_edge*)xmalloc(nedges * sizeof(packed_edge));
      }
      MPI_Offset block_idx;
      for (block_idx = 0; block_idx < block_limit; ++block_idx) {
        /* fprintf(stderr, "%d: On block %d of %d\n", rank, (int)block_idx, (int)block_limit); */
        MPI_Offset start_edge_index = int64_min(FILE_CHUNKSIZE * (block_idx * nrows + my_row), tg.nglobaledges);
        MPI_Offset edge_count = int64_min(tg.nglobaledges - start_edge_index, FILE_CHUNKSIZE);
        packed_edge* actual_buf = (!tg.data_in_file && block_idx % ranks_per_row == my_col) ?
                                  tg.edgememory + FILE_CHUNKSIZE * (block_idx / ranks_per_row) : buf;
        /* fprintf(stderr, "%d: My range is [%" PRId64 ", %" PRId64 ") %swriting into index %" PRId64 "\n", rank, (int64_t)start_edge_index, (int64_t)(start_edge_index + edge_count), (my_col == (block_idx % ranks_per_row)) ? "" : "not ", (int64_t)(FILE_CHUNKSIZE * (block_idx / ranks_per_row))); */
        if (!tg.data_in_file && block_idx % ranks_per_row == my_col) {
          assert(FILE_CHUNKSIZE * (block_idx / ranks_per_row) + edge_count <= tg.edgememory_size);
        }
        // debug
        char* wtxbuf = (char*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge));

        // generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf);
        generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf);
        if (tg.data_in_file && my_col == (block_idx % ranks_per_row)) {
          /* Try to spread writes among ranks */
          // MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
          // debug
          printf("%d: %d, %d\n", rank, start_edge_index, edge_count);
          int i;
          // for (i = start_edge_index; i < start_edge_index + 3; i++) {
          //   if (block_idx == 0) {
          //     for (i = 0; i < 3; i++) {
          //       if (edge_count > 3)
          //         printf("%d: %d\t%d\n", rank, actual_buf[i].v0, actual_buf[i].v1);
          //     }
          //   }
          // }
          MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
        }
        ptrdiff_t i;
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (i = 0; i < edge_count; ++i) {
          int64_t src = get_v0_from_edge(&actual_buf[i]);
          int64_t tgt = get_v1_from_edge(&actual_buf[i]);
          if (src == tgt) continue;
          if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
            has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT));
          }
          if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
            has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT));
          }
        }
      }
      free(buf);
#if 0
      /* The allreduce for each root acts like we did this: */
      MPI_Allreduce(MPI_IN_PLACE, has_edge, bitmap_size_in_bytes, MPI_UNSIGNED_CHAR, MPI_BOR, this_col);
#endif
      MPI_Comm_free(&this_col);
    } else {
      tg.edgememory = NULL;
      tg.edgememory_size = 0;
    }
    MPI_Allreduce(&tg.edgememory_size, &tg.max_edgememory_size, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);
#ifndef GEN_ONLY
    /* Find roots and max used vertex */
    {
      uint64_t counter = 0;
      int bfs_root_idx;
      for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
        int64_t root;
        while (1) {
          double d[2];
          make_random_numbers(2, seed1, seed2, counter, d);
          root = (int64_t)((d[0] + d[1]) * nglobalverts) % nglobalverts;
          counter += 2;
          if (counter > 2 * nglobalverts) break;
          int is_duplicate = 0;
          int i;
          for (i = 0; i < bfs_root_idx; ++i) {
            if (root == bfs_roots[i]) {
              is_duplicate = 1;
              break;
            }
          }
          if (is_duplicate) continue; /* Everyone takes the same path here */
          int root_ok = 0;
          if (in_generating_rectangle && (root / CHAR_BIT / bitmap_size_in_bytes) == my_col) {
            root_ok = (has_edge[(root / CHAR_BIT) % bitmap_size_in_bytes] & (1 << (root % CHAR_BIT))) != 0;
          }
          MPI_Allreduce(MPI_IN_PLACE, &root_ok, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
          if (root_ok) break;
        }
        bfs_roots[bfs_root_idx] = root;
      }
      num_bfs_roots = bfs_root_idx;

      /* Find maximum non-zero-degree vertex. */
      {
        int64_t i;
        max_used_vertex = 0;
        if (in_generating_rectangle) {
          for (i = bitmap_size_in_bytes * CHAR_BIT; i > 0; --i) {
            if (i > nglobalverts) continue;
            if (has_edge[(i - 1) / CHAR_BIT] & (1 << ((i - 1) % CHAR_BIT))) {
              max_used_vertex = (i - 1) + my_col * CHAR_BIT * bitmap_size_in_bytes;
              break;
            }
          }
        }
        MPI_Allreduce(MPI_IN_PLACE, &max_used_vertex, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);
      }
    }
#endif
    if (in_generating_rectangle) {
      MPI_Free_mem(has_edge);
    }
    if (tg.data_in_file) {
      MPI_File_sync(tg.edgefile);
    }
  }
  double make_graph_stop = MPI_Wtime();
  double make_graph_time = make_graph_stop - make_graph_start;
  if (rank == 0) { /* Not an official part of the results */
    fprintf(stderr, "graph_generation: %f s\n", make_graph_time);
  }
  // debug
#ifndef GEN_ONLY //!GEN_ONLY

  /* Make user's graph data structure. */
  double data_struct_start = MPI_Wtime();
  make_graph_data_structure(&tg);
  double data_struct_stop = MPI_Wtime();
  double data_struct_time = data_struct_stop - data_struct_start;
  if (rank == 0) { /* Not an official part of the results */
    fprintf(stderr, "construction_time: %f s\n", data_struct_time);
  }

  /* Number of edges visited in each BFS; a double so get_statistics can be
   * used directly. */
  double* edge_counts = (double*)xmalloc(num_bfs_roots * sizeof(double));

  /* Run BFS. */
  int validation_passed = 1;
  double* bfs_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
  double* validate_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
  uint64_t nlocalverts = get_nlocalverts_for_pred();
  int64_t* pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t));

  int bfs_root_idx;
  for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
    int64_t root = bfs_roots[bfs_root_idx];

    if (rank == 0) fprintf(stderr, "Running BFS %d\n", bfs_root_idx);

    /* Clear the pred array. */
    memset(pred, 0, nlocalverts * sizeof(int64_t));

    /* Do the actual BFS. */
    double bfs_start = MPI_Wtime();
    run_bfs(root, &pred[0]);
    double bfs_stop = MPI_Wtime();
    bfs_times[bfs_root_idx] = bfs_stop - bfs_start;
    if (rank == 0) fprintf(stderr, "Time for BFS %d is %f\n", bfs_root_idx, bfs_times[bfs_root_idx]);

    /* Validate result. */
    if (rank == 0) fprintf(stderr, "Validating BFS %d\n", bfs_root_idx);
    double validate_start = MPI_Wtime();
    int64_t edge_visit_count;
    int validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count);
    double validate_stop = MPI_Wtime();
    validate_times[bfs_root_idx] = validate_stop - validate_start;
    if (rank == 0) fprintf(stderr, "Validate time for BFS %d is %f\n", bfs_root_idx, validate_times[bfs_root_idx]);
    edge_counts[bfs_root_idx] = (double)edge_visit_count;
    if (rank == 0) fprintf(stderr, "TEPS for BFS %d is %g\n", bfs_root_idx, edge_visit_count / bfs_times[bfs_root_idx]);

    if (!validation_passed_one) {
      validation_passed = 0;
      if (rank == 0) fprintf(stderr, "Validation failed for this BFS root; skipping rest.\n");
      break;
    }
  }

  MPI_Free_mem(pred);
  free(bfs_roots);
  free_graph_data_structure();
#endif //!GEN_ONLY

  if (tg.data_in_file) {
    MPI_File_close(&tg.edgefile);
  } else {
    free(tg.edgememory);
    tg.edgememory = NULL;
  }

#ifndef GEN_ONLY
  /* Print results. */
  if (rank == 0) {
    if (!validation_passed) {
      fprintf(stdout, "No results printed for invalid run.\n");
    } else {
      int i;
      fprintf(stdout, "SCALE: %d\n", SCALE);
      fprintf(stdout, "edgefactor: %d\n", edgefactor);
      fprintf(stdout, "NBFS: %d\n", num_bfs_roots);
      fprintf(stdout, "graph_generation: %g\n", make_graph_time);
      fprintf(stdout, "num_mpi_processes: %d\n", size);
      fprintf(stdout, "construction_time: %g\n", data_struct_time);
      double stats[s_LAST];
      get_statistics(bfs_times, num_bfs_roots, stats);
      fprintf(stdout, "min_time: %g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_time: %g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_time: %g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_time: %g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_time: %g\n", stats[s_maximum]);
      fprintf(stdout, "mean_time: %g\n", stats[s_mean]);
      fprintf(stdout, "stddev_time: %g\n", stats[s_std]);
      get_statistics(edge_counts, num_bfs_roots, stats);
      fprintf(stdout, "min_nedge: %.11g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_nedge: %.11g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_nedge: %.11g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_nedge: %.11g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_nedge: %.11g\n", stats[s_maximum]);
      fprintf(stdout, "mean_nedge: %.11g\n", stats[s_mean]);
      fprintf(stdout, "stddev_nedge: %.11g\n", stats[s_std]);
      double* secs_per_edge = (double*)xmalloc(num_bfs_roots * sizeof(double));
      for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i];
      get_statistics(secs_per_edge, num_bfs_roots, stats);
      fprintf(stdout, "min_TEPS: %g\n", 1. / stats[s_maximum]);
      fprintf(stdout, "firstquartile_TEPS: %g\n", 1. / stats[s_thirdquartile]);
      fprintf(stdout, "median_TEPS: %g\n", 1. / stats[s_median]);
      fprintf(stdout, "thirdquartile_TEPS: %g\n", 1. / stats[s_firstquartile]);
      fprintf(stdout, "max_TEPS: %g\n", 1. / stats[s_minimum]);
      fprintf(stdout, "harmonic_mean_TEPS: %g\n", 1. / stats[s_mean]);
      /* Formula from:
       * Title: The Standard Errors of the Geometric and Harmonic Means and
       *        Their Application to Index Numbers
       * Author(s): Nilan Norris
       * Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448
       * Publisher(s): Institute of Mathematical Statistics
       * Stable URL: http://www.jstor.org/stable/2235723
       * (same source as in specification). */
      fprintf(stdout, "harmonic_stddev_TEPS: %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1)));
      free(secs_per_edge); secs_per_edge = NULL;
      free(edge_counts); edge_counts = NULL;
      get_statistics(validate_times, num_bfs_roots, stats);
      fprintf(stdout, "min_validate: %g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_validate: %g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_validate: %g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_validate: %g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_validate: %g\n", stats[s_maximum]);
      fprintf(stdout, "mean_validate: %g\n", stats[s_mean]);
      fprintf(stdout, "stddev_validate: %g\n", stats[s_std]);
#if 0
      for (i = 0; i < num_bfs_roots; ++i) {
        fprintf(stdout, "Run %3d: %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]);
      }
#endif
    }
  }
  free(bfs_times);
  free(validate_times);
#endif

  cleanup_globals();
  MPI_Finalize();
  return 0;
}
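Because the generator above installs MPI_ERRORS_ARE_FATAL as the default file error handler, every MPI-I/O call simply aborts on failure. If per-call diagnostics are preferred, a hedged sketch (reusing tg.edgefile and the write arguments from the code above) would be:

/* Sketch: switch the edge file to MPI_ERRORS_RETURN and report one write
 * failure explicitly (names reused from the generator above). */
char msg[MPI_MAX_ERROR_STRING];
int rc, len;

MPI_File_set_errhandler(tg.edgefile, MPI_ERRORS_RETURN);
rc = MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf,
                       edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
if (rc != MPI_SUCCESS) {
    MPI_Error_string(rc, msg, &len);
    fprintf(stderr, "edge file write failed: %s\n", msg);
    MPI_Abort(MPI_COMM_WORLD, 1);
}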
FORT_DLL_SPEC void FORT_CALL mpi_file_set_errhandler_( MPI_Fint *v1, MPI_Fint *v2, MPI_Fint *ierr )
{
    *ierr = MPI_File_set_errhandler( MPI_File_f2c(*v1), (MPI_Errhandler)*v2 );
}
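For completeness, the C routine that all of the wrappers above forward to is used along these lines; a minimal, self-contained sketch (the file name is a placeholder):

/* Minimal C usage sketch of MPI_File_set_errhandler (placeholder file name). */
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_File fh;

    MPI_Init(&argc, &argv);

    if (MPI_File_open(MPI_COMM_WORLD, "data.bin",
                      MPI_MODE_CREATE | MPI_MODE_WRONLY,
                      MPI_INFO_NULL, &fh) == MPI_SUCCESS) {
        /* Have later I/O calls on fh return error codes instead of aborting. */
        MPI_File_set_errhandler(fh, MPI_ERRORS_RETURN);
        MPI_File_close(&fh);
    }

    MPI_Finalize();
    return 0;
}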