int main (int argc, char ** argv)
{
    int err;
    MPI_Init (&argc, &argv);
    /* comm, rank and size are file-scope globals in this test */
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);
    init_vars();

    adios_init_noxml (comm);
    err = adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=2");
    if (err) {
        printE ("%s\n", adios_errmsg());
    }

    if (!err) err = declare_group ();
    if (!err) err = write_file ("reuse_dim.bp");
    if (!err) err = read_file ("reuse_dim.bp");

    adios_finalize (rank);
    fini_vars();
    MPI_Finalize ();
    return err;
}
ADIOS1CommonRead::ADIOS1CommonRead(const std::string &fileName, MPI_Comm mpiComm,
                                   const bool debugMode)
: ADIOS1Common(fileName, mpiComm, debugMode)
{
    Init();
    adios_read_init_method(m_ReadMethod, m_MPIComm, "");
}
int read_stepbystep () { ADIOS_FILE * f; float timeout_sec = 0.0; int steps = 0; int retval = 0; MPI_Comm comm = MPI_COMM_SELF; adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3"); printf ("\n--------- Read as stream ------------\n"); f = adios_read_open (fname, ADIOS_READ_METHOD_BP, comm, ADIOS_LOCKMODE_NONE, timeout_sec); if (adios_errno == err_file_not_found) { printf ("Stream not found after waiting %f seconds: %s\n", timeout_sec, adios_errmsg()); retval = adios_errno; } else if (adios_errno == err_end_of_stream) { printf ("Stream terminated before open. %s\n", adios_errmsg()); retval = adios_errno; } else if (f == NULL) { printf ("Error at opening stream: %s\n", adios_errmsg()); retval = adios_errno; } else { /* Processing loop over the steps (we are already in the first one) */ while (adios_errno != err_end_of_stream) { steps++; // steps start counting from 1 printf ("Step: %d\n", f->current_step); print_varinfo (f, f->current_step); // advance to 1) next available step with 2) blocking wait adios_advance_step (f, 0, timeout_sec); if (adios_errno == err_step_notready) { //printf ("No new step arrived within the timeout. Quit. %s\n", // adios_errmsg()); break; // quit while loop } } adios_read_close (f); } adios_read_finalize_method (ADIOS_READ_METHOD_BP); //printf ("We have processed %d steps\n", steps); return retval; }
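The helper print_varinfo() called in the streaming loop above is not shown. A minimal sketch of what such a helper could look like, assuming it only lists each variable of the given step with its dimensions (the helper name and its two arguments come from the example; the body is an illustration):

```c
/* Hypothetical sketch of the print_varinfo() helper called above: list each
   variable of the given step together with its dimensions. */
static void print_varinfo (ADIOS_FILE *f, int step)
{
    int i, j;
    printf ("  variables at step %d:\n", step);
    for (i = 0; i < f->nvars; i++)
    {
        ADIOS_VARINFO *v = adios_inq_var_byid (f, i);
        printf ("    %s", f->var_namelist[i]);
        for (j = 0; j < v->ndim; j++)
            printf ("%s%llu", (j == 0 ? "  [" : ", "), (unsigned long long) v->dims[j]);
        printf ("%s", v->ndim > 0 ? "]\n" : "  (scalar)\n");
        adios_free_varinfo (v);
    }
}
```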
void ToolsAdiosParallel::convertToText()
{
    if (m_options.data.size() == 0)
        throw std::runtime_error("No datasets requested");

    /* initialize the read method once; the third argument takes method
       parameters (e.g. "verbose=2"), not a dataset name */
    adios_read_init_method(ADIOS_READ_METHOD_BP, comm, "");

    for (size_t i = 0; i < m_options.data.size(); ++i)
    {
        ADIOS_VARINFO *pVarInfo;
        // get name of dataset to print
        std::string nodeName = m_options.data[i];
        uint8_t *P;
        int varElement = 1;
        int varTypeSize = 0;

        pVarInfo = adios_inq_var(pFile, nodeName.c_str());
        varTypeSize = adios_type_size(pVarInfo->type, NULL);

        // get number of elements combined in a dataset
        for (int j = 0; j < pVarInfo->ndim; j++)
        {
            varElement = varElement * pVarInfo->dims[j];
        }

        // allocate memory for the whole dataset
        P = (uint8_t*) malloc (sizeof(uint8_t) * varTypeSize * varElement);

        adios_schedule_read(pFile, NULL, nodeName.c_str(), 0, 1, P);
        adios_perform_reads(pFile, 1);

        if (pVarInfo->ndim > 0)
        {
            for (int k = 0; k < varElement; k++)
            {
                printValue(pVarInfo->type, &P[k*varTypeSize]);
            }
        }
        else
        {
            // scalars are available directly from the metadata
            printValue(pVarInfo->type, pVarInfo->value);
        }
        adios_free_varinfo(pVarInfo);
        free(P);
    }
}
int main (int argc, char ** argv) { MPI_Comm comm = 0; // dummy mpi /* ADIOS variables declarations for matching gwrite_temperature.ch */ uint64_t adios_groupsize, adios_totalsize; int64_t g; int64_t f; int64_t Tid, Pid, Vid; // variable IDs char dimstr[32]; sprintf (dimstr, "%d,%d", NX, NY); adios_init_noxml (comm); adios_set_max_buffer_size (1); adios_declare_group (&g, "vars", "", adios_flag_yes); adios_select_method (g, "POSIX", "", ""); Tid = adios_define_var (g, "T" ,"", adios_double, dimstr, dimstr, "0,0"); adios_set_transform (Tid, "none"); Pid = adios_define_var (g, "P" ,"", adios_double, dimstr, dimstr, "0,0"); adios_set_transform (Pid, "none"); Vid = adios_define_var (g, "V" ,"", adios_double, dimstr, dimstr, "0,0"); adios_set_transform (Vid, "none"); adios_read_init_method(ADIOS_READ_METHOD_BP,0,""); if (adios_query_is_method_available (ADIOS_QUERY_METHOD_ALACRITY)) { adios_set_transform (Tid, "alacrity"); adios_set_transform (Pid, "alacrity"); adios_set_transform (Vid, "alacrity"); printf ("Turned on ALACRITY transformation for array variables\n"); } adios_open (&f, "vars", "vars.bp", "w", comm); adios_groupsize = 3*NX*NY*sizeof(double); adios_group_size (f, adios_groupsize, &adios_totalsize); adios_write (f, "T", T); adios_write (f, "P", P); adios_write (f, "V", V); adios_close (f); adios_finalize (0); return 0; }
int main(int argc, char **argv) { MPI_Init(&argc, &argv); const char *cmd = *argv; SHIFT; if (argc != 1) { if (rank == 0) fprintf(stderr, "Usage: %s <BP filename>\n", cmd); MPI_Abort(COMM, 1); } const char *bp_filename = *argv; SHIFT; MPI_Comm_rank(COMM, &rank); MPI_Comm_size(COMM, &size); adios_read_init_method(ADIOS_READ_METHOD_BP, COMM, ""); if (rank == 0) fprintf(stderr, "Starting file-mode writeblock tests on %s (%d ranks)...\n", bp_filename, size); test_file_mode_reads(bp_filename); adios_read_finalize_method(ADIOS_READ_METHOD_BP); MPI_Finalize(); }
int read_all () { ADIOS_FILE * f; float timeout_sec = 0.0; int steps = 0; int retval = 0; MPI_Comm comm = MPI_COMM_SELF; adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3"); printf ("\n--------- Read as file ------------\n"); f = adios_read_open_file (fname, ADIOS_READ_METHOD_BP, comm); if (f == NULL) { printf ("Error at opening file: %s\n", adios_errmsg()); retval = adios_errno; } else { /* Processing all the steps at once */ print_varinfo (f, 0); adios_read_close (f); } adios_read_finalize_method (ADIOS_READ_METHOD_BP); return retval; }
int read_scalar () { ADIOS_FILE * f; float timeout_sec = 0.0; int steps = 0; int retval = 0; MPI_Comm comm = MPI_COMM_SELF; adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3"); printf ("\n--------- Read all instances of the scalar 'O' ------------\n"); f = adios_read_open_file (fname, ADIOS_READ_METHOD_BP, comm); if (f == NULL) { printf ("Error at opening file: %s\n", adios_errmsg()); retval = adios_errno; } else { /* Read the scalar O with writeblock selection */ print_scalar (f, "O"); adios_read_close (f); } adios_read_finalize_method (ADIOS_READ_METHOD_BP); return retval; }
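The helper print_scalar() used above ("read the scalar O with writeblock selection") is not shown. A minimal sketch under the assumption that the scalar is a double and that every writer's block of every step should be printed (name and signature taken from the call site; the body is an illustration):

```c
/* Hypothetical sketch of the print_scalar() helper used above: read every
   writer's instance of a scalar with a writeblock selection, step by step. */
static void print_scalar (ADIOS_FILE *f, const char *name)
{
    ADIOS_VARINFO *v = adios_inq_var (f, name);
    if (!v) { printf ("%s\n", adios_errmsg()); return; }
    int step, block;
    for (step = 0; step < v->nsteps; step++) {
        for (block = 0; block < v->nblocks[step]; block++) {
            double value = 0.0;   /* assumes the scalar is a double */
            ADIOS_SELECTION *sel = adios_selection_writeblock (block);
            adios_schedule_read (f, sel, name, step, 1, &value);
            adios_perform_reads (f, 1);
            printf ("  %s  step %d  block %d = %g\n", name, step, block, value);
            adios_selection_delete (sel);
        }
    }
    adios_free_varinfo (v);
}
```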
int main (int argc, char **argv){ int rank =0, size =0; int NX = 0; double *t = NULL; // this is an array we expect as a reference array double *t_ref = NULL; MPI_Comm comm = MPI_COMM_WORLD; diag_t diag = DIAG_OK; // to store the diagnostic information struct test_info test_result = {TEST_PASSED, "1D_arr_global_noxml"}; struct err_counts err = { 0, 0}; struct adios_tsprt_opts adios_opts; GET_ENTRY_OPTIONS(adios_opts, "Runs readers. It is recommended to run as many readers as writers."); // adios read initialization MPI_Init( &argc, &argv); MPI_Comm_rank (comm, &rank); // depending on the method SET_ERROR_IF_NOT_ZERO(adios_read_init_method(adios_opts.method, comm, adios_opts.adios_options), err.adios); RET_IF_ERROR(err.adios, rank); // I will be working with streams so the lock mode is necessary, // return immediately if the stream unavailable ADIOS_FILE *adios_handle = adios_read_open(FILE_NAME, adios_opts.method, comm, ADIOS_LOCKMODE_NONE, 0.0); if ( !adios_handle){ p_error("Quitting ... (%d) %s\n", adios_errno, adios_errmsg()); return DIAG_ERR; } // define portions of data how they will be read ADIOS_SELECTION *sel = NULL; ADIOS_VARINFO *avi = NULL; // for storing the variables char buf[STR_BUFFER_SIZE]; int step = 0; // read how many processors wrote that array avi = adios_inq_var (adios_handle, "size"); if (!avi){ p_error("rank %d: Quitting ... (%d) %s\n", rank, adios_errno, adios_errmsg()); diag = DIAG_ERR; goto close_adios; } size = *((int*)avi->value); adios_free_varinfo(avi); avi = NULL; // if I run the more readers than writers; just release // the excessive readers if (rank >= size){ p_info("rank %d: I am an excessive rank. Nothing to read ...\n", rank); diag = DIAG_OK; goto close_adios; } // read the size of the array avi = adios_inq_var (adios_handle, "NX"); if (!avi){ p_error("rank %d: Quitting ... (%d) %s\n", rank, adios_errno, adios_errmsg()); diag = DIAG_ERR; goto close_adios; } // I expect a scalar that will tell me the size of an array assert(0 == avi->ndim); assert(adios_integer == avi->type); NX = *((int*)avi->value); // I don't need that variable any more adios_free_varinfo(avi); assert(NX_DIM == NX); avi = NULL; // this will define the slice that we want to read; each rank should // read its own slice written by a corresponding writer rank uint64_t count[1] = { NX }; uint64_t start[1] = { 0 }; start[0] = rank*NX; sel = adios_selection_boundingbox(1,start, count); if( !sel ){ p_error("rank %d: Quitting ... (%d) %s\n", rank, adios_errno, adios_errmsg()); diag = DIAG_ERR; goto close_adios; } // make the reference array with reference values I expect to get t_ref = calloc(NX, sizeof(double)); if (gen_1D_array(t_ref, NX, rank) == DIAG_ERR){ p_error("Generating 1D array. 
Quitting ...\n"); diag = DIAG_ERR; goto close_adios; } // allocate the memory for the actual array to be read t = calloc(NX, sizeof(double)); if (adios_schedule_read(adios_handle, sel, "var_1d_array",0,1,t) != 0){ p_error("rank %d: Quitting ...(%d) %s\n", rank, adios_errno, adios_errmsg()); diag = DIAG_ERR; goto just_clean; } // not sure if this assumption is correct; difficult to find in the ADIOS sources if (adios_perform_reads(adios_handle, 1) != 0){ p_error("rank %d: Quitting ...(%d) %s\n", rank, adios_errno, adios_errmsg()); diag = DIAG_ERR; goto just_clean; } sprintf(buf, "Rank %d: var_1d_array: step %d: t: ", rank, step); int i = 0; for(i=0; i < NX; ++i){ if( t[i] != t_ref[i] ){ p_test_failed("%s: rank %d: for t[%d] (expected %.1f, got %.1f)\n", test_result.name, rank, i, t_ref[i], t[i] ); test_result.result = TEST_FAILED; break; } } if (TEST_PASSED == test_result.result) p_test_passed("%s: rank %d\n", test_result.name, rank); just_clean: // clean everything adios_selection_delete(sel); sel = NULL; free(t); t = NULL; free(t_ref); t_ref = NULL; close_adios: CLOSE_ADIOS_READER(adios_handle, adios_opts.method); if ((DIAG_OK == diag) && (TEST_PASSED == test_result.result)) { return 0; } else { return 1; } }
int main (int argc, char ** argv)
{
    int err;
    int steps = 0, curr_step;
    int retval = 0;

    MPI_Init (&argc, &argv);
    comm = MPI_COMM_WORLD;
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &numproc);

    if (processArgs(argc, argv)) {
        return 1;
    }

    print0("Input stream = %s\n", infilename);
    print0("Output stream = %s\n", outfilename);
    print0("Read method = %s (id=%d)\n", rmethodname, read_method);
    print0("Read method parameters = \"%s\"\n", rmethodparams);
    print0("Write method = %s\n", wmethodname);
    print0("Write method parameters = \"%s\"\n", wmethodparams);

    err = adios_read_init_method(read_method, comm,
                                 "max_chunk_size=100; "
                                 "app_id =32767; \n"
                                 "verbose= 3;"
                                 "poll_interval = 100;");
    if (err) {   /* report only if the init actually failed */
        print0 ("%s\n", adios_errmsg());
    }

    adios_init_noxml(comm);

    print0 ("Waiting to open stream %s...\n", infilename);
    f = adios_read_open_stream (infilename, read_method, comm,
                                ADIOS_LOCKMODE_ALL, timeout_sec);
    if (adios_errno == err_file_not_found)
    {
        print ("rank %d: Stream not found after waiting %d seconds: %s\n",
               rank, timeout_sec, adios_errmsg());
        retval = adios_errno;
    }
    else if (adios_errno == err_end_of_stream)
    {
        print ("rank %d: Stream terminated before open. %s\n", rank, adios_errmsg());
        retval = adios_errno;
    }
    else if (f == NULL)
    {
        print ("rank %d: Error at opening stream: %s\n", rank, adios_errmsg());
        retval = adios_errno;
    }
    else
    {
        // process steps here...
        if (f->current_step != 0) {
            print ("rank %d: WARNING: First %d steps were missed by open.\n",
                   rank, f->current_step);
        }

        while (1)
        {
            steps++; // start counting from 1

            print0 ("File info:\n");
            print0 (" current step: %d\n", f->current_step);
            print0 (" last step: %d\n", f->last_step);
            print0 (" # of variables: %d:\n", f->nvars);

            retval = process_metadata(steps);
            if (retval) break;

            retval = read_write(steps);
            if (retval) break;

            // advance to 1) next available step with 2) blocking wait
            curr_step = f->current_step; // save for final bye print
            adios_advance_step (f, 0, timeout_sec);

            if (adios_errno == err_end_of_stream)
            {
                break; // quit while loop
            }
            else if (adios_errno == err_step_notready)
            {
                print ("rank %d: No new step arrived within the timeout. Quit. %s\n",
                       rank, adios_errmsg());
                break; // quit while loop
            }
            else if (f->current_step != curr_step+1)
            {
                // we missed some steps
                print ("rank %d: WARNING: steps %d..%d were missed when advancing.\n",
                       rank, curr_step+1, f->current_step-1);
            }
        }

        adios_read_close (f);
    }

    print0 ("Bye after processing %d steps\n", steps);

    adios_read_finalize_method (read_method);
    adios_finalize (rank);
    MPI_Finalize ();
    return retval;
}
int main (int argc, char ** argv) { int err,i,M; int iconnect; MPI_Init (&argc, &argv); MPI_Comm_rank (wcomm, &wrank); MPI_Comm_size (wcomm, &wsize); if (argc < 2) { Usage(); return 1; } errno = 0; M = strtol (argv[1], NULL, 10); if (errno || M < 1 || M > wsize) { printE("Invalid 1st argument %s\n", argv[1]); Usage(); return 1; } iconnect = (wrank >= wsize-M); // connect to server from ranks N-M, N-M+1, ..., N MPI_Comm_split (wcomm, iconnect, wrank+M-wsize, &subcomm); MPI_Comm_rank (subcomm, &subrank); MPI_Comm_size (subcomm, &subsize); if (iconnect) { if (subsize != M) { printE ("Something wrong with communicator split: N=%d, M=%d, splitted size=%d\n", wsize, M, subsize); return 2; } log ("connect as subrank %d\n", subrank); } alloc_vars(); adios_read_init_method(ADIOS_READ_METHOD_DATASPACES, subcomm, "verbose=4"); adios_init_noxml (subcomm); adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 1); if (iconnect) { adios_declare_group (&m_adios_group, "connect", "iter", adios_flag_yes); adios_select_method (m_adios_group, "DATASPACES", "verbose=4", ""); adios_define_var (m_adios_group, "ldim1", "", adios_integer, 0, 0, 0); adios_define_var (m_adios_group, "gdim1", "", adios_integer, 0, 0, 0); adios_define_var (m_adios_group, "offs1", "", adios_integer, 0, 0, 0); adios_define_var (m_adios_group, "a1", "", adios_integer, "ldim1", "gdim1", "offs1"); for (i=0; i<NSTEPS; i++) { if (!err) { set_vars (i); err = write_file (i); } } } log ("done with work, sync with others...\n"); MPI_Barrier (wcomm); log ("call adios_finalize...\n"); adios_finalize (wrank); log ("call adios_read_finalize_method...\n"); adios_read_finalize_method (ADIOS_READ_METHOD_DATASPACES); fini_vars(); MPI_Finalize (); return err; }
int main (int argc, char ** argv) { int err, step ; int do_write = 1; int do_read = 1; int m = 0; char write_method[16]; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); if (argc > 1) { m = strtol (argv[1], NULL, 10); if (errno) { printf("Invalid 1st argument %s\n", argv[1]); Usage(); return 1;} } if (argc > 2) { if (argv[2][0] == 'w' || argv[2][0] == 'W') { do_read = 0; } else if (argv[2][0] == 'r' || argv[2][0] == 'R') { do_write = 0; } else { printE ("Invalid command line argument %s. Allowed ones:\n" " w: do write only\n" " r: do read only\n", argv[2]); MPI_Finalize (); return 1; } } if (m==0) { read_method = ADIOS_READ_METHOD_BP; strcpy(write_method,"MPI"); } else { read_method = ADIOS_READ_METHOD_DATASPACES; strcpy(write_method,"DATASPACES"); } log ("Writing: %s method=%s\n" "Reading: %s method=%d\n", (do_write ? "yes" : "no"), write_method, (do_read ? "yes" : "no"), read_method); alloc_vars(); if (do_write) { adios_init_noxml (comm); adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 10); } if (do_read) { err = adios_read_init_method(read_method, comm, "verbose=2"); if (err) { printE ("%s\n", adios_errmsg()); } } if (do_write) { adios_declare_group (&m_adios_group, "selections", "iter", adios_flag_yes); adios_select_method (m_adios_group, write_method, "verbose=2", ""); define_vars(); for (step=0; step<NSTEPS; step++) { if (!err) { set_vars (step); err = write_file (step); sleep(1); } } adios_free_group (m_adios_group); } if (!err && do_read) err = read_points (); //if (!err && do_read) // err = read_writerblocks (); if (do_read) { adios_read_finalize_method (read_method); } fini_vars(); if (do_write) { adios_finalize (rank); } MPI_Finalize (); return err; }
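The read_points() routine invoked above (and the commented-out read_writerblocks()) are not shown. A minimal sketch of a point-selection read against an already opened handle, assuming a 1-D double variable named "t"; both the variable name and the coordinates are illustrative only:

```c
/* Hypothetical sketch of a point-selection read such as read_points() above.
   Assumes the stream is already open in f and contains a 1-D double "t". */
static int read_points_sketch (ADIOS_FILE *f)
{
    uint64_t pts[] = { 0, 3, 7 };   /* three 1-D point coordinates */
    double values[3];
    ADIOS_SELECTION *sel = adios_selection_points (1, 3, pts);
    if (!sel) {
        printf ("%s\n", adios_errmsg());
        return adios_errno;
    }
    adios_schedule_read (f, sel, "t", 0, 1, values);
    adios_perform_reads (f, 1);   /* blocking read */
    printf ("t at points 0,3,7 = %g %g %g\n", values[0], values[1], values[2]);
    adios_selection_delete (sel);
    return 0;
}
```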
int main (int argc, char ** argv) { int i, j, k,l; MPI_Comm comm_dummy = 0; /* MPI_Comm is defined through adios_read.h */ if (argc < 2) { printf("Usage: %s <BP-file>\n", argv[0]); return 1; } adios_read_init_method (ADIOS_READ_METHOD_BP, comm_dummy, "show_hidden_attrs"); ADIOS_FILE * f; f = adios_read_open_file (argv[1], ADIOS_READ_METHOD_BP, comm_dummy); if (f == NULL) { printf ("%s\n", adios_errmsg()); return -1; } /* For all variables */ printf(" Variables=%d:\n", f->nvars); for (i = 0; i < f->nvars; i++) { ADIOS_VARINFO * v = adios_inq_var_byid (f, i); adios_inq_var_stat (f, v, 0, 1); adios_inq_var_blockinfo (f, v); uint64_t total_size = adios_type_size (v->type, v->value); for (j = 0; j < v->ndim; j++) total_size *= v->dims[j]; printf(" %-9s %s", adios_type_to_string(v->type), f->var_namelist[i]); if (v->ndim == 0) { /* Scalars do not need to be read in, we get it from the metadata when using adios_inq_var */ printf(" = %s\n", value_to_string(v->type, v->value, 0)); } else { /* Arrays, print min/max statistics*/ printf("[%lld",v->dims[0]); for (j = 1; j < v->ndim; j++) printf(", %lld",v->dims[j]); //printf("] = \n"); if (v->type == adios_integer) printf("] : min=%d max=%d\n", (*(int*)v->statistics->min), (*(int*)v->statistics->max)); else if (v->type == adios_double) printf("] : min=%lg max=%lg\n", (*(double*)v->statistics->min), (*(double*)v->statistics->max)); /* Print block info */ for (l=0; l<v->nsteps; l++) { printf(" step %3d: \n", l); for (j=0; j<v->nblocks[l]; j++) { printf(" block %3d: [", j); for (k=0; k<v->ndim; k++) { printf("%3lld:%3lld", v->blockinfo[j].start[k], v->blockinfo[j].start[k]+v->blockinfo[j].count[k]-1); if (k<v->ndim-1) printf(", "); } printf("]\n"); } } } adios_free_varinfo (v); } /* variables */ /* For all attributes */ printf(" Attributes=%d:\n", f->nattrs); for (i = 0; i < f->nattrs; i++) { enum ADIOS_DATATYPES atype; int asize; void *adata; adios_get_attr_byid (f, i, &atype, &asize, &adata); printf(" %-9s %s = %s\n", adios_type_to_string(atype), f->attr_namelist[i], value_to_string(atype, adata, 0)); free(adata); } /* attributes */ adios_read_close (f); return 0; }
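The value_to_string() helper used for scalars and attributes above is not shown. A minimal sketch that formats one element of a given ADIOS type into a static buffer (not thread safe); the set of handled types is an assumption and can be extended as needed:

```c
/* Hypothetical sketch of the value_to_string() helper used above. */
static const char * value_to_string (enum ADIOS_DATATYPES type, void *data, int idx)
{
    static char buf[64];
    switch (type) {
        case adios_integer:          snprintf (buf, sizeof(buf), "%d", ((int32_t*)data)[idx]); break;
        case adios_unsigned_integer: snprintf (buf, sizeof(buf), "%u", ((uint32_t*)data)[idx]); break;
        case adios_real:             snprintf (buf, sizeof(buf), "%g", ((float*)data)[idx]); break;
        case adios_double:           snprintf (buf, sizeof(buf), "%g", ((double*)data)[idx]); break;
        case adios_string:           snprintf (buf, sizeof(buf), "\"%s\"", (char*)data); break;
        default:                     snprintf (buf, sizeof(buf), "(unsupported type)"); break;
    }
    return buf;
}
```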
int main (int argc, char ** argv) { int rank, size, i; MPI_Comm comm = MPI_COMM_WORLD; enum ADIOS_DATATYPES attr_type; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; int attr_size; void * data = NULL; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); adios_read_init_method (method, comm, "verbose=3"); adios_logger_open ("log_read_C", rank); ADIOS_FILE * f = adios_read_open ("attributes_C.bp", method, comm, ADIOS_LOCKMODE_NONE, 0.0); if (f == NULL) { log_error ("%s\n", adios_errmsg()); return -1; } for (i = 0; i < f->nattrs; i++) { adios_get_attr (f, f->attr_namelist[i], &attr_type, &attr_size, &data); log_test("rank %d: attr: %s %s = ", rank, adios_type_to_string(attr_type), f->attr_namelist[i]); int type_size = adios_type_size (attr_type, data); int nelems = attr_size / type_size; int k; char *p = (char*)data; for (k=0; k<nelems; k++) { if (k>0) log_test(", "); switch (attr_type) { case adios_integer: log_test ("%d", *(int *)p); break; case adios_double: log_test ("%e", *(double *)p); break; case adios_string: log_test ("\"%s\"", (char *)p); break; case adios_string_array: log_test ("\"%s\"", *(char **)p); break; default: log_test ("??????\n"); } p=p+type_size; } log_test("\n"); free (data); data = 0; } adios_read_close (f); MPI_Barrier (comm); adios_read_finalize_method (ADIOS_READ_METHOD_BP); adios_logger_close(); MPI_Finalize (); return 0; }
int worker(int argc, char* argv[]) { TAU_PROFILE_TIMER(timer, __func__, __FILE__, TAU_USER); TAU_PROFILE_START(timer); my_printf("%d of %d In worker B\n", myrank, commsize); static bool announced = false; /* validate input */ validate_input(argc, argv); my_printf("Worker B will execute until it sees n iterations.\n", iterations); /* ADIOS: These declarations are required to match the generated * gread_/gwrite_ functions. (And those functions are * generated by calling 'gpp.py adios_config.xml') ... * EXCEPT THAT THE generation of Reader code is broken. * So, we will write the reader code manually. */ uint64_t adios_groupsize; uint64_t adios_totalsize; uint64_t adios_handle; void * data = NULL; uint64_t start[2], count[2]; int i, j, steps = 0; int NX = 10; int NY = 1; double t[NX]; double p[NX]; /* ADIOS: Can duplicate, split the world, whatever. * This allows you to have P writers to N files. * With no splits, everyone shares 1 file, but * can write lock-free by using different areas. */ MPI_Comm adios_comm, adios_comm_b_to_c; adios_comm = MPI_COMM_WORLD; //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm); adios_comm_b_to_c = MPI_COMM_WORLD; //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm_b_to_c); enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_FLEXPATH; adios_read_init_method(method, adios_comm, "verbose=3"); if (adios_errno != err_no_error) { fprintf (stderr, "rank %d: Error %d at init: %s\n", myrank, adios_errno, adios_errmsg()); exit(4); } if (send_to_c) { adios_init("adios_config.xml", adios_comm); } /* ADIOS: Set up the adios communications and buffers, open the file. */ ADIOS_FILE *fp; // file handler ADIOS_VARINFO *vi; // information about one variable ADIOS_SELECTION * sel; char adios_filename_a_to_b[256]; char adios_filename_b_to_c[256]; enum ADIOS_LOCKMODE lock_mode = ADIOS_LOCKMODE_NONE; double timeout_sec = 1.0; sprintf(adios_filename_a_to_b, "adios_a_to_b.bp"); sprintf(adios_filename_b_to_c, "adios_b_to_c.bp"); my_printf ("rank %d: Worker B opening file: %s\n", myrank, adios_filename_a_to_b); fp = adios_read_open(adios_filename_a_to_b, method, adios_comm, lock_mode, timeout_sec); if (adios_errno == err_file_not_found) { fprintf (stderr, "rank %d: Stream not found after waiting %d seconds: %s\n", myrank, timeout_sec, adios_errmsg()); exit(1); } else if (adios_errno == err_end_of_stream) { // stream has been gone before we tried to open fprintf (stderr, "rank %d: Stream terminated before open. 
%s\n", myrank, adios_errmsg()); exit(2); } else if (fp == NULL) { // some other error happened fprintf (stderr, "rank %d: Error %d at opening: %s\n", myrank, adios_errno, adios_errmsg()); exit(3); } else { my_printf("Found file %s\n", adios_filename_a_to_b); my_printf ("File info:\n"); my_printf (" current step: %d\n", fp->current_step); my_printf (" last step: %d\n", fp->last_step); my_printf (" # of variables: %d:\n", fp->nvars); vi = adios_inq_var(fp, "temperature"); adios_inq_var_blockinfo(fp, vi); printf ("ndim = %d\n", vi->ndim); printf ("nsteps = %d\n", vi->nsteps); printf ("dims[%llu][%llu]\n", vi->dims[0], vi->dims[1]); uint64_t slice_size = vi->dims[0]/commsize; if (myrank == commsize-1) { slice_size = slice_size + vi->dims[0]%commsize; } start[0] = myrank * slice_size; count[0] = slice_size; start[1] = 0; count[1] = vi->dims[1]; data = malloc (slice_size * vi->dims[1] * 8); /* Processing loop over the steps (we are already in the first one) */ while (adios_errno != err_end_of_stream && steps < iterations) { steps++; // steps start counting from 1 TAU_PROFILE_TIMER(adios_recv_timer, "ADIOS recv", __FILE__, TAU_USER); TAU_PROFILE_START(adios_recv_timer); sel = adios_selection_boundingbox (vi->ndim, start, count); adios_schedule_read (fp, sel, "temperature", 0, 1, data); adios_perform_reads (fp, 1); if (myrank == 0) printf ("--------- B Step: %d --------------------------------\n", fp->current_step); #if 0 printf("B rank=%d: [0:%lld,0:%lld] = [", myrank, vi->dims[0], vi->dims[1]); for (i = 0; i < slice_size; i++) { printf (" ["); for (j = 0; j < vi->dims[1]; j++) { printf ("%g ", *((double *)data + i * vi->dims[1] + j)); } printf ("]"); } printf (" ]\n\n"); #endif // advance to 1) next available step with 2) blocking wait adios_advance_step (fp, 0, timeout_sec); if (adios_errno == err_step_notready) { printf ("B rank %d: No new step arrived within the timeout. Quit. %s\n", myrank, adios_errmsg()); break; // quit while loop } TAU_PROFILE_STOP(adios_recv_timer); /* Do some exchanges with neighbors */ //do_neighbor_exchange(); /* "Compute" */ compute(steps); for (i = 0; i < NX; i++) { t[i] = steps*100.0 + myrank*NX + i; } for (i = 0; i < NY; i++) { p[i] = steps*1000.0 + myrank*NY + i; } if (send_to_c) { TAU_PROFILE_TIMER(adios_send_timer, "ADIOS send", __FILE__, TAU_USER); TAU_PROFILE_START(adios_send_timer); /* ADIOS: write to the next application in the workflow */ if (steps == 0) { adios_open(&adios_handle, "b_to_c", adios_filename_b_to_c, "w", adios_comm_b_to_c); } else { adios_open(&adios_handle, "b_to_c", adios_filename_b_to_c, "a", adios_comm_b_to_c); } /* ADIOS: Actually write the data out. * Yes, this is the recommended method, and this way, changes in * configuration with the .XML file will, even in the worst-case * scenario, merely require running 'gpp.py adios_config.xml' * and typing 'make'. */ #include "gwrite_b_to_c.ch" /* ADIOS: Close out the file completely and finalize. * If MPI is being used, this must happen before MPI_Finalize(). */ adios_close(adios_handle); TAU_PROFILE_STOP(adios_send_timer); #if 1 if (!announced) { SOS_val foo; foo.i_val = NX; SOS_pack(example_pub, "NX", SOS_VAL_TYPE_INT, foo); SOS_announce(example_pub); SOS_publish(example_pub); announced = true; } #endif } MPI_Barrier(adios_comm_b_to_c); } MPI_Barrier(MPI_COMM_WORLD); adios_read_close(fp); /* ADIOS: Close out the file completely and finalize. * If MPI is being used, this must happen before MPI_Finalize(). 
*/ adios_read_finalize_method(method); } if (send_to_c) { adios_finalize(myrank); } free(data); //MPI_Comm_free(&adios_comm); //MPI_Comm_free(&adios_comm_b_to_c); TAU_PROFILE_STOP(timer); /* exit */ return 0; }
int main (int argc, char ** argv)
{
    int rank, size, i, j;
    MPI_Comm comm = MPI_COMM_WORLD;
    enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP;
    ADIOS_SELECTION * sel;
    void * data = NULL;
    uint64_t start[2], count[2];

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);
    adios_read_init_method (method, comm, "verbose=4");
    adios_logger_open ("log_read_C", rank);

    ADIOS_FILE * f = adios_read_open ("global_array_C.bp", method, comm,
                                      ADIOS_LOCKMODE_NONE, 0);
    if (f == NULL) {
        log_error ("%s\n", adios_errmsg());
        return -1;
    }

    ADIOS_VARINFO * v = adios_inq_var (f, "temperature");

    /* Using fewer readers to read the global array back, i.e., non-uniform */
    uint64_t slice_size = v->dims[0]/size;
    start[0] = slice_size * rank;
    if (rank == size-1) /* last rank may read more lines */
        slice_size = slice_size + v->dims[0]%size;
    count[0] = slice_size;

    start[1] = 0;
    count[1] = v->dims[1];

    data = malloc (slice_size * v->dims[1] * sizeof (double));
    if (data == NULL) {
        log_error ("malloc failed.\n");
        return -1;
    }

    /* Read a subset of the temperature array */
    sel = adios_selection_boundingbox (v->ndim, start, count);
    adios_schedule_read (f, sel, "temperature", 0, 1, data);
    adios_perform_reads (f, 1);

    for (i = 0; i < slice_size; i++) {
        log_test ("rank %d: [%lld,%d:%lld]", rank, start[0]+i, 0, slice_size);
        for (j = 0; j < v->dims[1]; j++)
            log_test (" %6.6g", * ((double *)data + i * v->dims[1] + j));
        log_test ("\n");
    }

    free (data);
    adios_read_close (f);
    MPI_Barrier (comm);
    adios_read_finalize_method (method);
    adios_logger_close();
    MPI_Finalize ();
    return 0;
}
int main (int argc, char ** argv) { /* application data structures */ int rank; int NX, NY; double *t; int *p; /* MPI and ADIOS data structures */ MPI_Comm comm = MPI_COMM_WORLD; /* MPI and ADIOS setup */ MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); adios_read_init_method(ADIOS_READ_METHOD_FLEXPATH, comm, ""); /* First read in the scalars to calculate the size of the arrays */ /* get everything from single process - rank 0 for now*/ ADIOS_SELECTION process_select; process_select.type=ADIOS_SELECTION_WRITEBLOCK; process_select.u.block.index = rank; /* read the size of arrays using local inq_var */ /* Note: at this moment, timeout is not handled. It blocks until writer appears */ ADIOS_FILE* afile = adios_read_open("arrays", ADIOS_READ_METHOD_FLEXPATH, comm, ADIOS_LOCKMODE_NONE, 30.0); /* Read arrays for each time step */ while(adios_errno != err_end_of_stream){ ADIOS_VARINFO* nx_info = adios_inq_var( afile, "NX"); if(nx_info->value) { NX = *((int *)nx_info->value); } ADIOS_VARINFO* ny_info = adios_inq_var( afile, "NY"); if(ny_info->value) { NY = *((int *)ny_info->value); } /* Allocate space for the arrays */ t = (double *) malloc (NX*NY*sizeof(double)); p = (int *) malloc (NX*sizeof(int)); memset(t, 0, NX*NY*sizeof(double)); memset(p, 0, NX*sizeof(int)); /* schedule a read of the arrays */ adios_schedule_read (afile, &process_select, "var_double_2Darray", 0, 1, t); adios_schedule_read (afile, &process_select, "var_int_1Darray", 0, 1, p); /* commit request and retrieve data */ adios_perform_reads (afile, 1); /* print result */ printf("Results Rank=%d Step=%d p[] = [%d, %d,...] t[][] = [%.2f, %.2f]\n", rank, afile->current_step, p[0], p[1], t[0], t[1]); /* block until next step is available (30 sec timeout unsupported) */ adios_release_step(afile); adios_advance_step(afile, 0, 30); MPI_Barrier (comm); /* shutdown ADIOS and MPI */ } adios_read_close(afile); /* wait until all readers finish */ adios_read_finalize_method(ADIOS_READ_METHOD_FLEXPATH); MPI_Finalize (); return 0; }
int main (int argc, char ** argv) { char filename [256]; int rank, size, i, j, k, token; MPI_Comm comm = MPI_COMM_WORLD; MPI_Status status; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; ADIOS_SELECTION * sel; void * data = NULL; uint64_t start[3], count[3], step = 0; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); adios_read_init_method (method, comm, "verbose=3"); /* adios_read_open_file() allows for seeing all timesteps in the file */ ADIOS_FILE * f = adios_read_open_file ("adios_globaltime.bp", method, comm); if (f == NULL) { printf ("%s\n", adios_errmsg()); return -1; } ADIOS_VARINFO * v = adios_inq_var (f, "temperature"); // read in two timesteps data = malloc (2 * v->dims[0] * v->dims[1] * sizeof (double)); if (data == NULL) { fprintf (stderr, "malloc failed.\n"); return -1; } // read in timestep 'rank' (up to 12) step = rank % 13; start[0] = 0; count[0] = v->dims[0]; start[1] = 0; count[1] = v->dims[1]; /* Read a subset of the temperature array */ sel = adios_selection_boundingbox (v->ndim, start, count); /* 2 steps from 'step' */ adios_schedule_read (f, sel, "temperature", step, 2, data); adios_perform_reads (f, 1); if (rank == 0) printf ("Array size of temperature [0:%lld,0:%lld]\n", v->dims[0], v->dims[1]); if (rank > 0) { MPI_Recv (&token, 1, MPI_INT, rank-1, 0, comm, &status); } printf("------------------------------------------------\n", rank); printf("rank=%d: \n", rank); for (i = 0; i < 2; i++) { printf ("step %lld = [\n", step+i); for (j = 0; j < v->dims[0]; j++) { printf (" ["); for (k = 0; k < v->dims[1]; k++) { printf ("%g ", ((double *)data) [ i * v->dims[0] * v->dims[1] + j * v->dims[1] + k]); } printf ("]\n"); } printf ("]\n"); } printf ("\n"); if (rank < size-1) { MPI_Send (&token, 1, MPI_INT, rank+1, 0, comm); } free (data); adios_free_varinfo (v); adios_read_close (f); MPI_Barrier (comm); adios_read_finalize_method (method); MPI_Finalize (); return 0; }
int main(int argc, char ** argv) { char xmlFileName[256]; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; MPI_Comm comm = MPI_COMM_WORLD; ADIOS_QUERY_TEST_INFO *queryInfo; ADIOS_FILE *f; MPI_Init(&argc, &argv); if (argc < 4 || argc > 7) { fprintf(stderr," usage: %s {input bp file} {xml file} {query engine (ALACRITY/FASTBIT)} [mode (FILE/stream)] [print points? (TRUE/false)] [read results? (true/FALSE)]\n", argv[0]); MPI_Abort(comm, 1); } else { strcpy(xmlFileName, argv[2]); } enum ADIOS_QUERY_METHOD query_method = ADIOS_QUERY_METHOD_UNKNOWN; if (strcasecmp(argv[3], "ALACRITY") == 0) { // init with ALACRITY //adios_query_init(ADIOS_QUERY_TOOL_ALACRITY); query_method = ADIOS_QUERY_METHOD_ALACRITY; } else if (strcasecmp(argv[3], "FASTBIT") == 0) { // init with FastBit query_method = ADIOS_QUERY_METHOD_FASTBIT; //fprintf(stderr,"FastBit not supported in this test yet, exiting...\n"); //MPI_Abort(comm, 1); } else { fprintf(stderr,"Unsupported query engine %s, exiting...\n", argv[3]); MPI_Abort(comm, 1); } const int use_streaming = (argc >= 5) && (strcasecmp(argv[4], "stream") == 0); const int read_results = (argc >= 6) && (strcasecmp(argv[5], "true") == 0); const int print_points = !(argc >= 7) || (strcasecmp(argv[6], "true") == 0); fprintf(stderr, "NOTE: Running the query in %s mode\n", use_streaming ? "STREAM" : "FILE"); fprintf(stderr, "NOTE: %s print query result points\n", print_points ? "WILL" : "WILL NOT"); fprintf(stderr, "NOTE: %s read data using query result point selection\n", read_results ? "WILL" : "WILL NOT"); // ADIOS init adios_read_init_method(method, comm, NULL); f = use_streaming ? adios_read_open(argv[1], method, comm, ADIOS_LOCKMODE_ALL, -1) : adios_read_open_file(argv[1], method, comm); if (f == NULL) { fprintf(stderr," can not open file %s \n", argv[1]); MPI_Abort(comm, 1); } // Parse the xml file to generate query info queryInfo = parseXml(xmlFileName, f); // perform query adios_query_set_method(queryInfo->query, query_method); performQuery(queryInfo, f, use_streaming, print_points, read_results); adios_read_close(f); adios_read_finalize_method(ADIOS_READ_METHOD_BP); MPI_Finalize(); return 0; }
int read_scalar_stepbystep () { ADIOS_FILE * f; float timeout_sec = 0.0; int steps = 0; int retval = 0; MPI_Comm comm = MPI_COMM_SELF; adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3"); printf ("\n--------- Read scalar in stream using varinfo->value ------------\n"); f = adios_read_open (fname, ADIOS_READ_METHOD_BP, comm, ADIOS_LOCKMODE_NONE, timeout_sec); if (adios_errno == err_file_not_found) { printf ("Stream not found after waiting %f seconds: %s\n", timeout_sec, adios_errmsg()); retval = adios_errno; } else if (adios_errno == err_end_of_stream) { printf ("Stream terminated before open. %s\n", adios_errmsg()); retval = adios_errno; } else if (f == NULL) { printf ("Error at opening stream: %s\n", adios_errmsg()); retval = adios_errno; } else { /* Processing loop over the steps (we are already in the first one) */ while (adios_errno != err_end_of_stream) { steps++; // steps start counting from 1 printf ("Step: %d\n", f->current_step); /* Check the scalar O with varinfo->value */ ADIOS_VARINFO * v = adios_inq_var (f, "NX"); int value = *(int*)v->value; printf ("Scalar NX = %d", value); if (value != block_count [f->current_step*nblocks_per_step*size]) { printf ("\tERROR expected = %llu", block_count [f->current_step*nblocks_per_step*size]); nerrors++; } printf ("\n"); // advance to 1) next available step with 2) blocking wait adios_advance_step (f, 0, timeout_sec); if (adios_errno == err_step_notready) { //printf ("No new step arrived within the timeout. Quit. %s\n", // adios_errmsg()); break; // quit while loop } } adios_read_close (f); } adios_read_finalize_method (ADIOS_READ_METHOD_BP); //printf ("We have processed %d steps\n", steps); return retval; }
int main (int argc, char ** argv) { int rank, j; int NX, NY; double *t; MPI_Comm comm = MPI_COMM_WORLD; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); adios_read_init_method(ADIOS_READ_METHOD_FLEXPATH, comm, ""); ADIOS_SELECTION *global_range_select; ADIOS_SELECTION scalar_block_select; scalar_block_select.type = ADIOS_SELECTION_WRITEBLOCK; scalar_block_select.u.block.index = 0; /* schedule_read of a scalar. */ int test_scalar = -1; ADIOS_FILE* afile = adios_read_open("arrays", ADIOS_READ_METHOD_FLEXPATH, comm, ADIOS_LOCKMODE_NONE, 0.0); int i; for(i=0; i<afile->nvars; i++){ printf("var: %s\n", afile->var_namelist[i]); } int ii = 0; while(adios_errno != err_end_of_stream){ /* get a bounding box - rank 0 for now*/ ADIOS_VARINFO *nx_info = adios_inq_var( afile, "/scalar/dim/NX"); ADIOS_VARINFO *ny_info = adios_inq_var( afile, "/scalar/dim/NY"); ADIOS_VARINFO *size_info = adios_inq_var( afile, "size"); ADIOS_VARINFO *arry = adios_inq_var( afile, "var_2d_array"); int nx_val = *((int*)nx_info->value); int ny_val = *((int*)ny_info->value); int size_val = *((int*)size_info->value); printf("nx: %d, ny: %d, size: %d\n", nx_val, ny_val, size_val); uint64_t xcount = arry->dims[0]; uint64_t ycount = arry->dims[1]; uint64_t starts[] = {0,0}; uint64_t counts[] = {xcount, ycount}; global_range_select = adios_selection_boundingbox(2, starts, counts); int nelem = xcount*ycount; if(nx_info->value) { NX = *((int *)nx_info->value); } if(ny_info->value){ NY= *((int*)ny_info->value); } if(rank == 0){ int n; printf("dims: [ "); for(n=0; n<arry->ndim; n++){ printf("%d ", (int)arry->dims[n]); } printf("]\n"); } /* Allocate space for the arrays */ int arr_size = sizeof(double) * nelem; t = (double *) malloc (arr_size); memset(t, 0, arr_size); //fprintf(stderr, "t %p\n", t); /* Read the arrays */ adios_schedule_read (afile, global_range_select, "var_2d_array", 0, 1, t); adios_schedule_read (afile, &scalar_block_select, "test_scalar", 0, 1, &test_scalar); adios_perform_reads (afile, 1); //sleep(20); printf("Rank=%d: test_scalar: %d step: %d, t[0,5+x] = [", rank, test_scalar, ii); for(j=0; j<nelem; j++) { printf(", %6.2f", t[j]); } printf("]\n\n"); adios_release_step(afile); adios_advance_step(afile, 0, 30); ii++; //MPI_Barrier (comm); //sleep(1); } // adios_read_close(afile); adios_read_finalize_method(ADIOS_READ_METHOD_FLEXPATH); MPI_Finalize (); return 0; }
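The scalar selection above is built by filling an ADIOS_SELECTION struct by hand. The read API also provides a constructor for writeblock selections, which is the more common pattern; a minimal sketch of the same per-writer scalar read using it (same "test_scalar" variable and handle as above):

```c
/* Sketch: the scalar read above done with the selection constructor
   instead of a hand-filled ADIOS_SELECTION struct. */
int test_scalar = -1;
ADIOS_SELECTION *block0 = adios_selection_writeblock (0);   /* writer block 0 */
adios_schedule_read (afile, block0, "test_scalar", 0, 1, &test_scalar);
adios_perform_reads (afile, 1);
adios_selection_delete (block0);
```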
int main (int argc, char ** argv) { int rank, size, i, j, npl, token; MPI_Comm comm = MPI_COMM_WORLD; MPI_Status status; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; ADIOS_SELECTION * sel; void * data = NULL; uint64_t start[1], count[1]; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); adios_read_init_method (method, comm, "verbose=3"); ADIOS_FILE * f = adios_read_open ("adios_global_no_xml.bp", method, comm, ADIOS_LOCKMODE_NONE, 0); if (f == NULL) { printf ("%s\n", adios_errmsg()); return -1; } ADIOS_VARINFO * v = adios_inq_var (f, "temperature"); /* Using less readers to read the global array back, i.e., non-uniform */ uint64_t slice_size = v->dims[0]/size; start[0] = slice_size * rank; if (rank == size-1) /* last rank may read more lines */ slice_size = slice_size + v->dims[0]%size; count[0] = slice_size; data = malloc (slice_size * sizeof (double)); if (data == NULL) { fprintf (stderr, "malloc failed.\n"); return -1; } /* Read a subset of the temperature array */ sel = adios_selection_boundingbox (v->ndim, start, count); adios_schedule_read (f, sel, "temperature", 0, 1, data); adios_perform_reads (f, 1); if (rank > 0) { MPI_Recv (&token, 1, MPI_INT, rank-1, 0, comm, &status); } printf (" ======== Rank %d ========== \n", rank); npl = 10; for (i = 0; i < slice_size; i+=npl) { printf ("[%4.4" PRIu64 "] ", rank*slice_size+i); for (j= 0; j < npl; j++) { printf (" %6.6g", * ((double *)data + i + j)); } printf ("\n"); } fflush(stdout); sleep(1); if (rank < size-1) { MPI_Send (&token, 1, MPI_INT, rank+1, 0, comm); } free (data); adios_selection_delete (sel); adios_free_varinfo (v); adios_read_close (f); MPI_Barrier (comm); adios_read_finalize_method (method); MPI_Finalize (); return 0; }
int main (int argc, char ** argv) { int rank, size; int NX, NY; int len, off; double *t = NULL; MPI_Comm comm = MPI_COMM_WORLD; uint64_t start[2], count[2]; ADIOS_SELECTION *sel; int steps = 0; #ifdef _USE_GNUPLOT int i, j; double *tmp; FILE *pipe; #else // Variables for ADIOS write int64_t adios_handle; uint64_t adios_groupsize, adios_totalsize; char outfn[256]; #endif MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); adios_read_init_method(ADIOS_READ_METHOD_FLEXPATH, comm, ""); ADIOS_FILE* fp = adios_read_open("stream.bp", ADIOS_READ_METHOD_FLEXPATH, comm, ADIOS_LOCKMODE_NONE, 0.0); assert(fp != NULL); ADIOS_VARINFO* nx_info = adios_inq_var( fp, "NX"); ADIOS_VARINFO* ny_info = adios_inq_var( fp, "NY"); NX = *((int *)nx_info->value); NY= *((int*)ny_info->value); len = NX / size; off = len * rank; if (rank == size-1) len = len + NX % size; printf("\trank=%d: NX,NY,len,off = %d\t%d\t%d\t%d\n", rank, NX, NY, len, off); assert(len > 0); t = (double *) malloc(sizeof(double) * len * NY); memset(t, '\0', sizeof(double) * len * NY); assert(t != NULL); start[0] = off; start[1] = 0; count[0] = len; count[1] = NY; // Not working ... //sel = adios_selection_boundingbox (2, start, count); sel = malloc(sizeof(ADIOS_SELECTION)); sel->type=ADIOS_SELECTION_WRITEBLOCK; sel->u.block.index = rank; #ifdef _USE_GNUPLOT if ((NX % size) > 0) { fprintf(stderr, "Equal distribution is required\n"); return -1; } if (rank == 0) { pipe = popen("gnuplot", "w"); fprintf(pipe, "set view map\n"); fprintf(pipe, "set xrange [0:%d]\n", NX-1); tmp = (double *) malloc(sizeof(double) * NX * NY); assert(tmp != NULL); } #else // ADIOS write init adios_init ("adios.xml", comm); #endif //while(adios_errno != err_end_of_stream && adios_errno != err_step_notready) while(1) { steps++; // Reading adios_schedule_read (fp, sel, "var_2d_array", 0, 1, t); adios_perform_reads (fp, 1); printf("step=%d\trank=%d\tfp->current_step=%d\t[%d,%d]\n", steps, rank, fp->current_step, len, NY); /* // Debugging for (i=0; i<len; i++) { printf("%d: rank=%d: t[%d,0:4] = ", steps, rank, off+i); for (j=0; j<5; j++) { printf(", %g", t[i*NY + j]); } printf(" ...\n"); } */ // Do something #ifdef _USE_GNUPLOT // Option 1: plotting MPI_Gather(t, len * NY, MPI_DOUBLE, tmp, len * NY, MPI_DOUBLE, 0, comm); if (rank == 0) { fprintf(pipe, "set title 'Soft X-Rray Signal (shot #%d)'\n", steps); fprintf(pipe, "set xlabel 'Channel#'\n"); fprintf(pipe, "set ylabel 'Timesteps'\n"); fprintf(pipe, "set cblabel 'Voltage (eV)'\n"); # ifndef _GNUPLOT_INTERACTIVE fprintf(pipe, "set terminal png\n"); fprintf(pipe, "set output 'fig%03d.png'\n", steps); # endif fprintf(pipe, "splot '-' matrix with image\n"); //fprintf(pipe, "plot '-' with lines, '-' with lines, '-' with lines\n"); double *sum = calloc(NX, sizeof(double)); for (j = 0; j < NY; j++) { for (i = 0; i < NX; i++) { sum[i] += tmp[i * NY + j]; } } for (j = 0; j < NY; j++) { for (i = 0; i < NX; i++) { fprintf (pipe, "%g ", (-tmp[i * NY + j] + sum[i]/NY)/3276.8); } fprintf(pipe, "\n"); } fprintf(pipe, "e\n"); fprintf(pipe, "e\n"); fflush (pipe); # ifdef _GNUPLOT_INTERACTIVE printf ("Press [Enter] to continue . . 
."); fflush (stdout); getchar (); # endif free(sum); } #else // Option 2: BP writing snprintf (outfn, sizeof(outfn), "reader_%3.3d.bp", steps); adios_open (&adios_handle, "reader", outfn, "w", comm); adios_groupsize = 4 * sizeof(int) + sizeof(double) * len * NY; adios_group_size (adios_handle, adios_groupsize, &adios_totalsize); adios_write (adios_handle, "NX", &NX); adios_write (adios_handle, "NY", &NY); adios_write (adios_handle, "len", &len); adios_write (adios_handle, "off", &off); adios_write (adios_handle, "var", t); adios_close (adios_handle); #endif // Advance MPI_Barrier (comm); adios_advance_step(fp, 0, TIMEOUT_SEC); if (adios_errno == err_end_of_stream) { printf("rank %d, Stream terminated. Quit\n", rank); break; // quit while loop } else if (adios_errno == err_step_notready) { printf ("rank %d: No new step arrived within the timeout. Quit.\n", rank); break; // quit while loop } else if (adios_errno != err_no_error) { printf("ADIOS returned code=%d, msg:%s\n", adios_errno, adios_get_last_errmsg()); break; // quit while loop } } // free(t); adios_read_close(fp); //printf("rank %d, Successfully closed stream\n", rank); adios_read_finalize_method(ADIOS_READ_METHOD_FLEXPATH); //printf("rank %d, Successfully finalized read method\n", rank); #ifndef _USE_GNUPLOT adios_finalize (rank); //printf("rank %d, Successfully finalized adios\n", rank); #else if (rank==0) { free(tmp); pclose(pipe); } #endif MPI_Finalize (); return 0; }
int main (int argc, char ** argv) { int rank, size, i, j; MPI_Comm comm = MPI_COMM_WORLD; ADIOS_FILE * f; ADIOS_VARINFO * v; ADIOS_SELECTION * sel; int steps = 0; int retval = 0; float timeout_sec = 1.0; void * data = NULL; uint64_t start[2], count[2]; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3"); f = adios_read_open ("adios_globaltime.bp", ADIOS_READ_METHOD_BP, comm, ADIOS_LOCKMODE_NONE, timeout_sec); if (adios_errno == err_file_not_found) { printf ("rank %d: Stream not found after waiting %f seconds: %s\n", rank, timeout_sec, adios_errmsg()); retval = adios_errno; } else if (adios_errno == err_end_of_stream) { printf ("rank %d: Stream terminated before open. %s\n", rank, adios_errmsg()); retval = adios_errno; } else if (f == NULL) { printf ("rank %d: Error at opening stream: %s\n", rank, adios_errmsg()); retval = adios_errno; } else { /* process file here... */ v = adios_inq_var (f, "temperature"); adios_inq_var_blockinfo (f, v); printf ("ndim = %d\n", v->ndim); //printf ("nsteps = %d\n", v->nsteps); printf ("dims[%llu][%llu]\n", v->dims[0], v->dims[1]); uint64_t slice_size = v->dims[0]/size; if (rank == size-1) slice_size = slice_size + v->dims[0]%size; start[0] = rank * slice_size; count[0] = slice_size; start[1] = 0; count[1] = v->dims[1]; data = malloc (slice_size * v->dims[1] * 8); /* Processing loop over the steps (we are already in the first one) */ while (adios_errno != err_end_of_stream) { steps++; // steps start counting from 1 sel = adios_selection_boundingbox (v->ndim, start, count); adios_schedule_read (f, sel, "temperature", 0, 1, data); adios_perform_reads (f, 1); if (rank == 0) printf ("--------- Step: %d --------------------------------\n", f->current_step); printf("rank=%d: [0:%lld,0:%lld] = [", rank, v->dims[0], v->dims[1]); for (i = 0; i < slice_size; i++) { printf (" ["); for (j = 0; j < v->dims[1]; j++) { printf ("%g ", *((double *)data + i * v->dims[1] + j)); } printf ("]"); } printf (" ]\n\n"); // advance to 1) next available step with 2) blocking wait adios_advance_step (f, 0, timeout_sec); if (adios_errno == err_step_notready) { printf ("rank %d: No new step arrived within the timeout. Quit. %s\n", rank, adios_errmsg()); break; // quit while loop } } adios_read_close (f); } if (rank==0) printf ("We have processed %d steps\n", steps); adios_read_finalize_method (ADIOS_READ_METHOD_BP); free (data); MPI_Finalize (); return retval; }
int main (int argc, char ** argv) { char filename [256] = "stream.bp"; int rank, size; int NX, NY; int len, off; double *t = NULL; MPI_Comm comm = MPI_COMM_WORLD; int64_t adios_handle; uint64_t adios_groupsize, adios_totalsize; uint64_t start[2], count[2]; ADIOS_SELECTION *sel; int steps = 0; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); // ADIOS read init adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3"); ADIOS_FILE* fp = adios_read_open_file ("kstar.bp", ADIOS_READ_METHOD_BP, comm); assert(fp != NULL); ADIOS_VARINFO* nx_info = adios_inq_var( fp, "N"); ADIOS_VARINFO* ny_info = adios_inq_var( fp, "L"); NX = *((int *)nx_info->value); NY= *((int*)ny_info->value); len = NX / size; off = len * rank; if (rank == size-1) len = len + NX % size; printf("\trank=%d: NX,NY,len,off = %d\t%d\t%d\t%d\n", rank, NX, NY, len, off); assert(len > 0); t = (double *) malloc(sizeof(double) * len * NY); memset(t, '\0', sizeof(double) * len * NY); assert(t != NULL); start[0] = off; start[1] = 0; count[0] = len; count[1] = NY; sel = adios_selection_boundingbox (2, start, count); // ADIOS write init adios_init ("adios.xml", comm); remove (filename); //int ii; //for(ii = 0; ii<10; ii++){ // for (i = 0; i < len * NY; i++) // t[i] = ii*1000 + rank; while(adios_errno != err_end_of_stream && adios_errno != err_step_notready) { steps++; // Reading adios_schedule_read (fp, sel, "var", 0, 1, t); adios_perform_reads (fp, 1); // Debugging //for (i = 0; i < len*NY; i++) t[i] = off * NY + i; printf("step=%d\trank=%d\t[%d,%d]\n", steps, rank, len, NY); // Writing adios_open (&adios_handle, "writer", filename, "a", comm); adios_groupsize = 4*4 + 8*len*NY; adios_group_size (adios_handle, adios_groupsize, &adios_totalsize); adios_write (adios_handle, "NX", &NX); adios_write (adios_handle, "NY", &NY); adios_write (adios_handle, "len", &len); adios_write (adios_handle, "off", &off); adios_write (adios_handle, "var_2d_array", t); adios_close (adios_handle); // Advance MPI_Barrier (comm); adios_advance_step(fp, 0, TIMEOUT_SEC); } free(t); MPI_Barrier (comm); adios_read_close(fp); if (rank==0) printf ("We have processed %d steps\n", steps); MPI_Barrier (comm); adios_read_finalize_method(ADIOS_READ_METHOD_BP); adios_finalize (rank); MPI_Finalize (); return 0; }
int main (int argc, char ** argv) { int rank, size, i, j; MPI_Comm comm = MPI_COMM_WORLD; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); adios_read_init_method (method, comm, "verbose=3"); ADIOS_FILE * f = adios_read_open_file("adios_stat.bp", method, comm); if (f == NULL) { fprintf (stderr, "%s\n", adios_errmsg()); return -1; } ADIOS_VARINFO * v = adios_inq_var (f, "temperature"); if (v) { /* get statistics, for each individual time-step */ adios_inq_var_stat (f, v, 1, 0); printf("Global MIN of temperature: %lf\n", * (double *) v->statistics->min); printf("Global MAX of temperature: %lf\n", * (double *) v->statistics->max); printf("Global AVG of temperature: %lf\n", * (double *) v->statistics->avg); printf("Global STD DEV of temperature: %lf\n", * (double *) v->statistics->std_dev); printf("\n"); printf("---------------------------------------------------------------------------\n"); if (v->statistics->steps) { printf("MIN\t\tMAX\t\tAVG\t\tSTD_DEV\t\tHISTOGRAM\n"); for(i = 0; i < v->nsteps; i++) { if (v->statistics->steps->mins[i]) printf("%lf\t", * (double *) v->statistics->steps->mins[i]); else printf("--\t\t"); if (v->statistics->steps->maxs[i]) printf("%lf\t", * (double *) v->statistics->steps->maxs[i]); else printf("--\t\t"); if (v->statistics->steps->avgs[i]) printf("%lf\t", * (double *) v->statistics->steps->avgs[i]); else printf("--\t\t"); if (v->statistics->steps->std_devs[i]) printf("%lf\t", * (double *) v->statistics->steps->std_devs[i]); else printf("--\t\t"); if (v->statistics->histogram) { for(j = 0; j <= v->statistics->histogram->num_breaks; j++) { printf("%d ", v->statistics->histogram->frequencies[i][j]); } printf("\n"); } else { printf("--\t\t"); } printf("\n"); } } else { printf ("Per step statistics is missing\n"); } printf("---------------------------------------------------------------------------\n"); printf("\n"); if (v->statistics->histogram) { printf("Break points:\t\t\t"); for(j = 0; j < v->statistics->histogram->num_breaks; j++) printf("%6.2lf\t", v->statistics->histogram->breaks[j]); printf("\n"); printf("Frequencies:\t\t\t"); for(j = 0; j <= v->statistics->histogram->num_breaks; j++) printf("%6d\t", v->statistics->histogram->gfrequencies[j]); } printf("\n\n"); #if 0 printf ("Auto covariance of MIN values of temperature over time: %lf\n", adios_stat_cov (v, v, "min", 0, 12, 0)); printf ("Auto correlation of MAX values of temperature over time, with lag 2 units: %lf\n", adios_stat_cor (v, v, "max", 0, 8, 2)); printf("\n\n"); #endif adios_free_varinfo (v); } else { fprintf (stderr, "ERROR: Cannot inquire statistics of variable 'temperature': %s\n", adios_errmsg()); } v = adios_inq_var (f, "complex"); if (v) { adios_inq_var_stat (f, v, 1, 0); double *C = v->statistics->min; printf("Global Minimum of variable complex - Magnitude: %lf\n", C[0]); printf("Global Minimum of variable complex - Real part: %lf\n", C[1]); printf("Global Minimum of variable complex - Imaginary part: %lfi\n", C[2]); double ** Cmin; Cmin = (double **) v->statistics->steps->mins; printf("\nMagnitude\t\tReal\t\t\tImaginary\n"); for (j = 0; j < v->nsteps; j++) { printf ("%lf\t\t%lf\t\t%lf\n", Cmin[j][0], Cmin[j][1], Cmin[j][2]); } printf("\n"); adios_free_varinfo (v); } else { fprintf (stderr, "ERROR: Cannot inquire statistics of variable 'complex': %s\n", adios_errmsg()); } adios_read_close (f); MPI_Barrier (comm); adios_read_finalize_method (method); MPI_Finalize (); return 0; }
int main (int argc, char ** argv) { int i, j, datasize; MPI_Comm comm = MPI_COMM_WORLD; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; ADIOS_SELECTION * sel1; double * data = NULL; uint64_t start[2], count[2]; MPI_Init (&argc, &argv); #ifdef WITH_NCSU_TIMER timer_init(); #endif adios_read_init_method (method, comm, NULL); ADIOS_FILE * f = adios_read_open_file ("adios_global.bp", method, comm); ADIOS_VARINFO * varinfo = adios_inq_var (f, "temperature"); if (varinfo) { int nranks; assert(varinfo->ndim == 2); nranks = varinfo->dims[0]; assert(nranks % 4 == 0); assert(varinfo->dims[1] == 10); datasize = (nranks / 2) * varinfo->dims[1] * sizeof(double); data = malloc (datasize); start[0] = nranks / 4; start[1] = 2; count[0] = nranks / 2; count[1] = 6; sel1 = adios_selection_boundingbox (varinfo->ndim, start, count); adios_schedule_read (f, sel1, "temperature", 0, 1, data); adios_perform_reads (f, 1); printf("Subvolume at (%" PRIu64 ",%" PRIu64 ") of size (%" PRIu64 ",%" PRIu64 "):\n", start[0], start[1], count[0], count[1]); for (i = 0; i < count[0]; i++) { printf("[ "); for (j = 0; j < count[1]; j++) { printf("%.0lf ", data[i * count[1] + j]); } printf("]\n"); } adios_selection_delete (sel1); } adios_free_varinfo (varinfo); adios_read_close (f); adios_read_finalize_method (ADIOS_READ_METHOD_BP); #ifdef WITH_NCSU_TIMER printf("[TIMERS] "); timer_result_t *results = timer_get_results_sorted(); for (i = 0; i < timer_get_num_timers(); i++) { printf("%s: %0.4lf ", results[i].name, results[i].time); } printf("\n"); free(results); #endif #ifdef WITH_NCSU_TIMER timer_finalize(); #endif MPI_Finalize (); return 0; }
int main(int argc, char ** argv) { int rank, size, varid, numvars; int bins, step, mod; char *filename, *in_stream, *data_var_name; MPI_Comm comm = MPI_COMM_WORLD; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_FLEXPATH; //enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; ADIOS_SELECTION * global_range_select; double *data; uint64_t tstep, global_size, mysize, mystart, sz; MPI_Init (&argc, &argv); MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &size); /* Command line parsing */ if (rank == 0 && argc < 4) { fprintf(stderr, "\nHistogram usage: <exec> input-stream-name num-bins" " arr1 [arr2] [arr3] [...]\n" "\t where arr1, arr2, arr3 ... are the names of the arrays to be analyzed.\n"); MPI_Abort(comm, -1); } MPI_Barrier(comm); in_stream = argv[1]; //Parse cmd line bins = atoi(argv[2]); numvars = argc - 3; const char *vars[numvars]; for (varid=0; varid < numvars; varid++) { vars[varid] = argv[varid + 3]; } /* Adios open and init */ adios_read_init_method (method, comm, "verbose=1"); ADIOS_FILE * f = adios_read_open (in_stream, method, comm, ADIOS_LOCKMODE_ALL, -1); step = 0; //not used now while (adios_errno != err_end_of_stream){ //resource monitor /*loop over different arrays inside stream*/ for (varid = 0; varid < numvars; varid++){ #ifdef ENABLE_MONITOR //double t1 = wfgettimeofday(); lib_mem_init(); ind_timer_start(0, "whole timestep"); #endif //Init variables.... global_size = 0; tstep = 0; mod = 0; mysize = 0; mystart = 0; adios_schedule_read (f, NULL, "ntimestep", 0, 1, &tstep); adios_perform_reads (f, 1); ADIOS_VARINFO * glob_info = adios_inq_var (f, vars[varid]); global_size = glob_info->dims[0]; //printf("[DEBUG] global_size = %" PRIu64 " ntimestep = %" PRIu64 "\n", // global_size, tstep); //printf("[HIST%d] received data for timestep %" PRIu64 " with ndim: %d and globalsize:%" // PRIu64 " \n", rank, tstep, ndim, global_size); //sleep(800); //debug //Array slice computation mod = global_size % size;//size = MPI size if (mod == 0){ mysize = global_size / size; mystart = mysize * rank; } else { mysize = global_size / (size); if (rank < mod){ mysize++; mystart = mysize * rank; } else { mystart = (mod * (mysize + 1)) + ((rank - mod) * mysize); } } #ifdef ENABLE_MONITOR nohandler_mem(rank); #endif //printf("[HISTO%d]: mysize = %" PRIu64" mystart = %" PRIu64 "\n", rank, mysize, mystart); //debug //if (step == 0) sleep(800); uint64_t starts[] = {mystart}; uint64_t counts[] = {mysize}; global_range_select = adios_selection_boundingbox (1, starts, counts); //Allocate space for arrays uint64_t msize = ((uint64_t) sizeof(double) * mysize); //printf("[DEBUG] mysize = %" PRIu64 " msize= %" PRIu64" \n", mysize, msize); //data = (double *) malloc(sizeof(double) * mysize); data = new double[mysize]; if (data == NULL){ //printf("DEBUG: malloc returned NULL, size was %d\n", msize); } else { if (rank == 0) printf("[HIST0] DEBUG: malloc successful, size was %d\n", mysize); } //memset (data, 0, sizeof(double) * mysize); //Read data adios_schedule_read (f, global_range_select, vars[varid], 0, 1, data); adios_perform_reads (f, 1); #ifdef ENABLE_MONITOR nohandler_mem(rank); #endif //printf("PERFORM_READS success of variable: %s\n", vars[varid]); /* Data check if (step == 4) { FILE *fp; char *log; asprintf(&log, "histo-input%d-%d.log", step, rank); fp = fopen(log, "w"); fprintf(fp, "timestep: %" PRIu64 " mysize: %"PRIu64 "\n", tstep, mysize); for (i=0; i<(int)mysize; i++){ fprintf(fp, "%lf\n", data[i]); } fclose(fp); sleep(800); } */ // find max and min sz = 0; sz = mysize; double min = data[0]; 
double max = data[0]; for (uint64_t i = 1; i < sz; ++i) { if (data[i] > max) max = data[i]; if (data[i] < min) min = data[i]; }//local max, min found. //local data should just use shared mem. double g_min, g_max; // Find the global max/min MPI_Allreduce (&min, &g_min, 1, MPI_DOUBLE, MPI_MIN, comm); MPI_Allreduce (&max, &g_max, 1, MPI_DOUBLE, MPI_MAX, comm); //printf("[HIST%d] glob-min: %f, glob-max: %f\n", rank, g_min, g_max); nohandler_mem(rank); double width = (g_max - g_min)/bins; std::vector<uint64_t> hist(bins); for (uint64_t i = 0; i < sz; ++i)//fill local bins { //printf("[HISTO%d] local filling adding index %" PRIu64 "\n", rank, i); int idx = int((data[i] - g_min)/width);//discover index if (idx == bins) // we hit the max --idx; //printf("[%d]: %f -> %d\n", rank, data[i], idx); ++hist[idx]; } delete[] data; // Global reduce histograms std::vector<uint64_t> g_hist(bins); MPI_Reduce(&hist[0], &g_hist[0], bins, MPI_UINT64_T, MPI_SUM, 0, comm); //debug //printf("[Completed histogram routine]\n"); if (rank == 0) //print histogram to file { FILE *fp; const char *log = "histograms.log"; fp = fopen(log, "a"); fprintf(fp, "Histogram for %s, timestep %" PRIu64"\n", vars[varid], tstep); for (int i = 0; i < bins; ++i) fprintf(fp, " %f-%f: %" PRIu64 "\n", g_min + i*width, g_min + (i+1)*width, g_hist[i]); fclose (fp); } #ifdef ENABLE_MONITOR nohandler_mem(rank); #endif if (rank == 0) //print histogram to terminal { printf("Histogram for %s, timestep %" PRIu64"\n", vars[varid], tstep); for (int i = 0; i < bins; ++i) printf(" %f-%f: %" PRIu64 "\n", g_min + i*width, g_min + (i+1)*width, g_hist[i]); } //resource monitor #ifdef ENABLE_MONITOR //double t2 = wfgettimeofday(); ind_timer_end(0); char monitor_title[40]; sprintf(monitor_title, "histogram-%s", vars[varid]); monitor_out (rank, size, tstep, msize, t1, t2, comm, monitor_title); #endif } //end of read + analysis for 3 variables adios_release_step(f); //delete[] data; if (rank == 0) printf("[HIST%d] read and wrote data for timestep %" PRIu64 "\n", rank, tstep); step++; adios_advance_step(f, 0, -1); /* if (step == 6){ double t1 = wfgettimeofday(); FILE *tfp; tfp = fopen("time.log", "a"); fprintf(tfp, "rank %d histogram end time: %f\n", rank, t1); fclose(tfp); } */ }//end of adios stream while loop if (rank == 0) printf("[HIST%d] out of read loop\n", rank); /* performance measurement */ /* if (rank == 0){ double t3 = wfgettimeofday(); FILE *tfp; tfp = fopen("time.log", "a"); fprintf(tfp, "master histogram end time: %f\n", t3); fclose(tfp); } */ #ifdef ENABLE_MONITOR outer_timer_end(rank, "histogram"); #endif adios_read_close(f); adios_read_finalize_method(method); MPI_Finalize(); return 0; }
int main (int argc, char ** argv) { char filename [256]; int rank; MPI_Comm comm = MPI_COMM_WORLD; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; ADIOS_SELECTION * sel1=NULL; int8_t v1 = 0; int16_t v2 = 0; int32_t v3 = 0; int64_t v4 = 0; uint8_t v5 = 0; uint16_t v6 = 0; uint32_t v7 = 0; uint64_t v8 = 0; float v9 = 0.0; double v10 = 0.0; char v11[256]; complex v12; v12.r = 0.0; v12.i = 0.0; double_complex v13; v13.r = 0.0; v13.i = 0.0; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); strcpy (filename, "scalars.bp"); adios_read_init_method (method, comm, "verbose=3"); ADIOS_FILE * f = adios_read_open (filename, method, comm, ADIOS_LOCKMODE_NONE, 0.0); adios_schedule_read (f, sel1, "var_byte", 0, 1, &v1); adios_schedule_read (f, sel1, "var_short", 0, 1, &v2); adios_schedule_read (f, sel1, "var_int", 0, 1, &v3); adios_schedule_read (f, sel1, "var_long", 0, 1, &v4); adios_schedule_read (f, sel1, "var_ubyte", 0, 1, &v5); adios_schedule_read (f, sel1, "var_ushort", 0, 1, &v6); adios_schedule_read (f, sel1, "var_uint", 0, 1, &v7); adios_schedule_read (f, sel1, "var_ulong", 0, 1, &v8); adios_schedule_read (f, sel1, "var_real", 0, 1, &v9); adios_schedule_read (f, sel1, "var_double", 0, 1, &v10); /* note that a string is an array and thus v11 a pointer already, so we pass the v11 instead of &v11 here */ adios_schedule_read (f, sel1, "var_string", 0, 1, v11); adios_schedule_read (f, sel1, "var_complex", 0, 1, &v12); adios_schedule_read (f, sel1, "var_double_complex", 0, 1, &v13); adios_perform_reads (f,1); if (rank == 0) { printf("byte v1 = %d\n", v1); printf("short v2 = %d\n", v2); printf("integer v3 = %d\n", v3); printf("long v4 = %" PRId64 "\n", v4); printf("uns.byte v5 = %u\n", v5); printf("uns.short v6 = %u\n", v6); printf("uns.int v7 = %u\n", v7); printf("uns.long v8 = %" PRIu64 "\n", v8); printf("float v9 = %g\n", v9); printf("double v10 = %g\n", v10); printf("string v11 = %s\n", v11); printf("complex v12 = (%g, i%g)\n", v12.r, v12.i); printf("dbl-complex v13 = (%g, i%g)\n", v13.r, v13.i); } adios_read_close (f); MPI_Barrier (comm); adios_read_finalize_method (ADIOS_READ_METHOD_BP); MPI_Finalize (); return 0; }