int read_write(int step)
{
    int retval = 0;
    int i;
    uint64_t total_size;

    // open output file
    adios_open (&fh, group_namelist[0], outfilename, (step==1 ? "w" : "a"), comm);
    adios_group_size (fh, write_total, &total_size);

    for (i = 0; i < f->nvars; i++)
    {
        if (varinfo[i].writesize != 0)
        {
            // read variable subset
            print ("rank %d: Read variable %d: %s\n", rank, i, f->var_namelist[i]);
            ADIOS_SELECTION *sel = adios_selection_boundingbox (varinfo[i].v->ndim,
                                                                varinfo[i].start,
                                                                varinfo[i].count);
            // in streaming mode, reads are relative to the current step, so from_steps is 0
            adios_schedule_read_byid (f, sel, i, 0, 1, readbuf);
            adios_perform_reads (f, 1);
            adios_selection_delete (sel);

            // write (buffer) variable
            print ("rank %d: Write variable %d: %s\n", rank, i, f->var_namelist[i]);
            adios_write (fh, f->var_namelist[i], readbuf);
        }
    }
    adios_release_step (f); // this step no longer needs to be locked in the staging area
    adios_close (fh);       // write out the output buffer to file
    return retval;
}
void eavlXGCParticleImporter::ReleaseTimeStep()
{
    adios_release_step(fp);
}
#include <cstdio>
#include <cstdlib>
#include <cinttypes>
#include <new>
#include <vector>
#include <mpi.h>
#include "adios_read.h"

int main(int argc, char ** argv)
{
    int rank, size, varid, numvars;
    int bins, step, mod;
    char *in_stream;
    MPI_Comm comm = MPI_COMM_WORLD;
    enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_FLEXPATH;
    //enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP;
    ADIOS_SELECTION *global_range_select;
    double *data;
    uint64_t tstep, global_size, mysize, mystart, sz;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    /* Command line parsing */
    if (rank == 0 && argc < 4) {
        fprintf(stderr, "\nHistogram usage: <exec> input-stream-name num-bins"
                " arr1 [arr2] [arr3] [...]\n"
                "\t where arr1, arr2, arr3 ... are the names of the arrays to be analyzed.\n");
        MPI_Abort(comm, -1);
    }
    MPI_Barrier(comm);

    in_stream = argv[1];
    bins = atoi(argv[2]);
    numvars = argc - 3;
    const char *vars[numvars];
    for (varid = 0; varid < numvars; varid++) {
        vars[varid] = argv[varid + 3];
    }

    /* ADIOS open and init */
    adios_read_init_method (method, comm, "verbose=1");
    ADIOS_FILE *f = adios_read_open (in_stream, method, comm,
                                     ADIOS_LOCKMODE_ALL, -1);
    step = 0; // not used now

    while (adios_errno != err_end_of_stream) {
        /* loop over the different arrays inside the stream */
        for (varid = 0; varid < numvars; varid++) {
            /* resource monitor */
#ifdef ENABLE_MONITOR
            double t1 = wfgettimeofday();
            lib_mem_init();
            ind_timer_start(0, "whole timestep");
#endif
            /* Init variables */
            global_size = 0; tstep = 0;
            mod = 0; mysize = 0; mystart = 0;

            adios_schedule_read (f, NULL, "ntimestep", 0, 1, &tstep);
            adios_perform_reads (f, 1);

            ADIOS_VARINFO *glob_info = adios_inq_var (f, vars[varid]);
            global_size = glob_info->dims[0];
            adios_free_varinfo (glob_info);

            /* Array slice computation: the first (global_size % size) ranks
               take one extra element each */
            mod = global_size % size; // size = MPI size
            if (mod == 0) {
                mysize = global_size / size;
                mystart = mysize * rank;
            }
            else {
                mysize = global_size / size;
                if (rank < mod) {
                    mysize++;
                    mystart = mysize * rank;
                }
                else {
                    mystart = (mod * (mysize + 1)) + ((rank - mod) * mysize);
                }
            }
#ifdef ENABLE_MONITOR
            nohandler_mem(rank);
#endif

            uint64_t starts[] = {mystart};
            uint64_t counts[] = {mysize};
            global_range_select = adios_selection_boundingbox (1, starts, counts);

            /* Allocate space for the local slice; plain new never returns NULL,
               so use the nothrow form to keep the check meaningful */
            uint64_t msize = ((uint64_t) sizeof(double) * mysize);
            data = new (std::nothrow) double[mysize];
            if (data == NULL) {
                fprintf(stderr, "[HIST%d] allocation of %" PRIu64 " bytes failed\n", rank, msize);
                MPI_Abort(comm, -1);
            }
            else if (rank == 0) {
                printf("[HIST0] DEBUG: allocation successful, size was %" PRIu64 "\n", mysize);
            }

            /* Read data */
            adios_schedule_read (f, global_range_select, vars[varid], 0, 1, data);
            adios_perform_reads (f, 1);
#ifdef ENABLE_MONITOR
            nohandler_mem(rank);
#endif

            /* find local max and min */
            sz = mysize;
            double min = data[0];
            double max = data[0];
            for (uint64_t i = 1; i < sz; ++i) {
                if (data[i] > max) max = data[i];
                if (data[i] < min) min = data[i];
            }

            /* Find the global max/min */
            double g_min, g_max;
            MPI_Allreduce (&min, &g_min, 1, MPI_DOUBLE, MPI_MIN, comm);
            MPI_Allreduce (&max, &g_max, 1, MPI_DOUBLE, MPI_MAX, comm);
#ifdef ENABLE_MONITOR
            nohandler_mem(rank);
#endif

            /* fill local bins */
            double width = (g_max - g_min) / bins;
            std::vector<uint64_t> hist(bins);
            for (uint64_t i = 0; i < sz; ++i) {
                int idx = int((data[i] - g_min) / width); // discover index
                if (idx == bins) // we hit the max
                    --idx;
                ++hist[idx];
            }
            delete[] data;
            adios_selection_delete (global_range_select);

            /* Globally reduce the histograms onto rank 0 */
            std::vector<uint64_t> g_hist(bins);
            MPI_Reduce (&hist[0], &g_hist[0], bins, MPI_UINT64_T, MPI_SUM, 0, comm);

            if (rank == 0) { /* print histogram to file */
                FILE *fp;
                const char *log = "histograms.log";
                fp = fopen(log, "a");
                fprintf(fp, "Histogram for %s, timestep %" PRIu64 "\n", vars[varid], tstep);
                for (int i = 0; i < bins; ++i)
                    fprintf(fp, "  %f-%f: %" PRIu64 "\n",
                            g_min + i*width, g_min + (i+1)*width, g_hist[i]);
                fclose (fp);
            }

            if (rank == 0) { /* print histogram to terminal */
                printf("Histogram for %s, timestep %" PRIu64 "\n", vars[varid], tstep);
                for (int i = 0; i < bins; ++i)
                    printf("  %f-%f: %" PRIu64 "\n",
                           g_min + i*width, g_min + (i+1)*width, g_hist[i]);
            }

            /* resource monitor */
#ifdef ENABLE_MONITOR
            double t2 = wfgettimeofday();
            ind_timer_end(0);
            char monitor_title[40];
            sprintf(monitor_title, "histogram-%s", vars[varid]);
            monitor_out (rank, size, tstep, msize, t1, t2, comm, monitor_title);
#endif
        } // end of read + analysis for all requested variables

        adios_release_step(f);
        if (rank == 0)
            printf("[HIST%d] read and wrote data for timestep %" PRIu64 "\n", rank, tstep);
        step++;
        adios_advance_step(f, 0, -1); // block until the next step is available
    } // end of adios stream while loop

    if (rank == 0)
        printf("[HIST%d] out of read loop\n", rank);
#ifdef ENABLE_MONITOR
    outer_timer_end(rank, "histogram");
#endif
    adios_read_close(f);
    adios_read_finalize_method(method);
    MPI_Finalize();
    return 0;
}
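The slice computation in the histogram example above is the standard even block decomposition: N elements over `size` ranks, with the first (N % size) ranks taking one extra element. A standalone sketch of the same arithmetic, pulled out for clarity (block_decompose is a hypothetical helper name, not part of the original source):

#include <stdint.h>

/* Compute this rank's offset and length for an even block decomposition
   of N elements over `size` ranks; mirrors the mod/mysize/mystart logic
   in the histogram example. */
static void block_decompose (uint64_t N, int size, int rank,
                             uint64_t *mystart, uint64_t *mysize)
{
    uint64_t base = N / size;       /* elements every rank gets */
    uint64_t mod  = N % size;       /* number of ranks that get one extra */
    if ((uint64_t) rank < mod) {
        *mysize  = base + 1;
        *mystart = (base + 1) * rank;
    } else {
        *mysize  = base;
        *mystart = mod * (base + 1) + (rank - mod) * base;
    }
}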
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include "adios_read.h"

int main (int argc, char ** argv)
{
    /* application data structures */
    int rank;
    int NX, NY;
    double *t;
    int *p;

    /* MPI and ADIOS data structures */
    MPI_Comm comm = MPI_COMM_WORLD;

    /* MPI and ADIOS setup */
    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    adios_read_init_method (ADIOS_READ_METHOD_FLEXPATH, comm, "");

    /* First read in the scalars to calculate the size of the arrays */
    /* each reader selects the writer block matching its own rank */
    ADIOS_SELECTION process_select;
    process_select.type = ADIOS_SELECTION_WRITEBLOCK;
    process_select.u.block.index = rank;

    /* read the size of the arrays using local inq_var */
    /* Note: at this moment the timeout is not handled; the call blocks
       until a writer appears */
    ADIOS_FILE *afile = adios_read_open ("arrays",
                                         ADIOS_READ_METHOD_FLEXPATH,
                                         comm,
                                         ADIOS_LOCKMODE_NONE, 30.0);

    /* Read the arrays for each time step */
    while (adios_errno != err_end_of_stream) {
        ADIOS_VARINFO *nx_info = adios_inq_var (afile, "NX");
        if (nx_info->value) {
            NX = *((int *)nx_info->value);
        }
        ADIOS_VARINFO *ny_info = adios_inq_var (afile, "NY");
        if (ny_info->value) {
            NY = *((int *)ny_info->value);
        }

        /* Allocate space for the arrays */
        t = (double *) malloc (NX*NY*sizeof(double));
        p = (int *) malloc (NX*sizeof(int));
        memset (t, 0, NX*NY*sizeof(double));
        memset (p, 0, NX*sizeof(int));

        /* schedule a read of the arrays */
        adios_schedule_read (afile, &process_select, "var_double_2Darray", 0, 1, t);
        adios_schedule_read (afile, &process_select, "var_int_1Darray", 0, 1, p);
        /* commit the request and retrieve the data */
        adios_perform_reads (afile, 1);

        /* print the result */
        printf("Results Rank=%d Step=%d p[] = [%d, %d,...] t[][] = [%.2f, %.2f]\n",
               rank, afile->current_step, p[0], p[1], t[0], t[1]);

        /* free per-step allocations before the next iteration */
        free (t);
        free (p);
        adios_free_varinfo (nx_info);
        adios_free_varinfo (ny_info);

        /* release the step and block until the next one is available
           (the 30-second timeout is unsupported) */
        adios_release_step (afile);
        adios_advance_step (afile, 0, 30);
        MPI_Barrier (comm);
    }

    /* shutdown ADIOS and MPI */
    adios_read_close (afile); /* wait until all readers finish */
    adios_read_finalize_method (ADIOS_READ_METHOD_FLEXPATH);
    MPI_Finalize ();
    return 0;
}
/// Advance to the next available step. Returns true if successful,
/// false otherwise.
bool ADIOS_File::nextStep(float to)
{
    if (m_handle == 0)
        return false;

    adios_release_step(m_handle);
    return (0 == adios_advance_step(m_handle, 0, to));
} // ADIOS_File::nextStep
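A caller can drive a wrapper like this in a simple polling loop. The sketch below is hypothetical: it assumes ADIOS_File exposes nextStep as above, and processCurrentStep stands in for whatever per-step reads the application performs (neither the class's other members nor that routine appear in this excerpt):

void processCurrentStep(ADIOS_File &f);    // hypothetical per-step work

// Consume every step the stream delivers, waiting up to 30 s for each.
void drainStream(ADIOS_File &file)
{
    do {
        processCurrentStep(file);          // read from the currently locked step
    } while (file.nextStep(30.0f));        // release it and advance to the next
}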
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <mpi.h>
#include "adios_read.h"

int main (int argc, char ** argv)
{
    int rank, j;
    int NX, NY;
    double *t;
    MPI_Comm comm = MPI_COMM_WORLD;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    adios_read_init_method (ADIOS_READ_METHOD_FLEXPATH, comm, "");

    ADIOS_SELECTION *global_range_select;
    ADIOS_SELECTION scalar_block_select;
    scalar_block_select.type = ADIOS_SELECTION_WRITEBLOCK;
    scalar_block_select.u.block.index = 0;

    /* schedule_read of a scalar */
    int test_scalar = -1;
    ADIOS_FILE *afile = adios_read_open ("arrays",
                                         ADIOS_READ_METHOD_FLEXPATH,
                                         comm,
                                         ADIOS_LOCKMODE_NONE, 0.0);

    int i;
    for (i = 0; i < afile->nvars; i++) {
        printf("var: %s\n", afile->var_namelist[i]);
    }

    int ii = 0;
    while (adios_errno != err_end_of_stream) {
        /* get a bounding box - rank 0 for now */
        ADIOS_VARINFO *nx_info = adios_inq_var (afile, "/scalar/dim/NX");
        ADIOS_VARINFO *ny_info = adios_inq_var (afile, "/scalar/dim/NY");
        ADIOS_VARINFO *size_info = adios_inq_var (afile, "size");
        ADIOS_VARINFO *arry = adios_inq_var (afile, "var_2d_array");

        int nx_val = *((int *)nx_info->value);
        int ny_val = *((int *)ny_info->value);
        int size_val = *((int *)size_info->value);
        printf("nx: %d, ny: %d, size: %d\n", nx_val, ny_val, size_val);

        uint64_t xcount = arry->dims[0];
        uint64_t ycount = arry->dims[1];
        uint64_t starts[] = {0, 0};
        uint64_t counts[] = {xcount, ycount};
        global_range_select = adios_selection_boundingbox (2, starts, counts);
        int nelem = xcount * ycount;

        if (nx_info->value) {
            NX = *((int *)nx_info->value);
        }
        if (ny_info->value) {
            NY = *((int *)ny_info->value);
        }

        if (rank == 0) {
            int n;
            printf("dims: [ ");
            for (n = 0; n < arry->ndim; n++) {
                printf("%d ", (int)arry->dims[n]);
            }
            printf("]\n");
        }

        /* Allocate space for the arrays */
        int arr_size = sizeof(double) * nelem;
        t = (double *) malloc (arr_size);
        memset (t, 0, arr_size);

        /* Read the arrays */
        adios_schedule_read (afile, global_range_select, "var_2d_array", 0, 1, t);
        adios_schedule_read (afile, &scalar_block_select, "test_scalar", 0, 1, &test_scalar);
        adios_perform_reads (afile, 1);

        printf("Rank=%d: test_scalar: %d step: %d, t[] = [", rank, test_scalar, ii);
        for (j = 0; j < nelem; j++) {
            printf(", %6.2f", t[j]);
        }
        printf("]\n\n");

        /* free per-step buffers and metadata before advancing */
        free (t);
        adios_free_varinfo (nx_info);
        adios_free_varinfo (ny_info);
        adios_free_varinfo (size_info);
        adios_free_varinfo (arry);
        adios_selection_delete (global_range_select);

        adios_release_step (afile);
        adios_advance_step (afile, 0, 30);
        ii++;
    }

    adios_read_close (afile);
    adios_read_finalize_method (ADIOS_READ_METHOD_FLEXPATH);
    MPI_Finalize ();
    return 0;
}
void ADIOS1CommonRead::ReleaseStep()
{
    adios_release_step(m_fh);
}
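All of the examples above share the same step-management pattern from the ADIOS1 streaming read API: process the currently locked step, call adios_release_step so the staging area can reclaim it, then adios_advance_step to move to the next one. The skeleton below condenses that pattern into a minimal reader; the stream name "arrays" and the FLEXPATH method follow the examples above, while process_step is a hypothetical placeholder for the per-step reads:

#include <mpi.h>
#include "adios_read.h"

/* hypothetical placeholder: a real reader would adios_schedule_read()
   the variables it needs and then adios_perform_reads() here */
static void process_step (ADIOS_FILE *f) { (void) f; }

int main (int argc, char **argv)
{
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Init (&argc, &argv);
    adios_read_init_method (ADIOS_READ_METHOD_FLEXPATH, comm, "");

    /* blocks until a writer opens the stream */
    ADIOS_FILE *f = adios_read_open ("arrays", ADIOS_READ_METHOD_FLEXPATH,
                                     comm, ADIOS_LOCKMODE_ALL, -1.0);

    while (adios_errno != err_end_of_stream) {
        process_step (f);                /* read from the currently locked step */
        adios_release_step (f);          /* let the staging area reclaim this step */
        adios_advance_step (f, 0, -1.0); /* block until the next step arrives */
    }

    adios_read_close (f);
    adios_read_finalize_method (ADIOS_READ_METHOD_FLEXPATH);
    MPI_Finalize ();
    return 0;
}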