/* Open the BP file 'fname', read back arrays "t1" and "t2" (NX elements each)
 * and compare them against the reference array 't' via CHECK_ARRAY.
 * Returns 0 on success, 1 if the file cannot be opened; 'err' is presumably
 * set by CHECK_ARRAY, which appears to jump to 'endread' on a mismatch
 * (macro body not visible here -- TODO confirm). */
int read_file (char *fname) {
    ADIOS_SELECTION *sel;
    ADIOS_FILE * f;
    ADIOS_VARINFO * vi;                 /* unused here; likely referenced by macros */
    int err=0,i,n;                      /* err/i/n look like CHECK_ARRAY scratch vars */
    uint64_t start[1] = {0};
    uint64_t count[2] = {NX};           /* only count[0] is used (1-D selection) */
    uint64_t ndim;                      /* unused */
    reset_rarrays();                    /* clear the read buffers r1/r2 */
    log ("Read and check data in %s\n", fname);
    f = adios_read_open_file (fname, ADIOS_READ_METHOD_BP, comm);
    if (f == NULL) {
        printE ("Error at opening file: %s\n", adios_errmsg());
        return 1;
    }
    /* Whole-array bounding box; both variables are fetched by one blocking
     * adios_perform_reads() call. */
    sel = adios_selection_boundingbox (1, start, count);
    adios_schedule_read (f, sel, "t1", 0, 1, r1);
    adios_schedule_read (f, sel, "t2", 0, 1, r2);
    adios_perform_reads (f, 1);
    adios_selection_delete (sel);
    CHECK_ARRAY (t, r1, NX);
    CHECK_ARRAY (t, r2, NX);
endread:                                /* target for CHECK_ARRAY failure path */
    adios_read_close(f);
    MPI_Barrier (comm);
    return err;
}
/// Build a bounding-box selection over the particle range owned by this
/// reader for one species.
///
/// @param avi  variable info; dims[1..ndim-1] are taken in full
/// @param s    caller-provided start array (at least 3 slots; filled here)
/// @param c    caller-provided count array (at least 3 slots; filled here)
/// @param ION  nonzero selects the ion index window, zero the electron window
/// @return     a new ADIOS_SELECTION (caller must adios_selection_delete it)
ADIOS_SELECTION *
eavlXGCParticleImporter::MakeLimitedSelection(ADIOS_VARINFO *avi, uint64_t *s,
                                              uint64_t *c, int ION)
{
    // The two original branches were identical except for the start index and
    // the window length; collapse the duplication into two conditionals.
    s[0] = ION ? IONstartIndex : ELECTRONstartIndex;
    s[1] = 0;
    s[2] = 0;
    c[0] = ION ? (IONendIndex - IONstartIndex)
               : (ELECTRONendIndex - ELECTRONstartIndex);
    c[1] = 0;
    c[2] = 0;
    // Beyond the first (particle-index) dimension, select every element.
    for(int i = 1; i < avi->ndim; i++)
        c[i] = avi->dims[i];
    return adios_selection_boundingbox(avi->ndim, s, c);
}
// k is numbered from 1 to sum_nblocks void verifyData(ADIOS_FILE* f, ADIOS_VARINFO* v, int k, int timestep) { uint64_t blockBytes = adios_type_size (v->type, v->value); int j=0; if (v->ndim <= 0) { return; } //printf("verify block[%d]: ", k); for (j=0; j<v->ndim; j++) { blockBytes *= v->blockinfo[k].count[j]; //printf("%" PRIu64 ":%" PRIu64 " ", v->blockinfo[k].start[j], v->blockinfo[k].count[j]); } void* data = NULL; data = malloc(blockBytes); ADIOS_SELECTION* sel = adios_selection_boundingbox (v->ndim, v->blockinfo[k].start, v->blockinfo[k].count); int err = adios_schedule_read_byid(f, sel, v->varid, timestep, 1, data); if (!err) { err = adios_perform_reads(f, 1); } //fastbit_adios_util_printData(data, v->type, blockBytes/adios_type_size(v->type, v->value)); adios_selection_delete(sel); free(data); data = NULL; }
/* Read the sub-volume [start, start+count) of variable v at 'timestep' and
 * hand it to processData() for analysis.  The selection name encodes the
 * first start coordinate for logging. */
void onBox(int rank, ADIOS_FILE* f, ADIOS_VARINFO* v, int timestep,
           uint64_t* start, uint64_t* count, FastBitDataType ft)
{
    int i=0;
    char selName[100];
    /* fix: start[0] is uint64_t; "%lu" is the wrong conversion on ILP32/LLP64 */
    sprintf(selName, "box-%" PRIu64, start[0]);
    uint64_t totalCount = 1;
    for (i = 0; i<v->ndim; i++) {
        totalCount *= count[i];
    }
    double* data = (double *) calloc (totalCount, sizeof (double));
    if (data == NULL) {
        return;  /* fix: calloc result was used unchecked */
    }
    ADIOS_SELECTION* boxSel = adios_selection_boundingbox(v->ndim, start, count);
    int err = adios_schedule_read_byid(f, boxSel, v->varid, timestep, 1, data);
    if (!err) {
        err = adios_perform_reads(f, 1);   /* 1 == blocking */
    }
    processData(data, totalCount, rank, timestep, selName, ft, v);
    adios_selection_delete(boxSel);  /* fix: selection was leaked */
    /* NOTE(review): 'data' is not freed here, matching the original code --
     * confirm whether processData() takes ownership of the buffer. */
} // on box
int read_write(int step) { int retval = 0; int i; uint64_t total_size; // open output file adios_open (&fh, group_namelist[0], outfilename, (step==1 ? "w" : "a"), comm); adios_group_size (fh, write_total, &total_size); for (i=0; i<f->nvars; i++) { if (varinfo[i].writesize != 0) { // read variable subset print ("rank %d: Read variable %d: %s\n", rank, i, f->var_namelist[i]); ADIOS_SELECTION *sel = adios_selection_boundingbox (varinfo[i].v->ndim, varinfo[i].start, varinfo[i].count); adios_schedule_read_byid (f, sel, i, 1, 1, readbuf); adios_perform_reads (f, 1); // write (buffer) variable print ("rank %d: Write variable %d: %s\n", rank, i, f->var_namelist[i]); adios_write(fh, f->var_namelist[i], readbuf); } } adios_release_step (f); // this step is no longer needed to be locked in staging area adios_close (fh); // write out output buffer to file return retval; }
/// Build a bounding-box selection that covers the entire variable: zero
/// starts, full extent counts.  The first three slots of both arrays are
/// cleared first (callers pass at least 3-element buffers).
ADIOS_SELECTION *
eavlXGCParticleImporter::MakeSelection(ADIOS_VARINFO *avi, uint64_t *s, uint64_t *c)
{
    int d = 0;
    while (d < 3)
    {
        s[d] = 0;
        c[d] = 0;
        ++d;
    }
    for (d = 0; d < avi->ndim; ++d)
        c[d] = avi->dims[d];
    return adios_selection_boundingbox(avi->ndim, s, c);
}
/// Read a sub-array. /// /// @Note The length of the buffer is assumed to be sufficiently large /// if the argument bufLen is less or equal to 0. The caller is /// responsible for allocating the buffer (@c buf) of the currect size. int64_t ADIOS_Var::readData(void* buf, int64_t bufLen, const uint64_t* start, const uint64_t* count) { uint64_t total_elm = 1; for (int i=0; i<getNumDimension(); i++) { total_elm *= count[i]; #ifdef DEBUG LOGGER(ibis::gVerbose > 5) << "ADIOS_Var::readData: [" << i << "] start=" << start[i] << ", count=" << count[i]; #endif } { uint64_t total_bytes = total_elm * adios_type_size(getType(), readValue()); if (bufLen > 0 && (uint64_t)bufLen < total_bytes) { LOGGER(ibis::gVerbose > 0) << "Warning -- ADIOS_Var::readData: bufLen (" << bufLen << ") < total_bytes (" << total_bytes << ")"; return -1; } } ADIOS_SELECTION *sel = adios_selection_boundingbox (m_handle->ndim, start, count); if (sel == 0) { LOGGER(ibis::gVerbose > 1) << "Warning -- ADIOS_Var::readData failed to create a selection"; return -2; } IBIS_BLOCK_GUARD(adios_selection_delete, sel); int ierr = adios_schedule_read_byid (getFile(), sel, index(), getFile()->current_step, 1, buf); if (ierr != 0) { LOGGER(ibis::gVerbose > 0) << "Warning -- ADIOS_Var::readData call to " "adios_schedule_read_byid failed due to " << adios_errmsg(); return -3; } ierr = adios_perform_reads(getFile(), 1); // 1 == blocking if (ierr != 0) { LOGGER(ibis::gVerbose > 0) << "Warning -- ADIOS_Var::readData call to adios_perform_reads on " << getFileName() << " failed due to " << adios_errmsg(); return -4; } LOGGER(ibis::gVerbose > 5) << "ADIOS_Var::readData: competeled reading " << total_elm << " element" << (total_elm>1?"s":"") << " for " << getName() << " from " << getFileName(); return total_elm; } // ADIOS_Var::readData
/* Read back this rank's block of checkpoint variable 'name' and compare it
 * element-for-element against the writer-side reference array 'origin'.
 * Prints a pass/fail summary. */
void adios_checkpoint_verify_random_variables(ADIOS_FILE* fp, const char* name, uint_type* origin)
{
    ADIOS_VARINFO *vi;
    int count_int = 1;
    int size;
    /* fix: initialize to NULL -- previously these were passed to
     * adios_selection_boundingbox() and free()d uninitialized when ndim == 0 */
    uint64_t *start = NULL;
    uint64_t *count = NULL;
    vi = adios_inq_var(fp, name);
    adios_inq_var_blockinfo (fp, vi);
    if (vi->ndim > 0)
    {
        start = (uint64_t *)malloc(vi->ndim * sizeof(uint64_t));
        count = (uint64_t *)malloc(vi->ndim * sizeof(uint64_t));
    }
    /* Each rank picks up the geometry of its own writer block (block j ==
     * rank j); count_int accumulates the number of local elements. */
    for (int j=0; j<vi->nblocks[0]; j++)
    {
        if(j == OHMMS::Controller->rank())
        {
            for (int k=0; k<vi->ndim; k++)
            {
                start[k] = vi->blockinfo[j].start[k];
                count[k] = vi->blockinfo[j].count[k];
                count_int *= count[k];
            }
        }
    }
    size = count_int*adios_type_size(vi->type, vi->value);
    uint_type *mem= (uint_type*)malloc(size);
    ADIOS_SELECTION *sel = adios_selection_boundingbox(vi->ndim, start, count);
    adios_schedule_read(fp, sel, name, 0, 1, mem);
    adios_perform_reads(fp, 1);
    int flag = 0;
    for(int i=0; i<count_int; i++)
    {
        if(mem[i] == origin[i])
        {
            /* element matches the reference */
        }
        else
        {
            flag = 1;
            cout<<name<<"["<<i<<"]verification not passed, readin: "<<mem[i]<<" writeout: "<<origin[i]<<endl;
        }
    }
    if (flag == 0)
        cout<<name<<" verification passed "<<endl;
    else
        cout<<name<<" verification not passed "<<endl;
    adios_free_varinfo (vi);
    adios_selection_delete(sel);
    free(start);   /* free(NULL) is a no-op for the scalar case */
    free(count);
    free(mem);
}
/* Schedule a read of variable 'name' into 'data'.
 *
 * Two modes:
 *  - readAsLocalValue: the per-writer values are already in the metadata;
 *    copy each block's minimum statistic (which equals the value for a
 *    local-value variable) directly from the varinfo, no read scheduled.
 *  - otherwise: build a bounding-box selection from offs/ldims and schedule
 *    an ADIOS read for [fromStep, fromStep+nSteps).
 *
 * NOTE(review): readAsJoinedArray is accepted but never used here -- confirm
 * whether joined-array handling was intended.
 * NOTE(review): the selection is deleted immediately after
 * adios_schedule_read(); presumably ADIOS copies the selection contents at
 * schedule time -- confirm against the ADIOS read API. */
void ADIOS1CommonRead::ScheduleReadCommon(const std::string &name, const Dims &offs,
                                          const Dims &ldims, const int fromStep,
                                          const int nSteps,
                                          const bool readAsLocalValue,
                                          const bool readAsJoinedArray,
                                          void *data)
{
    if (readAsLocalValue)
    {
        /* Get all the requested values from metadata now */
        ADIOS_VARINFO *vi = adios_inq_var(m_fh, name.c_str());
        if (vi)
        {
            adios_inq_var_stat(m_fh, vi, 0, 1);
            int elemsize = adios_type_size(vi->type, nullptr);
            /* Skip the blocks belonging to the steps before fromStep. */
            long long blockidx = 0;
            for (int i = 0; i < fromStep; i++)
            {
                blockidx += vi->nblocks[i];
            }
            char *dest = (char *)data;
            /* Copy one value per block for each requested step. */
            for (int i = fromStep; i < fromStep + nSteps; i++)
            {
                for (int j = 0; j < vi->nblocks[i]; j++)
                {
                    memcpy(dest, vi->statistics->blocks->mins[blockidx], elemsize);
                    ++blockidx;
                    dest += elemsize;
                }
            }
            adios_free_varinfo(vi);
        }
    }
    else
    {
        /* Fixed-size scratch arrays: ADIOS supports at most 32 dimensions. */
        uint64_t start[32], count[32];
        for (int i = 0; i < ldims.size(); i++)
        {
            start[i] = (uint64_t)offs[i];
            count[i] = (uint64_t)ldims[i];
        }
        ADIOS_SELECTION *sel = nullptr;
        if (ldims.size() > 0)
        {
            sel = adios_selection_boundingbox(ldims.size(), start, count);
        }
        /* A null selection means "read the whole variable" (scalar case). */
        adios_schedule_read(m_fh, sel, name.c_str(), (int)fromStep, (int)nSteps,
                            data);
        adios_selection_delete(sel);
    }
}
/** Read the scalar field and optionally the attribute into the values
 *  referenced by the pointers.
 *
 *  One element is read per rank: the selection is a single-cell bounding box
 *  located at this rank's grid position (z,y,x in C order).
 *
 *  @param params    thread parameters holding the open ADIOS file handle
 *  @param name      dataset name relative to params.adiosBasePath
 *  @param value     out: the scalar value read for this rank
 *  @param attrName  optional attribute to read from the same dataset
 *  @param attribute out: the attribute value (only when attrName non-empty)
 *  @throws std::runtime_error on dimensionality or type mismatch
 */
void operator()(ThreadParams& params,
                const std::string& name, T_Scalar* value,
                const std::string& attrName = "", T_Attribute* attribute = nullptr)
{
    log<picLog::INPUT_OUTPUT> ("ADIOS: read %1%D scalars: %2%") % simDim % name;
    std::string datasetName = params.adiosBasePath + name;

    ADIOS_VARINFO* varInfo;
    ADIOS_CMD_EXPECT_NONNULL( varInfo = adios_inq_var(params.fp, datasetName.c_str()) );
    /* Validate the on-disk metadata against the compile-time expectations. */
    if(varInfo->ndim != simDim)
        throw std::runtime_error(std::string("Invalid dimensionality for ") + name);
    if(varInfo->type != traits::PICToAdios<T_Scalar>().type)
        throw std::runtime_error(std::string("Invalid type for ") + name);

    DataSpace<simDim> gridPos = Environment<simDim>::get().GridController().getPosition();
    uint64_t start[varInfo->ndim];
    uint64_t count[varInfo->ndim];
    for(int d = 0; d < varInfo->ndim; ++d)
    {
        /* \see adios_define_var: z,y,x in C-order */
        start[d] = gridPos.revert()[d];
        count[d] = 1;   /* exactly one value per rank */
    }
    ADIOS_SELECTION* fSel = adios_selection_boundingbox(varInfo->ndim, start, count);

    // avoid deadlock between not finished pmacc tasks and mpi calls in adios
    __getTransactionEvent().waitForFinished();

    /* specify what we want to read, but start reading at below at `adios_perform_reads` */
    /* magic parameters (0, 1): `from_step` (not used in streams), `nsteps` to read (must be 1 for stream) */
    log<picLog::INPUT_OUTPUT > ("ADIOS: Schedule read skalar %1%)") % datasetName;
    ADIOS_CMD( adios_schedule_read(params.fp, fSel, datasetName.c_str(), 0, 1, (void*)value) );

    /* start a blocking read of all scheduled variables */
    ADIOS_CMD( adios_perform_reads(params.fp, 1) );

    adios_selection_delete(fSel);
    adios_free_varinfo(varInfo);

    if(!attrName.empty())
    {
        log<picLog::INPUT_OUTPUT> ("ADIOS: read attribute %1% for scalars: %2%") % attrName % name;
        *attribute = readAttribute<T_Attribute>(params.fp, datasetName, attrName);
    }
}
int worker(int argc, char* argv[]) { TAU_PROFILE_TIMER(timer, __func__, __FILE__, TAU_USER); TAU_PROFILE_START(timer); my_printf("%d of %d In worker B\n", myrank, commsize); static bool announced = false; /* validate input */ validate_input(argc, argv); my_printf("Worker B will execute until it sees n iterations.\n", iterations); /* ADIOS: These declarations are required to match the generated * gread_/gwrite_ functions. (And those functions are * generated by calling 'gpp.py adios_config.xml') ... * EXCEPT THAT THE generation of Reader code is broken. * So, we will write the reader code manually. */ uint64_t adios_groupsize; uint64_t adios_totalsize; uint64_t adios_handle; void * data = NULL; uint64_t start[2], count[2]; int i, j, steps = 0; int NX = 10; int NY = 1; double t[NX]; double p[NX]; /* ADIOS: Can duplicate, split the world, whatever. * This allows you to have P writers to N files. * With no splits, everyone shares 1 file, but * can write lock-free by using different areas. */ MPI_Comm adios_comm, adios_comm_b_to_c; adios_comm = MPI_COMM_WORLD; //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm); adios_comm_b_to_c = MPI_COMM_WORLD; //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm_b_to_c); enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_FLEXPATH; adios_read_init_method(method, adios_comm, "verbose=3"); if (adios_errno != err_no_error) { fprintf (stderr, "rank %d: Error %d at init: %s\n", myrank, adios_errno, adios_errmsg()); exit(4); } if (send_to_c) { adios_init("adios_config.xml", adios_comm); } /* ADIOS: Set up the adios communications and buffers, open the file. 
*/ ADIOS_FILE *fp; // file handler ADIOS_VARINFO *vi; // information about one variable ADIOS_SELECTION * sel; char adios_filename_a_to_b[256]; char adios_filename_b_to_c[256]; enum ADIOS_LOCKMODE lock_mode = ADIOS_LOCKMODE_NONE; double timeout_sec = 1.0; sprintf(adios_filename_a_to_b, "adios_a_to_b.bp"); sprintf(adios_filename_b_to_c, "adios_b_to_c.bp"); my_printf ("rank %d: Worker B opening file: %s\n", myrank, adios_filename_a_to_b); fp = adios_read_open(adios_filename_a_to_b, method, adios_comm, lock_mode, timeout_sec); if (adios_errno == err_file_not_found) { fprintf (stderr, "rank %d: Stream not found after waiting %d seconds: %s\n", myrank, timeout_sec, adios_errmsg()); exit(1); } else if (adios_errno == err_end_of_stream) { // stream has been gone before we tried to open fprintf (stderr, "rank %d: Stream terminated before open. %s\n", myrank, adios_errmsg()); exit(2); } else if (fp == NULL) { // some other error happened fprintf (stderr, "rank %d: Error %d at opening: %s\n", myrank, adios_errno, adios_errmsg()); exit(3); } else { my_printf("Found file %s\n", adios_filename_a_to_b); my_printf ("File info:\n"); my_printf (" current step: %d\n", fp->current_step); my_printf (" last step: %d\n", fp->last_step); my_printf (" # of variables: %d:\n", fp->nvars); vi = adios_inq_var(fp, "temperature"); adios_inq_var_blockinfo(fp, vi); printf ("ndim = %d\n", vi->ndim); printf ("nsteps = %d\n", vi->nsteps); printf ("dims[%llu][%llu]\n", vi->dims[0], vi->dims[1]); uint64_t slice_size = vi->dims[0]/commsize; if (myrank == commsize-1) { slice_size = slice_size + vi->dims[0]%commsize; } start[0] = myrank * slice_size; count[0] = slice_size; start[1] = 0; count[1] = vi->dims[1]; data = malloc (slice_size * vi->dims[1] * 8); /* Processing loop over the steps (we are already in the first one) */ while (adios_errno != err_end_of_stream && steps < iterations) { steps++; // steps start counting from 1 TAU_PROFILE_TIMER(adios_recv_timer, "ADIOS recv", __FILE__, TAU_USER); 
TAU_PROFILE_START(adios_recv_timer); sel = adios_selection_boundingbox (vi->ndim, start, count); adios_schedule_read (fp, sel, "temperature", 0, 1, data); adios_perform_reads (fp, 1); if (myrank == 0) printf ("--------- B Step: %d --------------------------------\n", fp->current_step); #if 0 printf("B rank=%d: [0:%lld,0:%lld] = [", myrank, vi->dims[0], vi->dims[1]); for (i = 0; i < slice_size; i++) { printf (" ["); for (j = 0; j < vi->dims[1]; j++) { printf ("%g ", *((double *)data + i * vi->dims[1] + j)); } printf ("]"); } printf (" ]\n\n"); #endif // advance to 1) next available step with 2) blocking wait adios_advance_step (fp, 0, timeout_sec); if (adios_errno == err_step_notready) { printf ("B rank %d: No new step arrived within the timeout. Quit. %s\n", myrank, adios_errmsg()); break; // quit while loop } TAU_PROFILE_STOP(adios_recv_timer); /* Do some exchanges with neighbors */ //do_neighbor_exchange(); /* "Compute" */ compute(steps); for (i = 0; i < NX; i++) { t[i] = steps*100.0 + myrank*NX + i; } for (i = 0; i < NY; i++) { p[i] = steps*1000.0 + myrank*NY + i; } if (send_to_c) { TAU_PROFILE_TIMER(adios_send_timer, "ADIOS send", __FILE__, TAU_USER); TAU_PROFILE_START(adios_send_timer); /* ADIOS: write to the next application in the workflow */ if (steps == 0) { adios_open(&adios_handle, "b_to_c", adios_filename_b_to_c, "w", adios_comm_b_to_c); } else { adios_open(&adios_handle, "b_to_c", adios_filename_b_to_c, "a", adios_comm_b_to_c); } /* ADIOS: Actually write the data out. * Yes, this is the recommended method, and this way, changes in * configuration with the .XML file will, even in the worst-case * scenario, merely require running 'gpp.py adios_config.xml' * and typing 'make'. */ #include "gwrite_b_to_c.ch" /* ADIOS: Close out the file completely and finalize. * If MPI is being used, this must happen before MPI_Finalize(). 
*/ adios_close(adios_handle); TAU_PROFILE_STOP(adios_send_timer); #if 1 if (!announced) { SOS_val foo; foo.i_val = NX; SOS_pack(example_pub, "NX", SOS_VAL_TYPE_INT, foo); SOS_announce(example_pub); SOS_publish(example_pub); announced = true; } #endif } MPI_Barrier(adios_comm_b_to_c); } MPI_Barrier(MPI_COMM_WORLD); adios_read_close(fp); /* ADIOS: Close out the file completely and finalize. * If MPI is being used, this must happen before MPI_Finalize(). */ adios_read_finalize_method(method); } if (send_to_c) { adios_finalize(myrank); } free(data); //MPI_Comm_free(&adios_comm); //MPI_Comm_free(&adios_comm_b_to_c); TAU_PROFILE_STOP(timer); /* exit */ return 0; }
int main (int argc, char ** argv) { int rank, size, i, j, npl, token; MPI_Comm comm = MPI_COMM_WORLD; MPI_Status status; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; ADIOS_SELECTION * sel; void * data = NULL; uint64_t start[1], count[1]; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); adios_read_init_method (method, comm, "verbose=3"); ADIOS_FILE * f = adios_read_open ("adios_global_no_xml.bp", method, comm, ADIOS_LOCKMODE_NONE, 0); if (f == NULL) { printf ("%s\n", adios_errmsg()); return -1; } ADIOS_VARINFO * v = adios_inq_var (f, "temperature"); /* Using less readers to read the global array back, i.e., non-uniform */ uint64_t slice_size = v->dims[0]/size; start[0] = slice_size * rank; if (rank == size-1) /* last rank may read more lines */ slice_size = slice_size + v->dims[0]%size; count[0] = slice_size; data = malloc (slice_size * sizeof (double)); if (data == NULL) { fprintf (stderr, "malloc failed.\n"); return -1; } /* Read a subset of the temperature array */ sel = adios_selection_boundingbox (v->ndim, start, count); adios_schedule_read (f, sel, "temperature", 0, 1, data); adios_perform_reads (f, 1); if (rank > 0) { MPI_Recv (&token, 1, MPI_INT, rank-1, 0, comm, &status); } printf (" ======== Rank %d ========== \n", rank); npl = 10; for (i = 0; i < slice_size; i+=npl) { printf ("[%4.4" PRIu64 "] ", rank*slice_size+i); for (j= 0; j < npl; j++) { printf (" %6.6g", * ((double *)data + i + j)); } printf ("\n"); } fflush(stdout); sleep(1); if (rank < size-1) { MPI_Send (&token, 1, MPI_INT, rank+1, 0, comm); } free (data); adios_selection_delete (sel); adios_free_varinfo (v); adios_read_close (f); MPI_Barrier (comm); adios_read_finalize_method (method); MPI_Finalize (); return 0; }
/* Reader side of the 1D_arr_global_noxml transport test.
 * Each rank reads its writer's NX-element slice of "var_1d_array" from the
 * stream, regenerates the expected values with gen_1D_array(), and compares.
 * Exit code 0 on pass, 1 on any ADIOS error or data mismatch.
 * Cleanup is goto-based: 'just_clean' frees buffers, 'close_adios' closes
 * the reader; error paths set 'diag' before jumping. */
int main (int argc, char **argv){
	int rank =0, size =0;
	int NX = 0;
	double *t = NULL;
	// this is an array we expect as a reference array
	double *t_ref = NULL;
	MPI_Comm comm = MPI_COMM_WORLD;
	diag_t diag = DIAG_OK;  // to store the diagnostic information
	struct test_info test_result = { TEST_PASSED, "1D_arr_global_noxml" };
	struct err_counts err = { 0, 0};
	struct adios_tsprt_opts adios_opts;

	GET_ENTRY_OPTIONS(adios_opts, "Runs readers. It is recommended to run as many readers as writers.");

	// adios read initialization
	MPI_Init( &argc, &argv);
	MPI_Comm_rank (comm, &rank);

	// depending on the method
	SET_ERROR_IF_NOT_ZERO(adios_read_init_method(adios_opts.method, comm, adios_opts.adios_options), err.adios);
	RET_IF_ERROR(err.adios, rank);

	// I will be working with streams so the lock mode is necessary,
	// return immediately if the stream unavailable
	ADIOS_FILE *adios_handle = adios_read_open(FILE_NAME, adios_opts.method, comm, ADIOS_LOCKMODE_NONE, 0.0);
	if ( !adios_handle){
		p_error("Quitting ... (%d) %s\n", adios_errno, adios_errmsg());
		return DIAG_ERR;
	}

	// define portions of data how they will be read
	ADIOS_SELECTION *sel = NULL;
	ADIOS_VARINFO *avi = NULL;

	// for storing the variables
	char buf[STR_BUFFER_SIZE];
	int step = 0;

	// read how many processors wrote that array
	avi = adios_inq_var (adios_handle, "size");
	if (!avi){
		p_error("rank %d: Quitting ... (%d) %s\n", rank, adios_errno, adios_errmsg());
		diag = DIAG_ERR;
		goto close_adios;
	}
	size = *((int*)avi->value);
	adios_free_varinfo(avi);
	avi = NULL;

	// if I run the more readers than writers; just release
	// the excessive readers
	if (rank >= size){
		p_info("rank %d: I am an excessive rank. Nothing to read ...\n", rank);
		diag = DIAG_OK;
		goto close_adios;
	}

	// read the size of the array
	avi = adios_inq_var (adios_handle, "NX");
	if (!avi){
		p_error("rank %d: Quitting ... (%d) %s\n", rank, adios_errno, adios_errmsg());
		diag = DIAG_ERR;
		goto close_adios;
	}

	// I expect a scalar that will tell me the size of an array
	assert(0 == avi->ndim);
	assert(adios_integer == avi->type);
	NX = *((int*)avi->value);
	// I don't need that variable any more
	adios_free_varinfo(avi);
	assert(NX_DIM == NX);
	avi = NULL;

	// this will define the slice that we want to read; each rank should
	// read its own slice written by a corresponding writer rank
	uint64_t count[1] = { NX };
	uint64_t start[1] = { 0 };
	start[0] = rank*NX;

	sel = adios_selection_boundingbox(1,start, count);
	if( !sel ){
		p_error("rank %d: Quitting ... (%d) %s\n", rank, adios_errno, adios_errmsg());
		diag = DIAG_ERR;
		goto close_adios;
	}

	// make the reference array with reference values I expect to get
	t_ref = calloc(NX, sizeof(double));
	if (gen_1D_array(t_ref, NX, rank) == DIAG_ERR){
		p_error("Generating 1D array. Quitting ...\n");
		diag = DIAG_ERR;
		goto close_adios;
	}

	// allocate the memory for the actual array to be read
	// NOTE(review): calloc result is passed unchecked to the read below
	t = calloc(NX, sizeof(double));

	if (adios_schedule_read(adios_handle, sel, "var_1d_array",0,1,t) != 0){
		p_error("rank %d: Quitting ...(%d) %s\n", rank, adios_errno, adios_errmsg());
		diag = DIAG_ERR;
		goto just_clean;
	}

	// not sure if this assumption is correct; difficult to find in the ADIOS sources
	if (adios_perform_reads(adios_handle, 1) != 0){
		p_error("rank %d: Quitting ...(%d) %s\n", rank, adios_errno, adios_errmsg());
		diag = DIAG_ERR;
		goto just_clean;
	}

	// 'buf' is formatted but never printed; 'step' stays 0 in this test
	sprintf(buf, "Rank %d: var_1d_array: step %d: t: ", rank, step);

	// compare the read data element-by-element against the reference
	int i = 0;
	for(i=0; i < NX; ++i){
		if( t[i] != t_ref[i] ){
			p_test_failed("%s: rank %d: for t[%d] (expected %.1f, got %.1f)\n", test_result.name, rank, i, t_ref[i], t[i] );
			test_result.result = TEST_FAILED;
			break;
		}
	}

	if (TEST_PASSED == test_result.result)
		p_test_passed("%s: rank %d\n", test_result.name, rank);

just_clean:
	// clean everything
	adios_selection_delete(sel);
	sel = NULL;
	free(t);
	t = NULL;
	free(t_ref);
	t_ref = NULL;

close_adios:
	CLOSE_ADIOS_READER(adios_handle, adios_opts.method);

	if ((DIAG_OK == diag) && (TEST_PASSED == test_result.result)) {
		return 0;
	} else {
		return 1;
	}
}
int main (int argc, char ** argv) { int i, j, datasize; MPI_Comm comm = MPI_COMM_WORLD; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; ADIOS_SELECTION * sel1; double * data = NULL; uint64_t start[2], count[2]; MPI_Init (&argc, &argv); #ifdef WITH_NCSU_TIMER timer_init(); #endif adios_read_init_method (method, comm, NULL); ADIOS_FILE * f = adios_read_open_file ("adios_global.bp", method, comm); ADIOS_VARINFO * varinfo = adios_inq_var (f, "temperature"); if (varinfo) { int nranks; assert(varinfo->ndim == 2); nranks = varinfo->dims[0]; assert(nranks % 4 == 0); assert(varinfo->dims[1] == 10); datasize = (nranks / 2) * varinfo->dims[1] * sizeof(double); data = malloc (datasize); start[0] = nranks / 4; start[1] = 2; count[0] = nranks / 2; count[1] = 6; sel1 = adios_selection_boundingbox (varinfo->ndim, start, count); adios_schedule_read (f, sel1, "temperature", 0, 1, data); adios_perform_reads (f, 1); printf("Subvolume at (%" PRIu64 ",%" PRIu64 ") of size (%" PRIu64 ",%" PRIu64 "):\n", start[0], start[1], count[0], count[1]); for (i = 0; i < count[0]; i++) { printf("[ "); for (j = 0; j < count[1]; j++) { printf("%.0lf ", data[i * count[1] + j]); } printf("]\n"); } adios_selection_delete (sel1); } adios_free_varinfo (varinfo); adios_read_close (f); adios_read_finalize_method (ADIOS_READ_METHOD_BP); #ifdef WITH_NCSU_TIMER printf("[TIMERS] "); timer_result_t *results = timer_get_results_sorted(); for (i = 0; i < timer_get_num_timers(); i++) { printf("%s: %0.4lf ", results[i].name, results[i].time); } printf("\n"); free(results); #endif #ifdef WITH_NCSU_TIMER timer_finalize(); #endif MPI_Finalize (); return 0; }
int main (int argc, char ** argv) { char filename [256] = "stream.bp"; int rank, size; int NX, NY; int len, off; double *t = NULL; MPI_Comm comm = MPI_COMM_WORLD; int64_t adios_handle; uint64_t adios_groupsize, adios_totalsize; uint64_t start[2], count[2]; ADIOS_SELECTION *sel; int steps = 0; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); // ADIOS read init adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3"); ADIOS_FILE* fp = adios_read_open_file ("kstar.bp", ADIOS_READ_METHOD_BP, comm); assert(fp != NULL); ADIOS_VARINFO* nx_info = adios_inq_var( fp, "N"); ADIOS_VARINFO* ny_info = adios_inq_var( fp, "L"); NX = *((int *)nx_info->value); NY= *((int*)ny_info->value); len = NX / size; off = len * rank; if (rank == size-1) len = len + NX % size; printf("\trank=%d: NX,NY,len,off = %d\t%d\t%d\t%d\n", rank, NX, NY, len, off); assert(len > 0); t = (double *) malloc(sizeof(double) * len * NY); memset(t, '\0', sizeof(double) * len * NY); assert(t != NULL); start[0] = off; start[1] = 0; count[0] = len; count[1] = NY; sel = adios_selection_boundingbox (2, start, count); // ADIOS write init adios_init ("adios.xml", comm); remove (filename); //int ii; //for(ii = 0; ii<10; ii++){ // for (i = 0; i < len * NY; i++) // t[i] = ii*1000 + rank; while(adios_errno != err_end_of_stream && adios_errno != err_step_notready) { steps++; // Reading adios_schedule_read (fp, sel, "var", 0, 1, t); adios_perform_reads (fp, 1); // Debugging //for (i = 0; i < len*NY; i++) t[i] = off * NY + i; printf("step=%d\trank=%d\t[%d,%d]\n", steps, rank, len, NY); // Writing adios_open (&adios_handle, "writer", filename, "a", comm); adios_groupsize = 4*4 + 8*len*NY; adios_group_size (adios_handle, adios_groupsize, &adios_totalsize); adios_write (adios_handle, "NX", &NX); adios_write (adios_handle, "NY", &NY); adios_write (adios_handle, "len", &len); adios_write (adios_handle, "off", &off); adios_write (adios_handle, "var_2d_array", t); adios_close (adios_handle); 
// Advance MPI_Barrier (comm); adios_advance_step(fp, 0, TIMEOUT_SEC); } free(t); MPI_Barrier (comm); adios_read_close(fp); if (rank==0) printf ("We have processed %d steps\n", steps); MPI_Barrier (comm); adios_read_finalize_method(ADIOS_READ_METHOD_BP); adios_finalize (rank); MPI_Finalize (); return 0; }
int main(int argc, char ** argv) { int rank, size, varid, numvars; int bins, step, mod; char *filename, *in_stream, *data_var_name; MPI_Comm comm = MPI_COMM_WORLD; enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_FLEXPATH; //enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP; ADIOS_SELECTION * global_range_select; double *data; uint64_t tstep, global_size, mysize, mystart, sz; MPI_Init (&argc, &argv); MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &size); /* Command line parsing */ if (rank == 0 && argc < 4) { fprintf(stderr, "\nHistogram usage: <exec> input-stream-name num-bins" " arr1 [arr2] [arr3] [...]\n" "\t where arr1, arr2, arr3 ... are the names of the arrays to be analyzed.\n"); MPI_Abort(comm, -1); } MPI_Barrier(comm); in_stream = argv[1]; //Parse cmd line bins = atoi(argv[2]); numvars = argc - 3; const char *vars[numvars]; for (varid=0; varid < numvars; varid++) { vars[varid] = argv[varid + 3]; } /* Adios open and init */ adios_read_init_method (method, comm, "verbose=1"); ADIOS_FILE * f = adios_read_open (in_stream, method, comm, ADIOS_LOCKMODE_ALL, -1); step = 0; //not used now while (adios_errno != err_end_of_stream){ //resource monitor /*loop over different arrays inside stream*/ for (varid = 0; varid < numvars; varid++){ #ifdef ENABLE_MONITOR //double t1 = wfgettimeofday(); lib_mem_init(); ind_timer_start(0, "whole timestep"); #endif //Init variables.... 
global_size = 0; tstep = 0; mod = 0; mysize = 0; mystart = 0; adios_schedule_read (f, NULL, "ntimestep", 0, 1, &tstep); adios_perform_reads (f, 1); ADIOS_VARINFO * glob_info = adios_inq_var (f, vars[varid]); global_size = glob_info->dims[0]; //printf("[DEBUG] global_size = %" PRIu64 " ntimestep = %" PRIu64 "\n", // global_size, tstep); //printf("[HIST%d] received data for timestep %" PRIu64 " with ndim: %d and globalsize:%" // PRIu64 " \n", rank, tstep, ndim, global_size); //sleep(800); //debug //Array slice computation mod = global_size % size;//size = MPI size if (mod == 0){ mysize = global_size / size; mystart = mysize * rank; } else { mysize = global_size / (size); if (rank < mod){ mysize++; mystart = mysize * rank; } else { mystart = (mod * (mysize + 1)) + ((rank - mod) * mysize); } } #ifdef ENABLE_MONITOR nohandler_mem(rank); #endif //printf("[HISTO%d]: mysize = %" PRIu64" mystart = %" PRIu64 "\n", rank, mysize, mystart); //debug //if (step == 0) sleep(800); uint64_t starts[] = {mystart}; uint64_t counts[] = {mysize}; global_range_select = adios_selection_boundingbox (1, starts, counts); //Allocate space for arrays uint64_t msize = ((uint64_t) sizeof(double) * mysize); //printf("[DEBUG] mysize = %" PRIu64 " msize= %" PRIu64" \n", mysize, msize); //data = (double *) malloc(sizeof(double) * mysize); data = new double[mysize]; if (data == NULL){ //printf("DEBUG: malloc returned NULL, size was %d\n", msize); } else { if (rank == 0) printf("[HIST0] DEBUG: malloc successful, size was %d\n", mysize); } //memset (data, 0, sizeof(double) * mysize); //Read data adios_schedule_read (f, global_range_select, vars[varid], 0, 1, data); adios_perform_reads (f, 1); #ifdef ENABLE_MONITOR nohandler_mem(rank); #endif //printf("PERFORM_READS success of variable: %s\n", vars[varid]); /* Data check if (step == 4) { FILE *fp; char *log; asprintf(&log, "histo-input%d-%d.log", step, rank); fp = fopen(log, "w"); fprintf(fp, "timestep: %" PRIu64 " mysize: %"PRIu64 "\n", tstep, mysize); 
for (i=0; i<(int)mysize; i++){ fprintf(fp, "%lf\n", data[i]); } fclose(fp); sleep(800); } */ // find max and min sz = 0; sz = mysize; double min = data[0]; double max = data[0]; for (uint64_t i = 1; i < sz; ++i) { if (data[i] > max) max = data[i]; if (data[i] < min) min = data[i]; }//local max, min found. //local data should just use shared mem. double g_min, g_max; // Find the global max/min MPI_Allreduce (&min, &g_min, 1, MPI_DOUBLE, MPI_MIN, comm); MPI_Allreduce (&max, &g_max, 1, MPI_DOUBLE, MPI_MAX, comm); //printf("[HIST%d] glob-min: %f, glob-max: %f\n", rank, g_min, g_max); nohandler_mem(rank); double width = (g_max - g_min)/bins; std::vector<uint64_t> hist(bins); for (uint64_t i = 0; i < sz; ++i)//fill local bins { //printf("[HISTO%d] local filling adding index %" PRIu64 "\n", rank, i); int idx = int((data[i] - g_min)/width);//discover index if (idx == bins) // we hit the max --idx; //printf("[%d]: %f -> %d\n", rank, data[i], idx); ++hist[idx]; } delete[] data; // Global reduce histograms std::vector<uint64_t> g_hist(bins); MPI_Reduce(&hist[0], &g_hist[0], bins, MPI_UINT64_T, MPI_SUM, 0, comm); //debug //printf("[Completed histogram routine]\n"); if (rank == 0) //print histogram to file { FILE *fp; const char *log = "histograms.log"; fp = fopen(log, "a"); fprintf(fp, "Histogram for %s, timestep %" PRIu64"\n", vars[varid], tstep); for (int i = 0; i < bins; ++i) fprintf(fp, " %f-%f: %" PRIu64 "\n", g_min + i*width, g_min + (i+1)*width, g_hist[i]); fclose (fp); } #ifdef ENABLE_MONITOR nohandler_mem(rank); #endif if (rank == 0) //print histogram to terminal { printf("Histogram for %s, timestep %" PRIu64"\n", vars[varid], tstep); for (int i = 0; i < bins; ++i) printf(" %f-%f: %" PRIu64 "\n", g_min + i*width, g_min + (i+1)*width, g_hist[i]); } //resource monitor #ifdef ENABLE_MONITOR //double t2 = wfgettimeofday(); ind_timer_end(0); char monitor_title[40]; sprintf(monitor_title, "histogram-%s", vars[varid]); monitor_out (rank, size, tstep, msize, t1, t2, comm, 
monitor_title); #endif } //end of read + analysis for 3 variables adios_release_step(f); //delete[] data; if (rank == 0) printf("[HIST%d] read and wrote data for timestep %" PRIu64 "\n", rank, tstep); step++; adios_advance_step(f, 0, -1); /* if (step == 6){ double t1 = wfgettimeofday(); FILE *tfp; tfp = fopen("time.log", "a"); fprintf(tfp, "rank %d histogram end time: %f\n", rank, t1); fclose(tfp); } */ }//end of adios stream while loop if (rank == 0) printf("[HIST%d] out of read loop\n", rank); /* performance measurement */ /* if (rank == 0){ double t3 = wfgettimeofday(); FILE *tfp; tfp = fopen("time.log", "a"); fprintf(tfp, "master histogram end time: %f\n", t3); fclose(tfp); } */ #ifdef ENABLE_MONITOR outer_timer_end(rank, "histogram"); #endif adios_read_close(f); adios_read_finalize_method(method); MPI_Finalize(); return 0; }
/*
 * Read back "temperature" from global_array_C.bp with a non-uniform
 * decomposition: each rank reads dims[0]/size rows, the last rank also
 * takes the remainder. Logs the slice values via the adios logger.
 *
 * Returns 0 on success, -1 if the file cannot be opened or malloc fails.
 *
 * Fixes vs. original:
 *  - log_error() was called as log_error(stderr, "malloc failed.\n"),
 *    passing a FILE* where the format string belongs (every other call
 *    site passes the format first) — corrected.
 *  - the bounding-box selection and the ADIOS_VARINFO were leaked.
 */
int main (int argc, char ** argv)
{
    int rank, size, i, j;
    MPI_Comm comm = MPI_COMM_WORLD;
    enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP;
    ADIOS_SELECTION * sel;
    void * data = NULL;
    uint64_t start[2], count[2];

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    adios_read_init_method (method, comm, "verbose=4");
    adios_logger_open ("log_read_C", rank);

    ADIOS_FILE * f = adios_read_open ("global_array_C.bp", method, comm,
                                      ADIOS_LOCKMODE_NONE, 0);
    if (f == NULL)
    {
        log_error ("%s\n", adios_errmsg());
        return -1;
    }

    ADIOS_VARINFO * v = adios_inq_var (f, "temperature");

    /* Using less readers to read the global array back, i.e., non-uniform */
    uint64_t slice_size = v->dims[0]/size;
    start[0] = slice_size * rank;
    if (rank == size-1) /* last rank may read more lines */
        slice_size = slice_size + v->dims[0]%size;
    count[0] = slice_size;

    start[1] = 0;
    count[1] = v->dims[1];

    data = malloc (slice_size * v->dims[1] * sizeof (double));
    if (data == NULL)
    {
        /* fix: format string must come first — stderr was passed here */
        log_error ("malloc failed.\n");
        return -1;
    }

    /* Read a subset of the temperature array */
    sel = adios_selection_boundingbox (v->ndim, start, count);
    adios_schedule_read (f, sel, "temperature", 0, 1, data);
    adios_perform_reads (f, 1);
    adios_selection_delete (sel);   /* fix: selection leaked in original */

    for (i = 0; i < slice_size; i++) {
        log_test ("rank %d: [%lld,%d:%lld]", rank, start[0]+i, 0, slice_size);
        for (j = 0; j < v->dims[1]; j++)
            log_test (" %6.6g", * ((double *)data + i * v->dims[1] + j));
        log_test ("\n");
    }

    free (data);
    adios_free_varinfo (v);         /* fix: varinfo leaked in original */

    adios_read_close (f);
    MPI_Barrier (comm);
    adios_read_finalize_method (method);
    adios_logger_close();
    MPI_Finalize ();
    return 0;
}
/*
 * Streaming read of the 2D "temperature" array from adios_globaltime.bp.
 * Each rank reads a horizontal slice; the loop processes every available
 * step, blocking up to timeout_sec for the next one, until the stream
 * ends or the timeout expires.
 *
 * Returns 0 on success, otherwise the adios error code (or -1 on OOM).
 *
 * Fixes vs. original:
 *  - buffer sized with sizeof(double) instead of the magic constant 8
 *  - malloc result checked before the buffer is read into
 *  - the bounding-box selection was leaked once per step — now deleted
 *  - the ADIOS_VARINFO was never freed
 */
int main (int argc, char ** argv)
{
    int rank, size, i, j;
    MPI_Comm comm = MPI_COMM_WORLD;
    ADIOS_FILE * f;
    ADIOS_VARINFO * v;
    ADIOS_SELECTION * sel;
    int steps = 0;
    int retval = 0;
    float timeout_sec = 1.0;
    void * data = NULL;
    uint64_t start[2], count[2];

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);
    adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3");

    f = adios_read_open ("adios_globaltime.bp", ADIOS_READ_METHOD_BP,
                         comm, ADIOS_LOCKMODE_NONE, timeout_sec);
    if (adios_errno == err_file_not_found)
    {
        printf ("rank %d: Stream not found after waiting %f seconds: %s\n",
                rank, timeout_sec, adios_errmsg());
        retval = adios_errno;
    }
    else if (adios_errno == err_end_of_stream)
    {
        printf ("rank %d: Stream terminated before open. %s\n",
                rank, adios_errmsg());
        retval = adios_errno;
    }
    else if (f == NULL)
    {
        printf ("rank %d: Error at opening stream: %s\n", rank, adios_errmsg());
        retval = adios_errno;
    }
    else
    {
        /* process file here... */
        v = adios_inq_var (f, "temperature");
        adios_inq_var_blockinfo (f, v);

        printf ("ndim = %d\n",  v->ndim);
        printf ("dims[%llu][%llu]\n",  v->dims[0], v->dims[1]);

        /* Non-uniform decomposition: last rank absorbs the remainder rows. */
        uint64_t slice_size = v->dims[0]/size;
        if (rank == size-1)
            slice_size = slice_size + v->dims[0]%size;

        start[0] = rank * slice_size;
        count[0] = slice_size;
        start[1] = 0;
        count[1] = v->dims[1];

        /* fix: sizeof(double) instead of a hard-coded 8, and check the result */
        data = malloc (slice_size * v->dims[1] * sizeof (double));
        if (data == NULL)
        {
            printf ("rank %d: malloc failed.\n", rank);
            retval = -1;
        }
        else
        {
            /* Processing loop over the steps (we are already in the first one) */
            while (adios_errno != err_end_of_stream) {
                steps++; // steps start counting from 1

                sel = adios_selection_boundingbox (v->ndim, start, count);
                adios_schedule_read (f, sel, "temperature", 0, 1, data);
                adios_perform_reads (f, 1);
                adios_selection_delete (sel);   /* fix: leaked every step */

                if (rank == 0)
                    printf ("--------- Step: %d --------------------------------\n",
                            f->current_step);

                printf("rank=%d: [0:%lld,0:%lld] = [", rank,
                       v->dims[0], v->dims[1]);
                for (i = 0; i < slice_size; i++) {
                    printf (" [");
                    for (j = 0; j < v->dims[1]; j++) {
                        printf ("%g ", *((double *)data + i * v->dims[1] + j));
                    }
                    printf ("]");
                }
                printf (" ]\n\n");

                // advance to 1) next available step with 2) blocking wait
                adios_advance_step (f, 0, timeout_sec);
                if (adios_errno == err_step_notready)
                {
                    printf ("rank %d: No new step arrived within the timeout. Quit. %s\n",
                            rank, adios_errmsg());
                    break; // quit while loop
                }
            }
        }

        adios_free_varinfo (v);   /* fix: varinfo leaked in original */
        adios_read_close (f);
    }

    if (rank==0)
        printf ("We have processed %d steps\n", steps);

    adios_read_finalize_method (ADIOS_READ_METHOD_BP);
    free (data);   /* free(NULL) is a no-op when open/malloc failed */
    MPI_Finalize ();
    return retval;
}
/*
 * Read two consecutive timesteps of "temperature" from adios_globaltime.bp
 * (each rank starts at timestep rank % 13) and print them in rank order,
 * serialized by passing an MPI token around the ring.
 *
 * Returns 0 on success, -1 on open/allocation failure.
 *
 * Fixes vs. original:
 *  - `token` was sent uninitialized by rank 0 (indeterminate value)
 *  - the separator printf() passed a stray `rank` argument with no
 *    matching conversion specifier
 *  - the bounding-box selection was leaked
 *  - removed the unused `filename` buffer
 */
int main (int argc, char ** argv)
{
    int rank, size, i, j, k, token;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Status status;
    enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_BP;
    ADIOS_SELECTION * sel;
    void * data = NULL;
    uint64_t start[3], count[3], step = 0;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);
    token = 0;   /* fix: rank 0 sends this without ever receiving it */
    adios_read_init_method (method, comm, "verbose=3");

    /* adios_read_open_file() allows for seeing all timesteps in the file */
    ADIOS_FILE * f = adios_read_open_file ("adios_globaltime.bp", method, comm);
    if (f == NULL)
    {
        printf ("%s\n", adios_errmsg());
        return -1;
    }

    ADIOS_VARINFO * v = adios_inq_var (f, "temperature");

    // read in two timesteps
    data = malloc (2 * v->dims[0] * v->dims[1] * sizeof (double));
    if (data == NULL)
    {
        fprintf (stderr, "malloc failed.\n");
        return -1;
    }

    // read in timestep 'rank' (up to 12)
    step = rank % 13;

    start[0] = 0;
    count[0] = v->dims[0];
    start[1] = 0;
    count[1] = v->dims[1];

    /* Read a subset of the temperature array */
    sel = adios_selection_boundingbox (v->ndim, start, count);
    /* 2 steps from 'step' */
    adios_schedule_read (f, sel, "temperature", step, 2, data);
    adios_perform_reads (f, 1);
    adios_selection_delete (sel);   /* fix: selection leaked in original */

    if (rank == 0)
        printf ("Array size of temperature [0:%lld,0:%lld]\n",
                v->dims[0], v->dims[1]);

    /* Token ring: wait for the previous rank before printing. */
    if (rank > 0) {
        MPI_Recv (&token, 1, MPI_INT, rank-1, 0, comm, &status);
    }

    /* fix: stray `rank` argument removed (no specifier consumed it) */
    printf("------------------------------------------------\n");
    printf("rank=%d: \n", rank);
    for (i = 0; i < 2; i++) {
        printf ("step %lld = [\n", step+i);
        for (j = 0; j < v->dims[0]; j++) {
            printf (" [");
            for (k = 0; k < v->dims[1]; k++) {
                printf ("%g ", ((double *)data) [ i * v->dims[0] * v->dims[1]
                                                  + j * v->dims[1] + k]);
            }
            printf ("]\n");
        }
        printf ("]\n");
    }
    printf ("\n");

    /* Pass the token on so the next rank may print. */
    if (rank < size-1) {
        MPI_Send (&token, 1, MPI_INT, rank+1, 0, comm);
    }

    free (data);
    adios_free_varinfo (v);
    adios_read_close (f);
    MPI_Barrier (comm);
    adios_read_finalize_method (method);
    MPI_Finalize ();
    return 0;
}
/*
 * FLEXPATH streaming reader: opens the "arrays" stream, and for every step
 * reads the whole 2D "var_2d_array" (bounding box) plus the scalar
 * "test_scalar" (writeblock 0), printing both.
 *
 * Fixes vs. original: every loop iteration leaked the data buffer `t`,
 * the bounding-box selection, and all four ADIOS_VARINFO handles — they
 * are now released at the end of each step.
 */
int main (int argc, char ** argv)
{
    int rank, j;
    int NX, NY;
    double *t;
    MPI_Comm comm = MPI_COMM_WORLD;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);

    adios_read_init_method(ADIOS_READ_METHOD_FLEXPATH, comm, "");

    ADIOS_SELECTION *global_range_select;

    /* Stack-allocated selection for the scalar: writeblock 0. */
    ADIOS_SELECTION scalar_block_select;
    scalar_block_select.type = ADIOS_SELECTION_WRITEBLOCK;
    scalar_block_select.u.block.index = 0;

    /* schedule_read of a scalar. */
    int test_scalar = -1;
    ADIOS_FILE* afile = adios_read_open("arrays",
                                        ADIOS_READ_METHOD_FLEXPATH,
                                        comm,
                                        ADIOS_LOCKMODE_NONE, 0.0);
    int i;
    for(i=0; i<afile->nvars; i++){
        printf("var: %s\n", afile->var_namelist[i]);
    }

    int ii = 0;
    while(adios_errno != err_end_of_stream){
        /* get a bounding box - rank 0 for now*/
        ADIOS_VARINFO *nx_info = adios_inq_var( afile, "/scalar/dim/NX");
        ADIOS_VARINFO *ny_info = adios_inq_var( afile, "/scalar/dim/NY");
        ADIOS_VARINFO *size_info = adios_inq_var( afile, "size");
        ADIOS_VARINFO *arry = adios_inq_var( afile, "var_2d_array");

        int nx_val = *((int*)nx_info->value);
        int ny_val = *((int*)ny_info->value);
        int size_val = *((int*)size_info->value);
        printf("nx: %d, ny: %d, size: %d\n", nx_val, ny_val, size_val);

        uint64_t xcount = arry->dims[0];
        uint64_t ycount = arry->dims[1];

        uint64_t starts[] = {0,0};
        uint64_t counts[] = {xcount, ycount};

        global_range_select = adios_selection_boundingbox(2, starts, counts);

        int nelem = xcount*ycount;

        if(nx_info->value) {
            NX = *((int *)nx_info->value);
        }
        if(ny_info->value){
            NY= *((int*)ny_info->value);
        }

        if(rank == 0){
            int n;
            printf("dims: [ ");
            for(n=0; n<arry->ndim; n++){
                printf("%d ", (int)arry->dims[n]);
            }
            printf("]\n");
        }

        /* Allocate space for the arrays */
        int arr_size = sizeof(double) * nelem;
        t = (double *) malloc (arr_size);
        memset(t, 0, arr_size);

        /* Read the arrays */
        adios_schedule_read (afile,
                             global_range_select,
                             "var_2d_array", 0, 1, t);
        adios_schedule_read (afile,
                             &scalar_block_select,
                             "test_scalar", 0, 1, &test_scalar);
        adios_perform_reads (afile, 1);

        printf("Rank=%d: test_scalar: %d step: %d, t[0,5+x] = [", rank,
               test_scalar, ii);
        for(j=0; j<nelem; j++) {
            printf(", %6.2f", t[j]);
        }
        printf("]\n\n");

        /* fix: release per-step resources — all of these leaked on every
         * iteration in the original */
        free(t);
        adios_selection_delete(global_range_select);
        adios_free_varinfo(nx_info);
        adios_free_varinfo(ny_info);
        adios_free_varinfo(size_info);
        adios_free_varinfo(arry);

        adios_release_step(afile);
        adios_advance_step(afile, 0, 30);
        ii++;
    }
    // adios_read_close(afile);
    adios_read_finalize_method(ADIOS_READ_METHOD_FLEXPATH);
    MPI_Finalize ();
    return 0;
}
/*
 * For every block of `varname` in the already-open file `fp`, read the
 * block three ways — bounding box, per-timestep writeblock, and absolute
 * writeblock — and assert (collectively) that all three buffers match.
 * Blocks are distributed round-robin over ranks. Scalars are skipped.
 *
 * Fix vs. original: the three malloc() results were memset() BEFORE the
 * MPI_Assert that checks they are non-NULL — undefined behavior if any
 * allocation failed. The check now precedes the writes.
 */
static void test_file_mode_reads_on_var(ADIOS_FILE *fp, const char *bp_filename, const char *varname)
{
    int i;
    ADIOS_VARINFO *varinfo = adios_inq_var(fp, varname);
    MPI_Assert(COMM, varinfo);

    if (varinfo->value != NULL) {
        /* Scalar variable: nothing to read block-wise. */
        //if (rank == 0) fprintf(stderr, "(skipping scalar variable '%s')\n", varname);
        adios_free_varinfo(varinfo);
        return;
    }

    fprintf(stderr, "[rank %d/%d] Starting file-mode writeblock reads on %s:/%s\n",
            rank, size, bp_filename, varname);

    adios_inq_var_blockinfo(fp, varinfo);
    MPI_Assert(COMM, varinfo->blockinfo);

    const enum ADIOS_DATATYPES datatype = varinfo->type;
    const int datatypesize = adios_get_type_size(datatype, NULL);

    int timestep, timestep_blockidx, blockidx = 0;
    for (timestep = 0; timestep < varinfo->nsteps; ++timestep) {
        for (timestep_blockidx = 0;
             timestep_blockidx < varinfo->nblocks[timestep];
             ++timestep_blockidx, ++blockidx)
        {
            /* Round-robin distribution of blocks over ranks. */
            if (blockidx % size != rank) continue;

            const ADIOS_VARBLOCK *vb = &varinfo->blockinfo[blockidx];

            ADIOS_SELECTION *block_bb =
                adios_selection_boundingbox(varinfo->ndim, vb->start, vb->count);
            ADIOS_SELECTION *block_wb =
                adios_selection_writeblock(timestep_blockidx);
            ADIOS_SELECTION *block_abs_wb =
                adios_selection_writeblock(blockidx);
            block_abs_wb->u.block.is_absolute_index = 1;

            uint64_t blocksize = datatypesize;
            for (i = 0; i < varinfo->ndim; ++i)
                blocksize *= vb->count[i];

            void *buf_bb = malloc(blocksize);
            void *buf_wb = malloc(blocksize);
            void *buf_abs_wb = malloc(blocksize);
            /* fix: assert the allocations BEFORE writing to the buffers */
            MPI_Assert(COMM, buf_bb && buf_wb && buf_abs_wb);
            /* Distinct fill patterns so an unperformed read is detected. */
            memset(buf_bb, 0, blocksize);
            memset(buf_wb, 1, blocksize);
            memset(buf_abs_wb, 2, blocksize);

            adios_schedule_read(fp, block_bb,     varname, timestep, 1, buf_bb    );
            adios_schedule_read(fp, block_wb,     varname, timestep, 1, buf_wb    );
            adios_schedule_read(fp, block_abs_wb, varname, timestep, 1, buf_abs_wb);
            adios_perform_reads(fp, 1);

            fprintf(stderr, "[rank %d/%d] Checking file-mode blockidx %d BB vs. WB...\n",
                    rank, size, blockidx);
            MPI_Assert(COMM, memcmp(buf_bb, buf_wb, blocksize) == 0);
            fprintf(stderr, "[rank %d/%d] Checking file-mode blockidx %d BB vs. abs-WB...\n",
                    rank, size, blockidx);
            MPI_Assert(COMM, memcmp(buf_bb, buf_abs_wb, blocksize) == 0);

            free(buf_bb);
            free(buf_wb);
            free(buf_abs_wb);
            adios_selection_delete(block_bb);
            adios_selection_delete(block_wb);
            adios_selection_delete(block_abs_wb);
        }
    }

    adios_free_varinfo(varinfo);

    fprintf(stderr, "[rank %d/%d] Finished file-mode writeblock reads on %s:/%s\n",
            rank, size, bp_filename, varname);
}
/*
 * bpls-style dump: opens the BP file named on the command line (serial,
 * dummy communicator), then prints every variable (type, name, dims,
 * min/max statistics, and — when small enough — the full content for
 * 1D/2D/3D arrays at each step) and every attribute.
 *
 * Returns 0 on success, 1 on usage error, -1 on open/allocation failure.
 *
 * Fixes vs. original:
 *  - the per-step bounding-box selection was leaked — now deleted
 *  - for array types other than adios_integer/adios_double the opening
 *    '[' of the dimension list was never closed — now balanced
 */
int main (int argc, char ** argv)
{
    int i, j, k, l, t;
    MPI_Comm comm_dummy = 0;  /* MPI_Comm is defined through adios_read.h */
    void * data = NULL;
    uint64_t start[] = {0,0,0,0,0,0,0,0,0,0};
    uint64_t count[10];
    ADIOS_SELECTION *sel;

    if (argc < 2) {
        printf("Usage: %s <BP-file>\n", argv[0]);
        return 1;
    }

    ADIOS_FILE * f;
    f = adios_read_open_file (argv[1], ADIOS_READ_METHOD_BP, comm_dummy);
    if (f == NULL) {
        printf ("%s\n", adios_errmsg());
        return -1;
    }

    /* For all variables */
    printf("  Variables=%d:\n", f->nvars);
    for (i = 0; i < f->nvars; i++)
    {
        ADIOS_VARINFO * v = adios_inq_var_byid (f, i);
        adios_inq_var_stat (f, v, 0, 0);

        uint64_t total_size = adios_type_size (v->type, v->value);
        for (j = 0; j < v->ndim; j++)
            total_size *= v->dims[j];

        printf("    %-9s  %s", adios_type_to_string(v->type), f->var_namelist[i]);
        if (v->ndim == 0)
        {
            /* Scalars do not need to be read in, we get it from the
               metadata when using adios_inq_var */
            printf(" = %s\n", value_to_string(v->type, v->value, 0));
        }
        else
        {
            /* Arrays have to be read in from the file */
            if (v->nsteps > 1) {
                printf(" %d*",v->nsteps);
            }
            printf("[%" PRIu64,v->dims[0]);
            for (j = 1; j < v->ndim; j++)
                printf(", %" PRIu64,v->dims[j]);
            if (v->type == adios_integer)
                printf("] = min=%d max=%d\n",
                       (*(int*)v->statistics->min),
                       (*(int*)v->statistics->max));
            else if (v->type == adios_double)
                printf("] = min=%lg max=%lg\n",
                       (*(double*)v->statistics->min),
                       (*(double*)v->statistics->max));
            else
                printf("]\n"); /* fix: close the '[' for other types too */

            if (total_size > 1024*1024*1024)
            {
                printf("  // too big, do not read in\n");
            }
            else
            {
                data = malloc (total_size);
                if (data == NULL) {
                    fprintf (stderr, "malloc failed.\n");
                    return -1;
                }

                for (j = 0; j < v->ndim; j++)
                    count[j] = v->dims[j];

                for (t=0; t<v->nsteps; t++)
                {
                    sel = adios_selection_boundingbox (v->ndim, start, count);
                    adios_schedule_read_byid (f, sel, i, t, 1, data);
                    adios_perform_reads (f, 1);
                    adios_selection_delete (sel);   /* fix: leaked per step */

                    printf("      Step %d:\n", t);
                    if (adios_errno) {
                        printf ("%s\n", adios_errmsg());
                    } else if (total_size > 1024*1024) {
                        printf ("Too big to print\n");
                    } else if (v->ndim == 1) {
                        printf ("        [");
                        for (j = 0; j < v->dims[0]; j++)
                            printf("%s ", value_to_string(v->type, data, j));
                        printf ("]\n");
                    } else if (v->ndim == 2) {
                        for (j = 0; j < v->dims[0]; j++) {
                            printf ("        row %d: [", j);
                            for (k = 0; k < v->dims[1]; k++)
                                printf("%s ", value_to_string(v->type, data,
                                                              j*v->dims[1] + k));
                            printf ("]\n");
                        }
                    } else if (v->ndim == 3) {
                        for (j = 0; j < v->dims[0]; j++) {
                            printf ("      block %d: \n", j);
                            for (k = 0; k < v->dims[1]; k++) {
                                printf ("        row %d: [", k);
                                for (l = 0; l < v->dims[2]; l++) {
                                    // NCSU ALACRITY-ADIOS - Fixed bug, k*v->dims[1] changed to k*v->dims[2]
                                    printf("%s ", value_to_string(v->type, data,
                                                                  j*v->dims[1]*v->dims[2]
                                                                  + k*v->dims[2] + l));
                                }
                                printf ("]\n");
                            }
                            printf ("\n");
                        }
                    } else {
                        printf ("    cannot print arrays with >3 dimensions\n");
                    }
                }
                free (data);
            }
        }
        adios_free_varinfo (v);
    } /* variables */

    /* For all attributes */
    printf("  Attributes=%d:\n", f->nattrs);
    for (i = 0; i < f->nattrs; i++)
    {
        enum ADIOS_DATATYPES atype;
        int  asize;
        void *adata;
        adios_get_attr_byid (f, i, &atype, &asize, &adata);
        int type_size = adios_type_size (atype, adata);
        int nelems = asize / type_size;
        printf("    %-9s  %s = ", adios_type_to_string(atype), f->attr_namelist[i]);
        char *p = (char*)adata;
        if (nelems>1) printf("{");
        for (j=0; j<nelems; j++)
        {
            if (j>0) printf(", ");
            printf ("%s", value_to_string(atype, p, 0));
            p += type_size;
        }
        if (nelems>1) printf("}");
        printf("\n");
        free(adata);
    } /* attributes */

    adios_read_close (f);
    return 0;
}