/* Declare the "restart" group and its variables using the no-XML API.
   Note: in the ADIOS 1.x no-XML API a variable must be defined once for
   each time it is written within a step, hence NX is defined twice
   (once for t1, once for t2). */
int declare_group ()
{
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 10);
    adios_declare_group (&m_adios_group, "restart", "iter", adios_flag_yes);
    adios_select_method (m_adios_group, "MPI", "verbose=2", "");
    adios_define_var (m_adios_group, "NX", "", adios_integer, 0, 0, 0);
    adios_define_var (m_adios_group, "t1", "", adios_double, "NX", "100", "0");
    adios_define_var (m_adios_group, "NX", "", adios_integer, 0, 0, 0);
    adios_define_var (m_adios_group, "t2", "", adios_double, "NX", "100", "0");
    return 0;
}
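/* A minimal sketch of how declare_group() might be driven from a main();
   the global m_adios_group, the single-process run (global dimension 100
   equals NX), and the file name are assumptions, not part of the original. */
#include <mpi.h>
#include <stdint.h>
#include "adios.h"

int64_t m_adios_group;        /* assumed global, filled by declare_group() */
int declare_group (void);     /* defined above */

int main (int argc, char **argv)
{
    int64_t fh;
    int NX = 100, i;
    double t1[100], t2[100];
    uint64_t groupsize, totalsize;

    MPI_Init (&argc, &argv);
    adios_init_noxml (MPI_COMM_WORLD);
    declare_group ();
    for (i = 0; i < NX; i++) { t1[i] = i; t2[i] = -i; }

    adios_open (&fh, "restart", "restart.bp", "w", MPI_COMM_WORLD);
    groupsize = 2 * (4 + NX * 8);   /* two NX scalars + two NX-element arrays */
    adios_group_size (fh, groupsize, &totalsize);
    /* NX is written twice, matching its two definitions in declare_group() */
    adios_write (fh, "NX", &NX);
    adios_write (fh, "t1", t1);
    adios_write (fh, "NX", &NX);
    adios_write (fh, "t2", t2);
    adios_close (fh);

    adios_finalize (0);
    MPI_Finalize ();
    return 0;
}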
int main(int argc, char **argv)
{
    parseArgs(argc, argv);
    if (datafile.empty() || varName.empty()) {
        std::cout << "Usage:\n" << *argv
                  << " -f data-file-name"
                  << " -n variable-name"
                  << " [-p variable-path]"
                  << " [-a attribute-name]"
                  //<< " [-m file-format [HDF5(default), H5PART, NETCDF]"
                  << " [-v verboseness]"
                  << " [-x (xport)]\n\n"
                  << "\tFor a more detailed usage description and examples,"
                     " please see file GUIDE"
                  << std::endl;
        return -1;
    }

#ifndef FQ_NOMPI
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
#endif
    adios_init_noxml();
    adios_allocate_buffer(ADIOS_BUFFER_ALLOC_NOW, 1000);

    LOGGER(ibis::gVerbose >= 0 && mpi_rank == 0)
        << "BPread running with mpi_size = " << mpi_size << " ...";

    if (! logfile.str().empty()) {
        if (ibis::gVerbose > 1)
            std::cout << "DEBUG: using logfile \"" << logfile.str().c_str()
                      << "\" ...\n";
        logfile << "-" << mpi_rank << ".log";
    }

    bool berr = doRead();

    adios_finalize(mpi_rank);
    LOGGER(ibis::gVerbose >= 0)
        << *argv << " invoked adios_finalize(" << mpi_rank << ")\n";
#ifndef FQ_NOMPI
    MPI_Finalize();
#endif
    if (berr) {
        LOGGER(ibis::gVerbose > 0)
            << *argv << " successfully completed reading data";
        return 0;
    }
    else {
        LOGGER(ibis::gVerbose > 0)
            << "Warning -- " << *argv << " failed to complete reading data";
        return -1;
    }
}
/// The default constructor.  It is private and cannot be called directly
/// by any user code.
BPCommon::BPCommon() {
    int ierr = adios_init_noxml();
    if (ierr != 0) {
        std::cerr << "BPCommon::ctor failed to initialize adios, "
                  << adios_errmsg() << std::endl;
        throw "BPCommon failed to initialize adios";
    }
    ierr = adios_allocate_buffer(ADIOS_BUFFER_ALLOC_NOW,
                                 FQ_ADIOS_DEFAULT_BUFFER_MB);
    if (ierr != 0) {
        std::cerr << "BPCommon::ctor failed to allocate "
                  << FQ_ADIOS_DEFAULT_BUFFER_MB << " MB for ADIOS buffer, "
                  << adios_errmsg() << std::endl;
    }
}
int main (int argc, char ** argv)
{
    int size, i, block;
    MPI_Comm comm = 0;   /* dummy MPI communicator for the serial POSIX method */

    /* ADIOS variable declarations matching gwrite_temperature.ch */
    uint64_t adios_groupsize, adios_totalsize;
    int64_t g;   /* group handle */
    int64_t f;   /* file handle  */
    char dimstr[32];

    adios_init_noxml (comm);
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 1);

    adios_declare_group (&g, "table", "", adios_flag_yes);
    adios_select_method (g, "POSIX1", "", "");

    sprintf (dimstr, "%d,%d", NX, NY);
    adios_define_var (g, "A", "", adios_integer, dimstr, dimstr, "0,0");
    sprintf (dimstr, "%d,%d", n_of_elements, Elements_length);
    adios_define_var (g, "Elements", "", adios_byte, dimstr, dimstr, "0,0");
    sprintf (dimstr, "%d,%d", NY, Columns_length);
    adios_define_var (g, "Columns", "", adios_byte, dimstr, dimstr, "0,0");

    adios_open (&f, "table", "table.bp", "w", comm);
    adios_groupsize = NX * NY * sizeof(int32_t)       /* size of A        */
                    + n_of_elements * Elements_length /* size of Elements */
                    + NY * Columns_length;            /* size of Columns  */
    adios_group_size (f, adios_groupsize, &adios_totalsize);
    adios_write (f, "A", A);
    adios_write (f, "Elements", Elements);
    adios_write (f, "Columns", Columns);
    adios_close (f);

    adios_finalize (0);
    return 0;
}
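/* A minimal sketch of reading the 2-D "A" array back with the ADIOS read
   API, using a bounding box that covers the whole array.  NX and NY are
   assumed to be the same compile-time constants used by the writer above. */
#include <stdint.h>
#include <stdlib.h>
#include "adios_read.h"

void read_table_back (void)
{
    ADIOS_FILE *f = adios_read_open_file ("table.bp",
                                          ADIOS_READ_METHOD_BP, 0);
    if (f == NULL) return;
    uint64_t start[2] = {0, 0};
    uint64_t count[2] = {NX, NY};
    ADIOS_SELECTION *sel = adios_selection_boundingbox (2, start, count);
    int32_t *A = malloc (NX * NY * sizeof(int32_t));
    adios_schedule_read (f, sel, "A", 0, 1, A);   /* from step 0, 1 step */
    adios_perform_reads (f, 1);                   /* 1 = blocking        */
    adios_selection_delete (sel);
    free (A);
    adios_read_close (f);
}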
int main (int argc, char ** argv)
{
    int err = 0, step;   /* err must start at 0: it is tested before reading/writing */
    int do_write = 1;
    int do_read = 1;
    int m = 0;
    char write_method[16];

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    if (argc > 1) {
        errno = 0;   /* clear errno before strtol() so the check below is valid */
        m = strtol (argv[1], NULL, 10);
        if (errno) { printf ("Invalid 1st argument %s\n", argv[1]); Usage(); return 1; }
    }
    if (argc > 2) {
        if (argv[2][0] == 'w' || argv[2][0] == 'W') {
            do_read = 0;
        } else if (argv[2][0] == 'r' || argv[2][0] == 'R') {
            do_write = 0;
        } else {
            printE ("Invalid command line argument %s. Allowed ones:\n"
                    "   w: do write only\n"
                    "   r: do read only\n", argv[2]);
            MPI_Finalize ();
            return 1;
        }
    }

    if (m == 0) {
        read_method = ADIOS_READ_METHOD_BP;
        strcpy (write_method, "MPI");
    } else {
        read_method = ADIOS_READ_METHOD_DATASPACES;
        strcpy (write_method, "DATASPACES");
    }

    log ("Writing: %s method=%s\n"
         "Reading: %s method=%d\n",
         (do_write ? "yes" : "no"), write_method,
         (do_read  ? "yes" : "no"), read_method);

    alloc_vars();
    if (do_write) {
        adios_init_noxml (comm);
        adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 10);
    }
    if (do_read) {
        err = adios_read_init_method (read_method, comm, "verbose=2");
        if (err) {
            printE ("%s\n", adios_errmsg());
        }
    }

    if (do_write) {
        adios_declare_group (&m_adios_group, "selections", "iter", adios_flag_yes);
        adios_select_method (m_adios_group, write_method, "verbose=2", "");
        define_vars();
        for (step = 0; step < NSTEPS; step++) {
            if (!err) {
                set_vars (step);
                err = write_file (step);
                sleep (1);
            }
        }
        adios_free_group (m_adios_group);
    }

    if (!err && do_read)
        err = read_points ();
    //if (!err && do_read)
    //    err = read_writerblocks ();

    if (do_read) {
        adios_read_finalize_method (read_method);
    }
    fini_vars();
    if (do_write) {
        adios_finalize (rank);
    }
    MPI_Finalize ();
    return err;
}
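/* log() and printE() are helpers assumed by this test but not shown in the
   listing.  A plausible sketch in the style of the ADIOS test suite,
   prefixing messages with the MPI rank and sending errors to stderr; the
   exact definitions in the suite may differ. */
#include <stdio.h>

extern int rank;   /* assumed global, set after MPI_Comm_rank() */

#define log(...)    do { printf ("[rank %d] ", rank); \
                         printf (__VA_ARGS__); } while (0)
#define printE(...) do { fprintf (stderr, "[rank %d] ERROR: ", rank); \
                         fprintf (stderr, __VA_ARGS__); } while (0)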
int main (int argc, char ** argv)
{
    char filename [256];
    int rank, size, i, j;
    int NX = 100, gb, offset;   /* local size / global size / offset */
    double t[NX];
    int nblocks = 3;
    MPI_Comm comm = MPI_COMM_WORLD;
    char g_str[100], o_str[100], l_str[100];

    /* attributes (from C variables) */
    int someints[5] = {5,4,3,2,1};
    double somedoubles[5] = {5.55555, 4.4444, 3.333, 2.22, 1.1};

    /* ADIOS variable declarations matching gwrite_temperature.ch */
    uint64_t adios_groupsize, adios_totalsize;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    gb = nblocks * NX * size;
    sprintf (g_str, "%d", gb);
    sprintf (l_str, "%d", NX);

    strcpy (filename, "global_array_byid_noxml_C.bp");

    adios_init_noxml (comm);
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 10);

    int64_t m_adios_group;
    int64_t m_adios_file;
    int64_t var_ids[nblocks];

    adios_declare_group (&m_adios_group, "restart", "iter", adios_flag_yes);
    adios_select_method (m_adios_group, "MPI", "", "");

    for (i = 0; i < nblocks; i++) {
        offset = rank * nblocks * NX + i * NX;
        sprintf (o_str, "%d", offset);
        var_ids[i] = adios_define_var (m_adios_group, "temperature", "",
                                       adios_double, l_str, g_str, o_str);
        adios_set_transform (var_ids[i], "none");
    }

    /* add some attributes */
    adios_define_attribute_byvalue (m_adios_group,
        "single_string", "", adios_string, 1, "A single string attribute");
    char *strings[] = {"X", "Yy", "ZzZ"};
    adios_define_attribute_byvalue (m_adios_group,
        "three_strings", "", adios_string_array, 3, strings);
    adios_define_attribute_byvalue (m_adios_group,
        "single_int", "", adios_integer, 1, &someints);
    adios_define_attribute_byvalue (m_adios_group,
        "single_double", "", adios_double, 1, &somedoubles);
    adios_define_attribute_byvalue (m_adios_group,
        "five_ints", "", adios_integer, 5, &someints);
    adios_define_attribute_byvalue (m_adios_group,
        "five_double", "", adios_double, 5, &somedoubles);

    adios_open (&m_adios_file, "restart", filename, "w", comm);

    adios_groupsize = nblocks * (4 + 4 + 4 + NX * 8);
    adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);

    /* now we will write the data for each sub block */
    for (i = 0; i < nblocks; i++) {
        offset = rank * nblocks * NX + i * NX;
        for (j = 0; j < NX; j++)
            t[j] = offset + j;
        adios_write_byid (m_adios_file, var_ids[i], t);
    }

    adios_close (m_adios_file);
    MPI_Barrier (comm);
    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
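/* A minimal sketch of reading one of the attributes defined above back;
   adios_get_attr() returns a malloc'ed copy of the value that the caller
   must free.  Illustration only, not part of the original example. */
#include <stdio.h>
#include <stdlib.h>
#include "adios_read.h"

void check_attrs (void)
{
    ADIOS_FILE *f = adios_read_open_file ("global_array_byid_noxml_C.bp",
                                          ADIOS_READ_METHOD_BP, 0);
    if (f == NULL) return;
    enum ADIOS_DATATYPES type;
    int size;
    void *value = NULL;
    if (adios_get_attr (f, "five_ints", &type, &size, &value) == 0) {
        int *v = (int *) value;
        printf ("five_ints[0..4] = %d %d %d %d %d\n",
                v[0], v[1], v[2], v[3], v[4]);
        free (value);
    }
    adios_read_close (f);
}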
int main (int argc, char ** argv)
{
    int err = 0, i, M;   /* err must start at 0: it is tested in the write loop */
    int iconnect;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (wcomm, &wrank);
    MPI_Comm_size (wcomm, &wsize);

    if (argc < 2) { Usage(); return 1; }

    errno = 0;
    M = strtol (argv[1], NULL, 10);
    if (errno || M < 1 || M > wsize) {
        printE ("Invalid 1st argument %s\n", argv[1]);
        Usage();
        return 1;
    }

    /* connect to the server from ranks N-M, N-M+1, ..., N-1 */
    iconnect = (wrank >= wsize - M);

    MPI_Comm_split (wcomm, iconnect, wrank + M - wsize, &subcomm);
    MPI_Comm_rank (subcomm, &subrank);
    MPI_Comm_size (subcomm, &subsize);

    if (iconnect) {
        if (subsize != M) {
            printE ("Something wrong with the communicator split: "
                    "N=%d, M=%d, split size=%d\n", wsize, M, subsize);
            return 2;
        }
        log ("connect as subrank %d\n", subrank);
    }

    alloc_vars();
    adios_read_init_method (ADIOS_READ_METHOD_DATASPACES, subcomm, "verbose=4");
    adios_init_noxml (subcomm);
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 1);

    if (iconnect) {
        adios_declare_group (&m_adios_group, "connect", "iter", adios_flag_yes);
        adios_select_method (m_adios_group, "DATASPACES", "verbose=4", "");
        adios_define_var (m_adios_group, "ldim1", "", adios_integer, 0, 0, 0);
        adios_define_var (m_adios_group, "gdim1", "", adios_integer, 0, 0, 0);
        adios_define_var (m_adios_group, "offs1", "", adios_integer, 0, 0, 0);
        adios_define_var (m_adios_group, "a1", "", adios_integer,
                          "ldim1", "gdim1", "offs1");

        for (i = 0; i < NSTEPS; i++) {
            if (!err) {
                set_vars (i);
                err = write_file (i);
            }
        }
    }

    log ("done with work, sync with others...\n");
    MPI_Barrier (wcomm);

    log ("call adios_finalize...\n");
    adios_finalize (wrank);

    log ("call adios_read_finalize_method...\n");
    adios_read_finalize_method (ADIOS_READ_METHOD_DATASPACES);

    fini_vars();
    MPI_Finalize ();
    return err;
}
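/* write_file() is referenced above but not shown.  A plausible sketch for
   the "connect" group, assuming globals ldim1/gdim1/offs1 and the a1 array
   are filled in by set_vars(); the stream name, the append-after-first-step
   mode, and the variable sizes are all assumptions. */
#include <stdint.h>
#include "adios.h"

extern MPI_Comm subcomm;
extern int ldim1, gdim1, offs1;
extern int *a1;

int write_file (int step)
{
    int64_t fh;
    uint64_t groupsize, totalsize;
    /* "w" creates the stream on the first step, "a" appends later steps */
    adios_open (&fh, "connect", "connect.bp", (step ? "a" : "w"), subcomm);
    groupsize = 3 * sizeof(int) + ldim1 * sizeof(int);
    adios_group_size (fh, groupsize, &totalsize);
    adios_write (fh, "ldim1", &ldim1);
    adios_write (fh, "gdim1", &gdim1);
    adios_write (fh, "offs1", &offs1);
    adios_write (fh, "a1", a1);
    return adios_close (fh);
}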
int main (int argc, char ** argv)
{
    /* For variable definitions:
       gbounds = global bounds string, lbounds = local bounds string,
       offs = offset string, tstring = scratch string for temporary values */
    char gbounds[1007], lbounds[1007], offs[1007], tstring[100];
    int rank, size, gidx, i, j, k, ii;   /* size = number of cores, gidx = adios group index */
    void * data = NULL;                  /* pointer to read-in data */
    uint64_t s[] = {0,0,0,0,0,0,0,0,0,0};   /* starting offset   */
    uint64_t c[] = {1,1,1,1,1,1,1,1,1,1};   /* chunk block array */
    uint64_t bytes_read = 0;
    int element_size;
    int64_t new_adios_group, m_adios_file;
    uint64_t var_size;
    uint64_t adios_groupsize, adios_totalsize;
    int read_buffer;          /* maximum size the user would like for each chunk, in MB */
    int write_buffer = 1536;  /* actual buffer size used, in MB */
    int itime;
    int WRITEME = 1;
    uint64_t chunk_size;      /* chunk size in number of elements */
    char *var_path, *var_name;   /* full path cut into dir path and name */

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    /* timing numbers; we will time:
       0: adios_open, adios_group_size
       1: the total time to read in the data
       2: times around each write (only works if we do NOT buffer)
       3: the time in the close
       4: fopen, fclose
       5: total time
       timers: the total I/O time */
    int timers = 6;
    double start_time[timers], end_time[timers], total_time[timers];

    if (TIMING == 100) {
        for (itime = 0; itime < timers; itime++) {
            start_time[itime] = 0;
            end_time[itime] = 0;
            total_time[itime] = 0;
        }
        //MPI_Barrier(MPI_COMM_WORLD);
        start_time[5] = MPI_Wtime();
    }

    if (rank == 0)
        printf ("converting...\n");

    if (argc < 6) {   /* argv[5] (METHOD) is accessed unconditionally below */
        if (rank == 0)
            printf ("Usage: %s <BP-file> <ADIOS-file> read_buffer(MB) write_buffer(MB) "
                    "METHOD (LUSTRE_stripe_count) (LUSTRE_stripe_size) (LUSTRE_block_size)\n",
                    argv[0]);
        return 1;
    }

    if (TIMING == 100)
        start_time[4] = MPI_Wtime();
    ADIOS_FILE * f = adios_fopen (argv[1], MPI_COMM_SELF);
    if (TIMING == 100) {
        end_time[4] = MPI_Wtime();
        total_time[4] = end_time[4] - start_time[4];
    }

    adios_init_noxml (comm);   /* no XML will be used to write the new adios file */
    read_buffer  = atoi (argv[3]);
    write_buffer = atoi (argv[4]);
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, write_buffer);   /* allocate MB buffer */

    if (f == NULL) {
        printf ("rank=%d, file cannot be opened\n", rank);
        if (DEBUG)
            printf ("%s\n", adios_errmsg());
        return -1;
    }

    for (gidx = 0; gidx < f->groups_count; gidx++) {   /* loop over groups */
        adios_groupsize = 0;
        ADIOS_GROUP * g = adios_gopen (f, f->group_namelist[gidx]);
        if (g == NULL) {
            if (DEBUG)
                printf ("%s\n", adios_errmsg());
            printf ("rank %d: group cannot be opened.\n", rank);
            return -1;
        }

        /* First create all of the groups:
           declare this group in the file that will be written */
        adios_declare_group (&new_adios_group, f->group_namelist[gidx], "", adios_flag_yes);

        if (strcmp (argv[5], "MPI_LUSTRE") != 0)   /* does the user want the MPI_LUSTRE method? */
            adios_select_method (new_adios_group, argv[5], "", "");   /* non-MPI_LUSTRE methods, like MPI, POSIX... */
        else {
            char lustre_pars[1000];
            strcpy (lustre_pars, "");
            strcat (lustre_pars, "stripe_count=");
            sprintf (tstring, "%d", atoi (argv[6]));
            strcat (lustre_pars, tstring);
            strcat (lustre_pars, ",stripe_size=");
            sprintf (tstring, "%d", atoi (argv[7]));
            strcat (lustre_pars, tstring);
            strcat (lustre_pars, ",block_size=");
            sprintf (tstring, "%d", atoi (argv[8]));
            strcat (lustre_pars, tstring);
            if (rank == 0)
                printf ("lustre_pars=%s\n", lustre_pars);
            adios_select_method (new_adios_group, argv[5], lustre_pars, "");   /* use the MPI_LUSTRE method */
        }

        /* variable definition part */
        for (i = 0; i < g->vars_count; i++) {
            ADIOS_VARINFO * v = adios_inq_var_byid (g, i);
            getbasename (g->var_namelist[i], &var_path, &var_name);

            if (v->ndim == 0) {
                /* scalars: every process writes them identically */
                adios_define_var (new_adios_group, var_name, var_path, v->type, 0, 0, 0);
                getTypeInfo (v->type, &element_size);   /* element_size is the per-element size for this type */
                if (v->type == adios_string)   /* special case: the scalar is a string */
                    adios_groupsize += strlen (v->value);
                else
                    adios_groupsize += element_size;
            } else {
                /* array variables */
                getTypeInfo (v->type, &element_size);
                var_size = 1;
                for (ii = 0; ii < v->ndim; ii++)
                    var_size *= v->dims[ii];
                uint64_t total_size = var_size;   /* total number of elements in this array variable */
                var_size *= element_size;         /* size of this array variable in bytes */

                /* re-initialize the s and c arrays */
                for (j = 0; j < v->ndim; j++) {
                    s[j] = 0;
                    c[j] = 1;
                }

                /* find the approximate chunk_size to use */
                chunk_size = calcChunkSize (total_size, read_buffer*1024*1024/element_size, size);
                /* set the chunk block array with a total size as close to chunk_size as possible */
                calcC (chunk_size, v, c);
                strcpy (lbounds, "");
                for (j = 0; j < v->ndim; j++) {
                    sprintf (tstring, "%" PRId64 ",", c[j]);
                    strcat (lbounds, tstring);
                }
                printf ("rank=%d, name=%s, chunk_size1=%" PRId64 " c[]=%s\n",
                        rank, g->var_namelist[i], chunk_size, lbounds);

                chunk_size = 1;
                for (ii = 0; ii < v->ndim; ii++)   /* recompute chunk_size from c; now it is exact */
                    chunk_size *= c[ii];

                /* current_step points to where this process is in the array; seed it by rank */
                uint64_t current_step = rank * chunk_size;
                /* advance the starting point s by current_step, unless it already exceeds total_size */
                if (current_step < total_size)
                    rS (v, s, current_step, rank);

                uint64_t elements_defined = 0;   /* number of elements defined so far */
                /* this process handles its part of the array while current_step < total_size */
                while (current_step < total_size) {
                    /* ts (temporary s) is used by the inner do-while loop; copy s to ts */
                    uint64_t ts[] = {0,0,0,0,0,0,0,0,0,0};
                    arrCopy (s, ts);
                    /* each outer iteration starts with a whole chunk_size left to process... */
                    uint64_t remain_chunk = chunk_size;
                    if (current_step + chunk_size > total_size)   /* ...except near the end of the array */
                        remain_chunk = total_size - current_step;
                    /* tc (temporary c) is used by the inner do-while loop; copy c to tc */
                    uint64_t tc[] = {1,1,1,1,1,1,1,1,1,1};
                    arrCopy (c, tc);
                    do {
                        /* how much of the remaining chunk to process? initially, all of it */
                        uint64_t used_chunk = remain_chunk;
                        /* processing with block size tc could run past the array bound given ts, */
                        uint64_t uc[] = {1,1,1,1,1,1,1,1,1,1};
                        /* ...so verify it: compute a legitimate chunk block uc and a new remain_chunk */
                        remain_chunk = checkBound (v, ts, tc, uc, remain_chunk);

                        /* check that ts+uc stays within bounds; this is only a sanity
                           check, as theoretically there should be no problem at all */
                        checkOverflow (0, v, ts, uc);

                        /* the code below computes gbounds and sets placeholder
                           names for lbounds and offs */
                        strcpy (gbounds, "");
                        strcpy (lbounds, "");
                        strcpy (offs, "");
                        for (j = 0; j < v->ndim-1; j++) {
                            sprintf (tstring, "%d,", (int) v->dims[j]);
                            strcat (gbounds, tstring);
                            //sprintf(tstring, "ldim%d_%s,", j, var_name);
                            sprintf (tstring, "ldim%d,", j);
                            strcat (lbounds, tstring);
                            //sprintf(tstring, "offs%d_%s,", j, var_name);
                            sprintf (tstring, "offs%d,", j);
                            strcat (offs, tstring);
                        }
                        sprintf (tstring, "%d", (int) v->dims[v->ndim-1]);
                        strcat (gbounds, tstring);
                        //sprintf(tstring, "ldim%d_%s", v->ndim-1, var_name);
                        sprintf (tstring, "ldim%d", v->ndim-1);
                        strcat (lbounds, tstring);
                        //sprintf(tstring, "offs%d_%s", v->ndim-1, var_name);
                        sprintf (tstring, "offs%d", v->ndim-1);
                        strcat (offs, tstring);

                        for (j = 0; j < v->ndim; j++) {
                            //sprintf(tstring, "ldim%d_%s", j, var_name);
                            sprintf (tstring, "ldim%d", j);
                            adios_define_var (new_adios_group, tstring, "bp2bp",
                                              adios_unsigned_long, 0, 0, 0);
                            //sprintf(tstring, "offs%d_%s", j, var_name);
                            sprintf (tstring, "offs%d", j);
                            adios_define_var (new_adios_group, tstring, "bp2bp",
                                              adios_unsigned_long, 0, 0, 0);
                        }
                        adios_define_var (new_adios_group, var_name, var_path, v->type,
                                          lbounds, gbounds, offs);

                        if (DEBUG) {
                            strcpy (lbounds, "");
                            strcpy (offs, "");
                            for (j = 0; j < v->ndim; j++) {
                                sprintf (tstring, "%" PRId64 ",", ts[j]);
                                strcat (offs, tstring);
                                sprintf (tstring, "%" PRId64 ",", uc[j]);
                                strcat (lbounds, tstring);
                            }
                            printf ("rank=%d, name=%s, gbounds=%s: lbounds=%s: offs=%s \n",
                                    rank, g->var_namelist[i], gbounds, lbounds, offs);
                        }

                        used_chunk -= remain_chunk;   /* now used_chunk is the amount actually used */
                        elements_defined += used_chunk;
                        if (remain_chunk != 0) {
                            rS (v, ts, used_chunk, rank);   /* advance ts by used_chunk */
                            for (k = 0; k < 10; k++)
                                tc[k] = 1;
                            /* from remain_chunk, compute the new chunk block tc left to process */
                            calcC (remain_chunk, v, tc);
                        }
                        adios_groupsize += used_chunk*element_size + 2*v->ndim*8;
                    } while (remain_chunk != 0);

                    /* once a whole chunk_size is processed, advance current_step round-robin */
                    current_step += size * chunk_size;
                    if (current_step < total_size) {   /* advance s in the same way */
                        rS (v, s, size*chunk_size, rank);
                    }
                }

                /* besides checkOverflow above, verify that the total number of elements
                   processed across processes matches the element count of the original array */
                if (DEBUG) {
                    uint64_t* sb = (uint64_t *) malloc (sizeof(uint64_t));
                    uint64_t* rb = (uint64_t *) malloc (sizeof(uint64_t));
                    sb[0] = elements_defined;
                    MPI_Reduce (sb, rb, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, comm);
                    if (rank == 0 && rb[0] != total_size)
                        printf ("some array define mismatch. please use debug mode\n");
                    free (sb);
                    free (rb);
                }
            }
            free (var_name);
            free (var_path);
        }
        /* finished declaring all of the variables; now we can define the attributes */
        for (i = 0; i < g->attrs_count; i++) {
            enum ADIOS_DATATYPES atype;
            int asize;
            void *adata;
            adios_get_attr_byid (g, i, &atype, &asize, &adata);
            // if (DEBUG) printf("attribute name=%s\n",g->attr_namelist[i]);
            adios_define_attribute (new_adios_group, g->attr_namelist[i], "",
                                    atype, adata, 0);
        }

        /*------------------------------ NOW WE WRITE ------------------------------*/
        /* Everything is declared; now we need to write it all out */
        if (WRITEME == 1) {
            /* open up the file for writing */
            if (DEBUG)
                printf ("rank=%d, opening file = %s, with group %s, size=%" PRId64 "\n",
                        rank, argv[2], f->group_namelist[gidx], adios_groupsize);
            if (TIMING == 100)
                start_time[0] = MPI_Wtime();
            adios_open (&m_adios_file, f->group_namelist[gidx], argv[2], "w", comm);
            adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);

            /* sum adios_totalsize and adios_groupsize across all processes */
            uint64_t* sb = (uint64_t *) malloc (sizeof(uint64_t));
            uint64_t* rb = (uint64_t *) malloc (sizeof(uint64_t));
            sb[0] = adios_groupsize;
            MPI_Reduce (sb, rb, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, comm);
            uint64_t* sb2 = (uint64_t *) malloc (sizeof(uint64_t));
            uint64_t* rb2 = (uint64_t *) malloc (sizeof(uint64_t));
            sb2[0] = adios_totalsize;
            MPI_Reduce (sb2, rb2, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, comm);
            if (rank == 0) {
                printf ("total adios_totalsize = %" PRId64 "\n", *rb2);
                printf ("total adios_groupsize = %" PRId64 "\n", *rb);
            }
            free (sb);  free (rb);
            free (sb2); free (rb2);

            if (TIMING == 100) {
                end_time[0] = MPI_Wtime();
                total_time[0] += end_time[0] - start_time[0];   /* open + group_size time */
            }

            /* all variables are declared, so write out the actual data */
            for (i = 0; i < g->vars_count; i++) {
                ADIOS_VARINFO * v = adios_inq_var_byid (g, i);
                getbasename (g->var_namelist[i], &var_path, &var_name);
                if (v->ndim == 0) {
                    if (DEBUG) {
                        printf ("ADIOS WRITE SCALAR: rank=%d, name=%s value=",
                                rank, g->var_namelist[i]);
                        print_data (v->value, 0, v->type);
                        printf ("\n");
                    }
                    if (TIMING == 100)
                        start_time[2] = MPI_Wtime();
                    adios_write (m_adios_file, g->var_namelist[i], v->value);
                    if (TIMING == 100) {
                        end_time[2] = MPI_Wtime();
                        total_time[2] += end_time[2] - start_time[2];   /* I/O write time */
                    }
                } else {
                    for (j = 0; j < v->ndim; j++) {
                        s[j] = 0;
                        c[j] = 1;
                    }
                    getTypeInfo (v->type, &element_size);
                    uint64_t total_size = 1;
                    for (ii = 0; ii < v->ndim; ii++)
                        total_size *= v->dims[ii];

                    chunk_size = calcChunkSize (total_size, read_buffer*1024*1024/element_size, size);
                    calcC (chunk_size, v, c);
                    chunk_size = 1;
                    for (ii = 0; ii < v->ndim; ii++)
                        chunk_size *= c[ii];

                    uint64_t current_step = rank * chunk_size;
                    if (current_step < total_size)
                        rS (v, s, current_step, rank);

                    uint64_t elements_written = 0;
                    while (current_step < total_size) {
                        uint64_t ts[] = {0,0,0,0,0,0,0,0,0,0};
                        arrCopy (s, ts);
                        uint64_t remain_chunk = chunk_size;
                        if (current_step + chunk_size > total_size)
                            remain_chunk = total_size - current_step;
                        uint64_t tc[] = {1,1,1,1,1,1,1,1,1,1};
                        arrCopy (c, tc);
                        do {
                            uint64_t uc[] = {1,1,1,1,1,1,1,1,1,1};
                            uint64_t used_chunk = remain_chunk;
                            remain_chunk = checkBound (v, ts, tc, uc, remain_chunk);
                            checkOverflow (1, v, ts, uc);
                            used_chunk -= remain_chunk;
                            elements_written += used_chunk;

                            /* allocate space for the read-in data */
                            data = (void *) malloc (used_chunk * element_size);
                            if (TIMING == 100)
                                start_time[1] = MPI_Wtime();
                            if (PERFORMANCE_CHECK)
                                printf ("rank=%d, read start\n", rank);
                            bytes_read = adios_read_var_byid (g, v->varid, ts, uc, data);
                            if (PERFORMANCE_CHECK)
                                printf ("rank=%d, read end\n", rank);
                            if (TIMING == 100) {
                                end_time[1] = MPI_Wtime();
                                total_time[1] += end_time[1] - start_time[1];   /* I/O read time */
                            }

                            if (DEBUG)
                                printf ("ADIOS WRITE: rank=%d, name=%s datasize=%" PRId64 "\n",
                                        rank, g->var_namelist[i], bytes_read);
                            if (TIMING == 100)
                                start_time[2] = MPI_Wtime();
                            if (DEBUG) {
                                printf ("rank=%d, write ts=", rank);
                                int k;
                                for (k = 0; k < v->ndim; k++)
                                    printf ("%" PRId64 ",", ts[k]);
                                printf (" uc=");
                                for (k = 0; k < v->ndim; k++)
                                    printf ("%" PRId64 ",", uc[k]);
                                printf ("\n");
                            }

                            /* now write the actual values into the local-bounds
                               and offset placeholders */
                            if (PERFORMANCE_CHECK)
                                printf ("rank=%d, adios write start\n", rank);
                            for (k = 0; k < v->ndim; k++) {
                                //sprintf(tstring, "ldim%d_%s", k, var_name);
                                sprintf (tstring, "ldim%d", k);
                                if (DEBUG) {
                                    printf ("ADIOS WRITE DIMENSION: rank=%d, name=%s value=",
                                            rank, tstring);
                                    print_data (&uc[k], 0, adios_unsigned_long);
                                    printf ("\n");
                                }
                                adios_write (m_adios_file, tstring, &uc[k]);
                                //sprintf(tstring, "offs%d_%s", k, var_name);
                                sprintf (tstring, "offs%d", k);
                                if (DEBUG) {
                                    printf ("ADIOS WRITE OFFSET: rank=%d, name=%s value=",
                                            rank, tstring);
                                    print_data (&ts[k], 0, adios_unsigned_long);
                                    printf ("\n");
                                }
                                adios_write (m_adios_file, tstring, &ts[k]);
                            }
                            adios_write (m_adios_file, g->var_namelist[i], data);
                            if (PERFORMANCE_CHECK)
                                printf ("rank=%d, adios write end\n", rank);
                            if (TIMING == 100) {
                                end_time[2] = MPI_Wtime();
                                total_time[2] += end_time[2] - start_time[2];   /* I/O write time */
                            }
                            free (data);

                            if (remain_chunk != 0) {
                                rS (v, ts, used_chunk, rank);
                                for (k = 0; k < 10; k++)
                                    tc[k] = 1;
                                calcC (remain_chunk, v, tc);
                            }
                        } while (remain_chunk != 0);

                        current_step += size * chunk_size;
                        if (current_step < total_size)
                            rS (v, s, size*chunk_size, rank);
                    }

                    if (DEBUG) {
                        uint64_t* sb = (uint64_t *) malloc (sizeof(uint64_t));
                        uint64_t* rb = (uint64_t *) malloc (sizeof(uint64_t));
                        sb[0] = elements_written;
                        MPI_Reduce (sb, rb, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, comm);
                        if (rank == 0 && rb[0] != total_size)
                            printf ("some array read mismatch. please use debug mode\n");
                        free (sb);
                        free (rb);
                    }
                }
                free (var_name);
                free (var_path);
            }   /* end of writing the variables */
            if (TIMING == 100)
                start_time[3] = MPI_Wtime();
            if (PERFORMANCE_CHECK)
                printf ("rank=%d, adios_close start\n", rank);
            adios_close (m_adios_file);
            if (PERFORMANCE_CHECK)
                printf ("rank=%d, adios_close end\n", rank);
            if (TIMING == 100) {
                end_time[3] = MPI_Wtime();
                total_time[3] += end_time[3] - start_time[3];
            }
            adios_gclose (g);
        }   /* end of WRITEME */
    }   /* end of all of the groups */

    if (rank == 0)
        printf ("conversion done!\n");

    if (TIMING == 100)
        start_time[4] = MPI_Wtime();
    adios_fclose (f);
    if (TIMING == 100) {
        end_time[4] = MPI_Wtime();
        total_time[4] = total_time[4] + end_time[4] - start_time[4];
    }

    adios_finalize (rank);

    /* write out the timing data: for each category give max, min, avg, std,
       all in seconds, across all processes */
    if (TIMING == 100) {
        /* 0: adios_open, adios_group_size
           1: the total time to read in the data
           2: times around each write (only works if we do NOT buffer)
           3: the time in the close
           4: fopen, fclose
           5: total time */
        end_time[5] = MPI_Wtime();
        total_time[5] = end_time[5] - start_time[5];
        double sb[7];
        sb[0] = total_time[1];                    /* read_var                      */
        sb[1] = total_time[4];                    /* fopen + fclose                */
        sb[2] = sb[0] + sb[1];
        sb[3] = total_time[0];                    /* adios_open + adios_group_size */
        sb[4] = total_time[2] + total_time[3];    /* write + close                 */
        sb[5] = sb[3] + sb[4];
        sb[6] = total_time[5];                    /* total                         */
        double * rb = NULL;
        if (rank == 0)
            rb = (double *) malloc (size * 7 * sizeof(double));
        //MPI_Barrier(comm);
        MPI_Gather (sb, 7, MPI_DOUBLE, rb, 7, MPI_DOUBLE, 0, comm);
        if (rank == 0) {
            double read_avg1 = 0, read_avg2 = 0, tread_avg = 0;
            double write_avg1 = 0, write_avg2 = 0, twrite_avg = 0;
            double total_avg = 0;
            for (j = 0; j < size; j++) {
                read_avg1  += rb[7*j];
                read_avg2  += rb[7*j+1];
                tread_avg  += rb[7*j+2];
                write_avg1 += rb[7*j+3];
                write_avg2 += rb[7*j+4];
                twrite_avg += rb[7*j+5];
                total_avg  += rb[7*j+6];
            }
            read_avg1 /= size;  read_avg2 /= size;  tread_avg /= size;
            write_avg1 /= size; write_avg2 /= size; twrite_avg /= size;
            total_avg /= size;

            double read1_max = rb[0], read1_min = rb[0];
            double read1_std = rb[0] - read_avg1;   read1_std  *= read1_std;
            double read2_max = rb[1], read2_min = rb[1];
            double read2_std = rb[1] - read_avg2;   read2_std  *= read2_std;
            double tread_max = rb[2], tread_min = rb[2];
            double tread_std = rb[2] - tread_avg;   tread_std  *= tread_std;
            double write1_max = rb[3], write1_min = rb[3];
            double write1_std = rb[3] - write_avg1; write1_std *= write1_std;
            double write2_max = rb[4], write2_min = rb[4];
            double write2_std = rb[4] - write_avg2; write2_std *= write2_std;
            double twrite_max = rb[5], twrite_min = rb[5];
            double twrite_std = rb[5] - twrite_avg; twrite_std *= twrite_std;
            double total_max = rb[6], total_min = rb[6];
            double total_std = rb[6] - total_avg;   total_std  *= total_std;

            for (j = 1; j < size; j++) {
                if (rb[7*j] > read1_max) read1_max = rb[7*j];
                else if (rb[7*j] < read1_min) read1_min = rb[7*j];
                double std = rb[7*j] - read_avg1; std *= std; read1_std += std;

                if (rb[7*j+1] > read2_max) read2_max = rb[7*j+1];
                else if (rb[7*j+1] < read2_min) read2_min = rb[7*j+1];
                std = rb[7*j+1] - read_avg2; std *= std; read2_std += std;

                if (rb[7*j+2] > tread_max) tread_max = rb[7*j+2];
                else if (rb[7*j+2] < tread_min) tread_min = rb[7*j+2];
                std = rb[7*j+2] - tread_avg; std *= std; tread_std += std;

                if (rb[7*j+3] > write1_max) write1_max = rb[7*j+3];
                else if (rb[7*j+3] < write1_min) write1_min = rb[7*j+3];
                std = rb[7*j+3] - write_avg1; std *= std; write1_std += std;

                if (rb[7*j+4] > write2_max) write2_max = rb[7*j+4];
                else if (rb[7*j+4] < write2_min) write2_min = rb[7*j+4];
                std = rb[7*j+4] - write_avg2; std *= std; write2_std += std;
                if (rb[7*j+5] > twrite_max) twrite_max = rb[7*j+5];
                else if (rb[7*j+5] < twrite_min) twrite_min = rb[7*j+5];
                std = rb[7*j+5] - twrite_avg; std *= std; twrite_std += std;

                if (rb[7*j+6] > total_max) total_max = rb[7*j+6];
                else if (rb[7*j+6] < total_min) total_min = rb[7*j+6];
                std = rb[7*j+6] - total_avg; std *= std; total_std += std;
            }
            read1_std /= size;  read1_std  = sqrt(read1_std);
            read2_std /= size;  read2_std  = sqrt(read2_std);
            tread_std /= size;  tread_std  = sqrt(tread_std);
            write1_std /= size; write1_std = sqrt(write1_std);
            write2_std /= size; write2_std = sqrt(write2_std);
            twrite_std /= size; twrite_std = sqrt(twrite_std);
            total_std /= size;  total_std  = sqrt(total_std);

            printf ("---type--- max\tmin\tavg\tstd\n");
            printf ("---read_var--- %lf\t%lf\t%lf\t%lf\n",
                    read1_max, read1_min, read_avg1, read1_std);
            printf ("---fopen+fclose--- %lf\t%lf\t%lf\t%lf\n",
                    read2_max, read2_min, read_avg2, read2_std);
            printf ("---total_read--- %lf\t%lf\t%lf\t%lf\n",
                    tread_max, tread_min, tread_avg, tread_std);
            printf ("---adios_open+adios_groupsize--- %lf\t%lf\t%lf\t%lf\n",
                    write1_max, write1_min, write_avg1, write1_std);
            printf ("---write+close--- %lf\t%lf\t%lf\t%lf\n",
                    write2_max, write2_min, write_avg2, write2_std);
            printf ("---total_write--- %lf\t%lf\t%lf\t%lf\n",
                    twrite_max, twrite_min, twrite_avg, twrite_std);
            printf ("---total--- %lf\t%lf\t%lf\t%lf\n",
                    total_max, total_min, total_avg, total_std);
            free (rb);
        }
    }

    // if (TIMING==100 && rank==0) {
    //     printf("------------------------------------------------------------------\n");
    //     printf("Define variables     = %lf\n", total_time[0]);
    //     printf("Read variables       = %lf\n", total_time[1]);
    //     printf("Write variables      = %lf\n", total_time[2]);
    //     printf("Close File for write = %lf\n", total_time[3]);
    //     printf("Total write time     = %lf\n", total_time[2] + total_time[3]);
    //     for (itime=0; itime<timers-1; itime++)
    //         total_time[timers-1] += total_time[itime];
    //     printf("Total I/O time       = %lf\n", total_time[timers-1]);
    // }

    MPI_Finalize ();
    return 0;
}
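/* getbasename() is used throughout the converter but defined elsewhere.
   A plausible sketch that splits "a/b/name" into path "a/b" and name
   "name", returning heap copies the caller frees (matching the free()
   calls above); the real helper may handle corner cases differently. */
#include <stdlib.h>
#include <string.h>

void getbasename (const char *fullname, char **path, char **name)
{
    const char *slash = strrchr (fullname, '/');   /* last separator, if any */
    if (slash) {
        *path = strndup (fullname, slash - fullname);
        *name = strdup (slash + 1);
    } else {
        *path = strdup ("");                       /* no directory component */
        *name = strdup (fullname);
    }
}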
int main (int argc, char ** argv)
{
    char filename [256];
    int rank, size, i, block;
    int NX = 100, Global_bounds, Offsets;
    double t[NX];
    int sub_blocks = 3;
    MPI_Comm comm = MPI_COMM_WORLD;

    /* ADIOS variable declarations matching gwrite_temperature.ch */
    uint64_t adios_groupsize, adios_totalsize;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    Global_bounds = sub_blocks * NX * size;

    strcpy (filename, "adios_global_no_xml.bp");

    adios_init_noxml (comm);
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 10);

    int64_t m_adios_group;
    int64_t m_adios_file;

    adios_declare_group (&m_adios_group, "restart", "iter", adios_flag_yes);
    adios_select_method (m_adios_group, "MPI", "", "");

    adios_define_var (m_adios_group, "NX", "", adios_integer, 0, 0, 0);
    adios_define_var (m_adios_group, "Global_bounds", "", adios_integer, 0, 0, 0);

    for (i = 0; i < sub_blocks; i++) {
        adios_define_var (m_adios_group, "Offsets", "", adios_integer, 0, 0, 0);
        int64_t varid;
        varid = adios_define_var (m_adios_group, "temperature", "",
                                  adios_double, "NX", "Global_bounds", "Offsets");
        adios_set_transform (varid, "none");
    }

    adios_open (&m_adios_file, "restart", filename, "w", comm);

    adios_groupsize = sub_blocks * (4 + 4 + 4 + NX * 8);
    adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);

    adios_write (m_adios_file, "NX", (void *) &NX);
    adios_write (m_adios_file, "Global_bounds", (void *) &Global_bounds);

    /* now we will write the data for each sub block */
    for (block = 0; block < sub_blocks; block++) {
        Offsets = rank * sub_blocks * NX + block * NX;
        adios_write (m_adios_file, "Offsets", (void *) &Offsets);
        for (i = 0; i < NX; i++)
            t[i] = Offsets + i;
        adios_write (m_adios_file, "temperature", t);
    }

    adios_close (m_adios_file);
    MPI_Barrier (comm);
    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
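/* A minimal sketch of reading the multi-block "temperature" data back with
   a writeblock selection, which addresses one written block at a time;
   illustration only, not part of the original example. */
#include <stdio.h>
#include "adios_read.h"

void read_blocks_back (MPI_Comm comm)
{
    ADIOS_FILE *f = adios_read_open_file ("adios_global_no_xml.bp",
                                          ADIOS_READ_METHOD_BP, comm);
    if (f == NULL) return;
    ADIOS_VARINFO *v = adios_inq_var (f, "temperature");
    double t[100];   /* NX elements per block, matching the writer above */
    int b;
    for (b = 0; b < v->sum_nblocks; b++) {
        ADIOS_SELECTION *sel = adios_selection_writeblock (b);
        adios_schedule_read (f, sel, "temperature", 0, 1, t);
        adios_perform_reads (f, 1);   /* blocking */
        printf ("block %d starts with %g\n", b, t[0]);
        adios_selection_delete (sel);
    }
    adios_free_varinfo (v);
    adios_read_close (f);
}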
int write_blocks ()
{
    int NX, G, O;
    double *t;
    /* ADIOS variable declarations matching gwrite_temperature.ch */
    int it, i, r;
    uint64_t adios_groupsize, adios_totalsize;

    if (!rank) printf ("------- Write blocks -------\n");

    /* We will have "3 steps * 2 blocks per process * number of processes" blocks */
    nsteps = 3;
    nblocks_per_step = 2;
    block_offset = (uint64_t*) malloc (sizeof(uint64_t) * nsteps * nblocks_per_step * size);
    block_count  = (uint64_t*) malloc (sizeof(uint64_t) * nsteps * nblocks_per_step * size);
    gdims        = (uint64_t*) malloc (sizeof(uint64_t) * nsteps);

    adios_init_noxml (comm);
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 10);

    int64_t m_adios_group;
    int64_t m_adios_file;

    adios_declare_group (&m_adios_group, "restart", "", adios_flag_yes);
    adios_select_method (m_adios_group, "MPI", "", "");

    adios_define_var (m_adios_group, "NX", "", adios_integer, 0, 0, 0);
    adios_define_var (m_adios_group, "G",  "", adios_integer, 0, 0, 0);
    /* O and temperature have to be defined as many times as they are
       written within one step (twice) */
    for (it = 0; it < nblocks_per_step; it++) {
        adios_define_var (m_adios_group, "O", "", adios_integer, 0, 0, 0);
        adios_define_var (m_adios_group, "t", "", adios_double, "NX", "G", "O");
    }

    for (it = 0; it < nsteps; it++) {
        if (!rank) printf ("Step %d:\n", it);
        NX = 10 + it;
        G = nblocks_per_step * NX * size;
        t = (double *) malloc (NX * sizeof(double));
        for (i = 0; i < NX; i++)
            t[i] = rank + it*0.1 + 0.01;

        MPI_Barrier (comm);
        if (it == 0)
            adios_open (&m_adios_file, "restart", fname, "w", comm);
        else
            adios_open (&m_adios_file, "restart", fname, "a", comm);
        adios_groupsize = 4 + 4 + 4 + NX * 8
                        + 4 + 4 + 4 + NX * 8;
        adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);

        adios_write (m_adios_file, "NX", (void *) &NX);
        adios_write (m_adios_file, "G",  (void *) &G);
        O = rank * nblocks_per_step * NX;
        adios_write (m_adios_file, "O", (void *) &O);
        adios_write (m_adios_file, "t", t);
        printf ("rank %d: block 1: size=%d, offset=%d\n", rank, NX, O);
        for (r = 0; r < size; r++) {
            block_count  [it*nblocks_per_step*size + nblocks_per_step*r] = NX;
            block_offset [it*nblocks_per_step*size + nblocks_per_step*r] = r * nblocks_per_step * NX;
        }

        for (i = 0; i < NX; i++)
            t[i] += 0.01;
        O = rank * nblocks_per_step * NX + NX;
        adios_write (m_adios_file, "O", (void *) &O);
        adios_write (m_adios_file, "t", t);
        printf ("rank %d: block 2: size=%d, offset=%d\n", rank, NX, O);
        for (r = 0; r < size; r++) {
            block_count  [it*nblocks_per_step*size + nblocks_per_step*r + 1] = NX;
            block_offset [it*nblocks_per_step*size + nblocks_per_step*r + 1] = r * nblocks_per_step * NX + NX;
        }

        gdims [it] = G;
        adios_close (m_adios_file);
        MPI_Barrier (comm);
        free (t);
    }

    adios_finalize (rank);
    return 0;
}
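/* A minimal sketch of verifying the blocks written above:
   adios_inq_var_blockinfo() fills v->blockinfo with the start/count of
   every block, which can be checked against the block_offset/block_count
   arrays recorded by write_blocks().  Illustration only. */
#include <stdio.h>
#include "adios_read.h"

void verify_blocks (MPI_Comm comm, const char *fname)
{
    ADIOS_FILE *f = adios_read_open_file (fname, ADIOS_READ_METHOD_BP, comm);
    if (f == NULL) return;
    ADIOS_VARINFO *v = adios_inq_var (f, "t");
    adios_inq_var_blockinfo (f, v);   /* populate v->blockinfo */
    int b;
    for (b = 0; b < v->sum_nblocks; b++)
        printf ("block %d: offset=%llu count=%llu\n", b,
                (unsigned long long) v->blockinfo[b].start[0],
                (unsigned long long) v->blockinfo[b].count[0]);
    adios_free_varinfo (v);
    adios_read_close (f);
}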
int main (int argc, char ** argv)
{
    char filename [256];
    char color_str[256];
    int rank, size, i, color;
    int NX = 100, Global_bounds, Offsets;
    double t[NX];
    MPI_Comm comm = MPI_COMM_WORLD;

    /* ADIOS variable declarations matching gwrite_temperature.ch */
    uint64_t adios_groupsize, adios_totalsize;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    Global_bounds = NX * size;

    strcpy (filename, "adios_global_aggregate_by_color.bp");

    adios_init_noxml (comm);
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 10);

    int64_t m_adios_group;
    int64_t m_adios_file;

    adios_declare_group (&m_adios_group, "restart", "iter", adios_flag_yes);

    /* split into 2 groups */
    color = (rank % 2 == 0 ? 0 : 1);
    sprintf (color_str, "color=%d", color);
    adios_select_method (m_adios_group, "MPI", color_str, "");

    adios_define_var (m_adios_group, "NX", "", adios_integer, 0, 0, 0);
    adios_define_var (m_adios_group, "Global_bounds", "", adios_integer, 0, 0, 0);
    adios_define_var (m_adios_group, "Offsets", "", adios_integer, 0, 0, 0);
    adios_define_var (m_adios_group, "temperature", "", adios_double,
                      "NX", "Global_bounds", "Offsets");

    adios_open (&m_adios_file, "restart", filename, "w", comm);

    adios_groupsize = 4 + 4 + 4 + NX * 8;
    adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);

    adios_write (m_adios_file, "NX", (void *) &NX);
    adios_write (m_adios_file, "Global_bounds", (void *) &Global_bounds);
    Offsets = rank * NX;
    adios_write (m_adios_file, "Offsets", (void *) &Offsets);
    for (i = 0; i < NX; i++)
        t[i] = Offsets + i;
    adios_write (m_adios_file, "temperature", t);

    adios_close (m_adios_file);
    MPI_Barrier (comm);
    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
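/* What the "color=%d" method parameter amounts to, illustrated with plain
   MPI: ranks sharing a color form one group, and (as the example's name
   "aggregate_by_color" suggests) each group aggregates its output together.
   This sketch only demonstrates the grouping rule; the actual aggregation
   happens inside the ADIOS MPI method, so this code is illustrative only. */
#include <mpi.h>

void show_color_grouping (MPI_Comm comm)
{
    int rank, color, subrank;
    MPI_Comm subcomm;
    MPI_Comm_rank (comm, &rank);
    color = (rank % 2 == 0 ? 0 : 1);            /* same rule as the writer above */
    MPI_Comm_split (comm, color, rank, &subcomm);
    MPI_Comm_rank (subcomm, &subrank);          /* rank within its color group */
    MPI_Comm_free (&subcomm);
}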
int process_metadata (int step)
{
    int retval = 0;
    int i, j;
    char gdims[256], ldims[256], offs[256];
    uint64_t sum_count;
    ADIOS_VARINFO *v;   /* shortcut pointer */

    if (step > 1) {
        /* right now there is nothing to prepare in later steps */
        print ("Step %d. return immediately\n", step);
        return 0;
    }

    /* First-step processing */

    /* get the group name of the stream, then declare it for output */
    adios_get_grouplist (f, &group_namelist);
    print0 ("Group name is %s\n", group_namelist[0]);
    adios_declare_group (&gh, group_namelist[0], "", adios_flag_yes);

    varinfo = (VarInfo *) malloc (sizeof(VarInfo) * f->nvars);
    if (!varinfo) {
        print ("ERROR: rank %d cannot allocate %lu bytes\n",
               rank, sizeof(VarInfo) * f->nvars);
        return 1;
    }

    write_total = 0;
    largest_block = 0;

    /* Decompose each variable and calculate the output buffer size */
    for (i = 0; i < f->nvars; i++) {
        print0 ("Get info on variable %d: %s\n", i, f->var_namelist[i]);
        varinfo[i].v = adios_inq_var_byid (f, i);
        v = varinfo[i].v;   /* just a shortcut */
        if (v == NULL) {
            print ("rank %d: ERROR: Variable %s inquiry failed: %s\n",
                   rank, f->var_namelist[i], adios_errmsg());
            return 1;
        }

        /* print variable type and dimensions */
        print0 (" %-9s %s", adios_type_to_string(v->type), f->var_namelist[i]);
        if (v->ndim > 0) {
            print0 ("[%llu", v->dims[0]);
            for (j = 1; j < v->ndim; j++)
                print0 (", %llu", v->dims[j]);
            print0 ("] :\n");
        } else {
            print0 ("\tscalar\n");
        }

        /* determine the subset this process will write */
        decompose (numproc, rank, v->ndim, v->dims, decomp_values,
                   varinfo[i].count, varinfo[i].start, &sum_count);
        varinfo[i].writesize = sum_count * adios_type_size (v->type, v->value);

        if (varinfo[i].writesize != 0) {
            write_total += varinfo[i].writesize;
            if (largest_block < varinfo[i].writesize)
                largest_block = varinfo[i].writesize;
        }
    }

    /* determine the output buffer size and allocate it */
    uint64_t bufsize = write_total + f->nvars * 128 + f->nattrs * 32 + 1024;
    if (bufsize > max_write_buffer_size) {
        print ("ERROR: rank %d: write buffer size needs to hold about %llu bytes, "
               "but max is set to %d\n", rank, bufsize, max_write_buffer_size);
        return 1;
    }
    print0 ("Rank %d: allocate %llu MB for output buffer\n",
            rank, bufsize/1048576 + 1);
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, bufsize/1048576 + 1);

    /* allocate the read buffer */
    bufsize = largest_block + 128;
    if (bufsize > max_read_buffer_size) {
        print ("ERROR: rank %d: read buffer size needs to hold at least %llu bytes, "
               "but max is set to %d\n", rank, bufsize, max_read_buffer_size);
        return 1;
    }
    print0 ("Rank %d: allocate %g MB for input buffer\n",
            rank, (double)bufsize/1048576.0);
    readbuf = (char *) malloc ((size_t) bufsize);
    if (!readbuf) {
        print ("ERROR: rank %d: cannot allocate %llu bytes for read buffer\n",
               rank, bufsize);
        return 1;
    }

    /* Select the output method */
    adios_select_method (gh, wmethodname, wmethodparams, "");

    /* Define variables for output based on the decomposition */
    char *vpath, *vname;
    for (i = 0; i < f->nvars; i++) {
        v = varinfo[i].v;
        if (varinfo[i].writesize != 0) {
            /* define variable for ADIOS writes */
            getbasename (f->var_namelist[i], &vpath, &vname);
            if (v->ndim > 0) {
                int64s_to_str (v->ndim, v->dims, gdims);
                int64s_to_str (v->ndim, varinfo[i].count, ldims);
                int64s_to_str (v->ndim, varinfo[i].start, offs);
                print ("rank %d: Define variable path=\"%s\" name=\"%s\" "
                       "gdims=%s ldims=%s offs=%s\n",
                       rank, vpath, vname, gdims, ldims, offs);
                adios_define_var (gh, vname, vpath, v->type, ldims, gdims, offs);
            } else {
                print ("rank %d: Define scalar path=\"%s\" name=\"%s\"\n",
                       rank, vpath, vname);
                adios_define_var (gh, vname, vpath, v->type, "", "", "");
            }
            free (vpath);
            free (vname);
        }
    }

    if (rank == 0) {
        /* get and define attributes */
        enum ADIOS_DATATYPES attr_type;
        void * attr_value;
        char * attr_value_str;
        int attr_size;
        for (i = 0; i < f->nattrs; i++) {
            adios_get_attr_byid (f, i, &attr_type, &attr_size, &attr_value);
            attr_value_str = (char *) value_to_string (attr_type, attr_value, 0);
            getbasename (f->attr_namelist[i], &vpath, &vname);
            if (vpath && !strcmp (vpath, "/__adios__")) {
                /* skip the /__adios__/... attributes */
                print ("rank %d: Ignore this attribute path=\"%s\" name=\"%s\" value=\"%s\"\n",
                       rank, vpath, vname, attr_value_str);
            } else {
                adios_define_attribute (gh, vname, vpath, attr_type,
                                        attr_value_str, "");
                print ("rank %d: Define attribute path=\"%s\" name=\"%s\" value=\"%s\"\n",
                       rank, vpath, vname, attr_value_str);
                free (attr_value);
            }
        }
    }
    return retval;
}
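/* int64s_to_str() is assumed by process_metadata() but not shown.  A
   plausible sketch that joins an array of dimension values into the
   comma-separated string form that adios_define_var() expects; the real
   helper may differ. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

void int64s_to_str (int n, uint64_t *values, char *s)
{
    char tmp[32];
    int i;
    s[0] = '\0';
    for (i = 0; i < n; i++) {
        snprintf (tmp, sizeof(tmp), "%llu", (unsigned long long) values[i]);
        strcat (s, tmp);
        if (i < n - 1)
            strcat (s, ",");   /* comma-separate all but the last value */
    }
}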
int main (int argc, char** argv)
{
    fastbit_init (0);
    fastbit_set_verbose_level (0);

    ADIOS_FILE * f;
    //MPI_Comm comm_dummy = 0;  /* MPI_Comm is defined through adios_read.h */
    MPI_Comm comm_dummy = MPI_COMM_WORLD;
    int rank, size;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm_dummy, &rank);
    MPI_Comm_size (comm_dummy, &size);
    adios_init_noxml (comm_dummy);

    if (argc < 2) {
        printf ("Usage: index_fastbit fileName (attrName)\n");
        return 0;
    }

    f = adios_read_open_file (argv[1], ADIOS_READ_METHOD_BP, comm_dummy);
    if (f == NULL) {
        printf ("::%s\n", adios_errmsg());
        return -1;
    }

    /*
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, (f->file_size)*2/1048576 + 5); // +5MB for extra room in buffer
    adios_declare_group (&gAdios_group, gGroupNameFastbitIdx, "", adios_flag_yes);
    adios_select_method (gAdios_group, "MPI", "", "");
    */

    gIdxFileName = fastbit_adios_util_getFastbitIndexFileName (argv[1]);
    unlink (gIdxFileName);

    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 500);
    adios_declare_group (&gAdios_group, gGroupNameFastbitIdx, "", adios_flag_yes);
    adios_select_method (gAdios_group, "MPI", "", "");
    adios_open (&gAdios_write_file, gGroupNameFastbitIdx, gIdxFileName, "w",
                MPI_COMM_WORLD);

    /* adios_define_var returns an int64_t id; do not truncate it to int */
#ifdef MULTI_BLOCK
    int64_t testid = adios_define_var (gAdios_group, "pack", "", adios_integer, 0, 0, 0);
#endif
#ifdef BOX
    int64_t testid = adios_define_var (gAdios_group, "elements", "", adios_integer, 0, 0, 0);
#endif

    //uint64_t estimatedbytes = (nb+nk+no)*adios_type_size(adios_double, NULL);
    int jobCounter = getJobCounter (f);
    uint64_t estimatedbytes = getByteEstimationOnFile (f, rank);
    if (size > 1) {
        int maxJobsPP = jobCounter/size + 1;
        estimatedbytes = estimatedbytes * maxJobsPP / jobCounter + 1048576;
    }
    estimatedbytes += 1048576;

    uint64_t adios_totalsize;
    /* adios_group_size needs to be called before any write_byid,
       otherwise write_byid does nothing */
    adios_group_size (gAdios_write_file, estimatedbytes, &adios_totalsize);

    printf ("=> .. adios open output file: %s, rank %d allocated %" PRIu64 " bytes...\n",
            gIdxFileName, rank, adios_totalsize);
\n", gIdxFileName, rank, adios_totalsize); // IMPORTANT: // can only call open/close once in a process // otherwise data is tangled or only the data in the last open/close call is recorded #ifdef MULTI_BLOCK adios_write_byid(gAdios_write_file, testid, &pack); #endif #ifdef BOX adios_write_byid(gAdios_write_file, testid, &recommended_index_ele); #endif sumLogTime(-1); sumLogTimeMillis(-1); if (argc >= 3) { int i=2; while (i<argc) { const char* varName = argv[i]; if(strstr(varName, "<binning prec") != NULL) { if (gBinningOption == NULL) { gBinningOption = argv[i]; } if (argc == 3) { buildIndexOnAllVar(f, rank, size); break; } i++; continue; } else { ADIOS_VARINFO * v = adios_inq_var(f, varName); if (v == NULL) { printf("No such variable: %s\n", varName); return 0; } printf("building fastbit index on variable: %s\n", varName); buildIndex_mpi(f, v, rank, size); adios_free_varinfo(v); i++; } } } else { buildIndexOnAllVar(f, rank, size); } sumLogTime(0); sumLogTimeMillis(0); adios_close(gAdios_write_file); adios_read_close(f); // // writing file clean up // // read back: f = adios_read_open_file (gIdxFileName, ADIOS_READ_METHOD_BP, comm_dummy); if (f == NULL) { printf("No such file: %s \n", gIdxFileName); return 0; } int numVars = f->nvars; int i=0; int k=0; int j=0; for (i=0; i<numVars; i++) { char* varName = f->var_namelist[i]; ADIOS_VARINFO* v = adios_inq_var(f, varName); adios_inq_var_blockinfo(f,v); int timestep = 0; for (k=0; k<v->sum_nblocks; k++) { verifyData(f, v, k, timestep); } adios_free_varinfo(v); } adios_read_close(f); if (rank == 0) { printf(" ==> index file is at: %s\n", gIdxFileName); } // clean up MPI_Barrier (comm_dummy); adios_finalize (rank); MPI_Finalize (); free (gIdxFileName); fastbit_cleanup(); return 0; }