int main (int argc, char ** argv)
{
    char    filename[256];
    int     rank;
    int     NX = 10;
    double  t[NX];
    char    result[1024], s[32];
    int     i;

    /* ADIOS variables declarations for matching gread_temperature.ch */
    int      adios_err;
    uint64_t adios_groupsize, adios_totalsize, adios_buf_size;
    int64_t  adios_handle;
    MPI_Comm comm = MPI_COMM_WORLD;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);

    sprintf (filename, "restart.bp");

    adios_init ("config.xml", comm);
    adios_open (&adios_handle, "temperature", filename, "r", comm);
#include "gread_temperature.ch"
    adios_close (adios_handle);

    adios_finalize (rank);
    MPI_Finalize ();

    sprintf (result, "rank=%d t=[%g", rank, t[0]);
    for (i = 1; i < NX; i++) {
        sprintf (s, ",%g", t[i]);
        strcat (result, s);
    }
    printf ("%s]\n", result);
    return 0;
}
int main (int argc, char ** argv)
{
    char     filename[256];
    int      rank, size, i;
    int      NX = 10;
    double   t[NX];
    MPI_Comm comm = MPI_COMM_WORLD;

    /* ADIOS variables declarations for matching gwrite_temperature.ch */
    int      adios_err;
    uint64_t adios_groupsize, adios_totalsize;
    int64_t  adios_handle;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    for (i = 0; i < NX; i++)
        t[i] = rank*NX + i;

    strcpy (filename, "adios_global.bp");

    adios_init ("adios_global.xml", comm);
    adios_open (&adios_handle, "temperature", filename, "w", comm);
#include "gwrite_temperature.ch"
    adios_close (adios_handle);

    MPI_Barrier (comm);
    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
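/* For reference: the gwrite_temperature.ch file included above is generated from the
 * XML group definition by running 'gpp.py <config>.xml' (see the comments in the
 * worker examples further below).  A minimal sketch of what such a generated include
 * typically expands to, assuming the group declares the scalar NX and the array t;
 * the exact variable list and group size come from the XML, so treat this as
 * illustrative rather than the literal generated text:
 *
 *     adios_groupsize = 4              // NX (one integer)
 *                     + 8 * (NX);      // t[NX] (doubles)
 *     adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
 *     adios_write (adios_handle, "NX", &NX);
 *     adios_write (adios_handle, "t", t);
 */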
void DumpData(simulation_data *sim, char *Filename, int tindex, const char *postfix)
{
    char lname[512];
#ifdef ADIOS
    if(sim->cycle == 1){
        //fprintf(stderr,"Calling adios_init()\n");
        //adios_init ("/users/jfavre/Projects/ADIOS/benchmark.xml");
        //adios_init ("/users/jfavre/Projects/ADIOS/benchmark.xml", sim->comm_cart);
        adios_init ("benchmark.xml", sim->comm_cart);
    }
    sprintf(lname,"%s.%04d.%s", Filename, tindex, postfix);
    ADIOS_WriteData(sim, lname);
    if(sim->cycle == NUMBER_OF_ITERATIONS){
        //fprintf(stderr, "Calling adios_finalize()\n");
        adios_finalize (sim->par_rank);
    }
#elif NETCDF4
    sprintf(lname,"%s.%04d.%s", Filename, tindex, postfix);
    NETCDF4_WriteData(sim, Filename);
#elif HDF5
    sprintf(lname,"%s.%04d.%s", Filename, tindex, postfix);
    HDF5_WriteData(sim, lname);
#elif BOV
    if(gzipped)
        BOV_WriteData(sim, Filename, tindex, "bof.gz");
    else
        BOV_WriteData(sim, Filename, tindex, "bof");

    if(sim->par_rank == 0) {
        sprintf(lname,"%s.%04d.bov", Filename, tindex);
        FILE *fp = fopen(lname,"w");
        fprintf(fp,"# BOV version: 1.0\n");
        fprintf(fp,"# file written by IO benchmark program\n");
        if(gzipped)
            fprintf(fp,"DATA_FILE: %s.%%05d.%04d.bof.gz\n", Filename, tindex);
        else
            fprintf(fp,"DATA_FILE: %s.%%05d.%04d.bof\n", Filename, tindex);
        fprintf(fp,"DATA SIZE: %d %d %d\n",
                sim->global_dims[0], sim->global_dims[1], sim->global_dims[2]);
        fprintf(fp,"DATA_BRICKLETS: %d %d %d\n",
                sim->grid.Nrows, sim->grid.Ncolumns, sim->grid.Nlevels);
        fprintf(fp,"DATA FORMAT: FLOAT\n");
        fprintf(fp,"VARIABLE: node_data\n");
        //fprintf(fp,"VARIABLE PALETTE MIN: 0\n");
        //fprintf(fp,"VARIABLE PALETTE MAX: 14.7273\n");
        fprintf(fp,"BRICK ORIGIN: 0.0 0.0 0.0\n");
        fprintf(fp,"BRICK SIZE: %f %f %f\n",
                1.0*sim->global_dims[0], 1.0*sim->global_dims[1], 1.0*sim->global_dims[2]);
        fprintf(fp,"BRICK X_AXIS: 1.000 0.000 0.000\n");
        fprintf(fp,"BRICK Y_AXIS: 0.000 1.000 0.000\n");
        fprintf(fp,"BRICK Z_AXIS: 0.000 0.000 1.000\n");
        fprintf(fp,"DATA_ENDIAN: LITTLE\n");
        fprintf(fp,"CENTERING: nodal\n");
        fprintf(fp,"BYTE_OFFSET: 0\n");
        fclose(fp);
    }
#else
    sprintf(lname,"%s.%04d.%s", Filename, tindex, postfix);
    MPIIO_WriteData(sim, lname);
#endif
}
int main(int argc, char ** argv)
{
    int rank=0, size=0;
    int NX = NX_DIM;                  // size of 1D array we will write
    double t[NX_DIM];                 // this will contain the variables
    MPI_Comm comm = MPI_COMM_WORLD;   // required for ADIOS
    int64_t adios_handle;             // the ADIOS file handler
    int retval;

    struct adios_tsprt_opts adios_opts;
    int err_count = 0;

    GET_ENTRY_OPTIONS(adios_opts,
        "Runs writers. It is recommended to run as many writers as readers.");

    // I assume that I have all required options set in adios_opts

    // sanity check
    assert(NX==NX_DIM);

    // ADIOS initialization
    MPI_Init(&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    SET_ERROR_IF_NOT_ZERO(adios_init(adios_opts.xml_adios_init_filename, comm), err_count);
    RET_IF_ERROR(err_count, rank);

    // init the array that I will transport
    if (gen_1D_array(t, NX, rank) == DIAG_ERR){
        printf("ERROR: Generating 1D array. Quitting ...\n");
        return DIAG_ERR;
    }

    uint64_t adios_groupsize, adios_totalsize;

    // open with the group name as specified in the xml file
    adios_open( &adios_handle, "temperature", FILE_NAME, "w", comm);
    adios_groupsize = 4 + 4 + 4 + 8 * (NX);
    retval = adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
    fprintf(stderr, "Rank=%d adios_group_size(): adios_groupsize=%" PRIu64
            ", adios_totalsize=%" PRIu64 ", retval=%d\n",
            rank, adios_groupsize, adios_totalsize, retval);

    // write; don't check errors for simplicity reasons
    adios_write(adios_handle, "NX", &NX);
    adios_write(adios_handle, "size", &size);
    adios_write(adios_handle, "rank", &rank);
    adios_write(adios_handle, "var_1d_array", t);
    fprintf(stderr, "Rank=%d committed write\n", rank);

    adios_close(adios_handle);

    // clean and finalize the system
    adios_finalize(rank);
    MPI_Finalize();

    return DIAG_OK;
}
int main (int argc, char ** argv)
{
    char     filename[256];
    int      rank;
    MPI_Comm comm = MPI_COMM_WORLD;

    uint64_t adios_groupsize, adios_totalsize;
    int64_t  adios_handle;

    int8_t   v1  = -4;
    int16_t  v2  = -3;
    int32_t  v3  = -2;
    int64_t  v4  = -1;

    uint8_t  v5  = 1;
    uint16_t v6  = 2;
    uint32_t v7  = 3;
    uint64_t v8  = 4;

    float    v9  = 5.0;
    double   v10 = 6.0;

    char   * v11 = "ADIOS example";

    complex        v12;
    v12.r = 8.0;
    v12.i = 9.0;

    double_complex v13;
    v13.r = 10.0;
    v13.i = 11.0;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    strcpy (filename, "scalars.bp");

    /* adios_open() opens a "group in a file", here the "scalars" group.
       GWRITE is the convenient way to write all variables defined in the
       xml file but of course one can write the individual adios_write()
       statements here too */
    adios_init ("scalars.xml", comm);
    adios_open (&adios_handle, "scalars", filename, "w", comm);
#include "gwrite_scalars.ch"
    adios_close (adios_handle);

    MPI_Barrier (comm);
    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
int main(int argc, char **argv) { MPI_Init(&argc, &argv); adios_init("transforms.xml", comm); double *arr = malloc(N * sizeof(double)); memset(arr, 123, N * sizeof(double)); write_test_file(arr); read_test_file(arr); adios_finalize(0); MPI_Finalize(); }
int main (int argc, char ** argv)
{
    char filename[256];
    int  rank, size;
    int  NX = 10;
    int  N = 3;        /* number of files to write */
    double t[NX];
    int  i;

    /* ADIOS variables declarations for matching gwrite_temperature.ch */
    uint64_t adios_groupsize, adios_totalsize;
    int64_t  adios_handle;

    int color, key;
    MPI_Comm comm;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
    MPI_Comm_size (MPI_COMM_WORLD, &size);

    /* MPI_Comm_split partitions the world group into N disjoint subgroups,
     * the processes are ranked in terms of the argument key.
     * A new communicator comm is returned for this specific grid configuration */
    color = rank % N;
    key = rank / N;
    MPI_Comm_split (MPI_COMM_WORLD, color, key, &comm);

    for (i = 0; i < NX; i++)
        t[i] = rank*NX + i;

    /* every P/N processes write into the same file,
     * there are N files generated. */
    sprintf (filename, "restart_%5.5d.bp", color);

    adios_init ("config.xml", MPI_COMM_WORLD);
    adios_open (&adios_handle, "temperature", filename, "w", comm);
#include "gwrite_temperature.ch"
    adios_close (adios_handle);

    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
int main (int argc, char ** argv)
{
    char     filename[256];
    int      rank, size, i, j;
    int      NX = 10, NY = 100;
    double   t[NX][NY];
    int      p[NX];
    MPI_Comm comm = MPI_COMM_WORLD;

    int      adios_err;
    uint64_t adios_groupsize, adios_totalsize;
    int64_t  adios_handle;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);

    for (i = 0; i < NX; i++)
        for (j = 0; j < NY; j++)
            t[i][j] = rank * NX + i + j*(1.0/NY);

    for (i = 0; i < NX; i++)
        p[i] = rank * NX + i;

    strcpy (filename, "arrays.bp");

    adios_init ("arrays.xml", comm);
    adios_open (&adios_handle, "arrays", filename, "w", comm);
#include "gwrite_arrays.ch"
    adios_close (adios_handle);

    MPI_Barrier (comm);
    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
int main (int argc, char ** argv )
{
    MPI_Comm comm = MPI_COMM_WORLD;
    int rank;
    int ndx, ndy;             // size of array per processor
    double *data;
    double *X;                // X coordinate
    double *Y;                // Y coordinate

    // Offsets and sizes
    int offs_x, offs_y;       // offset in x and y direction
    int nx_local, ny_local;   // local address
    int nx_global, ny_global; // global address
    int posx, posy;           // position index in the array
    int i, j;

    /* ADIOS variables declarations for matching gwrite_temperature.ch */
    uint64_t adios_groupsize, adios_totalsize;
    int64_t  adios_handle;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    /* nproc, npx and npy are assumed to be file-scope globals set via processArgs() */
    MPI_Comm_size (comm, &nproc);

    if (processArgs(argc, argv)) {
        return 1;
    }

    //will work with each core writing ndx = 65, ndy = 129, (65*3,129*4) global
    ndx = 65;
    ndy = 129;

    //2D array with block,block decomposition
    posx = rank%npx;   // 1st dim
    posy = rank/npx;   // 2nd dim
    offs_x = posx * ndx;
    offs_y = posy * ndy;
    nx_local = ndx;
    ny_local = ndy;
    nx_global = npx * ndx;
    ny_global = npy * ndy;

    data = malloc (ndx * ndy * sizeof(double));
    for( i = 0; i < ndx; i++ )
        for( j = 0; j < ndy; j++)
            data[i*ndy + j] = 1.0*rank;

    X = malloc (ndx * sizeof(double));
    for( i = 0; i < ndx; i++ )
        //X[i] = 0.1*i*i+ndx*posx;
        X[i] = 0.1*(i+offs_x)*(i+offs_x);

    Y = malloc (ndy * sizeof(double));
    for( i = 0; i < ndy; i++ )
        //Y[i] = 0.1*i*i+ndx*posy;
        Y[i] = 0.1*(i+offs_y)*(i+offs_y);

    adios_init ("rectilinear2d.xml", comm);
    adios_open (&adios_handle, "rectilinear2d", "rectilinear2d.bp", "w", comm);
    adios_groupsize = 7*sizeof(int)
                    + sizeof(double) * (nx_local*ny_local)
                    + sizeof(double) * (nx_local)
                    + sizeof(double) * (ny_local);
    adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
    adios_write (adios_handle, "nproc",     &nproc);
    adios_write (adios_handle, "nx_global", &nx_global);
    adios_write (adios_handle, "ny_global", &ny_global);
    adios_write (adios_handle, "offs_x",    &offs_x);
    adios_write (adios_handle, "offs_y",    &offs_y);
    adios_write (adios_handle, "nx_local",  &nx_local);
    adios_write (adios_handle, "ny_local",  &ny_local);
    if( rank < npx ) {
        adios_write (adios_handle, "X", X);
    }
    //printf ("rank %d: check if to print Y, rank%%npx=%d offs_y=%d\n", rank, rank%npx, offs_y);
    if( rank % npx == 0 ) {
        adios_write (adios_handle, "Y", Y);
    }
    adios_write (adios_handle, "data", data);
    adios_close (adios_handle);

    MPI_Barrier (comm);
    free (data);
    free (X);
    free (Y);

    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
/* --------------------------------- Main --------------------------------- */
int main( int argc, char ** argv)
{
    char     filename[256];
    MPI_Comm comm = MPI_COMM_WORLD;
    int      rank, size;

    /* ADIOS variables declarations for matching gwrite_schema.ch */
    int      adios_err;
    uint64_t adios_groupsize, adios_totalsize;
    int64_t  adios_handle;

    float    tmax = 10.0;
    float    dt = 0.5;  // run from 0.0 increasing with 'dt' up to 'tmax'
    int      i;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    adios_init ("local_array_time.xml", comm);
    strcpy (filename, "local_array_time.bp");

    // Declare and Initialize essential variables
    int num_points = 37;
    float angles[num_points];
    float cos_of_angles[num_points];
    float sin_of_angles[num_points];
    float pi;

    // Obtain pi once for all
    pi = 4.0*atan(1.0);

    // Initialize angles in degrees
    float angle_degree = 0;
    for (i=0; i<num_points; i++) {
        angles[i] = pi * angle_degree/180.0;
        angle_degree = angle_degree + 10.0;
    }

    // Scan over time
    float timestep = 0.0;
    for (timestep = 0.0; timestep <= tmax; timestep = timestep + dt) {
        if (timestep == 0.0) {
            printf("\n\n\nopen file\n\n\n");
            adios_open (&adios_handle, "schema", filename, "w", comm);
        } else {
            adios_open (&adios_handle, "schema", filename, "a", comm);
        }
        for (i=0; i<num_points; i++) {
            cos_of_angles[i] = cos(angles[i]*timestep);
            sin_of_angles[i] = sin(angles[i]*timestep);
        }

        adios_groupsize = 4 + 4
                        + 4*num_points
                        + 4*num_points;
        if (timestep == 0 && rank == 0) {
            adios_groupsize += 4 + 4 + 4*num_points;
        }
        adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);

        adios_write (adios_handle, "num_points", &num_points);
        adios_write (adios_handle, "t", &timestep);
        if (timestep == 0 && rank == 0) {
            adios_write (adios_handle, "tmax", &tmax);
            adios_write (adios_handle, "dt", &dt);
            adios_write (adios_handle, "angles", angles);
        }
        adios_write (adios_handle, "cos", cos_of_angles);
        adios_write (adios_handle, "sin", sin_of_angles);
        adios_close (adios_handle);

        // Write out raw data
        print_data_1D(timestep, num_points, angles, sin_of_angles, 0);
    }

    MPI_Barrier (comm);
    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
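/* A minimal read-back sketch for one step of the file written above, using the same
 * ADIOS read API that the streaming examples below rely on.  "cos" is a local
 * (per-writer) array, so a writeblock selection picks one writer's block.  Error
 * checking is omitted and the call sequence is illustrative, not taken from this
 * example's source. */
static void read_back_one_step (MPI_Comm comm)
{
    adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "");
    ADIOS_FILE *fp = adios_read_open_file ("local_array_time.bp",
                                           ADIOS_READ_METHOD_BP, comm);

    /* the scalar written above tells us how large each block is */
    ADIOS_VARINFO *vi = adios_inq_var (fp, "num_points");
    int npts = *(int *) vi->value;

    float *cosbuf = malloc (npts * sizeof(float));
    ADIOS_SELECTION *sel = adios_selection_writeblock (0);      /* block 0 of the step */
    adios_schedule_read (fp, sel, "cos", 0 /*from step*/, 1 /*nsteps*/, cosbuf);
    adios_perform_reads (fp, 1);

    /* ... use cosbuf[0..npts-1] ... */

    adios_selection_delete (sel);
    adios_free_varinfo (vi);
    free (cosbuf);
    adios_read_close (fp);
    adios_read_finalize_method (ADIOS_READ_METHOD_BP);
}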
int main (int argc, char ** argv) { char filename [256] = "stream.bp"; int rank, size; int NX, NY; int len, off; double *t = NULL; MPI_Comm comm = MPI_COMM_WORLD; int64_t adios_handle; uint64_t adios_groupsize, adios_totalsize; uint64_t start[2], count[2]; ADIOS_SELECTION *sel; int steps = 0; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); // ADIOS read init adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3"); ADIOS_FILE* fp = adios_read_open_file ("kstar.bp", ADIOS_READ_METHOD_BP, comm); assert(fp != NULL); ADIOS_VARINFO* nx_info = adios_inq_var( fp, "N"); ADIOS_VARINFO* ny_info = adios_inq_var( fp, "L"); NX = *((int *)nx_info->value); NY= *((int*)ny_info->value); len = NX / size; off = len * rank; if (rank == size-1) len = len + NX % size; printf("\trank=%d: NX,NY,len,off = %d\t%d\t%d\t%d\n", rank, NX, NY, len, off); assert(len > 0); t = (double *) malloc(sizeof(double) * len * NY); memset(t, '\0', sizeof(double) * len * NY); assert(t != NULL); start[0] = off; start[1] = 0; count[0] = len; count[1] = NY; sel = adios_selection_boundingbox (2, start, count); // ADIOS write init adios_init ("adios.xml", comm); remove (filename); //int ii; //for(ii = 0; ii<10; ii++){ // for (i = 0; i < len * NY; i++) // t[i] = ii*1000 + rank; while(adios_errno != err_end_of_stream && adios_errno != err_step_notready) { steps++; // Reading adios_schedule_read (fp, sel, "var", 0, 1, t); adios_perform_reads (fp, 1); // Debugging //for (i = 0; i < len*NY; i++) t[i] = off * NY + i; printf("step=%d\trank=%d\t[%d,%d]\n", steps, rank, len, NY); // Writing adios_open (&adios_handle, "writer", filename, "a", comm); adios_groupsize = 4*4 + 8*len*NY; adios_group_size (adios_handle, adios_groupsize, &adios_totalsize); adios_write (adios_handle, "NX", &NX); adios_write (adios_handle, "NY", &NY); adios_write (adios_handle, "len", &len); adios_write (adios_handle, "off", &off); adios_write (adios_handle, "var_2d_array", t); adios_close (adios_handle); // Advance MPI_Barrier (comm); adios_advance_step(fp, 0, TIMEOUT_SEC); } free(t); MPI_Barrier (comm); adios_read_close(fp); if (rank==0) printf ("We have processed %d steps\n", steps); MPI_Barrier (comm); adios_read_finalize_method(ADIOS_READ_METHOD_BP); adios_finalize (rank); MPI_Finalize (); return 0; }
int main (int argc, char ** argv)
{
    int rank, size;
    int NX, NY;
    int len, off;
    double *t = NULL;
    MPI_Comm comm = MPI_COMM_WORLD;

    uint64_t start[2], count[2];
    ADIOS_SELECTION *sel;
    int steps = 0;

#ifdef _USE_GNUPLOT
    int i, j;
    double *tmp;
    FILE *pipe;
#else
    // Variables for ADIOS write
    int64_t  adios_handle;
    uint64_t adios_groupsize, adios_totalsize;
    char outfn[256];
#endif

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    adios_read_init_method(ADIOS_READ_METHOD_FLEXPATH, comm, "");

    ADIOS_FILE* fp = adios_read_open("stream.bp", ADIOS_READ_METHOD_FLEXPATH, comm,
                                     ADIOS_LOCKMODE_NONE, 0.0);
    assert(fp != NULL);

    ADIOS_VARINFO* nx_info = adios_inq_var( fp, "NX");
    ADIOS_VARINFO* ny_info = adios_inq_var( fp, "NY");

    NX = *((int *)nx_info->value);
    NY = *((int *)ny_info->value);

    len = NX / size;
    off = len * rank;
    if (rank == size-1)
        len = len + NX % size;

    printf("\trank=%d: NX,NY,len,off = %d\t%d\t%d\t%d\n", rank, NX, NY, len, off);
    assert(len > 0);

    t = (double *) malloc(sizeof(double) * len * NY);
    assert(t != NULL);
    memset(t, '\0', sizeof(double) * len * NY);

    start[0] = off;
    start[1] = 0;
    count[0] = len;
    count[1] = NY;

    // Not working ...
    //sel = adios_selection_boundingbox (2, start, count);
    sel = malloc(sizeof(ADIOS_SELECTION));
    sel->type = ADIOS_SELECTION_WRITEBLOCK;
    sel->u.block.index = rank;

#ifdef _USE_GNUPLOT
    if ((NX % size) > 0) {
        fprintf(stderr, "Equal distribution is required\n");
        return -1;
    }

    if (rank == 0) {
        pipe = popen("gnuplot", "w");
        fprintf(pipe, "set view map\n");
        fprintf(pipe, "set xrange [0:%d]\n", NX-1);

        tmp = (double *) malloc(sizeof(double) * NX * NY);
        assert(tmp != NULL);
    }
#else
    // ADIOS write init
    adios_init ("adios.xml", comm);
#endif

    //while(adios_errno != err_end_of_stream && adios_errno != err_step_notready)
    while(1)
    {
        steps++;

        // Reading
        adios_schedule_read (fp, sel, "var_2d_array", 0, 1, t);
        adios_perform_reads (fp, 1);

        printf("step=%d\trank=%d\tfp->current_step=%d\t[%d,%d]\n",
               steps, rank, fp->current_step, len, NY);

        /*
        // Debugging
        for (i=0; i<len; i++) {
            printf("%d: rank=%d: t[%d,0:4] = ", steps, rank, off+i);
            for (j=0; j<5; j++) {
                printf(", %g", t[i*NY + j]);
            }
            printf(" ...\n");
        }
        */

        // Do something
#ifdef _USE_GNUPLOT
        // Option 1: plotting
        MPI_Gather(t, len * NY, MPI_DOUBLE, tmp, len * NY, MPI_DOUBLE, 0, comm);

        if (rank == 0) {
            fprintf(pipe, "set title 'Soft X-Ray Signal (shot #%d)'\n", steps);
            fprintf(pipe, "set xlabel 'Channel#'\n");
            fprintf(pipe, "set ylabel 'Timesteps'\n");
            fprintf(pipe, "set cblabel 'Voltage (eV)'\n");

# ifndef _GNUPLOT_INTERACTIVE
            fprintf(pipe, "set terminal png\n");
            fprintf(pipe, "set output 'fig%03d.png'\n", steps);
# endif

            fprintf(pipe, "splot '-' matrix with image\n");
            //fprintf(pipe, "plot '-' with lines, '-' with lines, '-' with lines\n");

            double *sum = calloc(NX, sizeof(double));
            for (j = 0; j < NY; j++) {
                for (i = 0; i < NX; i++) {
                    sum[i] += tmp[i * NY + j];
                }
            }

            for (j = 0; j < NY; j++) {
                for (i = 0; i < NX; i++) {
                    fprintf (pipe, "%g ", (-tmp[i * NY + j] + sum[i]/NY)/3276.8);
                }
                fprintf(pipe, "\n");
            }
            fprintf(pipe, "e\n");
            fprintf(pipe, "e\n");
            fflush (pipe);

# ifdef _GNUPLOT_INTERACTIVE
            printf ("Press [Enter] to continue . . .");
            fflush (stdout);
            getchar ();
# endif

            free(sum);
        }
#else
        // Option 2: BP writing
        snprintf (outfn, sizeof(outfn), "reader_%3.3d.bp", steps);
        adios_open (&adios_handle, "reader", outfn, "w", comm);
        adios_groupsize = 4 * sizeof(int) + sizeof(double) * len * NY;
        adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
        adios_write (adios_handle, "NX", &NX);
        adios_write (adios_handle, "NY", &NY);
        adios_write (adios_handle, "len", &len);
        adios_write (adios_handle, "off", &off);
        adios_write (adios_handle, "var", t);
        adios_close (adios_handle);
#endif

        // Advance
        MPI_Barrier (comm);
        adios_advance_step(fp, 0, TIMEOUT_SEC);

        if (adios_errno == err_end_of_stream) {
            printf("rank %d, Stream terminated. Quit\n", rank);
            break; // quit while loop
        } else if (adios_errno == err_step_notready) {
            printf ("rank %d: No new step arrived within the timeout. Quit.\n", rank);
            break; // quit while loop
        } else if (adios_errno != err_no_error) {
            printf("ADIOS returned code=%d, msg:%s\n", adios_errno, adios_get_last_errmsg());
            break; // quit while loop
        }
    }

    //free(t);
    adios_read_close(fp);
    //printf("rank %d, Successfully closed stream\n", rank);
    adios_read_finalize_method(ADIOS_READ_METHOD_FLEXPATH);
    //printf("rank %d, Successfully finalized read method\n", rank);

#ifndef _USE_GNUPLOT
    adios_finalize (rank);
    //printf("rank %d, Successfully finalized adios\n", rank);
#else
    if (rank==0) {
        free(tmp);
        pclose(pipe);
    }
#endif

    MPI_Finalize ();
    return 0;
}
int main (int argc, char ** argv )
{
    MPI_Comm comm = MPI_COMM_WORLD;
    int rank;
    int ndx, ndy;             // size of array per processor
    double * data;
    int O1 = 0;               // origin in x direction
    int O2 = 0;               // origin in y direction
    int S1 = 1;               // spacing in x direction
    int S2 = 2;               // spacing in y direction

    // Offsets and sizes
    int offs_x, offs_y;       // offset in x and y direction
    int nx_local, ny_local;   // local address
    int nx_global, ny_global; // global address
    int posx, posy;           // position index in the array
    int i, j;

    /* ADIOS variables declarations for matching gwrite_temperature.ch */
    uint64_t adios_groupsize, adios_totalsize;
    int64_t  adios_handle;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    /* nproc, npx and npy are assumed to be file-scope globals set via processArgs() */
    MPI_Comm_size (comm, &nproc);

    if (processArgs(argc, argv)) {
        return 1;
    }

    //will work with each core writing ndx = 65, ndy = 129, (65*4,129*3) global
    ndx = 65;
    ndy = 129;

    //2D array with block,block decomposition
    posx = rank%npx;   // 1st dim
    posy = rank/npx;   // 2nd dim
    offs_x = posx * ndx;
    offs_y = posy * ndy;
    nx_local = ndx;
    ny_local = ndy;
    nx_global = npx * ndx;
    ny_global = npy * ndy;

    data = malloc (ndx * ndy * sizeof(double));
    for( i = 0; i < ndx; i++ )
        for( j = 0; j < ndy; j++)
            data[i*ndy + j] = 1.0*rank;

    adios_init ("uniform2d.xml", comm);
    adios_open (&adios_handle, "uniform2d", "uniform2d.bp", "w", comm);
    adios_groupsize = 7*sizeof(int) + 4*sizeof(double)
                    + sizeof(double) * (nx_local*ny_local);
    adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
    adios_write (adios_handle, "nproc",     &nproc);
    adios_write (adios_handle, "nx_global", &nx_global);
    adios_write (adios_handle, "ny_global", &ny_global);
    adios_write (adios_handle, "offs_x",    &offs_x);
    adios_write (adios_handle, "offs_y",    &offs_y);
    adios_write (adios_handle, "nx_local",  &nx_local);
    adios_write (adios_handle, "ny_local",  &ny_local);
    adios_write (adios_handle, "O1", &O1);
    adios_write (adios_handle, "O2", &O2);
    adios_write (adios_handle, "S1", &S1);
    adios_write (adios_handle, "S2", &S2);
    adios_write (adios_handle, "data", data);
    adios_close (adios_handle);

    MPI_Barrier (comm);
    free (data);

    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
static void build_dataset_from_specs(
    const char *filename_prefix,
    const char *transform_name,
    const dataset_xml_spec_t *xml_spec,
    const dataset_global_spec_t *global_spec,
    int num_ts, int num_pgs_per_ts,
    dataset_pg_spec_t pg_specs[num_ts][num_pgs_per_ts]) // Not const because C has a corner case here (http://c-faq.com/ansi/constmismatch.html)
{
    int var;
    char xml_filename[strlen(filename_prefix) + strlen(".xml") + 1];
    char bp_filename[strlen(filename_prefix) + strlen(".bp") + 1];
    int timestep, pg_in_timestep;
    char dimvar[32];

    // Construct the XML and BP filenames
    sprintf(xml_filename, "%s.xml", filename_prefix);
    sprintf(bp_filename, "%s.bp", filename_prefix);

    // Write out the XML file
    FILE *xml_out = fopen(xml_filename, "w");
    assert(xml_out);
    produce_xml(xml_out, xml_spec, transform_name);
    fclose(xml_out);

    // Write out the BP file
    adios_init(xml_filename, MPI_COMM_WORLD);

    // Compute the groupsize contribution of the dimension scalars:
    // *3 for 3 scalars (N, D, O), *4 for sizeof(adios_integer)
    // (not sure what function in the user API to call to get this programmatically)
    const uint64_t base_groupsize = xml_spec->ndim * 3 * 4;

    // For each timestep, for each PG in that timestep, write out all variables
    // using the provided vardata buffers
    int64_t adios_file;
    for (timestep = 0; timestep < global_spec->num_ts; ++timestep) {
        for (pg_in_timestep = 0; pg_in_timestep < global_spec->num_pgs_per_ts; ++pg_in_timestep) {
            // (Re-)open the file in write or append mode, depending on whether
            // or not this is the first PG written
            const int is_first_pg = (timestep == 0 && pg_in_timestep == 0);
            adios_open(&adios_file, xml_spec->group_name, bp_filename,
                       is_first_pg ? "w" : "a", MPI_COMM_WORLD);

            // Pin the timestep to allow multiple adios_open/adios_close cycles to write
            // to the same timestep (this simulates a parallel file write with fewer cores)
            adios_pin_timestep(timestep + 1); // +1 because we want the timesteps to be 1-based

            const dataset_pg_spec_t *pg_spec = &pg_specs[timestep][pg_in_timestep];

            // Compute the group size
            uint64_t groupsize = compute_groupsize(base_groupsize, xml_spec, pg_spec);
            uint64_t out_groupsize;
            adios_group_size(adios_file, groupsize, &out_groupsize);

            write_adios_dimension_scalars(adios_file, "N", xml_spec->ndim, global_spec->global_dims);
            write_adios_dimension_scalars(adios_file, "D", xml_spec->ndim, pg_spec->pg_dim);
            write_adios_dimension_scalars(adios_file, "O", xml_spec->ndim, pg_spec->pg_offset);

            // Write each variable
            for (var = 0; var < xml_spec->nvar; ++var) {
                adios_write(adios_file, xml_spec->varnames[var],
                            (void*)pg_spec->vardata[var]); // (void*) cast drops constness to match adios_write's signature
            }

            // Close the file to commit it
            adios_close(adios_file);
        }
    }
}
int main (int argc, char ** argv)
{
    int i = 0;

    if(argc < 4) {
        printf("wrong args\n");
        usage();
        return -1;
    }

    DIM_GLOBAL = atoi (argv[1]);
    DIM_LOCAL = atoi (argv[2]);
    char* option = argv[3];

    char bp_file_name[NAME_LEN] = {0};
    char xml_file_name[NAME_LEN] = {0};

    snprintf(bp_file_name, NAME_LEN-1, "output/%s.bp", option);
    snprintf(xml_file_name, NAME_LEN-1, "conf/%s.xml", option);

    // MPI related initialization
    int rank, nproc;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &nproc);

    double t1 = 0.0;
    double t2 = 0.0;
    double t3 = 0.0;
    double t4 = 0.0;

    // variable dimensions
    int gndx = DIM_GLOBAL;
    int gndy = DIM_GLOBAL;
    int gndz = DIM_GLOBAL;

    int ndx = DIM_LOCAL;
    int ndy = DIM_LOCAL;
    int ndz = DIM_LOCAL;

    int npx = gndx / ndx;
    int npy = gndy / ndy;
    int npz = gndz / ndz;

    if(nproc != npx * npy * npz) {
        printf("process num error! nproc != npx * npy * npz\n");
        MPI_Finalize();
        return -1;
    }

    int posx = rank / (npx * npy);
    int posy = rank % (npx * npy) / npy;
    int posz = rank % (npx * npy) % npy;

    // posx = mod(rank, npx)     // 1st dim easy: 0, npx, 2npx... are in the same X position
    // posy = mod(rank/npx, npy) // 2nd dim: (0, npx-1) have the same dim (so divide with npx first)
    // posz = rank/(npx*npy)     // 3rd dim: npx*npy processes belong into one dim

    int offx = posx * ndx;
    int offy = posy * ndy;
    int offz = posz * ndz;

    int timesteps = 0;

    srand(0); // all procs generate the same random datasets
    double* double_xyz = (double*) malloc (sizeof(double) * ndx * ndy * ndz);
    for(i = 0; i < ndx * ndy * ndz; i++) {
        double_xyz[i] = (double) rand () / RAND_MAX;
    }

    int adios_err;
    uint64_t adios_groupsize, adios_totalsize;
    int64_t adios_handle;

    if(rank == 0) t3 = dclock();
    MPI_Barrier(comm);
    t1 = dclock();

    adios_init (xml_file_name, comm);
    adios_open (&adios_handle, GROUP_NAME, bp_file_name, "w", comm);

    //////////////////////////////////////////////////////////////////////////
    adios_groupsize = 4 + 4 + 4 + 4
                    + 4 + 4 + 4
                    + 4 + 4 + 4
                    + 4 + 4 + 4
                    + 8 * (ndx) * (ndy) * (ndz)
                    + 8 * (ndx) * (ndy) * (ndz);
    adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
    adios_write (adios_handle, "gndx",  &gndx);
    adios_write (adios_handle, "gndy",  &gndy);
    adios_write (adios_handle, "gndz",  &gndz);
    adios_write (adios_handle, "nproc", &nproc);
    adios_write (adios_handle, "npx",   &npx);
    adios_write (adios_handle, "npy",   &npy);
    adios_write (adios_handle, "npz",   &npz);
    adios_write (adios_handle, "offx",  &offx);
    adios_write (adios_handle, "offy",  &offy);
    adios_write (adios_handle, "offz",  &offz);
    adios_write (adios_handle, "ndx",   &ndx);
    adios_write (adios_handle, "ndy",   &ndy);
    adios_write (adios_handle, "ndz",   &ndz);
    adios_write (adios_handle, "temperature", double_xyz);
    adios_write (adios_handle, "preasure", double_xyz);  /* "preasure" (sic) must match the variable name in the XML */
    //////////////////////////////////////////////////////////////////////////

    adios_close (adios_handle);

    /*
    t2 = dclock();
    double tt = t2 - t1;
    MPI_Barrier (comm);
    if(rank == 0) {
        t4 = dclock();
    }
    */

    adios_finalize (rank);

    /*
    double* all_tt = (double*) malloc (sizeof(double) * nproc);
    // calling MPI_Gather
    int rtn = MPI_Gather (&tt, 1, MPI_DOUBLE, all_tt, 1, MPI_DOUBLE, 0, comm);
    MPI_Barrier (comm);
    if(rank == 0) {
        int k = 0;
        double sum = 0.0;
        for(k = 0; k < nproc; k++) {
            // printf("proc %d time %f\n", k, all_tt[k]);
            sum += all_tt[k];
        }
        printf("%s average_write_time %f\n", xml_file_name, sum / nproc);
        printf("%s total_write_time %f\n", xml_file_name, t4 - t3);
    }
    if(all_tt) {
        free(all_tt);
    }
    */

    MPI_Finalize ();

    if(double_xyz) {
        free(double_xyz);
    }

    return 0;
}
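/* dclock() above is assumed to be a small wall-clock helper defined elsewhere in this
 * benchmark; a minimal sketch of such a helper, based on MPI_Wtime(): */
static double dclock (void)
{
    return MPI_Wtime ();   /* wall-clock seconds since some arbitrary point in the past */
}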
int main (int argc, char *argv[])
{
    validate_input(argc, argv);

    /*
     * Initialize TAU and start a timer for the main function.
     */
    TAU_INIT(&argc, &argv);
    TAU_PROFILE_SET_NODE(0);
    TAU_PROFILE_TIMER(tautimer, __func__, my_name, TAU_USER);
    TAU_PROFILE_START(tautimer);

    /*
     * Initialize MPI. We don't require threaded support, but with threads
     * we can send the TAU data over SOS asynchronously.
     */
    int rc = MPI_SUCCESS;
    int provided = 0;
    rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (rc != MPI_SUCCESS) {
        char errorstring[MPI_MAX_ERROR_STRING];
        int length = 0;
        MPI_Error_string(rc, errorstring, &length);
        fprintf(stderr, "Error: MPI_Init failed, rc = %d\n%s\n", rc, errorstring);
        fflush(stderr);
        exit(99);
    }
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    my_printf("%s %s %d Running with comm_size %d\n", argv[0], my_name, getpid(), comm_size);

    MPI_Comm adios_comm;
    MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm);
    adios_init ("arrays.xml", adios_comm);

    /*
     * Loop and do the things
     */
    int iter = 0;
    char tmpstr[256] = {0};
    int * return_codes = (int *)(calloc(num_sources,sizeof(int)));
    while (iter < iterations) {
        int index;

        /*
         * Read upstream input
         */
        for (index = 0 ; index < num_sources ; index++) {
            if (return_codes[index] > 0) {
                my_printf("%s source is gone\n", sources[index]);
                continue; // this input is gone
            }
            my_printf ("%s reading from %s.\n", my_name, sources[index]);
            sprintf(tmpstr,"%s READING FROM %s", my_name, sources[index]);
            TAU_START(tmpstr);
            //mpi_reader(adios_comm, sources[index]);
            return_codes[index] = flexpath_reader(adios_comm, index);
            TAU_STOP(tmpstr);
        }

        /*
         * "compute"
         */
        my_printf ("%s computing.\n", my_name);
        compute(iter);

        bool time_to_go = (num_sources == 0) ? (iter == (iterations-1)) : true;
        for (index = 0 ; index < num_sources ; index++) {
            if (return_codes[index] == 0) {
                time_to_go = false;
                break; // out of this for loop
            }
        }

        /*
         * Send output downstream
         */
        for (index = 0 ; index < num_sinks ; index++) {
            my_printf ("%s writing to %s.\n", my_name, sinks[index]);
            sprintf(tmpstr,"%s WRITING TO %s", my_name, sinks[index]);
            TAU_START(tmpstr);
            //mpi_writer(adios_comm, sinks[index]);
            flexpath_writer(adios_comm, index, (iter > 0), time_to_go);
            TAU_STOP(tmpstr);
        }

        if (time_to_go) {
            break; // out of the while loop
        }
        my_printf ("%s not time to go...\n", my_name);
        iter++;
    }

    /*
     * Finalize ADIOS
     */
    const char * const dot_filename = ".finished";
    if (num_sources > 0) {
        adios_read_finalize_method(ADIOS_READ_METHOD_FLEXPATH);
#if 0
    } else {
        while (true) {
            // assume this is the main process. It can't exit until
            // the last process is done.
            if( access( dot_filename, F_OK ) != -1 ) {
                // file exists
                unlink(dot_filename);
                break;
            } else {
                // file doesn't exist
                sleep(1);
            }
        }
#endif
    }
    if (num_sinks > 0) {
        adios_finalize (my_rank);
#if 0
    } else {
        // assume this is the last process.
        // Tell the main process we are done.
        FILE *file;
        if (file = fopen(dot_filename, "w")) {
            fprintf(file, "done.\n");
            fclose(file);
        }
#endif
    }

    /*
     * Finalize MPI
     */
    MPI_Comm_free(&adios_comm);
    MPI_Finalize();
    my_printf ("%s Done.\n", my_name);

    TAU_PROFILE_STOP(tautimer);
    return 0;
}
int worker(int argc, char* argv[]) { TAU_PROFILE_TIMER(timer, __func__, __FILE__, TAU_USER); TAU_PROFILE_START(timer); static bool announced = false; my_printf("%d of %d In worker A\n", myrank, commsize); /* validate input */ validate_input(argc, argv); my_printf("Worker A will execute %d iterations.\n", iterations); /* ADIOS: These declarations are required to match the generated * gread_/gwrite_ functions. (And those functions are * generated by calling 'gpp.py adios_config.xml') ... */ uint64_t adios_groupsize; uint64_t adios_totalsize; uint64_t adios_handle; char adios_filename[256]; MPI_Comm adios_comm; /* ADIOS: Can duplicate, split the world, whatever. * This allows you to have P writers to N files. * With no splits, everyone shares 1 file, but * can write lock-free by using different areas. */ //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm); adios_comm = MPI_COMM_WORLD; int NX = 10; int NY = 1; double t[NX]; double p[NX]; /* ADIOS: Set up the adios communications and buffers, open the file. */ if (send_to_b) { sprintf(adios_filename, "adios_a_to_b.bp"); adios_init("adios_config.xml", adios_comm); } int index, i; for (index = 0 ; index < iterations ; index++ ) { /* Do some exchanges with neighbors */ do_neighbor_exchange(); /* "Compute" */ compute(index); /* Write output */ //my_printf("a"); for (i = 0; i < NX; i++) { t[i] = index*100.0 + myrank*NX + i; } for (i = 0; i < NY; i++) { p[i] = index*1000.0 + myrank*NY + i; } if (send_to_b) { TAU_PROFILE_TIMER(adiostimer, "ADIOS send", __FILE__, TAU_USER); TAU_PROFILE_START(adiostimer); if (index == 0) { adios_open(&adios_handle, "a_to_b", adios_filename, "w", adios_comm); } else { adios_open(&adios_handle, "a_to_b", adios_filename, "a", adios_comm); } /* ADIOS: Actually write the data out. * Yes, this is the recommended method, and this way, changes in * configuration with the .XML file will, even in the worst-case * scenario, merely require running 'gpp.py adios_config.xml' * and typing 'make'. */ #include "gwrite_a_to_b.ch" /* ADIOS: Close out the file completely and finalize. * If MPI is being used, this must happen before MPI_Finalize(). */ adios_close(adios_handle); TAU_PROFILE_STOP(adiostimer); #if 1 if (!announced) { SOS_val foo; foo.i_val = NX; SOS_pack(example_pub, "NX", SOS_VAL_TYPE_INT, foo); SOS_announce(example_pub); SOS_publish(example_pub); announced = true; } #endif } MPI_Barrier(MPI_COMM_WORLD); } MPI_Barrier(MPI_COMM_WORLD); if (send_to_b) { adios_finalize(myrank); } my_printf("Worker A exting.\n"); //MPI_Comm_free(&adios_comm); TAU_PROFILE_STOP(timer); /* exit */ return 0; }
int worker(int argc, char* argv[]) { TAU_PROFILE_TIMER(timer, __func__, __FILE__, TAU_USER); TAU_PROFILE_START(timer); my_printf("%d of %d In worker B\n", myrank, commsize); static bool announced = false; /* validate input */ validate_input(argc, argv); my_printf("Worker B will execute until it sees n iterations.\n", iterations); /* ADIOS: These declarations are required to match the generated * gread_/gwrite_ functions. (And those functions are * generated by calling 'gpp.py adios_config.xml') ... * EXCEPT THAT THE generation of Reader code is broken. * So, we will write the reader code manually. */ uint64_t adios_groupsize; uint64_t adios_totalsize; uint64_t adios_handle; void * data = NULL; uint64_t start[2], count[2]; int i, j, steps = 0; int NX = 10; int NY = 1; double t[NX]; double p[NX]; /* ADIOS: Can duplicate, split the world, whatever. * This allows you to have P writers to N files. * With no splits, everyone shares 1 file, but * can write lock-free by using different areas. */ MPI_Comm adios_comm, adios_comm_b_to_c; adios_comm = MPI_COMM_WORLD; //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm); adios_comm_b_to_c = MPI_COMM_WORLD; //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm_b_to_c); enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_FLEXPATH; adios_read_init_method(method, adios_comm, "verbose=3"); if (adios_errno != err_no_error) { fprintf (stderr, "rank %d: Error %d at init: %s\n", myrank, adios_errno, adios_errmsg()); exit(4); } if (send_to_c) { adios_init("adios_config.xml", adios_comm); } /* ADIOS: Set up the adios communications and buffers, open the file. */ ADIOS_FILE *fp; // file handler ADIOS_VARINFO *vi; // information about one variable ADIOS_SELECTION * sel; char adios_filename_a_to_b[256]; char adios_filename_b_to_c[256]; enum ADIOS_LOCKMODE lock_mode = ADIOS_LOCKMODE_NONE; double timeout_sec = 1.0; sprintf(adios_filename_a_to_b, "adios_a_to_b.bp"); sprintf(adios_filename_b_to_c, "adios_b_to_c.bp"); my_printf ("rank %d: Worker B opening file: %s\n", myrank, adios_filename_a_to_b); fp = adios_read_open(adios_filename_a_to_b, method, adios_comm, lock_mode, timeout_sec); if (adios_errno == err_file_not_found) { fprintf (stderr, "rank %d: Stream not found after waiting %d seconds: %s\n", myrank, timeout_sec, adios_errmsg()); exit(1); } else if (adios_errno == err_end_of_stream) { // stream has been gone before we tried to open fprintf (stderr, "rank %d: Stream terminated before open. 
%s\n", myrank, adios_errmsg()); exit(2); } else if (fp == NULL) { // some other error happened fprintf (stderr, "rank %d: Error %d at opening: %s\n", myrank, adios_errno, adios_errmsg()); exit(3); } else { my_printf("Found file %s\n", adios_filename_a_to_b); my_printf ("File info:\n"); my_printf (" current step: %d\n", fp->current_step); my_printf (" last step: %d\n", fp->last_step); my_printf (" # of variables: %d:\n", fp->nvars); vi = adios_inq_var(fp, "temperature"); adios_inq_var_blockinfo(fp, vi); printf ("ndim = %d\n", vi->ndim); printf ("nsteps = %d\n", vi->nsteps); printf ("dims[%llu][%llu]\n", vi->dims[0], vi->dims[1]); uint64_t slice_size = vi->dims[0]/commsize; if (myrank == commsize-1) { slice_size = slice_size + vi->dims[0]%commsize; } start[0] = myrank * slice_size; count[0] = slice_size; start[1] = 0; count[1] = vi->dims[1]; data = malloc (slice_size * vi->dims[1] * 8); /* Processing loop over the steps (we are already in the first one) */ while (adios_errno != err_end_of_stream && steps < iterations) { steps++; // steps start counting from 1 TAU_PROFILE_TIMER(adios_recv_timer, "ADIOS recv", __FILE__, TAU_USER); TAU_PROFILE_START(adios_recv_timer); sel = adios_selection_boundingbox (vi->ndim, start, count); adios_schedule_read (fp, sel, "temperature", 0, 1, data); adios_perform_reads (fp, 1); if (myrank == 0) printf ("--------- B Step: %d --------------------------------\n", fp->current_step); #if 0 printf("B rank=%d: [0:%lld,0:%lld] = [", myrank, vi->dims[0], vi->dims[1]); for (i = 0; i < slice_size; i++) { printf (" ["); for (j = 0; j < vi->dims[1]; j++) { printf ("%g ", *((double *)data + i * vi->dims[1] + j)); } printf ("]"); } printf (" ]\n\n"); #endif // advance to 1) next available step with 2) blocking wait adios_advance_step (fp, 0, timeout_sec); if (adios_errno == err_step_notready) { printf ("B rank %d: No new step arrived within the timeout. Quit. %s\n", myrank, adios_errmsg()); break; // quit while loop } TAU_PROFILE_STOP(adios_recv_timer); /* Do some exchanges with neighbors */ //do_neighbor_exchange(); /* "Compute" */ compute(steps); for (i = 0; i < NX; i++) { t[i] = steps*100.0 + myrank*NX + i; } for (i = 0; i < NY; i++) { p[i] = steps*1000.0 + myrank*NY + i; } if (send_to_c) { TAU_PROFILE_TIMER(adios_send_timer, "ADIOS send", __FILE__, TAU_USER); TAU_PROFILE_START(adios_send_timer); /* ADIOS: write to the next application in the workflow */ if (steps == 0) { adios_open(&adios_handle, "b_to_c", adios_filename_b_to_c, "w", adios_comm_b_to_c); } else { adios_open(&adios_handle, "b_to_c", adios_filename_b_to_c, "a", adios_comm_b_to_c); } /* ADIOS: Actually write the data out. * Yes, this is the recommended method, and this way, changes in * configuration with the .XML file will, even in the worst-case * scenario, merely require running 'gpp.py adios_config.xml' * and typing 'make'. */ #include "gwrite_b_to_c.ch" /* ADIOS: Close out the file completely and finalize. * If MPI is being used, this must happen before MPI_Finalize(). */ adios_close(adios_handle); TAU_PROFILE_STOP(adios_send_timer); #if 1 if (!announced) { SOS_val foo; foo.i_val = NX; SOS_pack(example_pub, "NX", SOS_VAL_TYPE_INT, foo); SOS_announce(example_pub); SOS_publish(example_pub); announced = true; } #endif } MPI_Barrier(adios_comm_b_to_c); } MPI_Barrier(MPI_COMM_WORLD); adios_read_close(fp); /* ADIOS: Close out the file completely and finalize. * If MPI is being used, this must happen before MPI_Finalize(). 
*/ adios_read_finalize_method(method); } if (send_to_c) { adios_finalize(myrank); } free(data); //MPI_Comm_free(&adios_comm); //MPI_Comm_free(&adios_comm_b_to_c); TAU_PROFILE_STOP(timer); /* exit */ return 0; }