Example #1
int main (int argc, char ** argv)
{
    char        filename [256];
    int         rank, size, i;
    int         NX = 10;
    double      t[NX];
    MPI_Comm    comm = MPI_COMM_WORLD;

    /* ADIOS variables declarations for matching gwrite_temperature.ch */
    int         adios_err;
    uint64_t    adios_groupsize, adios_totalsize;
    int64_t     adios_handle;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    for (i = 0; i < NX; i++)
        t[i] = rank*NX + i;

    strcpy (filename, "adios_global.bp");

    adios_init ("adios_global.xml", comm);

    adios_open (&adios_handle, "temperature", filename, "w", comm);
    #include "gwrite_temperature.ch"
    adios_close (adios_handle);

    MPI_Barrier (comm);

    adios_finalize (rank);

    MPI_Finalize ();
    return 0;
}
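
/* For reference: gwrite_temperature.ch is generated from the XML (via gpp.py)
   and expands to the group-size computation plus the matching adios_write()
   calls. A minimal sketch, assuming the "temperature" group defines only the
   scalar NX and the NX-element double array t:

     adios_groupsize = 4 + 8 * (NX);
     adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
     adios_write (adios_handle, "NX", &NX);
     adios_write (adios_handle, "t", t);
*/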
Example #2
int write_file (char *fname)
{
    char        filename [256];
    uint64_t    adios_groupsize, adios_totalsize;
    int64_t     adios_handle;

    /* NX, t and comm are assumed to be defined at file scope */
    strcpy (filename, "reuse_dim.bp");

    adios_open (&adios_handle, "restart", filename, "w", comm);

    /* two int scalars (NX is written twice) plus two NX-element double arrays */
    adios_groupsize = 2*sizeof(int) + 2*NX*sizeof(double);

    adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);

    /* the same scalar NX is reused as the dimension of both t1 and t2 */
    adios_write (adios_handle, "NX", (void *) &NX);
    adios_write (adios_handle, "t1", t);

    adios_write (adios_handle, "NX", (void *) &NX);
    adios_write (adios_handle, "t2", t);

    adios_close (adios_handle);

    MPI_Barrier (comm);

    return 0;
}
Example #3
int write_file (int step) 
{
    int64_t       fh;
    uint64_t       groupsize=0, totalsize;

    log ("Write step %d to %s\n", step, FILENAME);
    adios_open (&fh, "selections", FILENAME, (step ? "a" : "w"), comm);
    
    groupsize  = 9 * sizeof(int);                           // dimensions 
    groupsize += 3 * sizeof(int);                           // scalars 
    groupsize += 3 * ldim1 * sizeof(int);                   // 1D 
    groupsize += 3 * ldim1 * ldim2 * sizeof(int);           // 2D 
    groupsize += 3 * ldim1 * ldim2 * ldim3 * sizeof(int);   // 3D

    adios_group_size (fh, groupsize, &totalsize);

    adios_write (fh, "gdim1", &gdim1);
    adios_write (fh, "gdim2", &gdim2);
    adios_write (fh, "gdim3", &gdim3);
    adios_write (fh, "ldim1", &ldim1);
    adios_write (fh, "ldim2", &ldim2);
    adios_write (fh, "ldim3", &ldim3);
    adios_write (fh, "offs1", &offs1);
    adios_write (fh, "offs2", &offs2);
    adios_write (fh, "offs3", &offs3);

    adios_write (fh, "a0", &a0);
    adios_write (fh, "a1", a1);
    adios_write (fh, "a2", a2);
    adios_write (fh, "a3", a3);

    adios_close (fh);
    MPI_Barrier (comm);
    return 0;
}
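
/* The writes above assume the "selections" group defines each array over those
   dimension scalars. A hypothetical matching declaration with the noxml API
   (g being the group handle) would look like:

     adios_define_var (g, "a1", "", adios_integer, "ldim1", "gdim1", "offs1");
     adios_define_var (g, "a2", "", adios_integer,
                       "ldim1,ldim2", "gdim1,gdim2", "offs1,offs2");
     adios_define_var (g, "a3", "", adios_integer,
                       "ldim1,ldim2,ldim3", "gdim1,gdim2,gdim3", "offs1,offs2,offs3");
*/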
Example #4
int main (int argc, char ** argv) 
{
    char        filename [256];
    int         rank;
    int         NX = 10;
    double      t[NX];
    char        result[1024], s[32];
    int         i;
    
    /* ADIOS variables declarations for matching gread_temperature.ch */
    int         adios_err;
    uint64_t    adios_groupsize, adios_totalsize, adios_buf_size;
    int64_t     adios_handle;
    MPI_Comm    comm =  MPI_COMM_WORLD;
    
    MPI_Init (&argc, &argv);
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
    sprintf (filename, "restart.bp");
    adios_init ("config.xml", comm);
    adios_open (&adios_handle, "temperature", filename, "r", comm);
    #include "gread_temperature.ch"
    adios_close (adios_handle);
    adios_finalize (rank);
    MPI_Finalize ();

    sprintf(result, "rank=%d t=[%g", rank, t[0]);
    for (i=1; i<NX; i++) {
        sprintf (s, ",%g", t[i]);
        strcat (result, s);
    }
    printf("%s]\n", result);

    return 0;
}    
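
/* For reference: gread_temperature.ch is the read-side counterpart generated
   from the XML; it is what uses the adios_buf_size declared above. A plausible
   sketch for a scalar NX and an NX-element double array t:

     adios_buf_size = 4;
     adios_read (adios_handle, "NX", &NX, adios_buf_size);
     adios_buf_size = 8 * (NX);
     adios_read (adios_handle, "t", t, adios_buf_size);
*/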
Example #5
int read_write(int step)
{
    int retval = 0;
    int i;
    uint64_t total_size;

    // open output file
    adios_open (&fh, group_namelist[0], outfilename, (step==1 ? "w" : "a"), comm);
    adios_group_size (fh, write_total, &total_size);
    
    for (i=0; i<f->nvars; i++) 
    {
        if (varinfo[i].writesize != 0) {
            // read variable subset
            print ("rank %d: Read variable %d: %s\n", rank, i, f->var_namelist[i]); 
            ADIOS_SELECTION *sel = adios_selection_boundingbox (varinfo[i].v->ndim,
                    varinfo[i].start, 
                    varinfo[i].count);
            adios_schedule_read_byid (f, sel, i, 1, 1, readbuf);
            adios_perform_reads (f, 1);
            adios_selection_delete (sel); // release the selection after the blocking read

            // write (buffer) variable
            print ("rank %d: Write variable %d: %s\n", rank, i, f->var_namelist[i]); 
            adios_write(fh, f->var_namelist[i], readbuf);
        }
    }

    adios_release_step (f); // this step is no longer needed to be locked in staging area
    adios_close (fh); // write out output buffer to file
    return retval;
}
Example #6
int main(int argc, char ** argv){
	int  rank=0, size=0;
	int  NX = NX_DIM;                // size of 1D array we will write
	double t[NX_DIM];                // this will contain the variables
	MPI_Comm  comm = MPI_COMM_WORLD; // required for ADIOS

	int64_t 	adios_handle;        // the ADIOS file handler
	int retval;
	struct adios_tsprt_opts adios_opts;
	int err_count = 0;

	GET_ENTRY_OPTIONS(adios_opts, "Runs writers. It is recommended to run as many writers as readers.");

	// I assume that I have all required options set in adios_opts

	// sanity check
	assert(NX==NX_DIM);

	// ADIOS initialization
	MPI_Init(&argc, &argv);
	MPI_Comm_rank (comm, &rank);
	MPI_Comm_size (comm, &size);

	SET_ERROR_IF_NOT_ZERO(adios_init(adios_opts.xml_adios_init_filename, comm), err_count);
	RET_IF_ERROR(err_count, rank);

	// init the array that I will transport
	if (gen_1D_array(t, NX, rank) == DIAG_ERR){
		printf("ERROR: Generating 1D array. Quitting ...\n");
		return DIAG_ERR;
	}

	uint64_t adios_groupsize, adios_totalsize;

	// open with the group name as specified in the xml file
	adios_open( &adios_handle, "temperature", FILE_NAME, "w", comm);
	adios_groupsize = 4 + 4 + 4 + 8 * (NX);
	retval=adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
	fprintf(stderr, "Rank=%d adios_group_size(): adios_groupsize=%" PRIu64 ", adios_totalsize=%" PRIu64 ", retval=%d\n",
			rank, adios_groupsize, adios_totalsize, retval);

	// write; don't check errors for simplicity reasons
	adios_write(adios_handle, "NX", &NX);
	adios_write(adios_handle, "size", &size);
	adios_write(adios_handle, "rank", &rank);
	adios_write(adios_handle, "var_1d_array", t);

	fprintf(stderr, "Rank=%d committed write\n", rank);

	adios_close(adios_handle);

	// clean and finalize the system
	adios_finalize(rank);
	MPI_Finalize();

	return DIAG_OK;
}
Example #7
int main (int argc, char ** argv) 
{
    char        filename [256];
    int         rank;
    MPI_Comm    comm = MPI_COMM_WORLD;

    uint64_t    adios_groupsize, adios_totalsize;
    int64_t     adios_handle;

    int8_t v1 = -4;
    int16_t v2 = -3;
    int32_t v3 = -2;
    int64_t v4 = -1;

    uint8_t v5 = 1;
    uint16_t v6 = 2;
    uint32_t v7 = 3;
    uint64_t v8 = 4;

    float v9 = 5.0;
    double v10 = 6.0;

    char * v11 = "ADIOS example";

    complex v12;
    v12.r = 8.0;
    v12.i = 9.0;

    double_complex v13;
    v13.r = 10.0;
    v13.i = 11.0;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);

    strcpy (filename, "scalars.bp");

    /* adios_open() opens a "group in a file", here the "scalars" group.   
       GWRITE is the convenient way to write all variables defined in the
       xml file but of course one can write the individual adios_write() 
       statements here too 
    */
    adios_init ("scalars.xml", comm);
    adios_open (&adios_handle, "scalars", filename, "w", comm);
#include "gwrite_scalars.ch"
    adios_close (adios_handle);

    MPI_Barrier (comm);

    adios_finalize (rank);

    MPI_Finalize ();
    return 0;
}
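
/* A plausible sketch of the generated gwrite_scalars.ch (the variable names in
   the XML are assumed to match v1..v13; the sizes follow the declared types,
   with the string contributing its length):

     adios_groupsize = 1 + 2 + 4 + 8      // v1..v4
                     + 1 + 2 + 4 + 8      // v5..v8
                     + 4 + 8              // v9, v10
                     + strlen(v11)        // v11
                     + 8 + 16;            // v12, v13
     adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
     adios_write (adios_handle, "v1", &v1);
     // ... one adios_write() per variable ...
     adios_write (adios_handle, "v13", &v13);
*/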
Example #8
static void write_test_file(double *arr) {
    int64_t fd;
    uint64_t total_size;

    // Write the data to file
    adios_open(&fd, BP_GROUP, BP_FILENAME, "w", &comm);
    adios_group_size(fd, 2 * N * sizeof(double), &total_size);
    adios_write(fd, RAW_VAR, arr);
    adios_write(fd, XFORM_VAR, arr);
    adios_close(fd);
}
Example #9
int main (int argc, char ** argv) 
{
	MPI_Comm    comm = 0; // dummy mpi 

	/* ADIOS variables declarations for matching gwrite_temperature.ch */
	uint64_t  adios_groupsize, adios_totalsize;
	int64_t   g;
	int64_t   f;
	int64_t   Tid, Pid, Vid; // variable IDs
	char dimstr[32];

	sprintf (dimstr, "%d,%d", NX, NY);

	adios_init_noxml (comm);
	adios_set_max_buffer_size (1);

	adios_declare_group (&g, "vars", "", adios_flag_yes);
	adios_select_method (g, "POSIX", "", "");

	Tid = adios_define_var (g, "T" ,"", adios_double, dimstr, dimstr, "0,0");
	adios_set_transform (Tid, "none");
	Pid = adios_define_var (g, "P" ,"", adios_double, dimstr, dimstr, "0,0");
	adios_set_transform (Pid, "none");
	Vid = adios_define_var (g, "V" ,"", adios_double, dimstr, dimstr, "0,0");
	adios_set_transform (Vid, "none");

	adios_read_init_method (ADIOS_READ_METHOD_BP, 0, "");
	if (adios_query_is_method_available (ADIOS_QUERY_METHOD_ALACRITY)) {
		adios_set_transform (Tid, "alacrity");
		adios_set_transform (Pid, "alacrity");
		adios_set_transform (Vid, "alacrity");
		printf ("Turned on ALACRITY transformation for array variables\n");
	}

	adios_open (&f, "vars", "vars.bp", "w", comm);
	adios_groupsize = 3*NX*NY*sizeof(double);
	adios_group_size (f, adios_groupsize, &adios_totalsize);
	adios_write (f, "T", T);
	adios_write (f, "P", P);
	adios_write (f, "V", V);
	adios_close (f);

	adios_finalize (0);
	return 0;
}
Example #10
int main (int argc, char ** argv) 
{
    char        filename [256];
    int         rank, size;
    int         NX = 10; 
    int         N = 3; /* number of files to write */
    double      t[NX];
    int         i;

    /* ADIOS variables declarations for matching gwrite_temperature.ch */
    uint64_t    adios_groupsize, adios_totalsize;
    int64_t     adios_handle;
    int         color, key;
    MPI_Comm    comm;
 
    MPI_Init (&argc, &argv);
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
    MPI_Comm_size (MPI_COMM_WORLD, &size);

    /* MPI_Comm_split partitions the world group into N disjoint subgroups;
     * within each subgroup the processes are ranked by the key argument.
     * A new communicator comm is returned for this grid configuration.
     */
    color = rank % N;
    key = rank / N;
    MPI_Comm_split (MPI_COMM_WORLD, color, key, &comm);
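    /* illustration: with 8 processes and N = 3, colors 0,1,2 yield the
       subgroups {0,3,6}, {1,4,7} and {2,5}, i.e. three output files */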

    for (i=0; i<NX; i++)
        t[i] = rank*NX + i;
            
    /* every size/N processes write into the same file,
     * so N files are generated.
     */
    sprintf (filename, "restart_%5.5d.bp", color);
    adios_init ("config.xml", MPI_COMM_WORLD);
    adios_open (&adios_handle, "temperature", filename, "w", comm);
    #include "gwrite_temperature.ch"
    adios_close (adios_handle);
    adios_finalize (rank);
    MPI_Finalize ();
    return 0;
}
Example #11
int main (int argc, char ** argv) 
{
	int         size, i, block;
	MPI_Comm    comm = 0; // dummy mpi 

	/* ADIOS variables declarations for matching gwrite_temperature.ch */
	uint64_t  adios_groupsize, adios_totalsize;
	int64_t   g;
	int64_t   f;
	char      dimstr[32];

	adios_init_noxml (comm);
	adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 1);

	adios_declare_group (&g, "table", "", adios_flag_yes);
	adios_select_method (g, "POSIX1", "", "");

	sprintf (dimstr, "%d,%d", NX, NY);
	adios_define_var (g, "A" ,"", adios_integer, dimstr, dimstr, "0,0");
	sprintf (dimstr, "%d,%d", n_of_elements, Elements_length);
	adios_define_var (g, "Elements" ,"", adios_byte, dimstr, dimstr, "0,0");
	sprintf (dimstr, "%d,%d", NY, Columns_length);
	adios_define_var (g, "Columns" ,"", adios_byte, dimstr, dimstr, "0,0");

	adios_open (&f, "table", "table.bp", "w", comm);

	adios_groupsize = NX*NY*sizeof(int32_t)           /* size of A */
	                + n_of_elements * Elements_length /* size of Elements */
	                + NY * Columns_length;            /* size of Columns */

	adios_group_size (f, adios_groupsize, &adios_totalsize);
	adios_write (f, "A", A);
	adios_write (f, "Elements", Elements);
	adios_write (f, "Columns", Columns);
	adios_close (f);

	adios_finalize (0);
	return 0;
}
Example #12
int write_file (int step) 
{
    int64_t       fh;
    uint64_t       groupsize=0, totalsize;

    log ("Write step %d to %s\n", step, FILENAME);
    adios_open (&fh, "connect", FILENAME, (step ? "a" : "w"), &subcomm);
    
    groupsize  = 3 * sizeof(int);                           // dimensions 
    groupsize += ldim1 * sizeof(int);                       // 1D 

    adios_group_size (fh, groupsize, &totalsize);

    adios_write (fh, "gdim1", &gdim1);
    adios_write (fh, "ldim1", &ldim1);
    adios_write (fh, "offs1", &offs1);
    adios_write (fh, "a1", a1);

    adios_close (fh);
    MPI_Barrier (subcomm);
    return 0;
}
Example #13
int output_dump(char *filename, int step, void *data)
{
    int64_t fh;
    uint64_t tsize;
    double t1, t2;
    char fname[256], mode[2]="w";

    if (streaming) 
    {
        snprintf (fname, sizeof(fname), "data.bp",filename);
        mode[0]='a'; mode[1] = 0;
    } 
    else if (file_per_process) 
    {
        snprintf (fname, sizeof(fname), "%s_%d.bp",filename, rank);
        mode[0]='w'; mode[1] = 0;
    } 
    else 
    {
        snprintf (fname, sizeof(fname), "%s.bp",filename);
        mode[0]='w'; mode[1] = 0;
    }

    t1 = MPI_Wtime();
    adios_open (&fh, "writer", fname, mode, iocomm);
    t2 = MPI_Wtime();
    Tio_open[step] = t2-t1;
    adios_group_size (fh, groupsize, &tsize);
    t1 = MPI_Wtime();
    Tio_group[step] = t1-t2;
    adios_write (fh, "xy", data);
    t2 = MPI_Wtime();
    Tio_write[step] = t2-t1;
    adios_close (fh);
    t1 = MPI_Wtime();
    Tio_close[step] = t1-t2;
    return 0;
}
Example #14
int main (int argc, char ** argv) 
{
    char        filename [256];
    int         rank, size, i, j;
    int         NX = 10, NY = 100; 
    double      t[NX][NY];
    int         p[NX];
    MPI_Comm    comm = MPI_COMM_WORLD;

    int         adios_err;
    uint64_t    adios_groupsize, adios_totalsize;
    int64_t     adios_handle;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);

    for (i = 0; i < NX; i++)
        for (j = 0; j< NY; j++)
            t[i][j] = rank * NX + i + j*(1.0/NY);

    for (i = 0; i < NX; i++)
        p[i] = rank * NX + i;

    strcpy (filename, "arrays.bp");
    adios_init ("arrays.xml", comm);
    adios_open (&adios_handle, "arrays", filename, "w", comm);
#include "gwrite_arrays.ch"
    adios_close (adios_handle);

    MPI_Barrier (comm);

    adios_finalize (rank);

    MPI_Finalize ();
    return 0;
}
Example #15
int main (int argc, char ** argv) {
    //For variable definitions:
    //gbounds = global bounds string, lbounds = local bounds string, offs = offset string, tstring = scratch string for temporary values
    char       gbounds[1007], lbounds[1007], offs[1007],tstring[100];
    //size = number of cores,  gidx = adios group index
    int        rank, size, gidx, i, j, k, ii;
    //data = pointer to read-in data
    void       * data = NULL;
    uint64_t   s[] = {0,0,0,0,0,0,0,0,0,0};  //starting offset
    uint64_t   c[] = {1,1,1,1,1,1,1,1,1,1};  //chunk block array
    uint64_t   bytes_read = 0;
    int        element_size;
    int64_t    new_adios_group, m_adios_file;
    uint64_t   var_size;  //portion_bound,
    uint64_t   adios_groupsize, adios_totalsize;
    int        read_buffer;          //maximum size (in MB) the user would like for each chunk
    int        write_buffer = 1536;  //actual buffer size used, in MB
    int        itime;
    int        WRITEME=1;
    uint64_t   chunk_size;   //chunk size in # of elements
    char      *var_path, *var_name; // full path cut into dir path and name
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(comm,&rank);
    MPI_Comm_size(comm,&size);

    // timing numbers
    // we will time:
    // 0: adios_open, adios_group_size
    // 1: the total time to read in the data
    // 2: time around each write (only meaningful if we do NOT buffer)
    // 3: the time in the close
    // 4: fopen, fclose
    // 5: total time
    // timers: the total I/O time
    int        timers = 6;
    double     start_time[timers], end_time[timers], total_time[timers];

    if (TIMING==100) {
        for (itime=0;itime<timers;itime++) {
            start_time[itime] = 0;
            end_time[itime] = 0;
            total_time[itime]=0;
        }
        //MPI_Barrier(MPI_COMM_WORLD);
        start_time[5] = MPI_Wtime();
    }

    if(rank==0)
        printf("converting...\n");

    if (argc < 5) {
        if (rank==0) printf("Usage: %s <BP-file> <ADIOS-file> read_buffer(MB) write_buffer(MB) METHOD (LUSTRE_strip_count) (LUSTRE_strip_size) (LUSTRE_block_size)\n", argv[0]);
        return 1;
    }



    if(TIMING==100)
        start_time[4] = MPI_Wtime();
    ADIOS_FILE * f = adios_fopen (argv[1], MPI_COMM_SELF);
    if(TIMING==100){
        end_time[4] = MPI_Wtime();
        total_time[4] = end_time[4]-start_time[4];
    }
    adios_init_noxml(comm); // no xml will be used to write the new adios file
    read_buffer = atoi(argv[3]);
    write_buffer = atoi(argv[4]);
    adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, write_buffer); // allocate MB buffer



    if (f == NULL) {
        printf("rank=%d, file cant be opened\n", rank);
        if (DEBUG) printf ("%s\n", adios_errmsg());
        return -1;
    }


    for (gidx = 0; gidx < f->groups_count; gidx++) {    //group part
        adios_groupsize = 0;
        ADIOS_GROUP * g = adios_gopen (f, f->group_namelist[gidx]);


        if (g == NULL) {
            if (DEBUG) printf ("%s\n", adios_errmsg());
            printf("rank %d: group cannot be opened.\n", rank);
            return -1;
        }
        /* First create all of the groups */
        // now I need to create this group in the file that will be written

        adios_declare_group(&new_adios_group,f->group_namelist[gidx],"",adios_flag_yes);


        if(strcmp(argv[5],"MPI_LUSTRE")!=0)   //see whether or not the user uses MPI_LUSTRE method
            adios_select_method (new_adios_group, argv[5], "", "");  //non-MPI_LUSTRE methods... like MPI, POSIX....
        else{
            char lustre_pars[1000];
            strcpy(lustre_pars, "");
            strcat(lustre_pars, "stripe_count=");
            sprintf(tstring, "%d", atoi(argv[6]));
            strcat(lustre_pars, tstring);
            strcat(lustre_pars, ",stripe_size=");
            sprintf(tstring, "%d", atoi(argv[7]));
            strcat(lustre_pars, tstring);
            strcat(lustre_pars, ",block_size=");
            sprintf(tstring, "%d", atoi(argv[8]));
            strcat(lustre_pars, tstring);

            if(rank==0)
                printf("lustre_pars=%s\n", lustre_pars);

            adios_select_method (new_adios_group, argv[5], lustre_pars, "");  //Use MPI Lustre method

        }



        // variable definition part
        for (i = 0; i < g->vars_count; i++) {
            ADIOS_VARINFO * v = adios_inq_var_byid (g, i);
            getbasename (g->var_namelist[i], &var_path, &var_name);

            if (v->ndim == 0) 
            {   
                // scalars: every process does them the same.
                adios_define_var(new_adios_group,var_name,var_path,v->type,0,0,0);
                getTypeInfo( v->type, &element_size);    //element_size is size per element based on its type
                if (v->type == adios_string) {  //special case when the scalar is string.
                    adios_groupsize += strlen(v->value);
                } else {
                    adios_groupsize += element_size;
                }
            } 
            else 
            { 
                // vector variables
                getTypeInfo( v->type, &element_size);
                var_size=1;
                for (ii=0;ii<v->ndim;ii++) {
                    var_size*=v->dims[ii];
                }
                uint64_t total_size = var_size;  //total number of elements in the current vector variable
                var_size *= element_size;        //size of the current vector variable in bytes

                //re-initialize the s and c variables
                for(j=0; j<v->ndim; j++){
                    s[j] = 0;
                    c[j] = 1;
                }

                //find the approximate chunk_size you would like to use.
                chunk_size = calcChunkSize(total_size, read_buffer*1024*1024/element_size, size);

                //set the chunk block array with the total size as close to chunk_size as possible
                calcC(chunk_size, v, c);
                strcpy(lbounds,"");
                for(j=0; j<v->ndim; j++){
                    sprintf(tstring, "%" PRId64 ",", c[j]);
                    strcat(lbounds, tstring);
                }
                printf("rank=%d, name=%s, chunk_size1=%" PRId64 " c[]=%s\n",rank,g->var_namelist[i],chunk_size,lbounds);


                chunk_size = 1;
                for(ii=0; ii<v->ndim; ii++)            //reset chunk_size based on the created c. Now the chunk_size is exact.
                    chunk_size *= c[ii];

                //current_step points to where this process is in the vector; initially set according to rank.
                uint64_t current_step = rank*chunk_size;

                //Advance the starting point s by current_step, unless current_step already exceeds total_size.
                if(current_step<total_size)
                    rS(v, s, current_step, rank);

                uint64_t elements_defined = 0;  //number of elements defined so far

                //each process handles its share of the vector while its current_step is smaller than total_size
                while(current_step < total_size)
                {
                    //ts, temporary s, is introduced for the sake of the inner do while loop below. Copy s to ts.
                    uint64_t ts[] = {0,0,0,0,0,0,0,0,0,0};
                    arrCopy(s, ts);

                    //at every outer while iteration, the whole chunk_size remains to be processed,
                    uint64_t remain_chunk = chunk_size;
                    if(current_step+chunk_size>total_size) //except when you are nearing the end of the vector....
                        remain_chunk = total_size-current_step;

                    //tc, temporary c, is introduced for the sake of the inner do while loop below. Copy c to tc.
                    uint64_t tc[] = {1,1,1,1,1,1,1,1,1,1};
                    arrCopy(c, tc);

                    do{
                        //how much of the remaining chunk to process? initially assume all of it.
                        uint64_t used_chunk = remain_chunk;

                        //processing the vector with block size tc could overrun the bounds given start ts,
                        uint64_t uc[] = {1,1,1,1,1,1,1,1,1,1};
                        //so compute a clipped, legal chunk block uc and an updated remain_chunk.
                        remain_chunk = checkBound(v, ts, tc, uc, remain_chunk);

                        //verify that ts+uc stays within bounds; theoretically this can never fail.
                        checkOverflow(0, v, ts, uc);


                        //the code fragment below calculates gbounds and sets placeholders for lbounds and offs.
                        strcpy(gbounds,"");
                        strcpy(lbounds,"");
                        strcpy(offs,"");

                        for(j=0; j<v->ndim-1; j++){
                            sprintf(tstring, "%d,", (int)v->dims[j]);
                            strcat(gbounds, tstring);
                            //sprintf(tstring, "ldim%d_%s,", j, var_name);
                            sprintf(tstring, "ldim%d,", j);
                            strcat(lbounds, tstring);
                            //sprintf(tstring, "offs%d_%s,", j, var_name);
                            sprintf(tstring, "offs%d,", j);
                            strcat(offs, tstring);
                        }

                        sprintf(tstring, "%d", (int)v->dims[v->ndim-1]);
                        strcat(gbounds, tstring);
                        //sprintf(tstring, "ldim%d_%s", v->ndim-1, var_name);
                        sprintf(tstring, "ldim%d", v->ndim-1);
                        strcat(lbounds, tstring);
                        //sprintf(tstring, "offs%d_%s", v->ndim-1, var_name);
                        sprintf(tstring, "offs%d", v->ndim-1);
                        strcat(offs, tstring);

                        //sprintf(tstring, "%d", v->ndim);
                        for(j=0; j<v->ndim; j++){
                            //sprintf(tstring, "ldim%d_%s", j, var_name);
                            sprintf(tstring, "ldim%d", j);
                            adios_define_var(new_adios_group, tstring, "bp2bp", adios_unsigned_long, 0, 0, 0);
                            //sprintf(tstring, "offs%d_%s", j, var_name);
                            sprintf(tstring, "offs%d", j);
                            adios_define_var(new_adios_group, tstring, "bp2bp", adios_unsigned_long, 0, 0, 0);
                        }

                        adios_define_var(new_adios_group,var_name,var_path,v->type,lbounds,gbounds,offs);


                        if (DEBUG){
                            strcpy(lbounds,"");
                            strcpy(offs,"");
                            for(j=0; j<v->ndim; j++){
                                sprintf(tstring, "%" PRId64 ",", ts[j]);
                                strcat(offs, tstring);
                                sprintf(tstring, "%" PRId64 ",", uc[j]);
                                strcat(lbounds, tstring);
                            }

                            printf("rank=%d, name=%s, gbounds=%s: lbounds=%s: offs=%s \n",rank,g->var_namelist[i],gbounds, lbounds, offs);
                        }

                        used_chunk -= remain_chunk; //you get the actual used_chunk here.
                        elements_defined += used_chunk;
                        if(remain_chunk!=0){
                            rS(v, ts, used_chunk, rank);  //advance ts by used_chunk.
                            for(k=0; k<10; k++)
                                tc[k] = 1;
                            calcC(remain_chunk, v, tc);   //based on remain_chunk, compute the new tc chunk block remaining to process.
                        }

                        adios_groupsize+= used_chunk*element_size+2*v->ndim*8;

                    }while(remain_chunk!=0);

                    current_step += size*chunk_size;  //once a whole chunk_size is processed, advance current_step in round-robin manner.

                    if(current_step<total_size){   //advance s in the same way.
                        rS(v, s, size*chunk_size, rank);
                    }
                }

                //besides checkOverflow above, check that the total number of elements processed across
                //processes matches the total number of elements in the original vector.
                if(DEBUG){
                    uint64_t* sb = (uint64_t *) malloc(sizeof(uint64_t));
                    uint64_t* rb = (uint64_t *) malloc(sizeof(uint64_t));
                    sb[0] = elements_defined;
                    MPI_Reduce(sb,rb,1,MPI_UNSIGNED_LONG_LONG,MPI_SUM,0, comm);

                    if(rank==0 && rb[0]!=total_size)
                        printf("some array define mismatch. please use debug mode\n");
                    free(sb); free(rb);
                }
            }
            free (var_name);
            free (var_path);
        } // finished declaring all of the variables


        // Now we can define the attributes....
        for (i = 0; i < g->attrs_count; i++) {
            enum ADIOS_DATATYPES atype;
            int  asize;
            void *adata;
            adios_get_attr_byid (g, i, &atype, &asize, &adata);
            // if (DEBUG) printf("attribute name=%s\n",g->attr_namelist[i]);
            adios_define_attribute(new_adios_group,g->attr_namelist[i],"",atype,adata,0);
        }



        /*------------------------------ NOW WE WRITE -------------------------------------------- */
        // Everything is declared; now the data needs to be written out.
        if (WRITEME==1) {
            // open up the file for writing....
            if (DEBUG) printf("rank=%d, opening file = %s, with group %s, size=%" PRId64 "\n",rank,argv[2],f->group_namelist[gidx],adios_groupsize);

            if(TIMING==100)
                start_time[0] = MPI_Wtime();

            adios_open(&m_adios_file, f->group_namelist[gidx],argv[2],"w",comm);
            adios_group_size( m_adios_file, adios_groupsize, &adios_totalsize);

            //get both the total adios_totalsize and total adios_groupsize summed across processes.
            uint64_t* sb = (uint64_t *) malloc(sizeof(uint64_t));
            uint64_t* rb = (uint64_t *) malloc(sizeof(uint64_t));
            sb[0] = adios_groupsize;
            MPI_Reduce(sb,rb,1,MPI_UNSIGNED_LONG_LONG,MPI_SUM,0, comm);

            uint64_t* sb2 = (uint64_t *) malloc(sizeof(uint64_t));
            uint64_t* rb2 = (uint64_t *) malloc(sizeof(uint64_t));
            sb2[0] = adios_totalsize;
            MPI_Reduce(sb2,rb2,1,MPI_UNSIGNED_LONG_LONG,MPI_SUM,0, comm);
            if(rank==0){
                printf("total adios_totalsize = %" PRId64 "\n", *rb2);
                printf("total adios_groupsize = %" PRId64 "\n", *rb);
            }
            free(sb); free(rb); free(sb2); free(rb2);

            if (TIMING==100) {
                end_time[0] = MPI_Wtime();
                total_time[0]+=end_time[0] - start_time[0];    //open + group_size time taken
            }

            // now we write out the variables, since they are all declared
            // this is where the data actually gets written
            for (i = 0; i < g->vars_count; i++) {
                ADIOS_VARINFO * v = adios_inq_var_byid (g, i);
                getbasename (g->var_namelist[i], &var_path, &var_name);
                if (v->ndim == 0) 
                {
                    if (DEBUG) {
                        printf ("ADIOS WRITE SCALAR: rank=%d, name=%s value=",
                                rank,g->var_namelist[i]);
                        print_data (v->value, 0, v->type);
                        printf ("\n");
                    }
                    if (TIMING==100) {
                        start_time[2] = MPI_Wtime();
                    }
                    adios_write(m_adios_file,g->var_namelist[i],v->value);
                    if (TIMING==100) {
                        end_time[2] = MPI_Wtime();
                        total_time[2]+=end_time[2] - start_time[2];     //IO write time...
                    }
                } 
                else 
                {
                    for(j=0; j<v->ndim; j++){
                        s[j] = 0;
                        c[j] = 1;
                    }
                    getTypeInfo( v->type, &element_size);

                    uint64_t total_size = 1;
                    for (ii=0;ii<v->ndim;ii++)
                        total_size*=v->dims[ii];

                    chunk_size = calcChunkSize(total_size, read_buffer*1024*1024/element_size, size);
                    calcC(chunk_size, v, c);
                    chunk_size = 1;
                    for(ii=0; ii<v->ndim; ii++)
                        chunk_size *= c[ii];


                    uint64_t current_step = rank*chunk_size;
                    if(current_step<total_size)
                        rS(v, s, current_step, rank);

                    uint64_t elements_written = 0;

                    while(current_step < total_size)
                    {
                        uint64_t ts[] = {0,0,0,0,0,0,0,0,0,0};
                        arrCopy(s, ts);
                        uint64_t remain_chunk = chunk_size;
                        if(current_step+chunk_size>total_size)
                            remain_chunk = total_size-current_step;
                        uint64_t tc[] = {1,1,1,1,1,1,1,1,1,1};
                        arrCopy(c, tc);

                        do{
                            uint64_t uc[] = {1,1,1,1,1,1,1,1,1,1};
                            uint64_t used_chunk = remain_chunk;
                            remain_chunk = checkBound(v, ts, tc, uc, remain_chunk);

                            checkOverflow(1, v, ts, uc);

                            used_chunk -= remain_chunk;
                            elements_written += used_chunk;

                            //allocate space for the data to be read in
                            data = (void *) malloc(used_chunk*element_size);

                            if (TIMING==100) {
                                start_time[1] = MPI_Wtime();
                            }
                            if(PERFORMANCE_CHECK) printf("rank=%d, read start\n",rank);
                            bytes_read = adios_read_var_byid(g,v->varid,ts,uc,data);
                            if(PERFORMANCE_CHECK) printf("rank=%d, read end\n",rank);
                            if (TIMING==100) {
                                end_time[1] = MPI_Wtime();
                                total_time[1]+=end_time[1] -start_time[1];      //IO read time
                            }

                            if (DEBUG)
                                printf ("ADIOS WRITE: rank=%d, name=%s datasize=%" PRId64 "\n",rank,g->var_namelist[i],bytes_read);


                            if (TIMING==100) {
                                start_time[2] = MPI_Wtime();
                            }
                            if (DEBUG){
                                printf("rank=%d, write ts=",rank);
                                int k;
                                for(k=0; k<v->ndim; k++)
                                    printf("%" PRIu64 ",", ts[k]);
                                printf("  uc=");
                                for(k=0; k<v->ndim; k++)
                                    printf("%" PRIu64 ",", uc[k]);
                                printf("\n");
                            }

                            //local bounds and offsets placeholders are now written out with their actual values.
                            if(PERFORMANCE_CHECK) printf("rank=%d, adios write start\n", rank);
                            for(k=0; k<v->ndim; k++){
                                //sprintf(tstring, "ldim%d_%s", k, var_name);
                                sprintf(tstring, "ldim%d", k);
                                if (DEBUG) {
                                    printf ("ADIOS WRITE DIMENSION: rank=%d, name=%s value=",
                                            rank,tstring);
                                    print_data (&uc[k], 0, adios_unsigned_long);
                                    printf ("\n");
                                }
                                adios_write(m_adios_file, tstring, &uc[k]);

                                //sprintf(tstring, "offs%d_%s", k, var_name);
                                sprintf(tstring, "offs%d", k);
                                if (DEBUG) {
                                    printf ("ADIOS WRITE OFFSET: rank=%d, name=%s value=",
                                            rank,tstring);
                                    print_data (&ts[k], 0, adios_unsigned_long);
                                    printf ("\n");
                                }
                                adios_write(m_adios_file, tstring, &ts[k]);
                            }
                            adios_write(m_adios_file,g->var_namelist[i],data);
                            if(PERFORMANCE_CHECK) printf("rank=%d, adios write end\n", rank);


                            if (TIMING==100) {
                                end_time[2] = MPI_Wtime();
                                total_time[2]+=end_time[2] - start_time[2];   //IO write time
                            }

                            free(data);


                            if(remain_chunk!=0){
                                rS(v, ts, used_chunk, rank);
                                for(k=0; k<10; k++)
                                    tc[k] = 1;
                                calcC(remain_chunk, v, tc);
                            }

                        }while(remain_chunk!=0);

                        current_step += size*chunk_size;

                        if(current_step<total_size)
                            rS(v, s, size*chunk_size,rank);
                    }

                    if(DEBUG){
                        uint64_t* sb = (uint64_t *) malloc(sizeof(uint64_t));
                        uint64_t* rb = (uint64_t *) malloc(sizeof(uint64_t));
                        sb[0] = elements_written;
                        MPI_Reduce(sb,rb,1,MPI_UNSIGNED_LONG_LONG,MPI_SUM,0, comm);
                        if(rank==0 && rb[0]!=total_size)
                            printf("some array read mismatch. please use debug mode\n");
                        free(sb); free(rb);
                    }
                }
                free (var_name);
                free (var_path);
            }// end of the writing of the variable..
            if (TIMING==100) {
                start_time[3] = MPI_Wtime();
            }
            if(PERFORMANCE_CHECK) printf("rank=%d, adios_close start\n", rank);
            adios_close(m_adios_file);
            if(PERFORMANCE_CHECK) printf("rank=%d, adios_close end\n", rank);
            if (TIMING==100) {
                end_time[3] = MPI_Wtime();
                total_time[3]+=end_time[3] - start_time[3];
            }
            adios_gclose(g);
        } //end of WRITEME
    } // end of all of the groups

    if(rank==0)
        printf("conversion done!\n");

    if(TIMING==100)
        start_time[4] = MPI_Wtime();
    adios_fclose(f);
    if(TIMING==100){
        end_time[4] = MPI_Wtime();
        total_time[4] = total_time[4]+end_time[4]-start_time[4];
    }
    adios_finalize(rank);


    // now write out the timing data: for each category we report max, min, avg, and std, in seconds, across all processes.
    if(TIMING==100){

        // 0: adios_open, adios_group_size
        // 1: the total time to read in the data
        // 2: time around each write (only meaningful if we do NOT buffer)
        // 3: the time in the close
        // 4: fopen, fclose
        // 5: total time
        end_time[5] = MPI_Wtime();
        total_time[5] = end_time[5] - start_time[5];

        double sb[7];
        sb[0] = total_time[1]; sb[1] = total_time[4];   //read_var, fopen+fclose
        sb[2] = sb[0]+sb[1];
        sb[3] = total_time[0]; sb[4] = total_time[2]+total_time[3]; //adios_open+adios_group_size, write+close
        sb[5] = sb[3]+sb[4];
        sb[6] = total_time[5]; //total

        double * rb = NULL;

        if(rank==0)
            rb = (double *)malloc(size*7*sizeof(double));
        //MPI_Barrier(comm);
        MPI_Gather(sb, 7, MPI_DOUBLE, rb, 7, MPI_DOUBLE, 0, comm);

        if(rank==0){
            double read_avg1 = 0;
            double read_avg2 = 0;
            double tread_avg = 0;
            double write_avg1 = 0;
            double write_avg2 = 0;
            double twrite_avg = 0;
            double total_avg = 0;
            for(j=0; j<size; j++){
                read_avg1 += rb[7*j];
                read_avg2 += rb[7*j+1];
                tread_avg += rb[7*j+2];
                write_avg1 += rb[7*j+3];
                write_avg2 += rb[7*j+4];
                twrite_avg += rb[7*j+5];
                total_avg += rb[7*j+6];
            }
            read_avg1 /= size;
            read_avg2 /= size;
            tread_avg /= size;
            write_avg1 /= size;
            write_avg2 /= size;
            twrite_avg /= size;
            total_avg /= size;

            double read1_max = rb[0];
            double read1_min = rb[0];
            double read1_std = rb[0]-read_avg1; read1_std *= read1_std;

            double read2_max = rb[1];
            double read2_min = rb[1];
            double read2_std = rb[1]-read_avg2; read2_std *= read2_std;

            double tread_max = rb[2];
            double tread_min = rb[2];
            double tread_std = rb[2]-tread_avg; tread_std *= tread_std;

            double write1_max = rb[3];
            double write1_min = rb[3];
            double write1_std = rb[3]-write_avg1; write1_std *= write1_std;

            double write2_max = rb[4];
            double write2_min = rb[4];
            double write2_std = rb[4]-write_avg2; write2_std *= write2_std;

            double twrite_max = rb[5];
            double twrite_min = rb[5];
            double twrite_std = rb[5]-twrite_avg; twrite_std *= twrite_std;

            double total_max = rb[6];
            double total_min = rb[6];
            double total_std = rb[6]-total_avg; total_std *= total_std;

            for(j=1; j<size; j++){
                if(rb[7*j]>read1_max)
                    read1_max = rb[7*j];
                else if(rb[7*j]<read1_min)
                    read1_min = rb[7*j];
                double std = rb[7*j]-read_avg1; std *= std;
                read1_std += std;

                if(rb[7*j+1]>read2_max)
                    read2_max = rb[7*j+1];
                else if(rb[7*j+1]<read2_min)
                    read2_min = rb[7*j+1];
                std = rb[7*j+1]-read_avg2; std *= std;
                read2_std += std;

                if(rb[7*j+2]>tread_max)
                    tread_max = rb[7*j+2];
                else if(rb[7*j+2]<tread_min)
                    tread_min = rb[7*j+2];
                std = rb[7*j+2]-tread_avg; std *= std;
                tread_std += std;

                if(rb[7*j+3]>write1_max)
                    write1_max = rb[7*j+3];
                else if(rb[7*j+3]<write1_min)
                    write1_min = rb[7*j+3];
                std = rb[7*j+3]-write_avg1; std *= std;
                write1_std += std;

                if(rb[7*j+4]>write2_max)
                    write2_max = rb[7*j+4];
                else if(rb[7*j+4]<write2_min)
                    write2_min = rb[7*j+4];
                std = rb[7*j+4]-write_avg2; std *= std;
                write2_std += std;

                if(rb[7*j+5]>twrite_max)
                    twrite_max = rb[7*j+5];
                else if(rb[7*j+5]<twrite_min)
                    twrite_min = rb[7*j+5];
                std = rb[7*j+5]-twrite_avg; std *= std;
                twrite_std += std;

                if(rb[7*j+6]>total_max)
                    total_max = rb[7*j+6];
                else if(rb[7*j+6]<total_min)
                    total_min = rb[7*j+6];
                std = rb[7*j+6]-total_avg; std *= std;
                total_std += std;
            }
            read1_std /= size;  read1_std = sqrt(read1_std);
            read2_std /= size;  read2_std = sqrt(read2_std);
            tread_std /= size;    tread_std = sqrt(tread_std);
            write1_std /= size; write1_std = sqrt(write1_std);
            write2_std /= size; write2_std = sqrt(write2_std);
            twrite_std /= size;    twrite_std = sqrt(twrite_std);
            total_std /= size; total_std = sqrt(total_std);

            printf("---type---                       max\tmin\tavg\tstd\n");
            printf("---read_var---                   %lf\t%lf\t%lf\t%lf\n", read1_max, read1_min, read_avg1, read1_std);
            printf("---fopen+fclose---               %lf\t%lf\t%lf\t%lf\n", read2_max, read2_min, read_avg2, read2_std);
            printf("---total_read---                 %lf\t%lf\t%lf\t%lf\n", tread_max, tread_min, tread_avg, tread_std);
            printf("---adios_open+adios_groupsize--- %lf\t%lf\t%lf\t%lf\n", write1_max, write1_min, write_avg1, write1_std);
            printf("---write+close---                %lf\t%lf\t%lf\t%lf\n", write2_max, write2_min, write_avg2, write2_std);
            printf("---total_write---                %lf\t%lf\t%lf\t%lf\n", twrite_max, twrite_min, twrite_avg, twrite_std);
            printf("---total---                      %lf\t%lf\t%lf\t%lf\n", total_max, total_min, total_avg, total_std);
            free(rb);

        }

    }

    //    if (TIMING==100 && rank==0) {
    //        printf("------------------------------------------------------------------\n");
    //        printf("Define variables     = %lf\n",total_time[0]);
    //        printf("Read   variables     = %lf\n",total_time[1]);
    //        printf("Write  variables     = %lf\n",total_time[2]);
    //        printf("Close File for write = %lf\n",total_time[3]);
    //        printf("Total write time     = %lf\n",total_time[2] + total_time[3]);
    //        for (itime=0;itime<timers-1;itime++)
    //            total_time[timers-1]+=total_time[itime];
    //        printf("Total I/O time       = %lf\n",total_time[timers-1]);
    //    }
    MPI_Finalize();


    return(0);
}
Example #16
int main (int argc, char ** argv ) 
{
    MPI_Comm    comm = MPI_COMM_WORLD;
    int         rank;
    int         ndx, ndy;             // size of array per processor
    double      *data;

    double      *X;                   //X coordinate
    double      *Y;                   //Y coordinate

    // Offsets and sizes
    int         offs_x, offs_y;       //offset in x and y direction
    int         nx_local, ny_local;   //local array size
    int         nx_global, ny_global; //global array size
    int         posx, posy;           //position index in the process grid
    int         i,j;
  
    /* ADIOS variables declarations for matching gwrite_temperature.ch */
    uint64_t    adios_groupsize, adios_totalsize;
    int64_t     adios_handle;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &nproc);

    if (processArgs(argc, argv)) {
        return 1;
    }
    //will work with each core writing ndx = 65, ndy = 129, (65*3,129*4) global
    ndx = 65;
    ndy = 129;

    //2D array with block,block decomposition
    posx = rank%npx;           // 1st dim
    posy = rank/npx;           // 2nd dim
    offs_x = posx * ndx;
    offs_y = posy * ndy;
    nx_local = ndx;
    ny_local = ndy;
    nx_global = npx * ndx;
    ny_global = npy * ndy;

    data = malloc (ndx * ndy * sizeof(double));
    for( i = 0; i < ndx; i++ )
        for( j = 0; j < ndy; j++)
            data[i*ndy + j] = 1.0*rank;

    X = malloc (ndx * sizeof(double)); 
    for( i = 0; i < ndx; i++ )
        //X[i] = 0.1*i*i+ndx*posx; 
        X[i] = 0.1*(i+offs_x)*(i+offs_x); 

    Y = malloc (ndy * sizeof(double));
    for( i = 0; i < ndy; i++ )
        //Y[i] = 0.1*i*i+ndx*posy;
        Y[i] = 0.1*(i+offs_y)*(i+offs_y);


    adios_init ("rectilinear2d.xml", comm);
    adios_open (&adios_handle, "rectilinear2d", "rectilinear2d.bp", "w", comm);

    adios_groupsize = 7*sizeof(int)
                    + sizeof(double) * (nx_local*ny_local)
                    + sizeof(double) * (nx_local)
                    + sizeof(double) * (ny_local);

    adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
    adios_write (adios_handle, "nproc", &nproc);
    adios_write (adios_handle, "nx_global", &nx_global);
    adios_write (adios_handle, "ny_global", &ny_global);
    adios_write (adios_handle, "offs_x", &offs_x);
    adios_write (adios_handle, "offs_y", &offs_y);
    adios_write (adios_handle, "nx_local", &nx_local);
    adios_write (adios_handle, "ny_local", &ny_local);
    if( rank < npx ) {
        adios_write (adios_handle, "X", X);
    }   
    //printf ("rank %d: check if to print Y, rank%%npx=%d  offs_y=%d\n", rank, rank%npx, offs_y);
    if( rank % npx == 0 )
    {
        adios_write (adios_handle, "Y", Y);
    }
    adios_write (adios_handle, "data", data);

    adios_close (adios_handle);

    MPI_Barrier (comm);

    free (data);
    free (X);
    free (Y);
    adios_finalize (rank);

    MPI_Finalize ();
    return 0;
}
Example #17
int flexpath_writer (MPI_Comm adios_comm, int sink_index, bool append, bool shutdown_flag) 
{
    int         i, j, offset, size_y;
    int         NX = 5; 
    int         NY = 2;
    int         NZ = 2;
    double      t[NX*NY*NZ];

    // this is our array of handles that we are writing to.
    static int64_t * adios_handles = 0;
    // this is our "current" handle, for convenience.
    int64_t * adios_handle = 0;

    // how much total data is there to transfer in the array?
    int total = NX * NY * NZ * comm_size;
    // this flag tells the workflow to shutdown.
    int shutdown = shutdown_flag ? 1 : 0;

    // offsets into the array for each MPI rank.
    offset = my_rank*NY;
    size_y = comm_size*NY;

    // Each MPI rank only writes part of the array.
    int myslice = NX * NY * NZ;
    for (j=0; j<NY*NX*NZ; j++) {
        t[j] = my_rank*myslice + j;
    }

    // if we haven't allocated space for handles, do it.
    if (adios_handles == 0) {
        adios_handles = (int64_t*)(calloc(num_sinks, sizeof(int64_t)));
    }
    // if this file isn't open, open it.
    adios_handle = &(adios_handles[sink_index]);
    //if((*adios_handle) == 0) {
        char file_name[256] = {0};
        char group_name[256] = {0};
        sprintf(file_name, "adios_%s_%s", my_name, sinks[sink_index]);
        sprintf(group_name, "%s_to_%s", my_name, sinks[sink_index]);
        //printf("Opening %s for write\n", file_name); fflush(stdout);
        if(append) {
            adios_open (adios_handle, group_name, file_name, "a", adios_comm);
        } else {
            adios_open (adios_handle, group_name, file_name, "w", adios_comm);
        }
    //}

    /*
     * Write our variables.
     */
    uint64_t  adios_totalsize;
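    // eight 4-byte int scalars (NX, NY, NZ, shutdown, size, rank, offset,
    // size_y) plus the double array t, matching the writes below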
    uint64_t  adios_groupsize = 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 8 * (NZ) * (NY) * (NX);
    adios_group_size ((*adios_handle), adios_groupsize, &adios_totalsize);
	adios_write ((*adios_handle), "/scalar/dim/NX", &NX);
	adios_write ((*adios_handle), "/scalar/dim/NY", &NY);
	adios_write ((*adios_handle), "/scalar/dim/NZ", &NZ);
	adios_write ((*adios_handle), "shutdown", &shutdown);
	adios_write ((*adios_handle), "size", &comm_size);
	adios_write ((*adios_handle), "rank", &my_rank);
	adios_write ((*adios_handle), "offset", &offset);
	adios_write ((*adios_handle), "size_y", &size_y);
	adios_write ((*adios_handle), "var_2d_array", t);

    //if (shutdown_flag) {
        //printf("Closing %s for write\n", file_name); fflush(stdout);
        adios_close (*adios_handle);
        adios_handle = 0;
    //}

    return 0;
}
Example #18
int main (int argc, char ** argv)
{
    int i = 0;

    if(argc < 4)
    {
        printf("wrong args\n");
        usage();
        return -1;
    }

    DIM_GLOBAL = atoi (argv[1]);
    DIM_LOCAL = atoi (argv[2]);
    char* option = argv[3];

    char bp_file_name[NAME_LEN] = {0};
    char xml_file_name[NAME_LEN] = {0};

    snprintf(bp_file_name, NAME_LEN-1, "output/%s.bp", option);
    snprintf(xml_file_name, NAME_LEN-1, "conf/%s.xml", option);

    // MPI-related initialization
    int rank, nproc;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &nproc);

    double t1 = 0.0;
    double t2 = 0.0;
    double t3 = 0.0;
    double t4 = 0.0;

    // variable dimensions
    int gndx = DIM_GLOBAL;
    int gndy = DIM_GLOBAL;
    int gndz = DIM_GLOBAL;

    int ndx = DIM_LOCAL;
    int ndy = DIM_LOCAL;
    int ndz = DIM_LOCAL;

    int npx = gndx / ndx;
    int npy = gndy / ndy;
    int npz = gndz / ndz;

    if(nproc != npx * npy * npz)
    {
        printf("process num error! nproc != npx * npy * npz\n");
        MPI_Finalize();
        return -1;
    }

    int posx = rank / (npx * npy);
    int posy = rank % (npx * npy) / npy;
    int posz = rank % (npx * npy) % npy;

    // posx = rank/(npx*npy)         // 1st dim: npx*npy consecutive ranks share one X position
    // posy = (rank%(npx*npy))/npy   // 2nd dim
    // posz = (rank%(npx*npy))%npy   // 3rd dim
    int offx = posx * ndx;
    int offy = posy * ndy;
    int offz = posz * ndz;

    int timesteps = 0;

    srand(0); // all procs generate the same random datasets

    double* double_xyz = (double*) malloc (sizeof(double) * ndx * ndy * ndz);
    for(i = 0; i < ndx * ndy * ndz; i++)
    {
        double_xyz[i] = (double) rand () / RAND_MAX;
    }

    int adios_err;
    uint64_t adios_groupsize, adios_totalsize;
    int64_t adios_handle;

    if(rank == 0)
        t3 = dclock();

    MPI_Barrier(comm);

    t1 = dclock();

    adios_init (xml_file_name, comm);
    adios_open (&adios_handle, GROUP_NAME, bp_file_name, "w", comm);

    //////////////////////////////////////////////////////////////////////////////////////
    adios_groupsize = 13 * 4                      /* the thirteen int scalars below */
                    + 8 * (ndx) * (ndy) * (ndz)   /* temperature */
                    + 8 * (ndx) * (ndy) * (ndz);  /* preasure */
    adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
    adios_write (adios_handle, "gndx", &gndx);
    adios_write (adios_handle, "gndy", &gndy);
    adios_write (adios_handle, "gndz", &gndz);
    adios_write (adios_handle, "nproc", &nproc);
    adios_write (adios_handle, "npx", &npx);
    adios_write (adios_handle, "npy", &npy);
    adios_write (adios_handle, "npz", &npz);
    adios_write (adios_handle, "offx", &offx);
    adios_write (adios_handle, "offy", &offy);
    adios_write (adios_handle, "offz", &offz);
    adios_write (adios_handle, "ndx", &ndx);
    adios_write (adios_handle, "ndy", &ndy);
    adios_write (adios_handle, "ndz", &ndz);
    adios_write (adios_handle, "temperature", double_xyz);
    adios_write (adios_handle, "preasure", double_xyz);

    //////////////////////////////////////////////////////////////////////////////////////

    adios_close (adios_handle);

    /*
    t2 = dclock();

    double tt = t2 - t1;

    MPI_Barrier (comm);

    if(rank == 0)
    {
        t4 = dclock();
    }
    */

    adios_finalize (rank);

    /*
    double* all_tt = (double*) malloc (sizeof(double) * nproc);

    // calling MPI_Gather
    int rtn = MPI_Gather (&tt, 1, MPI_DOUBLE, all_tt, 1, MPI_DOUBLE, 0, comm);
    MPI_Barrier (comm);
    if(rank == 0)
    {
        int k = 0;
        double sum = 0.0;
        for(k = 0; k < nproc; k++)
        {
            // printf("proc %d time %f\n", k, all_tt[k]);
            sum += all_tt[k];
        }

        printf("%s average_write_time %f\n", xml_file_name, sum / nproc);
        printf("%s total_write_time %f\n", xml_file_name, t4 - t3);
    }

    if(all_tt)
    {
        free(all_tt);
    }
    */

    MPI_Finalize ();

    if(double_xyz)
    {
        free(double_xyz);
    }


    return 0;
}
Example #19
int write_blocks () 
{
    int         NX, G, O; 
    double      *t;
    /* ADIOS variables declarations for matching gwrite_temperature.ch */
    int         it, i, r;
    uint64_t    adios_groupsize, adios_totalsize;

    if (!rank) printf ("------- Write blocks -------\n");
    // We will have "3 steps * 2 blocks per process * number of processes" blocks
    nsteps = 3;
    nblocks_per_step = 2;
    block_offset = (uint64_t*) malloc (sizeof(uint64_t) * nsteps * nblocks_per_step * size);
    block_count  = (uint64_t*) malloc (sizeof(uint64_t) * nsteps * nblocks_per_step * size);
    gdims        = (uint64_t*) malloc (sizeof(uint64_t) * nsteps);

    adios_init_noxml (comm);
    adios_set_max_buffer_size (10);

    int64_t       m_adios_group;
    int64_t       m_adios_file;

    adios_declare_group (&m_adios_group, "restart", "", adios_flag_yes);
    adios_select_method (m_adios_group, "MPI", "", "");

    adios_define_var (m_adios_group, "NX"
            ,"", adios_integer
            ,0, 0, 0);

    adios_define_var (m_adios_group, "G"
            ,"", adios_integer
            ,0, 0, 0);

    /* have to define O and temperature as many times as we 
       write them within one step (twice) */
    for (it=0; it < nblocks_per_step; it++) {
        adios_define_var (m_adios_group, "O"
                ,"", adios_integer
                ,0, 0, 0);

        adios_define_var (m_adios_group, "t"
                ,"", adios_double
                ,"NX", "G", "O");
    }

    for (it =0; it < nsteps; it++) {
        if (!rank) printf ("Step %d:\n", it);
        NX = 10+it;
        G = nblocks_per_step * NX * size;

        t = (double *) malloc (NX*sizeof(double));

        for (i = 0; i < NX; i++)
            t[i] = rank + it*0.1 + 0.01;

        MPI_Barrier (comm);
        if (it==0) 
            adios_open (&m_adios_file, "restart", fname, "w", comm);
        else
            adios_open (&m_adios_file, "restart", fname, "a", comm);
        adios_groupsize = 4 + 4 + 4 + NX * 8
            + 4 + 4 + 4 + NX * 8;
        adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);

        adios_write(m_adios_file, "NX", (void *) &NX);
        adios_write(m_adios_file, "G", (void *) &G);
        O = rank * nblocks_per_step * NX;
        adios_write(m_adios_file, "O", (void *) &O);
        adios_write(m_adios_file, "t", t);

        printf ("rank %d: block 1: size=%d, offset=%d\n", rank, NX, O);
        for (r = 0; r < size; r++) {
            block_count  [it*nblocks_per_step*size + nblocks_per_step*r] = NX; 
            block_offset [it*nblocks_per_step*size + nblocks_per_step*r] = r * nblocks_per_step * NX; 
        }

        for (i = 0; i < NX; i++)
            t[i] += 0.01;

        O = rank * nblocks_per_step * NX + NX;
        adios_write(m_adios_file, "O", (void *) &O);
        adios_write(m_adios_file, "t", t);

        printf ("rank %d: block 2: size=%d, offset=%d\n", rank, NX, O);
        for (r = 0; r < size; r++) {
            block_count  [it*nblocks_per_step*size + nblocks_per_step*r + 1] = NX; 
            block_offset [it*nblocks_per_step*size + nblocks_per_step*r + 1] = r * nblocks_per_step * NX + NX; 
        }
        gdims [it] = G;

        adios_close (m_adios_file);
        MPI_Barrier (comm);

        free(t);
    }

    adios_finalize (rank);

    return 0;
}
Example #20
static void build_dataset_from_specs(
		const char *filename_prefix,
		const char *transform_name,
		const dataset_xml_spec_t *xml_spec,
		const dataset_global_spec_t *global_spec,
		int num_ts, int num_pgs_per_ts,
		dataset_pg_spec_t pg_specs[num_ts][num_pgs_per_ts]) // Not const because C has a corner case here (http://c-faq.com/ansi/constmismatch.html)
{
	int var;
	char xml_filename[strlen(filename_prefix) + strlen(".xml") + 1];
	char bp_filename[strlen(filename_prefix) + strlen(".bp") + 1];
	int timestep, pg_in_timestep;
	char dimvar[32];

	// Construct the XML and BP filenames
	sprintf(xml_filename, "%s.xml", filename_prefix);
	sprintf(bp_filename, "%s.bp", filename_prefix);

	// Write out the XML file
	FILE *xml_out = fopen(xml_filename, "w");
	assert(xml_out);
	produce_xml(xml_out, xml_spec, transform_name);
	fclose(xml_out);

	// Write out the BP file
	adios_init(xml_filename, MPI_COMM_WORLD);

	// Compute the groupsize contribution of the dimension scalars
	const uint64_t base_groupsize = xml_spec->ndim * 3 * 4; // *3 for the three scalars (N, D, O), *4 for sizeof(adios_integer) (unclear which User API function returns this programmatically)

	// For each timestep, for each PG in that timestep, write out all variables using the provided vardata buffers
	int64_t adios_file;
	for (timestep = 0; timestep < global_spec->num_ts; ++timestep) {
		for (pg_in_timestep = 0; pg_in_timestep < global_spec->num_pgs_per_ts; ++pg_in_timestep) {
			// (Re-)open the file in write or append mode, depending on whether or not this is the first PG written
			const int is_first_pg = (timestep == 0 && pg_in_timestep == 0);
			adios_open(&adios_file, xml_spec->group_name, bp_filename, is_first_pg ? "w" : "a", MPI_COMM_WORLD);

			// Pin the timestep to allow multiple adios_open/adios_close cycles to write
			// to the same timestep (this simulates a parallel file write with fewer cores)
			adios_pin_timestep(timestep + 1); // +1 because we want the timesteps to be 1-based

			const dataset_pg_spec_t *pg_spec = &pg_specs[timestep][pg_in_timestep];

			// Compute the group size
			uint64_t groupsize = compute_groupsize(base_groupsize, xml_spec, pg_spec);
			uint64_t out_groupsize;
			adios_group_size(adios_file, groupsize, &out_groupsize);

			write_adios_dimension_scalars(adios_file, "N", xml_spec->ndim, global_spec->global_dims);
			write_adios_dimension_scalars(adios_file, "D", xml_spec->ndim, pg_spec->pg_dim);
			write_adios_dimension_scalars(adios_file, "O", xml_spec->ndim, pg_spec->pg_offset);

			// Write each variable
			for (var = 0; var < xml_spec->nvar; ++var) {
				adios_write(adios_file, xml_spec->varnames[var], (void*)pg_spec->vardata[var]); // (void*) to get rid of compiler complaining about constness
			}

			// Close the file to commit it
			adios_close(adios_file);
		}
	}
}
Example #21
/* --------------------------------- Main --------------------------------- */
int main( int argc, char ** argv)
{
    char        filename [256];
    MPI_Comm    comm = MPI_COMM_WORLD;
    int         rank, size;
    /* ADIOS variables declarations for matching gwrite_schema.ch */
    int         adios_err;
    uint64_t    adios_groupsize, adios_totalsize;
    int64_t     adios_handle;
    float       tmax = 10.0;
    float       dt = 0.5;  // run from 0.0 increasing with 'dt' up to 'tmax'
    int         i;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    adios_init ("local_array_time.xml", comm);
    strcpy(filename, "local_array_time.bp");


    // Declare and Initialize essential variables
    int num_points = 37;
    float angles[num_points];
    float cos_of_angles[num_points];
    float sin_of_angles[num_points];
    float pi;

    // Obtain pi once and for all
    pi = 4.0*atan(1.0);

    // Initialize angles: every 10 degrees, converted to radians
    float angle_degree = 0;
    for (i=0; i<num_points; i++) {
        angles[i] = pi * angle_degree/180.0;
        angle_degree = angle_degree + 10.0;
    }

    //  Scan over time
    float timestep = 0.0;
    for (timestep = 0.0; timestep <= tmax; timestep = timestep + dt) {

        if (timestep == 0.0) {
            printf("\n\n\nopen file\n\n\n");
            adios_open (&adios_handle, "schema", filename, "w", comm);
        } else {
            adios_open (&adios_handle, "schema", filename, "a", comm);
        }

        for (i=0; i<num_points; i++) {
            cos_of_angles[i] = cos(angles[i]*timestep);
            sin_of_angles[i] = sin(angles[i]*timestep);
        }

        adios_groupsize = 4 + 4 \
                          + 4*num_points \
                          + 4*num_points;
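        // tmax, dt and the angles array are written only once (first step,
        // rank 0), so they are added to the group size only in that case.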
        if (timestep == 0 && rank == 0) {
            adios_groupsize += 4 + 4 + 4*num_points;
        }
        adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);

        adios_write (adios_handle, "num_points", &num_points);
        adios_write (adios_handle, "t", &timestep);
        if (timestep == 0 && rank == 0) {
            adios_write (adios_handle, "tmax", &tmax);
            adios_write (adios_handle, "dt", &dt);
            adios_write (adios_handle, "angles", angles);
        }
        adios_write (adios_handle, "cos", cos_of_angles);
        adios_write (adios_handle, "sin", sin_of_angles);

        adios_close (adios_handle);

        // Write out raw data
        print_data_1D(timestep, num_points, angles, sin_of_angles, 0);
    }

    MPI_Barrier (comm);

    adios_finalize (rank);

    MPI_Finalize ();

    return 0;
}
Example #22
int main (int argc, char ** argv) 
{
	char        filename [256];
        char        color_str[256];
	int         rank, size, i, color;
	int         NX = 100, Global_bounds, Offsets; 
	double      t[NX];
	MPI_Comm    comm = MPI_COMM_WORLD;

	/* ADIOS variables declarations for matching gwrite_temperature.ch */
	uint64_t    adios_groupsize, adios_totalsize;

	MPI_Init (&argc, &argv);
	MPI_Comm_rank (comm, &rank);
	MPI_Comm_size (comm, &size);

        Global_bounds = NX * size;

	strcpy (filename, "adios_global_aggregate_by_color.bp");

	adios_init_noxml (comm);
        adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 10);

        int64_t       m_adios_group;
        int64_t       m_adios_file;

        adios_declare_group (&m_adios_group, "restart", "iter", adios_flag_yes);

        // split into 2 groups 
        color = (rank % 2 == 0 ? 0 : 1);
        sprintf (color_str, "color=%d", color);
        adios_select_method (m_adios_group, "MPI", color_str, "");

        adios_define_var (m_adios_group, "NX"
			,"", adios_integer
			,0, 0, 0);
   
	adios_define_var (m_adios_group, "Global_bounds"
			,"", adios_integer
			,0, 0, 0);

        adios_define_var (m_adios_group, "Offsets"
                         ,"", adios_integer
                         ,0, 0, 0);
   
       adios_define_var (m_adios_group, "temperature"
                        ,"", adios_double
                        ,"NX", "Global_bounds", "Offsets");
   
        adios_open (&m_adios_file, "restart", filename, "w", comm);

        adios_groupsize = 4 + 4 + 4 + NX * 8;

        adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);
	adios_write(m_adios_file, "NX", (void *) &NX);
	adios_write(m_adios_file, "Global_bounds", (void *) &Global_bounds);

        Offsets = rank * NX;
        adios_write(m_adios_file, "Offsets", (void *) &Offsets);

        for (i = 0; i < NX; i++)
            t[i] = Offsets + i;

        adios_write(m_adios_file, "temperature", t);

        adios_close (m_adios_file);

        MPI_Barrier (comm);

	adios_finalize (rank);

	MPI_Finalize ();
	return 0;
}
Example #23
int worker(int argc, char* argv[]) {
    TAU_PROFILE_TIMER(timer, __func__, __FILE__, TAU_USER);
    TAU_PROFILE_START(timer);
    my_printf("%d of %d In worker B\n", myrank, commsize);
    static bool announced = false;

    /* validate input */
    validate_input(argc, argv);

    my_printf("Worker B will execute until it sees n iterations.\n", iterations);

    /* ADIOS: These declarations are required to match the generated
     *        gread_/gwrite_ functions.  (And those functions are
     *        generated by calling 'gpp.py adios_config.xml') ...
     *        EXCEPT THAT THE generation of Reader code is broken.
     *        So, we will write the reader code manually.
     */
    uint64_t  adios_groupsize;
    uint64_t  adios_totalsize;
    int64_t   adios_handle;  // adios_open() expects an int64_t handle
    void * data = NULL;
    uint64_t start[2], count[2];
    int i, j, steps = 0;
    int NX = 10;
    int NY = 1;
    double t[NX];
    double p[NX];
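    // note: p is declared with NX entries, though only the first NY are filled below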

    /* ADIOS: Can duplicate, split the world, whatever.
     *        This allows you to have P writers to N files.
     *        With no splits, everyone shares 1 file, but
     *        can write lock-free by using different areas.
     */
    MPI_Comm  adios_comm, adios_comm_b_to_c;
    adios_comm = MPI_COMM_WORLD;
    //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm);
    adios_comm_b_to_c = MPI_COMM_WORLD;
    //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm_b_to_c);

    enum ADIOS_READ_METHOD method = ADIOS_READ_METHOD_FLEXPATH;
    adios_read_init_method(method, adios_comm, "verbose=3");
    if (adios_errno != err_no_error) {
        fprintf (stderr, "rank %d: Error %d at init: %s\n", myrank, adios_errno, adios_errmsg());
        exit(4);
    }
    if (send_to_c) {
        adios_init("adios_config.xml", adios_comm);
    }

    /* ADIOS: Set up the adios communications and buffers, open the file.
    */
    ADIOS_FILE *fp; // file handler
    ADIOS_VARINFO *vi; // information about one variable 
    ADIOS_SELECTION * sel;
    char      adios_filename_a_to_b[256];
    char      adios_filename_b_to_c[256];
    enum ADIOS_LOCKMODE lock_mode = ADIOS_LOCKMODE_NONE;
    double timeout_sec = 1.0;
    sprintf(adios_filename_a_to_b, "adios_a_to_b.bp");
    sprintf(adios_filename_b_to_c, "adios_b_to_c.bp");
    my_printf ("rank %d: Worker B opening file: %s\n", myrank, adios_filename_a_to_b);
    fp = adios_read_open(adios_filename_a_to_b, method, adios_comm, lock_mode, timeout_sec);
    if (adios_errno == err_file_not_found) {
        fprintf (stderr, "rank %d: Stream not found after waiting %d seconds: %s\n",
        myrank, timeout_sec, adios_errmsg());
        exit(1);
    } else if (adios_errno == err_end_of_stream) {
        // stream has been gone before we tried to open
        fprintf (stderr, "rank %d: Stream terminated before open. %s\n", myrank, adios_errmsg());
        exit(2);
    } else if (fp == NULL) {
        // some other error happened
        fprintf (stderr, "rank %d: Error %d at opening: %s\n", myrank, adios_errno, adios_errmsg());
        exit(3);
    } else {
        my_printf("Found file %s\n", adios_filename_a_to_b);
        my_printf ("File info:\n");
        my_printf ("  current step:   %d\n", fp->current_step);
        my_printf ("  last step:      %d\n", fp->last_step);
        my_printf ("  # of variables: %d:\n", fp->nvars);

        vi = adios_inq_var(fp, "temperature");
        adios_inq_var_blockinfo(fp, vi);

        printf ("ndim = %d\n",  vi->ndim);
        printf ("nsteps = %d\n",  vi->nsteps);
        printf ("dims[%llu][%llu]\n",  vi->dims[0], vi->dims[1]);

        uint64_t slice_size = vi->dims[0]/commsize;
        if (myrank == commsize-1) {
            slice_size = slice_size + vi->dims[0]%commsize;
        }

        start[0] = myrank * slice_size;
        count[0] = slice_size;
        start[1] = 0;
        count[1] = vi->dims[1];

        data = malloc (slice_size * vi->dims[1] * 8);
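        // 8 == sizeof(double), the element type of "temperature"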

        /* Processing loop over the steps (we are already in the first one) */
        while (adios_errno != err_end_of_stream && steps < iterations) {
            steps++; // steps start counting from 1

            TAU_PROFILE_TIMER(adios_recv_timer, "ADIOS recv", __FILE__, TAU_USER);
            TAU_PROFILE_START(adios_recv_timer);
            sel = adios_selection_boundingbox (vi->ndim, start, count);
            adios_schedule_read (fp, sel, "temperature", 0, 1, data);
            adios_perform_reads (fp, 1);

            if (myrank == 0)
                printf ("--------- B Step: %d --------------------------------\n",
                        fp->current_step);

#if 0
            printf("B rank=%d: [0:%lld,0:%lld] = [", myrank, vi->dims[0], vi->dims[1]);
            for (i = 0; i < slice_size; i++) {
                printf (" [");
                for (j = 0; j < vi->dims[1]; j++) {
                    printf ("%g ", *((double *)data + i * vi->dims[1] + j));
                }
                printf ("]");
            }
            printf (" ]\n\n");
#endif

            // advance to the next available step, blocking up to timeout_sec
            adios_advance_step (fp, 0, timeout_sec);
            if (adios_errno == err_step_notready)
            {
                printf ("B rank %d: No new step arrived within the timeout. Quit. %s\n",
                        myrank, adios_errmsg());
                TAU_PROFILE_STOP(adios_recv_timer); // stop the timer before bailing out
                break; // quit while loop
            }
            TAU_PROFILE_STOP(adios_recv_timer);

            /* Do some exchanges with neighbors */
            //do_neighbor_exchange();
            /* "Compute" */
            compute(steps);

            for (i = 0; i < NX; i++) {
                t[i] = steps*100.0 + myrank*NX + i;
            }

            for (i = 0; i < NY; i++) {
                p[i] = steps*1000.0 + myrank*NY + i;
            }

            if (send_to_c) {
                TAU_PROFILE_TIMER(adios_send_timer, "ADIOS send", __FILE__, TAU_USER);
                TAU_PROFILE_START(adios_send_timer);
                /* ADIOS: write to the next application in the workflow */
                if (steps == 1) { // steps was already incremented, so the first write sees 1
                    adios_open(&adios_handle, "b_to_c", adios_filename_b_to_c, "w", adios_comm_b_to_c);
                } else {
                    adios_open(&adios_handle, "b_to_c", adios_filename_b_to_c, "a", adios_comm_b_to_c);
                }
                /* ADIOS: Actually write the data out.
                *        Yes, this is the recommended method, and this way, changes in
                *        configuration with the .XML file will, even in the worst-case
                *        scenario, merely require running 'gpp.py adios_config.xml'
                *        and typing 'make'.
                */
                #include "gwrite_b_to_c.ch"
                /* ADIOS: Close out the file completely and finalize.
                *        If MPI is being used, this must happen before MPI_Finalize().
                */
                adios_close(adios_handle);
                TAU_PROFILE_STOP(adios_send_timer);
            #if 1
            if (!announced) {
                SOS_val foo;
                foo.i_val = NX;
                SOS_pack(example_pub, "NX", SOS_VAL_TYPE_INT, foo);
                SOS_announce(example_pub);
                SOS_publish(example_pub);
                announced = true;
            }
            #endif
            }
            MPI_Barrier(adios_comm_b_to_c);
        }
        MPI_Barrier(MPI_COMM_WORLD);
        adios_read_close(fp);
        /* ADIOS: Close out the file completely and finalize.
        *        If MPI is being used, this must happen before MPI_Finalize().
        */
        adios_read_finalize_method(method);
    }
    if (send_to_c) {
        adios_finalize(myrank);
    }

    free(data);
    //MPI_Comm_free(&adios_comm);
    //MPI_Comm_free(&adios_comm_b_to_c);

    TAU_PROFILE_STOP(timer);
    /* exit */
    return 0;
}
Example #24
int main (int argc, char ** argv) 
{
	char        filename [256];
	int         rank, size, i, j, step, block;
        int         Offset; 

	int         NX = 2; // number of records written per step per process
        int         Width=20;
        int         sub_blocks = 2; // number of record-blocks written per process in one step
        int         steps = 3;

	char        t[NX][Width];
	MPI_Comm    comm = MPI_COMM_WORLD;

	/* ADIOS variables declarations for matching gwrite_temperature.ch */
	int         adios_err;
	uint64_t    adios_groupsize, adios_totalsize;
	int64_t     adios_handle;

	MPI_Init (&argc, &argv);
	MPI_Comm_rank (comm, &rank);
	MPI_Comm_size (comm, &size);

        //Global_bounds = sub_blocks * NX * size;

	strcpy (filename, "steps.bp");

	adios_init_noxml (comm);
        adios_set_max_buffer_size (1);

        int64_t       m_adios_group;
        int64_t       m_adios_file;

        adios_declare_group (&m_adios_group, "steps", "", adios_flag_yes);
        adios_select_method (m_adios_group, "MPI", "", "");


        adios_define_var (m_adios_group, "NX" ,"", adios_integer ,0, 0, 0);
        adios_define_var (m_adios_group, "Width" ,"", adios_integer ,0, 0, 0);
        adios_define_var (m_adios_group, "nproc" ,"", adios_integer ,0, 0, 0);
   
        for (i=0;i<sub_blocks;i++) {
   
           adios_define_var (m_adios_group, "record" ,"", adios_byte ,"NX,Width", "", "");
        }


        for (step=0; step<steps; step++) {

            adios_open (&m_adios_file, "steps", filename, "a", comm);

            adios_groupsize = sub_blocks * (4 + 4 + 4 + (uint64_t) NX * (uint64_t)Width);

            adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);
            adios_write(m_adios_file, "nproc", (void *) &size);
            adios_write(m_adios_file, "NX", (void *) &NX);
            adios_write(m_adios_file, "Width", (void *) &Width);
            /* now we will write the data for each sub block */
            for (block=0;block<sub_blocks;block++) {

                for (i = 0; i < NX; i++)
                    //print 19 chars here + '\0'
                    sprintf (t[i], "r%2d  b%2d  s%2d  i%2d ", rank, block, step, i); 

                adios_write(m_adios_file, "record", t);
            }

            adios_close (m_adios_file);
        }

        MPI_Barrier (comm);

	adios_finalize (rank);

	MPI_Finalize ();
	return 0;
}
Example #25
int main (int argc, char ** argv) 
{
    MPI_Comm    comm = MPI_COMM_WORLD;
    int         rank;
    int         ndx, ndy;             // size of array per processor
    double      * data;

    double      *X;                   //X coordinate
    double      *Y;                   //Y coordinate

    // Offsets and sizes
    int         offs_x, offs_y;       //offset in x and y direction
    int         nx_local, ny_local;   //local address
    int         nx_global, ny_global; //global address
    int         posx, posy;           // position index in the array
    int         i,j;

    int64_t     m_adios_group;
    uint64_t    adios_groupsize, adios_totalsize;
    int64_t     adios_handle;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &nproc);

    if (processArgs(argc, argv)) {
        return 1;
    }

    //will work with each core writing ndx = 65, ndy = 129, (65*4,129*3) global
    ndx = 65;
    ndy = 129;

    //2D array with block,block decomposition
    posx = rank%npx;           // 1st dim
    posy = rank/npx;           // 2nd dim
    offs_x = posx * ndx;
    offs_y = posy * ndy;
    nx_local = ndx;
    ny_local = ndy;
    nx_global = npx * ndx;
    ny_global = npy * ndy;

    data = malloc (ndx * ndy * sizeof(double));
    for( i = 0; i < ndx; i++ )
        for( j = 0; j < ndy; j++)
            data[i*ndy + j] = 1.0*rank;

    X = malloc (ndx * ndy * sizeof(double));
    for( i = 0; i < ndx; i++ )
        for( j = 0; j < ndy; j++)
            X[i*ndy + j] = offs_x + posy*ndx + i*ndx/ndx + (double)ndx*j/ndy;

    Y = malloc (ndx * ndy * sizeof(double));
    Y[0] = offs_y;
    for( i = 0; i < ndx; i++ )
        for( j = 0; j < ndy; j++)
            Y[i*ndy + j] = offs_y + ndy*j/ndy;
  
    char * schema_version = "1.1";
    char * dimensions = "nx_global,ny_global";
 
	adios_init_noxml (comm);
    adios_set_max_buffer_size (50);

    adios_declare_group (&m_adios_group, "structured2d", "", adios_flag_yes);
    adios_select_method (m_adios_group, "MPI", "", "");

    adios_define_var (m_adios_group, "nx_global"
			,"", adios_integer
			,0, 0, 0);
    adios_define_var (m_adios_group, "ny_global"
            ,"", adios_integer
            ,0, 0, 0);
    adios_define_var (m_adios_group, "nproc"
                ,"", adios_integer                
                ,0, 0, 0);
    adios_define_var (m_adios_group, "offs_x"
                ,"", adios_integer
                ,0, 0, 0);
    adios_define_var (m_adios_group, "offs_y"
                ,"", adios_integer
                ,0, 0, 0);
    adios_define_var (m_adios_group, "nx_local"
                ,"", adios_integer
                ,0, 0, 0);
    adios_define_var (m_adios_group, "ny_local"
                ,"", adios_integer
                ,0, 0, 0);
    adios_define_var (m_adios_group, "X"
                    ,"", adios_double
                    ,"nx_local,ny_local", "nx_global,ny_global", "offs_x,offs_y");
    adios_define_var (m_adios_group, "Y"
                    ,"", adios_double
                    ,"nx_local,ny_local", "nx_global,ny_global", "offs_x,offs_y");
    adios_define_var (m_adios_group, "data"
                    ,"", adios_double
                    ,"nx_local,ny_local", "nx_global,ny_global", "offs_x,offs_y");

    adios_define_schema_version (m_adios_group, schema_version);
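    // Describe a 2-D structured mesh whose point coordinates come from the
    // X and Y arrays, and mark "data" as point-centered on that mesh.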
    adios_define_mesh_structured (dimensions, "X,Y", "2", m_adios_group, "structuredmesh");
    adios_define_mesh_timevarying ("no", m_adios_group, "structuredmesh");
    adios_define_var_mesh (m_adios_group, "data", "structuredmesh");
    adios_define_var_centering (m_adios_group, "data", "point");

    adios_open (&adios_handle, "structured2d", "structured2d_noxml.bp", "w", comm);

    adios_groupsize = 7*sizeof(int) \
    + 3*sizeof(double) * (nx_local*ny_local);

    adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
    adios_write (adios_handle, "nproc", &nproc);
    adios_write (adios_handle, "nx_global", &nx_global);
    adios_write (adios_handle, "ny_global", &ny_global);
    adios_write (adios_handle, "offs_x", &offs_x);
    adios_write (adios_handle, "offs_y", &offs_y);
    adios_write (adios_handle, "nx_local", &nx_local);
    adios_write (adios_handle, "ny_local", &ny_local);
    adios_write (adios_handle, "X", X);
    adios_write (adios_handle, "Y", Y);
    adios_write (adios_handle, "data", data);

    adios_close (adios_handle);

    MPI_Barrier (comm);

    free (data);
    free (X);
    free (Y);

	adios_finalize (rank);

	MPI_Finalize ();
	return 0;
}
Example #26
int main (int argc, char ** argv) 
{
    int         rank, size;
    int         NX, NY; 
    int         len, off;
    double      *t = NULL;
    MPI_Comm    comm = MPI_COMM_WORLD;

    uint64_t    start[2], count[2];

    ADIOS_SELECTION *sel;
    int         steps = 0;

#ifdef _USE_GNUPLOT
    int         i, j;
    double      *tmp;
    FILE        *pipe;
#else
    // Variables for ADIOS write
    int64_t     adios_handle;
    uint64_t    adios_groupsize, adios_totalsize;
    char        outfn[256];
#endif

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    adios_read_init_method(ADIOS_READ_METHOD_FLEXPATH, comm, "");

    ADIOS_FILE* fp = adios_read_open("stream.bp", 
                                     ADIOS_READ_METHOD_FLEXPATH, 
                                     comm, ADIOS_LOCKMODE_NONE, 0.0);
    assert(fp != NULL);
    
    ADIOS_VARINFO* nx_info = adios_inq_var( fp, "NX");
    ADIOS_VARINFO* ny_info = adios_inq_var( fp, "NY");

    NX = *((int *)nx_info->value);
    NY= *((int*)ny_info->value);
    
    len = NX / size;
    off = len * rank;

    if (rank == size-1)
        len = len + NX % size;
    
    printf("\trank=%d: NX,NY,len,off = %d\t%d\t%d\t%d\n", rank, NX, NY, len, off);
    assert(len > 0);

    t = (double *) malloc(sizeof(double) * len * NY);
    memset(t, '\0', sizeof(double) * len * NY);
    assert(t != NULL);

    start[0] = off;
    start[1] = 0;
    count[0] = len;
    count[1] = NY;
    // Not working ... 
    //sel = adios_selection_boundingbox (2, start, count);

    sel = malloc(sizeof(ADIOS_SELECTION));
    sel->type=ADIOS_SELECTION_WRITEBLOCK;
    sel->u.block.index = rank;

#ifdef _USE_GNUPLOT
    if ((NX % size) > 0)
    {
        fprintf(stderr, "Equal distribution is required\n");
        return -1;
    }

    if (rank == 0) {
        pipe = popen("gnuplot", "w");
        fprintf(pipe, "set view map\n");
        fprintf(pipe, "set xrange [0:%d]\n", NX-1);

        tmp = (double *) malloc(sizeof(double) * NX * NY);
        assert(tmp != NULL);
    }

#else
    // ADIOS write init
    adios_init ("adios.xml", comm);
#endif

    //while(adios_errno != err_end_of_stream && adios_errno != err_step_notready)
    while(1)
    {
        steps++;
        // Reading
        adios_schedule_read (fp, sel, "var_2d_array", 0, 1, t);
        adios_perform_reads (fp, 1);
        
        printf("step=%d\trank=%d\tfp->current_step=%d\t[%d,%d]\n", 
                steps, rank, fp->current_step, len, NY);
        /*
        // Debugging
        for (i=0; i<len; i++) {
            printf("%d: rank=%d: t[%d,0:4] = ", steps, rank, off+i);
            for (j=0; j<5; j++) {
                printf(", %g", t[i*NY + j]);
            }
            printf(" ...\n");
        }
        */

        // Do something 
#ifdef _USE_GNUPLOT         // Option 1: plotting

        MPI_Gather(t, len * NY, MPI_DOUBLE, tmp, len * NY, MPI_DOUBLE, 0, comm);
        
        if (rank == 0)
        {
            fprintf(pipe, "set title 'Soft X-Rray Signal (shot #%d)'\n", steps);
            fprintf(pipe, "set xlabel 'Channel#'\n");
            fprintf(pipe, "set ylabel 'Timesteps'\n");
            fprintf(pipe, "set cblabel 'Voltage (eV)'\n");

#  ifndef _GNUPLOT_INTERACTIVE
            fprintf(pipe, "set terminal png\n");
            fprintf(pipe, "set output 'fig%03d.png'\n", steps);
#  endif

            fprintf(pipe, "splot '-' matrix with image\n");
            //fprintf(pipe, "plot '-' with lines, '-' with lines, '-' with lines\n");

            double *sum = calloc(NX, sizeof(double));

            for (j = 0; j < NY; j++) {
                for (i = 0; i < NX; i++) {
                    sum[i] += tmp[i * NY + j];
                }
            }

            for (j = 0; j < NY; j++) {
                for (i = 0; i < NX; i++) {
                    fprintf (pipe, "%g ", (-tmp[i * NY + j] + sum[i]/NY)/3276.8);
                }
                fprintf(pipe, "\n");
            }
            fprintf(pipe, "e\n");
            fprintf(pipe, "e\n");
            fflush (pipe);

#  ifdef _GNUPLOT_INTERACTIVE
            printf ("Press [Enter] to continue . . .");
            fflush (stdout);
            getchar ();
#  endif

            free(sum);
        }


#else        // Option 2: BP writing

        snprintf (outfn, sizeof(outfn), "reader_%3.3d.bp", steps);
        adios_open (&adios_handle, "reader", outfn, "w", comm);
        adios_groupsize = 4 * sizeof(int) + sizeof(double) * len * NY; 
        adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
        adios_write (adios_handle, "NX", &NX);
        adios_write (adios_handle, "NY", &NY);
        adios_write (adios_handle, "len", &len);
        adios_write (adios_handle, "off", &off);
        adios_write (adios_handle, "var", t);
        adios_close (adios_handle);

#endif        

        // Advance
        MPI_Barrier (comm);
        adios_advance_step(fp, 0, TIMEOUT_SEC);
        if (adios_errno == err_end_of_stream)
        {
            printf("rank %d, Stream terminated. Quit\n", rank);
            break; // quit while loop
        }
        else if (adios_errno == err_step_notready)
        {
            printf ("rank %d: No new step arrived within the timeout. Quit.\n", rank);
            break; // quit while loop
        }
        else if (adios_errno != err_no_error) {
            printf("ADIOS returned code=%d, msg:%s\n", 
                    adios_errno, adios_get_last_errmsg()); 
            break; // quit while loop
        }
    }
    //
    free(t);

    adios_read_close(fp);
    //printf("rank %d, Successfully closed stream\n", rank);

    adios_read_finalize_method(ADIOS_READ_METHOD_FLEXPATH);
    //printf("rank %d, Successfully finalized read method\n", rank);

#ifndef _USE_GNUPLOT
    adios_finalize (rank);
    //printf("rank %d, Successfully finalized adios\n", rank);
#else
    if (rank==0) {
        free(tmp);
        pclose(pipe);
    }
#endif

    MPI_Finalize ();

    return 0;
}
Example #27
int main (int argc, char ** argv) 
{
    char        filename [256];
    int         rank, size, i, j;
    int         NX = 100, gb, offset;  //local/global/offset
    double      t[NX];
    int         nblocks = 3;
    MPI_Comm    comm = MPI_COMM_WORLD;
    char g_str[100], o_str[100], l_str[100];
    // attributes (from C variables)
    int someints[5] = {5,4,3,2,1};
    double somedoubles[5] = {5.55555, 4.4444, 3.333, 2.22, 1.1};

    /* ADIOS variables declarations for matching gwrite_temperature.ch */
    uint64_t    adios_groupsize, adios_totalsize;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);

    gb = nblocks * NX * size;
    sprintf (g_str, "%d", gb);
    sprintf (l_str, "%d", NX);

    strcpy (filename, "no_xml_write_byid.bp");

    adios_init_noxml (comm);
    adios_set_max_buffer_size (10);

    int64_t       m_adios_group;
    int64_t       m_adios_file;
    int64_t       var_ids[nblocks];

    adios_declare_group (&m_adios_group, "restart", "iter", adios_flag_yes);
    adios_select_method (m_adios_group, "MPI", "", "");

    for (i = 0; i < nblocks; i++)
    {
        offset = rank * nblocks * NX + i * NX;
        sprintf (o_str, "%d", offset);
        var_ids[i] = adios_define_var (m_adios_group, "temperature"
                                       ,"", adios_double
                                       ,l_str, g_str, o_str
        );
        adios_set_transform (var_ids[i], "none");

        // This is here just for fun
        uint64_t varsize = adios_expected_var_size(var_ids[i]);
        // adios_expected_var_size() works here because the definition of the variable
        // does not depend on any dimension variable (but defined with numerical dimensions)
        fprintf (stderr, "Temperature block %d is %" PRIu64 " bytes\n", i, varsize);
    }

    // add some attributes
    adios_define_attribute_byvalue (m_adios_group,
                                    "single_string","", adios_string,  1, "A single string attribute");
    char *strings[] = {"X","Yy","ZzZ"};
    adios_define_attribute_byvalue (m_adios_group,
                                    "three_strings","", adios_string_array,  3, strings);
    adios_define_attribute_byvalue (m_adios_group,
                                    "single_int",   "", adios_integer, 1, &someints);
    adios_define_attribute_byvalue (m_adios_group,
                                    "single_double","", adios_double,  1, &somedoubles);
    adios_define_attribute_byvalue (m_adios_group,
                                    "five_ints",    "", adios_integer, 5, &someints);
    adios_define_attribute_byvalue (m_adios_group,
                                    "five_double",  "", adios_double,  5, &somedoubles);



    adios_open (&m_adios_file, "restart", filename, "w", comm);

    adios_groupsize = nblocks * (4 + 4 + 4 + NX * 8);
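    // The group size only needs to be an upper bound for ADIOS buffering;
    // (4 + 4 + 4) per block mirrors the scalar accounting of the other
    // examples, so the slight overestimate here is harmless.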

    adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);
    /* now we will write the data for each sub block */
    for (i = 0; i < nblocks; i++)
    {
        offset = rank * nblocks * NX + i * NX;
        for (j = 0; j < NX; j++)
            t[j] = offset + j;

        adios_write_byid(m_adios_file, var_ids[i], t);
    }

    adios_close (m_adios_file);

    MPI_Barrier (comm);

    adios_finalize (rank);

    MPI_Finalize ();
    return 0;
}
Example #28
int main (int argc, char ** argv) 
{
    char        filename [256] = "stream.bp";
    int         rank, size;
    int         NX, NY; 
    int         len, off;
    double      *t = NULL;
    MPI_Comm    comm = MPI_COMM_WORLD;

    int64_t     adios_handle;
	uint64_t    adios_groupsize, adios_totalsize;

    uint64_t    start[2], count[2];

    ADIOS_SELECTION *sel;
    int         steps = 0;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (comm, &rank);
    MPI_Comm_size (comm, &size);
    
    // ADIOS read init
    adios_read_init_method (ADIOS_READ_METHOD_BP, comm, "verbose=3");
    
    ADIOS_FILE* fp = adios_read_open_file ("kstar.bp", 
                                           ADIOS_READ_METHOD_BP,
                                           comm);
    assert(fp != NULL);

    ADIOS_VARINFO* nx_info = adios_inq_var( fp, "N");
    ADIOS_VARINFO* ny_info = adios_inq_var( fp, "L");

    NX = *((int *)nx_info->value);
    NY= *((int*)ny_info->value);

    len = NX / size;
    off = len * rank;

    if (rank == size-1)
        len = len + NX % size;

    printf("\trank=%d: NX,NY,len,off = %d\t%d\t%d\t%d\n", rank, NX, NY, len, off);
    assert(len > 0);

    t = (double *) malloc(sizeof(double) * len * NY);
    memset(t, '\0', sizeof(double) * len * NY);
    assert(t != NULL);

    start[0] = off;
    start[1] = 0;
    count[0] = len;
    count[1] = NY;
    sel = adios_selection_boundingbox (2, start, count);
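    // Bounding-box selection: each rank reads the contiguous row slab
    // [off, off+len) x [0, NY).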

    // ADIOS write init
    adios_init ("adios.xml", comm);
    
    remove (filename);
    //int ii;
    //for(ii = 0; ii<10; ii++){
    //    for (i = 0; i < len * NY; i++)
    //        t[i] = ii*1000 + rank;

    while(adios_errno != err_end_of_stream && adios_errno != err_step_notready)
    {
        steps++;
        // Reading
        adios_schedule_read (fp, sel, "var", 0, 1, t);
        adios_perform_reads (fp, 1);

        // Debugging
        //for (i = 0; i < len*NY; i++)  t[i] = off * NY + i;

        printf("step=%d\trank=%d\t[%d,%d]\n", steps, rank, len, NY);

        // Writing
        adios_open (&adios_handle, "writer", filename, "a", comm);
        adios_groupsize = 4*4 + 8*len*NY;
        adios_group_size (adios_handle, adios_groupsize, &adios_totalsize);
        adios_write (adios_handle, "NX", &NX);
        adios_write (adios_handle, "NY", &NY);
        adios_write (adios_handle, "len", &len);
        adios_write (adios_handle, "off", &off);
        adios_write (adios_handle, "var_2d_array", t);
        adios_close (adios_handle);


        // Advance
        MPI_Barrier (comm);
        adios_advance_step(fp, 0, TIMEOUT_SEC);
    }
    free(t);

    MPI_Barrier (comm);
    adios_read_close(fp);

    if (rank==0) 
        printf ("We have processed %d steps\n", steps);

    MPI_Barrier (comm);
    adios_read_finalize_method(ADIOS_READ_METHOD_BP);

    adios_finalize (rank);

    MPI_Finalize ();

    return 0;
}
Example #29
int worker(int argc, char* argv[]) {
    TAU_PROFILE_TIMER(timer, __func__, __FILE__, TAU_USER);
    TAU_PROFILE_START(timer);
    static bool announced = false;
    my_printf("%d of %d In worker A\n", myrank, commsize);

    /* validate input */
    validate_input(argc, argv);

    my_printf("Worker A will execute %d iterations.\n", iterations);

    /* ADIOS: These declarations are required to match the generated
     *        gread_/gwrite_ functions.  (And those functions are
     *        generated by calling 'gpp.py adios_config.xml') ...
     */
    uint64_t  adios_groupsize;
    uint64_t  adios_totalsize;
    int64_t   adios_handle;  // adios_open() expects an int64_t handle
    char      adios_filename[256];
    MPI_Comm  adios_comm;

    /* ADIOS: Can duplicate, split the world, whatever.
     *        This allows you to have P writers to N files.
     *        With no splits, everyone shares 1 file, but
     *        can write lock-free by using different areas.
     */
    //MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm);
    adios_comm = MPI_COMM_WORLD;

    int NX = 10;
    int NY = 1;
    double t[NX];
    double p[NX];

    /* ADIOS: Set up the adios communications and buffers, open the file.
     */
    if (send_to_b) {
        sprintf(adios_filename, "adios_a_to_b.bp");
        adios_init("adios_config.xml", adios_comm);
    }

    int index, i;
    for (index = 0 ; index < iterations ; index++ ) {
        /* Do some exchanges with neighbors */
        do_neighbor_exchange();
        /* "Compute" */
        compute(index);
        /* Write output */
        //my_printf("a");

        for (i = 0; i < NX; i++) {
            t[i] = index*100.0 + myrank*NX + i;
        }

        for (i = 0; i < NY; i++) {
            p[i] = index*1000.0 + myrank*NY + i;
        }

        if (send_to_b) {
            TAU_PROFILE_TIMER(adiostimer, "ADIOS send", __FILE__, TAU_USER);
            TAU_PROFILE_START(adiostimer);
            if (index == 0) {
                adios_open(&adios_handle, "a_to_b", adios_filename, "w", adios_comm);
            } else {
                adios_open(&adios_handle, "a_to_b", adios_filename, "a", adios_comm);
            }
            /* ADIOS: Actually write the data out.
            *        Yes, this is the recommended method, and this way, changes in
            *        configuration with the .XML file will, even in the worst-case
            *        scenario, merely require running 'gpp.py adios_config.xml'
            *        and typing 'make'.
            */
            #include "gwrite_a_to_b.ch"
            /* ADIOS: Close out the file completely and finalize.
            *        If MPI is being used, this must happen before MPI_Finalize().
            */
            adios_close(adios_handle);
            TAU_PROFILE_STOP(adiostimer);
            #if 1
            if (!announced) {
                SOS_val foo;
                foo.i_val = NX;
                SOS_pack(example_pub, "NX", SOS_VAL_TYPE_INT, foo);
                SOS_announce(example_pub);
                SOS_publish(example_pub);
                announced = true;
            }
            #endif
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (send_to_b) {
        adios_finalize(myrank);
    }
    my_printf("Worker A exting.\n");
    //MPI_Comm_free(&adios_comm);

    TAU_PROFILE_STOP(timer);
    /* exit */
    return 0;
}
Example #30
int main (int argc, char ** argv) 
{
	char        filename [256];
	int         rank, size, i, block;
	int         NX = 100, Global_bounds, Offsets; 
	double      t[NX];
        int         sub_blocks = 3;
	MPI_Comm    comm = MPI_COMM_WORLD;

	/* ADIOS variables declarations for matching gwrite_temperature.ch */
	uint64_t    adios_groupsize, adios_totalsize;

	MPI_Init (&argc, &argv);
	MPI_Comm_rank (comm, &rank);
	MPI_Comm_size (comm, &size);

        Global_bounds = sub_blocks * NX * size;

	strcpy (filename, "adios_global_no_xml.bp");

	adios_init_noxml (comm);
        adios_allocate_buffer (ADIOS_BUFFER_ALLOC_NOW, 10);

        int64_t       m_adios_group;
        int64_t       m_adios_file;

        adios_declare_group (&m_adios_group, "restart", "iter", adios_flag_yes);
        adios_select_method (m_adios_group, "MPI", "", "");


        adios_define_var (m_adios_group, "NX"
			,"", adios_integer
			,0, 0, 0);
   
	adios_define_var (m_adios_group, "Global_bounds"
			,"", adios_integer
			,0, 0, 0);

        for (i=0;i<sub_blocks;i++) {
   
           adios_define_var (m_adios_group, "Offsets"
                        ,"", adios_integer
                        ,0, 0, 0);
   
           int64_t varid;
           varid = adios_define_var (m_adios_group, "temperature"
                        ,"", adios_double
                        ,"NX", "Global_bounds", "Offsets");
           adios_set_transform (varid, "none");
        }
   
        adios_open (&m_adios_file, "restart", filename, "w", comm);

        adios_groupsize = sub_blocks * (4 + 4 + 4 + NX * 8);

        adios_group_size (m_adios_file, adios_groupsize, &adios_totalsize);
	adios_write(m_adios_file, "NX", (void *) &NX);
	adios_write(m_adios_file, "Global_bounds", (void *) &Global_bounds);
/* now we will write the data for each sub block */
        for (block=0;block<sub_blocks;block++) {

           Offsets = rank * sub_blocks * NX + block*NX;
           adios_write(m_adios_file, "Offsets", (void *) &Offsets);

           for (i = 0; i < NX; i++)
               t[i] = Offsets + i;

           adios_write(m_adios_file, "temperature", t);
        }

        adios_close (m_adios_file);

        MPI_Barrier (comm);

	adios_finalize (rank);

	MPI_Finalize ();
	return 0;
}