Example #1
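/* Demonstrates GPTL memory usage reporting: GPTLprint_memusage() is called
 * before and after MPI_Init, and again after an MPI_Bcast, to show how much
 * memory MPI initialization and communication consume. */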
#include <mpi.h>
#include "gptl.h"

int main (int argc, char **argv)
{
    int rootid;
    int iam = -1;
    int ret;

    ret = GPTLinitialize ();
    ret = GPTLprint_memusage ("before_MPI_Init");
    ret = MPI_Init (&argc, &argv);
    ret = GPTLprint_memusage ("after_MPI_Init ");
    ret = MPI_Comm_rank (MPI_COMM_WORLD, &iam);
    if (iam == 0)
        rootid = iam;
    ret = MPI_Bcast (&rootid, 1, MPI_INT, 0, MPI_COMM_WORLD);
    ret = GPTLprint_memusage ("after_MPI_Bcast ");
    ret = MPI_Finalize ();
    return 0;
}
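Example #2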
/** Run tests for netCDF name functions (dimension, attribute, and variable names).
 *
 * @param argc argument count
 * @param argv array of arguments
 */
int
main(int argc, char **argv)
{
    int verbose = 1;
    
    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** Different output flavors. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_names_pnetcdf.nc",
							  "test_names_classic.nc",
							  "test_names_serial4.nc",
							  "test_names_parallel4.nc"};
	
    /** Number of processors that will do IO. In this test we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the mpi rank between io tasks. Always 1 in this
     * test. */
    int ioproc_stride = 1;

    /** Number of the aggregator? Always 0 in this test. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Array index per processing unit. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. */
    int iosysid;

    /** The ncid of the netCDF file. */
    int ncid = 0;

    /** The ID of the netCDF variable. */
    int varid;

    /** Storage of netCDF-4 files (contiguous vs. chunked). */
    int storage;

    /** Chunksizes set in the file. */
    size_t my_chunksize[NDIM];
    
    /** The shuffle filter setting in the netCDF-4 test file. */
    int shuffle;
    
    /** Non-zero if deflate set for the variable in the netCDF-4 test file. */
    int deflate;

    /** The deflate level set for the variable in the netCDF-4 test file. */
    int deflate_level;

    /** Non-zero if fletcher32 filter is used for variable. */
    int fletcher32;

    /** Endianness of variable. */
    int endianness;

    /* Size of the file chunk cache. */
    size_t chunk_cache_size;

    /* Number of elements in file cache. */
    size_t nelems;

    /* File cache preemption. */
    float preemption;

    /* Size of the var chunk cache. */
    size_t var_cache_size;

    /* Number of elements in var cache. */
    size_t var_cache_nelems;

    /* Var cache preemption. */    
    float var_cache_preemption;
    
    /** The I/O description ID. */
    int ioid;

    /** A buffer for sample data. */
    float *buffer;

    /** A buffer for reading data back from the file. */
    int *read_buffer;

    /** The decomposition mapping. */
    PIO_Offset *compdof;

    /** Return code. */
    int ret;

    /** Index for loops. */
    int fmt, d, d1, i;
    
#ifdef TIMING    
    /* Initialize the GPTL timing library. */
    if ((ret = GPTLinitialize ()))
	return ret;
#endif    
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
    if (verbose)
	printf("%d: ParallelIO Library example1 running on %d processors.\n",
	       my_rank, ntasks);

    /* keep things simple - 1 iotask per MPI process */    
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
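    /* For example, on a hypothetical 4-task run with X_DIM_LEN * Y_DIM_LEN
     * of 16, each task holds 4 elements: rank 0 gets compdof = {1, 2, 3, 4},
     * rank 1 gets {5, 6, 7, 8}, and so on. */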
	
    /* Create the PIO decomposition for this test. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

    /* How many flavors will we be running for? */
    int num_flavors = 0;
    int fmtidx = 0;
#ifdef _PNETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_PNETCDF;
#endif
#ifdef _NETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_NETCDF;
#endif
#ifdef _NETCDF4
    num_flavors += 2;
    format[fmtidx++] = PIO_IOTYPE_NETCDF4C;
    format[fmtidx] = PIO_IOTYPE_NETCDF4P;
#endif
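    /* Only the first num_flavors entries of format[] are valid now; they
     * correspond to the I/O libraries available in this build. */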
    
    /* Use PIO to create the example file in each of the
     * available ways. */
    for (fmt = 0; fmt < num_flavors; fmt++) 
    {
	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}

	/* Check the dimension names. */
	if ((ret = check_dim_names(my_rank, ncid, verbose)))
	    ERR(ret);

	/* Define a global attribute. */
	int att_val = 42;
	if ((ret = PIOc_put_att_int(ncid, NC_GLOBAL, ATT_NAME, NC_INT, 1, &att_val)))
	    ERR(ret);

	/* Check the attribute name. */
	if ((ret = check_att_name(my_rank, ncid, verbose)))
	    ERR(ret);

	/* Define a variable. */
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);

	/* Check the variable name. */
	if ((ret = check_var_name(my_rank, ncid, verbose)))
	    ERR(ret);

	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);

	/* Put a barrier here to make verbose output look better. */
	if ((ret = MPI_Barrier(MPI_COMM_WORLD)))
	    MPIERR(ret);

    }
	
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    
    

    return 0;
}
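Example #3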
/** Run Tests for NetCDF-4 Functions.
 *
 * @param argc argument count
 * @param argv array of arguments
 */
int
main(int argc, char **argv)
{
    int verbose = 1;
    
    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Different output flavors. The example file is written (and
     * then read) four times. The first two flavors,
     * parallel-netcdf and netCDF serial, both produce a netCDF
     * classic format file (but with different libraries). The
     * last two produce netCDF4/HDF5 format files, written with
     * and without using netCDF-4 parallel I/O. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. Two of them (pnetcdf and
     * classic) will be in classic netCDF format, the others
     * (serial4 and parallel4) will be in netCDF-4/HDF5
     * format. All four can be read by the netCDF library, and all
     * will contain the same contents. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_nc4_pnetcdf.nc",
							  "test_nc4_classic.nc",
							  "test_nc4_serial4.nc",
							  "test_nc4_parallel4.nc"};
	
    /** Number of processors that will do IO. In this example we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the mpi rank between io tasks. Always 1 in this
     * example. */
    int ioproc_stride = 1;

    /** Number of the aggregator? Always 0 in this example. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Array index per processing unit. This is the number of
     * elements of the data array that will be handled by each
     * processor. In this example there are 16 data elements. If the
     * example is run on 4 processors, then elements_per_pe will be 4. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. It is set by
     * PIOc_Init_Intracomm(). It references an internal structure
     * containing the general IO subsystem data and MPI
     * structure. It is passed to PIOc_finalize() to free
     * associated resources, after all I/O, but before
     * MPI_Finalize is called. */
    int iosysid;

    /** The ncid of the netCDF file created in this example. */
    int ncid = 0;

    /** The ID of the netCDF variable in the example file. */
    int varid;

    /** The I/O description ID as passed back by PIOc_InitDecomp()
     * and freed in PIOc_freedecomp(). */
    int ioid;

    /** A buffer for sample data.  The size of this array will
     * vary depending on how many processors are involved in the
     * execution of the example code. Its length will be the same
     * as elements_per_pe.*/
    float *buffer;

    /** A buffer for reading data back from the file. The size of
     * this array will vary depending on how many processors are
     * involved in the execution of the example code. Its length
     * will be the same as elements_per_pe.*/
    int *read_buffer;

    /** A 1-D array which holds the decomposition mapping for this
     * example. The size of this array will vary depending on how
     * many processors are involved in the execution of the
     * example code. Its length will be the same as
     * elements_per_pe. */
    PIO_Offset *compdof;

    /** Return code. */
    int ret;
    
#ifdef TIMING    
    /* Initialize the GPTL timing library. */
    if ((ret = GPTLinitialize ()))
	return ret;
#endif    
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);
    if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
    if (verbose)
	printf("%d: ParallelIO Library example1 running on %d processors.\n",
	       my_rank, ntasks);

    /* keep things simple - 1 iotask per MPI process */    
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (int i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
	
    /* Create the PIO decomposition for this test. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with INIT. */
    if ((ret = MPE_Log_event(event_num[END][INIT], 0, "end init")))
	MPIERR(ret);
#endif /* HAVE_MPE */
	
    /* Use PIO to create the example file in each of the four
     * available ways. */
    for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++) 
    {
#ifdef HAVE_MPE
	/* Log with MPE that we are starting CREATE. */
	if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (int d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);

	/* For netCDF-4 files, set the chunksize to improve performance. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
		ERR(ret);

	    /** Check that the inq_var_chunking function works. */
	    int storage;
	    size_t my_chunksize[NDIM];
	    if ((ret = PIOc_inq_var_chunking(ncid, 0, &storage, my_chunksize)))
	    	ERR(ret);
	    
	    /** For serial netCDF-4, only processor rank 0 gets the answers. */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4C && !my_rank ||
		format[fmt] == PIO_IOTYPE_NETCDF4P)
	    {
		if (storage != NC_CHUNKED)
		    ERR(ERR_AWFUL);
		for (int d = 0; d < NDIM; d++)
		    if (my_chunksize[d] != chunksize[d])
		    	ERR(ERR_AWFUL);
	    }

	    /* Check that the inq_var_deflate function works. */
	    int shuffle;
	    int deflate;
	    int deflate_level;
	    if ((ret = PIOc_inq_var_deflate(ncid, 0, &shuffle, &deflate, &deflate_level)))
	    	ERR(ret);

	    /** For serial netCDF-4, only processor rank 0 gets the
	     * answers. Also, deflate is turned on by default. */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4C && !my_rank)
		if (shuffle || !deflate || deflate_level != 1)
		    ERR(ERR_AWFUL);

	    /* For parallel netCDF, no compression available. :-( */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4P)
		if (shuffle || deflate)
		    ERR(ERR_AWFUL);

	} else {
	    /* Trying to set chunking for non-netCDF-4 files results
	     * in the PIO_ENOTNC4 error. */
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}	    
	
	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);
    }
	
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    
    
    return 0;
}
Example #4
/** Main execution of code.

    Executes the functions to:
    - create a new examplePioClass instance
    - initialize MPI and the ParallelIO libraries
    - create the decomposition for this example
    - create the netCDF output file
    - define the variable in the file
    - write data to the variable in the file using decomposition
    - read the data back from the file using decomposition
    - close the file
    - clean up resources

    The example can be run from the command line (on systems that support it) like this:
    <pre>
    mpiexec -n 4 ./examplePio
    </pre>

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump) for a 4-processor
    run:

    <pre>
    netcdf examplePio_c {
    dimensions:
    x = 16 ;
    variables:
    int foo(x) ;
    data:

    foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
    }
    </pre>
    
    @param [in] argc argument count
    @param [in] argv argument array
    @return 0 for success, error code otherwise.
*/
int main(int argc, char* argv[])
{
    /** Set to non-zero to get output to stdout. */
    int verbose = 0;

    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Different output flavors. The example file is written (and
     * then read) four times. The first two flavors,
     * parallel-netcdf and netCDF serial, both produce a netCDF
     * classic format file (but with different libraries). The
     * last two produce netCDF4/HDF5 format files, written with
     * and without using netCDF-4 parallel I/O. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. Two of them (pnetcdf and
     * classic) will be in classic netCDF format, the others
     * (serial4 and parallel4) will be in netCDF-4/HDF5
     * format. All four can be read by the netCDF library, and all
     * will contain the same contents. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"example2_pnetcdf.nc",
							  "example2_classic.nc",
							  "example2_serial4.nc",
							  "example2_parallel4.nc"};
	
    /** Number of processors that will do IO. In this example we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the mpi rank between io tasks. Always 1 in this
     * example. */
    int ioproc_stride = 1;

    /** Number of the aggregator? Always 0 in this example. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Array index per processing unit. This is the number of
     * elements of the data array that will be handled by each
     * processor. In this example there are 16 data elements. If the
     * example is run on 4 processors, then elements_per_pe will be 4. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. It is set by
     * PIOc_Init_Intracomm(). It references an internal structure
     * containing the general IO subsystem data and MPI
     * structure. It is passed to PIOc_finalize() to free
     * associated resources, after all I/O, but before
     * MPI_Finalize is called. */
    int iosysid;

    /** The ncid of the netCDF file created in this example. */
    int ncid = 0;

    /** The ID of the netCDF variable in the example file. */
    int varid;

    /** The I/O description ID as passed back by PIOc_InitDecomp()
     * and freed in PIOc_freedecomp(). */
    int ioid;

    /** A buffer for sample data.  The size of this array will
     * vary depending on how many processors are involved in the
     * execution of the example code. Its length will be the same
     * as elements_per_pe.*/
    float *buffer;

    /** A buffer for reading data back from the file. The size of
     * this array will vary depending on how many processors are
     * involved in the execution of the example code. Its length
     * will be the same as elements_per_pe.*/
    int *read_buffer;

    /** A 1-D array which holds the decomposition mapping for this
     * example. The size of this array will vary depending on how
     * many processors are involved in the execution of the
     * example code. Its length will be the same as
     * elements_per_pe. */
    PIO_Offset *compdof;

#ifdef HAVE_MPE	
    /** MPE event numbers used to track start and stop of
     * different parts of the program for later display with
     * Jumpshot. */
    int event_num[2][NUM_EVENTS];
#endif /* HAVE_MPE */

    /** Needed for command line processing. */
    int c;

    /* Parse command line. */
    while ((c = getopt(argc, argv, "v")) != -1)
	switch (c)
	{
	case 'v':
	    verbose++;
	    break;
	default:
	    break;
	}

    /** Return code. */
    int ret;

#ifdef TIMING
    /* Initialize the GPTL timing library. */
    if ((ret = GPTLinitialize ()))
	return ret;
#endif
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);
    if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
    if (verbose)
	printf("%d: ParallelIO Library example1 running on %d processors.\n",
	       my_rank, ntasks);

#ifdef HAVE_MPE
    /* Initialize MPE logging. */
    if ((ret = MPE_Init_log()))
	ERR(ret);
    if (init_logging(my_rank, event_num))
	ERR(ERR_LOGGING);

    /* Log with MPE that we are starting INIT. */
    if ((ret = MPE_Log_event(event_num[START][INIT], 0, "start init")))
	MPIERR(ret);
#endif /* HAVE_MPE */

    /* keep things simple - 1 iotask per MPI process */    
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (int i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
	
    /* Create the PIO decomposition for this example. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with INIT. */
    if ((ret = MPE_Log_event(event_num[END][INIT], 0, "end init")))
	MPIERR(ret);
#endif /* HAVE_MPE */
	
    /* Use PIO to create the example file in each of the four
     * available ways. */
    for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++) 
    {
#ifdef HAVE_MPE
	/* Log with MPE that we are starting CREATE. */
	if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (int d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);
	/* For netCDF-4 files, set the chunksize to improve performance. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
		ERR(ret);
	
	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

#ifdef HAVE_MPE
	/* Log with MPE that we are done with CREATE. */
	if ((ret = MPE_Log_event(event_num[END][CREATE_PNETCDF + fmt], 0, "end create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* Allocate space for sample data. */
	if (!(buffer = malloc(elements_per_pe * sizeof(float))))
	    return PIO_ENOMEM;

	/* Write data for each timestep. */
	for (int ts = 0; ts < NUM_TIMESTEPS; ts++) {

#ifdef HAVE_MPE
	    /* Log with MPE that we are starting CALCULATE. */
	    if ((ret = MPE_Log_event(event_num[START][CALCULATE], 0, "start calculate")))
		MPIERR(ret);
#endif /* HAVE_MPE */

	    /* Calculate sample data. Add some math function calls to make this slower. */
	    for (int i = 0; i < elements_per_pe; i++)
		if ((ret = calculate_value(my_rank, ts, &buffer[i])))
		    ERR(ret);

#ifdef HAVE_MPE
	    /* Log with MPE that we are done with CALCULATE. */
	    if ((ret = MPE_Log_event(event_num[END][CALCULATE], 0, "end calculate")))
		MPIERR(ret);
	    /* Log with MPE that we are starting WRITE. */
	    if ((ret = MPE_Log_event(event_num[START][WRITE], 0, "start write")))
		MPIERR(ret);
#endif /* HAVE_MPE */
		
	    /* Write data to the file. */
	    if (verbose)
		printf("rank: %d Writing sample data...\n", my_rank);

	    if ((ret = PIOc_setframe(ncid, varid, ts)))
		ERR(ret);
	    if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
					 buffer, NULL)))
		ERR(ret);
	    if ((ret = PIOc_sync(ncid)))
		ERR(ret);
#ifdef HAVE_MPE
	    /* Log with MPE that we are done with WRITE. */
	    if ((ret = MPE_Log_event(event_num[END][WRITE], 0, "end write")))
		MPIERR(ret);
#endif /* HAVE_MPE */
	}

#ifdef HAVE_MPE
	/* Log with MPE that we are starting CLOSE. */
	if ((ret = MPE_Log_event(event_num[START][CLOSE], 0, "start close")))
	    MPIERR(ret);
#endif /* HAVE_MPE */
		
	/* Free buffer space used in this example. */
	free(buffer);
	
	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);

#ifdef HAVE_MPE
	/* Log with MPE that we are done with CLOSE. */
	if ((ret = MPE_Log_event(event_num[END][CLOSE], 0, "end close")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* After each file is closed, make all processors wait so that
	 * all start creating the next file at the same time. */
	if ((ret = MPI_Barrier(MPI_COMM_WORLD)))
	    MPIERR(ret);
    }
	
#ifdef HAVE_MPE
    /* Log with MPE that we are starting FREE. */
    if ((ret = MPE_Log_event(event_num[START][FREE], 0, "start free")))
	MPIERR(ret);
#endif /* HAVE_MPE */
    
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with FREE. */
    if ((ret = MPE_Log_event(event_num[END][FREE], 0, "end free")))
	MPIERR(ret);
    /* Log with MPE that we are starting READ. */
    if ((ret = MPE_Log_event(event_num[START][READ], 0, "start read")))
	MPIERR(ret);
#endif /* HAVE_MPE */
    
    /* Check the output file. */
    /* if (!my_rank) */
    /*     for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++)  */
    /* 	if ((ret = check_file(ntasks, filename[fmt]))) */
    /* 	    ERR(ret); */

#ifdef HAVE_MPE
    /* Log with MPE that we are done with READ. */
    if ((ret = MPE_Log_event(event_num[END][READ], 0, "end read")))
	MPIERR(ret);
#endif /* HAVE_MPE */

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    

    if (verbose)
	printf("rank: %d SUCCESS!\n", my_rank);
    return 0;
}
Example #5
/* Fortran wrapper functions start here */
int gptlinitialize (void)
{
  return GPTLinitialize ();
}
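These lower-case wrappers exist because Fortran compilers typically emit lower-case (and sometimes underscore-suffixed) symbol names; each wrapper simply forwards to the corresponding GPTL C function. A minimal sketch of a companion wrapper, assuming the same pattern holds for GPTLfinalize (which the C examples on this page call directly):

int gptlfinalize (void)
{
  return GPTLfinalize ();
}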
Example #6
/** @brief Main execution of code.

    Executes the functions to:
    - create a new examplePioClass instance
    - initialize MPI and the ParallelIO libraries
    - create the decomposition for this example
    - create the netCDF output file
    - define the variable in the file
    - write data to the variable in the file using decomposition
    - read the data back from the file using decomposition
    - close the file
    - clean up resources

    The example can be run from the command line (on systems that support it) like this:
    <pre>
    mpiexec -n 4 ./examplePio
    </pre>

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump) for a 4-processor
    run:

    <pre>
    netcdf examplePio_c {
    dimensions:
    x = 16 ;
    variables:
    int foo(x) ;
    data:

    foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
    }
    </pre>
    
    @param [in] argc argument count
    @param [in] argv argument array
    @return 0 for success, error code otherwise.
*/
    int main(int argc, char* argv[])
    {
	/** Set to non-zero to get output to stdout. */
	int verbose = 0;

	/** Zero-based rank of processor. */
	int my_rank;

	/** Number of processors involved in current execution. */
	int ntasks;

	/** Different output flavors. The example file is written (and
	 * then read) four times. The first two flavors,
	 * parallel-netcdf and netCDF serial, both produce a netCDF
	 * classic format file (but with different libraries). The
	 * last two produce netCDF4/HDF5 format files, written with
	 * and without using netCDF-4 parallel I/O. */
	int format[NUM_NETCDF_FLAVORS];

	/** Number of processors that will do IO. In this example we
	 * will do IO from all processors. */
	int niotasks;

	/** Stride in the mpi rank between io tasks. Always 1 in this
	 * example. */
	int ioproc_stride = 1;

	/** Zero based rank of first processor to be used for I/O. */
	int ioproc_start = 0;

	/** The dimension ID. */
	int dimid;

	/** Array index per processing unit. This is the number of
	 * elements of the data array that will be handled by each
	 * processor. In this example there are 16 data elements. If the
	 * example is run on 4 processors, then elements_per_pe will be 4. */
	PIO_Offset elements_per_pe;

	/* Length of the dimensions in the data. This simple example
	 * uses one-dimensional data. The length along that dimension
	 * is DIM_LEN (16). */
	int dim_len[1] = {DIM_LEN};

	/** The ID for the parallel I/O system. It is set by
	 * PIOc_Init_Intracomm(). It references an internal structure
	 * containing the general IO subsystem data and MPI
	 * structure. It is passed to PIOc_finalize() to free
	 * associated resources, after all I/O, but before
	 * MPI_Finalize is called. */
	int iosysid;

	/** The ncid of the netCDF file created in this example. */
	int ncid;

	/** The ID of the netCDF variable in the example file. */
	int varid;

	/** The I/O description ID as passed back by PIOc_InitDecomp()
	 * and freed in PIOc_freedecomp(). */
	int ioid;

	/** A buffer for sample data.  The size of this array will
	 * vary depending on how many processors are involved in the
	 * execution of the example code. Its length will be the same
	 * as elements_per_pe.*/
	int *buffer;

	/** A 1-D array which holds the decomposition mapping for this
	 * example. The size of this array will vary depending on how
	 * many processors are involved in the execution of the
	 * example code. Its length will be the same as
	 * elements_per_pe. */
	PIO_Offset *compdof;

        /** Test filename. */
        char filename[NC_MAX_NAME + 1];

        /** The number of netCDF flavors available in this build. */
        int num_flavors = 0;
            
	/** Used for command line processing. */
	int c;

	/** Return value. */
	int ret;

	/* Parse command line. */
	while ((c = getopt(argc, argv, "v")) != -1)
	    switch (c)
	    {
	    case 'v':
		verbose++;
		break;
	    default:
		break;
	    }

#ifdef TIMING    
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif    
    
	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);

	/* Check that a valid number of processors was specified. */
	if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	      ntasks == 8 || ntasks == 16))
	    fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
	if (verbose)
	    printf("%d: ParallelIO Library example1 running on %d processors.\n",
		   my_rank, ntasks);

	/* keep things simple - 1 iotask per MPI process */    
	niotasks = ntasks;

        /* Turn on logging if available. */
        /* PIOc_set_log_level(4); */

        /* Change error handling to return errors. */
        if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL)))
            return ret;
        
	/* Initialize the PIO IO system. This specifies how
	 * many and which processors are involved in I/O. */
	if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				       ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	    ERR(ret);

	/* Describe the decomposition. This is a 1-based array, so add 1! */
	elements_per_pe = DIM_LEN / ntasks;
	if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	    return PIO_ENOMEM;
	for (int i = 0; i < elements_per_pe; i++)
	    compdof[i] = my_rank * elements_per_pe + i + 1;
	
	/* Create the PIO decomposition for this example. */
	if (verbose)
	    printf("rank: %d Creating decomposition...\n", my_rank);
	if ((ret = PIOc_InitDecomp(iosysid, PIO_INT, NDIM, dim_len, (PIO_Offset)elements_per_pe,
				   compdof, &ioid, NULL, NULL, NULL)))
	    ERR(ret);
	free(compdof);

        /* The number of flavors may change with the build parameters. */
#ifdef _PNETCDF
        format[num_flavors++] = PIO_IOTYPE_PNETCDF;
#endif
        format[num_flavors++] = PIO_IOTYPE_NETCDF;
#ifdef _NETCDF4
        format[num_flavors++] = PIO_IOTYPE_NETCDF4C;
        format[num_flavors++] = PIO_IOTYPE_NETCDF4P;
#endif
	
	/* Use PIO to create the example file in each of the
	 * available ways. */
	for (int fmt = 0; fmt < num_flavors; fmt++) 
	{
            /* Create a filename. */
            sprintf(filename, "example1_%d.nc", fmt);
            
	    /* Create the netCDF output file. */
	    if (verbose)
		printf("rank: %d Creating sample file %s with format %d...\n",
		       my_rank, filename, format[fmt]);
	    if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename,
				       PIO_CLOBBER)))
		ERR(ret);
	
	    /* Define netCDF dimension and variable. */
	    if (verbose)
		printf("rank: %d Defining netCDF metadata...\n", my_rank);
	    if ((ret = PIOc_def_dim(ncid, DIM_NAME, (PIO_Offset)dim_len[0], &dimid)))
		ERR(ret);
	    if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, &dimid, &varid)))
		ERR(ret);
	    if ((ret = PIOc_enddef(ncid)))
		ERR(ret);
	
	    /* Prepare sample data. */
	    if (!(buffer = malloc(elements_per_pe * sizeof(int))))
	        return PIO_ENOMEM;
	    for (int i = 0; i < elements_per_pe; i++)
	        buffer[i] = START_DATA_VAL + my_rank;

	    /* Write data to the file. */
	    if (verbose)
	        printf("rank: %d Writing sample data...\n", my_rank);
	    if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
	    			     buffer, NULL)))
	        ERR(ret);
	    if ((ret = PIOc_sync(ncid)))
	        ERR(ret);

	    /* Free buffer space used in this example. */
	    free(buffer);
	
	    /* Close the netCDF file. */
	    if (verbose)
		printf("rank: %d Closing the sample data file...\n", my_rank);
	    if ((ret = PIOc_closefile(ncid)))
		ERR(ret);
	}
	
	/* Free the PIO decomposition. */
	if (verbose)
	    printf("rank: %d Freeing PIO decomposition...\n", my_rank);
	if ((ret = PIOc_freedecomp(iosysid, ioid)))
	    ERR(ret);
	
	/* Finalize the IO system. */
	if (verbose)
	    printf("rank: %d Freeing PIO resources...\n", my_rank);
	if ((ret = PIOc_finalize(iosysid)))
	    ERR(ret);

	/* Check the output file. */
	if (!my_rank)
	    for (int fmt = 0; fmt < num_flavors; fmt++)
            {
                sprintf(filename, "example1_%d.nc", fmt);
		if ((ret = check_file(ntasks, filename)))
		    ERR(ret);
            }

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING    
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif    

	if (verbose)
	    printf("rank: %d SUCCESS!\n", my_rank);
	return 0;
    }
Example #7
int main (int argc, char **argv)
{
  char pname[MPI_MAX_PROCESSOR_NAME];

  int iter;
  int counter;
  int c;
  int tnum = 0;
  int resultlen;
  int ret;
  double value;
  extern char *optarg;

  while ((c = getopt (argc, argv, "p:")) != -1) {
    switch (c) {
    case 'p':
      if ((ret = GPTLevent_name_to_code (optarg, &counter)) != 0) {
	printf ("Failure from GPTLevent_name_to_code\n");
	return 1;
      }
      if (GPTLsetoption (counter, 1) < 0) {
	printf ("Failure from GPTLsetoption (%s,1)\n", optarg);
	return 1;
      }
      break;
    default:
      printf ("unknown option %c\n", c);
      printf ("Usage: %s [-p option_name]\n", argv[0]);
      return 2;
    }
  }
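  /* Usage sketch: running with "-p PAPI_TOT_CYC" (an event name also used
   * in the PAPI test later on this page) enables that counter via
   * GPTLsetoption. */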
  
  ret = GPTLsetoption (GPTLabort_on_error, 1);
  ret = GPTLsetoption (GPTLoverhead, 1);
  ret = GPTLsetoption (GPTLnarrowprint, 1);

  if (MPI_Init (&argc, &argv) != MPI_SUCCESS) {
    printf ("Failure from MPI_Init\n");
    return 1;
  }

  ret = GPTLinitialize ();
  ret = GPTLstart ("total");
	 
  ret = MPI_Comm_rank (MPI_COMM_WORLD, &iam);
  ret = MPI_Comm_size (MPI_COMM_WORLD, &nproc);

  ret = MPI_Get_processor_name (pname, &resultlen);
  printf ("Rank %d is running on processor %s\n", iam, pname);

#ifdef THREADED_OMP
  nthreads = omp_get_max_threads ();
#pragma omp parallel for private (iter, ret, tnum)
#endif

  for (iter = 1; iter <= nthreads; iter++) {
#ifdef THREADED_OMP
    tnum = omp_get_thread_num ();
#endif
    printf ("Thread %d of rank %d on processor %s\n", tnum, iam, pname);
    value = sub (iter);
  }

  ret = GPTLstop ("total");
  ret = GPTLpr (iam);

  if (iam == 0) {
    printf ("summary: testing GPTLpr_summary...\n");
    printf ("Number of threads was %d\n", nthreads);
    printf ("Number of tasks was %d\n", nproc);
  }

  // NOTE: if ENABLE_PMPI is set, 2nd pr call below will show some extra send/recv calls
  // due to MPI calls from within GPTLpr_summary_file
  if (GPTLpr_summary (MPI_COMM_WORLD) != 0)
    return 1;

  if (GPTLpr_summary_file (MPI_COMM_WORLD, "timing.summary.duplicate") != 0)
    return 1;

  ret = MPI_Finalize ();

  if (GPTLfinalize () != 0)
    return 1;

  return 0;
}
Example #8
/** @brief Main execution of code.

    Executes the functions to:
    - create a new examplePioClass instance
    - initialize MPI and the ParallelIO libraries
    - create the decomposition for this example
    - create the netCDF output file
    - define the variable in the file
    - write data to the variable in the file using decomposition
    - read the data back from the file using decomposition
    - close the file
    - clean up resources

    The example can be run from the command line (on systems that support it) like this:
    <pre>
    mpiexec -n 4 ./examplePio
    </pre>

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump) for a 4-processor
    run:

    <pre>
    netcdf examplePio_c {
    dimensions:
    x = 16 ;
    variables:
    int foo(x) ;
    data:

    foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
    }
    </pre>
    
    @param [in] argc argument count
    @param [in] argv argument array
    @return 0 for success, error code otherwise.
*/
    int main(int argc, char* argv[])
    {
	/** Set to non-zero to get output to stdout. */
	int verbose = 0;

	/** Zero-based rank of processor. */
	int my_rank;

	/** Number of processors involved in current execution. */
	int ntasks;

	/** Number of processors that will do IO. In this example we
	 * will do IO from all processors. */
	int niotasks;

	/** Stride in the mpi rank between io tasks. Always 1 in this
	 * example. */
	int stride = 1;

	/** Number of the aggregator? Always 0 in this example. */
	int numAggregator = 0;

	/** Rank of the first processor to be used for I/O (the base
	 * argument to PIOc_Init_Intracomm()). */
	int optBase = 1;

	/** Specifies the flavor of netCDF output format. */
	int iotype;

	/** The dimension ID. */
	int pioDimId;

	/** */
	PIO_Offset ista;

	/** */
	PIO_Offset isto;

	/** Array index per processing unit. This is the number of
	 * elements of the data array that will be handled by each
	 * processor. In this example there are 16 data elements. If the
	 * example is run on 4 processors, then arrIdxPerPe will be 4. */
	PIO_Offset arrIdxPerPe;

	/* Length of the dimension in data. */
	int dimLen[1];

	/** The ID for the parallel I/O system. It is set by
	 * PIOc_Init_Intracomm(). It references an internal structure
	 * containing the general IO subsystem data and MPI structure. */
	int pio_io_system;

	/** The ncid of the netCDF file created in this example. */
	int pioFileDesc;

	/** The ID of the netCDF variable in the example file. */
	int pioVar;

	/** The I/O description ID as passed back by PIOc_InitDecomp(). */
	int iodescNCells;

	/** A buffer for sample data. */
	int *dataBuffer;

	/** A buffer for reading data back from the file. */
	int *readBuffer;

	/** A 1-D array which holds the decomposition mapping for this
	 * example. */
	PIO_Offset *compdof;

	/** The example file name. */
	char file_name[] = EXAMPLE_FILENAME;     

	/** Used for command line processing. */
	int c;

	/* Parse command line. */
	while ((c = getopt(argc, argv, "v")) != -1)
	    switch (c)
	    {
	    case 'v':
		verbose++;
		break;
	    default:
		break;
	    }

	/** Return code. */
	int ret;

#ifdef TIMING
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif
    
	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);
	/* Check that a valid number of processors was specified. */
	if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	      ntasks == 8 || ntasks == 16))
	    fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
	if (verbose)
	    printf("%d: ParallelIO Library example1 running on %d processors.\n",
		   my_rank, ntasks);

	/* Initialize the ParallelIO library IO system. */
	iotype = PIO_IOTYPE_NETCDF;

	/* keep things simple - 1 iotask per MPI process */    
	niotasks = ntasks; 

	/* Initialize the IO system. */
	if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, stride, optBase,
				       PIO_REARR_SUBSET, &pio_io_system)))
	    ERR(ret);

	/* Finalize the IO system. */
	if ((ret = PIOc_finalize(pio_io_system)))
	    ERR(ret);

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING    
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif    

	return 0;
    }
Example #9
int main (int argc, char **argv)
{
  int ret;
  int i, code;
  long long pc[1]; /* papi counters */
  double sum;

  printf ("testpapi: Testing PAPI interface...\n");

  printf ("%s: testing getting event code for PAPI_TOT_CYC...\n", argv[0]);
  if ((ret = GPTLevent_name_to_code ("PAPI_TOT_CYC", &code)) != 0) {
    printf ("Failure\n");
    return 2;
  }
  printf ("Success\n");

  printf ("%s: testing GPTLsetoption(PAPI_TOT_CYC,1)...\n", argv[0]);
  if (GPTLsetoption (code, 1) != 0) {
    printf ("Failure\n");
    return 3;
  }
  printf ("Success\n");

  printf ("%s: testing GPTLinitialize\n", argv[0]);
  if ((ret = GPTLinitialize ()) != 0) {
    printf ("Failure\n");
    return 3;
  }
  printf ("Success\n");

  printf ("%s: testing GPTLstart\n", argv[0]);
  if ((ret = GPTLstart ("sum")) != 0) {
    printf ("Unexpected failure from GPTLstart(\"sum\")\n");
    return 3;
  }
  printf ("Success\n");

  sum = 0.;
  for (i = 0; i < 1000000; ++i) 
    sum += (double) i;
  printf ("%s: testing GPTLstop\n", argv[0]);
  if ((ret = GPTLstop ("sum")) != 0) {
    printf ("Unexpected failure from GPTLstop(\"sum\")\n");
    return 3;
  }
  printf ("Success\n");

  printf ("%s: testing GPTLquerycounters...\n", argv[0]);
  if (GPTLquerycounters ("sum", 0, pc) != 0) {
    printf ("Failure\n");
    return 4;
  }
  printf ("sum=%g\n",sum);
  printf ("%s: testing reasonableness of PAPI counters...\n", argv[0]);
  if (pc[0] < 1 || pc[0] > 1.e8) {
    printf ("Suspicious PAPI_TOT_CYC value=%lld for 1e6 additions\n", pc[0]);
    return 5;
  } else {
    printf ("Success\n");
  }
  printf ("%s: all tests successful\n", argv[0]);
  return 0;
}
Example #10
/** Run Tests for NetCDF-4 Functions.
 *
 * @param argc argument count
 * @param argv array of arguments
 */
int
main(int argc, char **argv)
{
    int verbose = 1;
    
    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** Different output flavors. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_nc4_pnetcdf.nc",
							  "test_nc4_classic.nc",
							  "test_nc4_serial4.nc",
							  "test_nc4_parallel4.nc"};
	
    /** Number of processors that will do IO. In this test we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the mpi rank between io tasks. Always 1 in this
     * test. */
    int ioproc_stride = 1;

    /** Number of the aggregator? Always 0 in this test. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Array index per processing unit. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. */
    int iosysid;

    /** The ncid of the netCDF file. */
    int ncid = 0;

    /** The ID of the netCDF variable. */
    int varid;

    /** Storage of netCDF-4 files (contiguous vs. chunked). */
    int storage;

    /** Chunksizes set in the file. */
    PIO_Offset my_chunksize[NDIM];
    
    /** The shuffle filter setting in the netCDF-4 test file. */
    int shuffle;
    
    /** Non-zero if deflate set for the variable in the netCDF-4 test file. */
    int deflate;

    /** The deflate level set for the variable in the netCDF-4 test file. */
    int deflate_level;

    /** Endianness of variable. */
    int endianness;

    /* Size of the var chunk cache. */
    PIO_Offset var_cache_size;

    /* Number of elements in var cache. */
    PIO_Offset var_cache_nelems;

    /* Var cache preemption. */    
    float var_cache_preemption;
    
    /** The I/O description ID. */
    int ioid;

    /** A buffer for sample data. */
    float *buffer;

    /** A buffer for reading data back from the file. */
    int *read_buffer;

    /** The decomposition mapping. */
    PIO_Offset *compdof;

    /** Return code. */
    int ret;

    /** Index for loops. */
    int fmt, d, d1, i;

    /** For setting the chunk cache. */
    PIO_Offset chunk_cache_size = 1024*1024;
    PIO_Offset chunk_cache_nelems = 1024;
    float chunk_cache_preemption = 0.5;

    /* For reading the chunk cache. */
    PIO_Offset chunk_cache_size_in;
    PIO_Offset chunk_cache_nelems_in;
    float chunk_cache_preemption_in;
    
    char varname[NC_MAX_NAME + 1];
    
#ifdef TIMING    
    /* Initialize the GPTL timing library. */
    if ((ret = GPTLinitialize ()))
	return ret;
#endif    
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
    if (verbose)
	printf("%d: ParallelIO Library test_nc4 running on %d processors.\n",
	       my_rank, ntasks);

    /* keep things simple - 1 iotask per MPI process */    
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
	
    /* Create the PIO decomposition for this test. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with INIT. */
    if ((ret = MPE_Log_event(event_num[END][INIT], 0, "end init")))
	MPIERR(ret);
#endif /* HAVE_MPE */

    /* How many flavors will we be running for? */
    int num_flavors = 0;
    int fmtidx = 0;
#ifdef _PNETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_PNETCDF;
#endif
#ifdef _NETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_NETCDF;
#endif
#ifdef _NETCDF4
    num_flavors += 2;
    format[fmtidx++] = PIO_IOTYPE_NETCDF4C;
    format[fmtidx] = PIO_IOTYPE_NETCDF4P;
#endif
    
    /* Use PIO to create the example file in each of the
     * available ways. */
    for (fmt = 0; fmt < num_flavors; fmt++) 
    {
#ifdef HAVE_MPE
	/* Log with MPE that we are starting CREATE. */
	if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	if (verbose)
	    printf("rank: %d Setting chunk cache for file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);

	/* Try to set the chunk cache with invalid preemption to check error handling. */
	chunk_cache_preemption = 50.0;
	ret = PIOc_set_chunk_cache(iosysid, format[fmt], chunk_cache_size,
				   chunk_cache_nelems, chunk_cache_preemption);
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if (ret != NC_EINVAL)
		ERR(ERR_AWFUL);
	}
	else
	{
	    if (ret != NC_ENOTNC4)
		ERR(ERR_AWFUL);
	}

	/* Try to set the chunk cache. */
	chunk_cache_preemption = 0.5;
	ret = PIOc_set_chunk_cache(iosysid, format[fmt], chunk_cache_size,
				   chunk_cache_nelems, chunk_cache_preemption);

	/* Should only have worked for netCDF-4 iotypes. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if (ret != PIO_NOERR)
		ERR(ret);
	}
	else
	{
	    if (ret != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}

	/* Now check the chunk cache. */
	ret = PIOc_get_chunk_cache(iosysid, format[fmt], &chunk_cache_size_in,
				   &chunk_cache_nelems_in, &chunk_cache_preemption_in);

	/* Should only have worked for netCDF-4 iotypes. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    /* Check that there was no error. */
	    if (ret != PIO_NOERR)
		ERR(ret);

	    /* Check that we got the correct values. */
	    if (chunk_cache_size_in != chunk_cache_size || chunk_cache_nelems_in != chunk_cache_nelems ||
		chunk_cache_preemption_in != chunk_cache_preemption)
		ERR(ERR_AWFUL);
	}
	else
	{
	    if (ret != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}

	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);

	/* Set error handling. */
	PIOc_Set_File_Error_Handling(ncid, PIO_BCAST_ERROR);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}
	if (verbose)
	    printf("rank: %d Defining netCDF variable %s, ndims %d\n", my_rank, VAR_NAME, NDIM);
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);

	/* For netCDF-4 files, set the chunksize to improve performance. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if (verbose)
		printf("rank: %d Defining chunksizes\n", my_rank);
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
		ERR(ret);

	    /** Check that the inq_varname function works. */
	    if (verbose)
	    	printf("rank: %d Checking varname\n", my_rank);
	    ret = PIOc_inq_varname(ncid, 0, varname);
	    printf("rank: %d ret: %d varname: %s\n", my_rank, ret, varname);
	    
	    /** Check that the inq_var_chunking function works. */
	    if (verbose)
		printf("rank: %d Checking chunksizes\n");
	    if ((ret = PIOc_inq_var_chunking(ncid, 0, &storage, my_chunksize)))
	    	ERR(ret);
	    if (verbose)
	    {
		printf("rank: %d ret: %d storage: %d\n", my_rank, ret, storage);
		for (d1 = 0; d1 < NDIM; d1++)
		{
		    printf("chunksize[%d]=%d\n", d1, my_chunksize[d1]);
		}
	    }
	    
	    /** Check the answers. */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4C ||
		format[fmt] == PIO_IOTYPE_NETCDF4P)
	    {
		if (storage != NC_CHUNKED)
		    ERR(ERR_AWFUL);
		for (d1 = 0; d1 < NDIM; d1++)
		    if (my_chunksize[d1] != chunksize[d1])
		    	ERR(ERR_AWFUL);
	    }

	    /* Check that the inq_var_deflate function works. */
	    if ((ret = PIOc_inq_var_deflate(ncid, 0, &shuffle, &deflate, &deflate_level)))
	    	ERR(ret);

	    /** For serial netCDF-4, deflate is turned on by default. */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4C)
		if (shuffle || !deflate || deflate_level != 1)
		    ERR(ERR_AWFUL);

	    /* For parallel netCDF-4, no compression available. :-( */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4P)
		if (shuffle || deflate)
		    ERR(ERR_AWFUL);

	    /* Check setting the chunk cache for the variable. */
	    printf("rank: %d PIOc_set_var_chunk_cache...\n", my_rank);
	    if ((ret = PIOc_set_var_chunk_cache(ncid, 0, VAR_CACHE_SIZE, VAR_CACHE_NELEMS,
						VAR_CACHE_PREEMPTION)))
	    	ERR(ret);

	    /* Check getting the chunk cache values for the variable. */
	    printf("rank: %d PIOc_get_var_chunk_cache...\n", my_rank);	    
	    if ((ret = PIOc_get_var_chunk_cache(ncid, 0, &var_cache_size, &var_cache_nelems,
						&var_cache_preemption)))
	    	ERR(ret);
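	    /* Also check that a simple dimension length inquiry works
	     * on this file. */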
	    PIO_Offset len;
	    if ((ret = PIOc_inq_dimlen(ncid, 0, &len)))
	    	ERR(ret);

	    /* Check that we got expected values. */
	    if (verbose)
		printf("rank: %d var_cache_size = %zu\n", my_rank, var_cache_size);
	    if (var_cache_size != VAR_CACHE_SIZE)
		ERR(ERR_AWFUL);
	    if (var_cache_nelems != VAR_CACHE_NELEMS)
		ERR(ERR_AWFUL);
	    if (var_cache_preemption != VAR_CACHE_PREEMPTION)
		ERR(ERR_AWFUL);
	}
	else
	{
	    /* Trying to set or inq netCDF-4 settings for non-netCDF-4
	     * files results in the PIO_ENOTNC4 error. */
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_inq_var_chunking(ncid, 0, &storage, my_chunksize)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_inq_var_deflate(ncid, 0, &shuffle, &deflate, &deflate_level))
		!= PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_def_var_endian(ncid, 0, 1)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_inq_var_endian(ncid, 0, &endianness)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_set_var_chunk_cache(ncid, 0, VAR_CACHE_SIZE, VAR_CACHE_NELEMS,
						VAR_CACHE_PREEMPTION)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_get_var_chunk_cache(ncid, 0, &var_cache_size, &var_cache_nelems,
						&var_cache_preemption)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_set_chunk_cache(iosysid, format[fmt], chunk_cache_size, chunk_cache_nelems,
					    chunk_cache_preemption)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_get_chunk_cache(iosysid, format[fmt], &chunk_cache_size,
					    &chunk_cache_nelems, &chunk_cache_preemption)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}
	
	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);
    }
	
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    
    

    return 0;
}
Example #11
/* Write, then read, a simple example with darrays.

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump):

    <pre>
netcdf darray_no_async_iotype_1 {
dimensions:
	unlimted = UNLIMITED ; // (2 currently)
	x = 4 ;
	y = 4 ;
variables:
	int foo(unlimted, x, y) ;
data:

 foo =
  42, 42, 42, 42,
  43, 43, 43, 43,
  44, 44, 44, 44,
  45, 45, 45, 45,
  142, 142, 142, 142,
  143, 143, 143, 143,
  144, 144, 144, 144,
  145, 145, 145, 145 ;
}
    </pre>

*/
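/* A typical invocation, assuming an MPI launcher named mpiexec and an
 * executable named darray_no_async (both names vary by system):
 *
 *     mpiexec -n 4 ./darray_no_async
 */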
    int main(int argc, char* argv[])
    {
	int my_rank;  /* Zero-based rank of processor. */
	int ntasks;   /* Number of processors involved in current execution. */
	int ioproc_stride = 1;	    /* Stride in the mpi rank between io tasks. */
	int ioproc_start = 0; 	    /* Rank of first task to be used for I/O. */
        PIO_Offset elements_per_pe; /* Array elements per processing unit. */
	int iosysid;  /* The ID for the parallel I/O system. */	
	int ncid;     /* The ncid of the netCDF file. */
	int dimid[NDIM3];    /* The dimension ID. */
	int varid;    /* The ID of the netCDF variable. */
	int ioid;     /* The I/O description ID. */
        char filename[NC_MAX_NAME + 1]; /* Test filename. */
        int num_flavors = 0;            /* Number of iotypes available in this build. */
	int format[NUM_NETCDF_FLAVORS]; /* Different output flavors. */
	int ret;                        /* Return value. */

#ifdef TIMING
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif

	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);

	/* Check that a valid number of processors was specified. */
	if (ntasks != TARGET_NTASKS)
	{
	    fprintf(stderr, "Number of processors must be %d!\n", TARGET_NTASKS);
	    return ERR_BAD;
	}
        printf("%d: ParallelIO Library darray_no_async example running on %d processors.\n",
               my_rank, ntasks);

        /* Turn on logging. */
        if ((ret = PIOc_set_log_level(LOG_LEVEL)))
            return ret;
        
	/* Initialize the PIO IO system. This specifies how many and
	 * which processors are involved in I/O. */
	if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, 1, ioproc_stride,
				       ioproc_start, PIO_REARR_BOX, &iosysid)))
	    ERR(ret);

	/* Describe the decomposition. */
	elements_per_pe = DIM_LEN_X * DIM_LEN_Y / TARGET_NTASKS;

        /* Allocate and initialize array of decomposition mapping. */
	PIO_Offset compdof[elements_per_pe];
	for (int i = 0; i < elements_per_pe; i++)
	    compdof[i] = my_rank * elements_per_pe + i;
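	/* Rank r thus owns the contiguous block of elements
	 * [r * elements_per_pe, (r + 1) * elements_per_pe). */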

	/* Create the PIO decomposition for this example. Since this
         * is a variable with an unlimited dimension, we want to
         * create a 2-D decomposition which represents one record. */
        printf("rank: %d Creating decomposition...\n", my_rank);
	if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3 - 1, &dim_len[1], elements_per_pe,
				   compdof, &ioid, 0, NULL, NULL)))
	    ERR(ret);
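	/* Note that the decomposition covers only the x and y dimensions
	 * (NDIM3 - 1 dims, starting at &dim_len[1]); the record dimension
	 * is selected later with PIOc_setframe(). */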

        /* The number of flavors may change with the build parameters. */
#ifdef _PNETCDF
        format[num_flavors++] = PIO_IOTYPE_PNETCDF;
#endif
        format[num_flavors++] = PIO_IOTYPE_NETCDF;
#ifdef _NETCDF4
        format[num_flavors++] = PIO_IOTYPE_NETCDF4C;
        format[num_flavors++] = PIO_IOTYPE_NETCDF4P;
#endif

	/* Use PIO to create the example file with each of the
	 * available I/O flavors. */
	for (int fmt = 0; fmt < num_flavors; fmt++)
	{
            /* Create a filename. */
            sprintf(filename, "darray_no_async_iotype_%d.nc", format[fmt]);

	    /* Create the netCDF output file. */
            printf("rank: %d Creating sample file %s with format %d...\n",
                   my_rank, filename, format[fmt]);
	    if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename, PIO_CLOBBER)))
		ERR(ret);

	    /* Define netCDF dimension and variable. */
            printf("rank: %d Defining netCDF metadata...\n", my_rank);
            for (int d = 0; d < NDIM3; d++)
                if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d])))
                    ERR(ret);
	    if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM3, dimid, &varid)))
	        ERR(ret);
	    if ((ret = PIOc_enddef(ncid)))
	        ERR(ret);

	    /* Allocate storage for sample data. */
            int buffer[elements_per_pe];

            /* Write each timestep. */
            for (int t = 0; t < NUM_TIMESTEPS; t++)
            {
                /* Create some data for this timestep. */
                for (int i = 0; i < elements_per_pe; i++)
                    buffer[i] = 100 * t + START_DATA_VAL + my_rank;
                
                /* Write data to the file. */
                printf("rank: %d Writing sample data...\n", my_rank);
                if ((ret = PIOc_setframe(ncid, varid, t)))
                    ERR(ret);
                if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, buffer, NULL)))
                    ERR(ret);
            }

            /* This will cause all data to be written to disk. */
            if ((ret = PIOc_sync(ncid)))
	        ERR(ret);

	    /* Close the netCDF file. */
            printf("rank: %d Closing the sample data file...\n", my_rank);
	    if ((ret = PIOc_closefile(ncid)))
		ERR(ret);

            /* Check the output file. */
            /* if ((ret = check_file(iosysid, ntasks, filename, format[fmt], elements_per_pe, */
            /*                       my_rank, ioid))) */
            /*     ERR(ret); */
	}

	/* Free the PIO decomposition. */
        printf("rank: %d Freeing PIO decomposition...\n", my_rank);
	if ((ret = PIOc_freedecomp(iosysid, ioid)))
	    ERR(ret);

	/* Finalize the IO system. */
        printf("rank: %d Freeing PIO resources...\n", my_rank);
	if ((ret = PIOc_finalize(iosysid)))
	    ERR(ret);

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif

        printf("rank: %d SUCCESS!\n", my_rank);
	return 0;
    }
Example #12
/* Write, then read, a simple example with darrays.

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump):

    <pre>
netcdf darray_no_async_iotype_1 {
dimensions:
	unlimted = UNLIMITED ; // (2 currently)
	x = 4 ;
	y = 4 ;
variables:
	int foo(unlimted, x, y) ;
data:

 foo =
  42, 42, 42, 42,
  43, 43, 43, 43,
  44, 44, 44, 44,
  45, 45, 45, 45,
  142, 142, 142, 142,
  143, 143, 143, 143,
  144, 144, 144, 144,
  145, 145, 145, 145 ;
}
    </pre>

*/
    int main(int argc, char* argv[])
    {
	int my_rank;  /* Zero-based rank of processor. */
	int ntasks;   /* Number of processors involved in current execution. */
        int iosysid; /* The ID for the parallel I/O system. */
	/* int ncid;     /\* The ncid of the netCDF file. *\/ */
	/* int dimid[NDIM3];    /\* The dimension ID. *\/ */
	/* int varid;    /\* The ID of the netCDF variable. *\/ */
        /* char filename[NC_MAX_NAME + 1]; /\* Test filename. *\/ */
        /* int num_flavors = 0;            /\* Number of iotypes available in this build. *\/ */
	/* int format[NUM_NETCDF_FLAVORS]; /\* Different output flavors. *\/ */
	int ret;                        /* Return value. */

#ifdef TIMING
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif
        
	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);

	/* Check that a valid number of processors was specified. */
        printf("%d: ParallelIO Library darray_async example running on %d processors.\n",
               my_rank, ntasks);
	if (ntasks != TARGET_NTASKS)
        {
	    fprintf(stderr, "Number of processors must be %d!\n", TARGET_NTASKS);
            return ERR_BAD;
        }

        /* Turn on logging. */
        if ((ret = PIOc_set_log_level(LOG_LEVEL)))
            return ret;

        /* Num procs for computation. */
        int num_procs2[COMPONENT_COUNT] = {4};
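        /* With the NULL proc lists used below, the first NUM_IO_TASKS
         * ranks do I/O and the remaining ranks form the computation
         * component, so these sizes plus NUM_IO_TASKS should equal the
         * total number of tasks. */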
        
        /* Is the current process a computation task? */
        int comp_task = my_rank < NUM_IO_TASKS ? 0 : 1;

        /* Initialize the IO system. */
        if ((ret = PIOc_init_async(MPI_COMM_WORLD, NUM_IO_TASKS, NULL, COMPONENT_COUNT,
                                   num_procs2, NULL, NULL, NULL, PIO_REARR_BOX, &iosysid)))
            ERR(ret);
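        /* Note: the dedicated I/O task(s) do not return from
         * PIOc_init_async() until the computation tasks call
         * PIOc_finalize(); the async message loop runs inside it. */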


        /* The rest of the code executes on computation tasks only. As
         * PIO functions are called on the computation tasks, the
         * async system will call them on the IO task. When the
         * computation tasks call PIOc_finalize(), the IO task will get
         * a message to shut itself down. */
        if (comp_task)
        {
            /* PIO_Offset elements_per_pe; /\* Array elements per processing unit. *\/ */
            /* int ioid;     /\* The I/O description ID. *\/ */
            
            /* /\* How many elements on each computation task? *\/ */
            /* elements_per_pe = DIM_LEN_X * DIM_LEN_Y / NUM_COMP_TASKS; */

            /* /\* Allocate and initialize array of decomposition mapping. *\/ */
            /* PIO_Offset compdof[elements_per_pe]; */
            /* for (int i = 0; i < elements_per_pe; i++) */
            /*     compdof[i] = my_rank * elements_per_pe + i; */

            /* /\* Create the PIO decomposition for this example. Since */
            /*    this is a variable with an unlimited dimension, we want */
            /*    to create a 2-D decomposition which represents one */
            /*    record. *\/ */
            /* printf("rank: %d Creating decomposition...\n", my_rank); */
            /* if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3 - 1, &dim_len[1], elements_per_pe, */
            /*                             compdof, &ioid, 0, NULL, NULL))) */
            /*     ERR(ret); */

/*         /\* The number of flavors may change with the build parameters. *\/ */
/* #ifdef _PNETCDF */
/*         format[num_flavors++] = PIO_IOTYPE_PNETCDF; */
/* #endif */
/*         format[num_flavors++] = PIO_IOTYPE_NETCDF; */
/* #ifdef _NETCDF4 */
/*         format[num_flavors++] = PIO_IOTYPE_NETCDF4C; */
/*         format[num_flavors++] = PIO_IOTYPE_NETCDF4P; */
/* #endif */

/* 	/\* Use PIO to create the example file in each of the four */
/* 	 * available ways. *\/ */
/* 	for (int fmt = 0; fmt < num_flavors; fmt++) */
/* 	{ */
/*             /\* Create a filename. *\/ */
/*             sprintf(filename, "darray_no_async_iotype_%d.nc", format[fmt]); */

/* 	    /\* Create the netCDF output file. *\/ */
/*             printf("rank: %d Creating sample file %s with format %d...\n", */
/*                    my_rank, filename, format[fmt]); */
/* 	    if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename, PIO_CLOBBER))) */
/* 		ERR(ret); */

/* 	    /\* Define netCDF dimension and variable. *\/ */
/*             printf("rank: %d Defining netCDF metadata...\n", my_rank); */
/*             for (int d = 0; d < NDIM3; d++) */
/*                 if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d]))) */
/*                     ERR(ret); */
/* 	    if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM3, dimid, &varid))) */
/* 	        ERR(ret); */
/* 	    if ((ret = PIOc_enddef(ncid))) */
/* 	        ERR(ret); */

/* 	    /\* Allocate storage for sample data. *\/ */
/*             int buffer[elements_per_pe]; */

/*             /\* Write each timestep. *\/ */
/*             for (int t = 0; t < NUM_TIMESTEPS; t++) */
/*             { */
/*                 /\* Create some data for this timestep. *\/ */
/*                 for (int i = 0; i < elements_per_pe; i++) */
/*                     buffer[i] = 100 * t + START_DATA_VAL + my_rank; */
                
/*                 /\* Write data to the file. *\/ */
/*                 printf("rank: %d Writing sample data...\n", my_rank); */
/*                 if ((ret = PIOc_setframe(ncid, varid, t))) */
/*                     ERR(ret); */
/*                 if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, buffer, NULL))) */
/*                     ERR(ret); */
/*             } */

/*             /\* This will cause all data to be written to disk. *\/ */
/*             if ((ret = PIOc_sync(ncid))) */
/* 	        ERR(ret); */

/* 	    /\* Close the netCDF file. *\/ */
/*             printf("rank: %d Closing the sample data file...\n", my_rank); */
/* 	    if ((ret = PIOc_closefile(ncid))) */
/* 		ERR(ret); */

/*             /\* Check the output file. *\/ */
/*             /\* if ((ret = check_file(iosysid, ntasks, filename, format[fmt], elements_per_pe, *\/ */
/*             /\*                       my_rank, ioid))) *\/ */
/*             /\*     ERR(ret); *\/ */
/* 	} */

            /* Free the PIO decomposition. */
            /* printf("rank: %d Freeing PIO decomposition...\n", my_rank); */
            /* if ((ret = PIOc_freedecomp(iosysid, ioid))) */
            /*     ERR(ret); */

            /* Finalize the IO system. Only call this from the computation tasks. */
            printf("%d %s Freeing PIO resources\n", my_rank, TEST_NAME);
            if ((ret = PIOc_finalize(iosysid)))
                ERR(ret);
        } /* endif comp_task */

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif

        printf("rank: %d SUCCESS!\n", my_rank);
	return 0;
    }