Example 1
/** @brief Main execution of code.

    Executes the functions to:
    - create a new examplePioClass instance
    - initialize MPI and the ParallelIO libraries
    - create the decomposition for this example
    - create the netCDF output file
    - define the variable in the file
    - write data to the variable in the file using decomposition
    - read the data back from the file using decomposition
    - close the file
    - clean up resources

    The example can be run from the command line (on systems that support it) like this:
    <pre>
    mpiexec -n 4 ./examplePio
    </pre>

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump) for a 4-processor
    run:

    <pre>
    netcdf examplePio_c {
    dimensions:
    x = 16 ;
    variables:
    int foo(x) ;
    data:

    foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
    }
    </pre>
    
    @param [in] argc argument count
    @param [in] argv argument array
    @retval 0 Returned on success.
*/
    int main(int argc, char* argv[])
    {
	/** Set to non-zero to get output to stdout. */
	int verbose = 0;

	/** Zero-based rank of processor. */
	int my_rank;

	/** Number of processors involved in current execution. */
	int ntasks;

	/** Number of processors that will do IO. In this example we
	 * will do IO from all processors. */
	int niotasks;

	/** Stride in the mpi rank between io tasks. Always 1 in this
	 * example. */
	int stride = 1;

	/** Number of the aggregator? Always 0 in this example. */
	int numAggregator = 0;

	/** The index of the first IO task, passed to PIOc_Init_Intracomm(). */
	int optBase = 1;

	/** Specifies the flavor of netCDF output format. */
	int iotype;

	/** The dimension ID. */
	int pioDimId;

	/** Start offset (declared but not used in this example). */
	PIO_Offset ista;

	/** End offset (declared but not used in this example). */
	PIO_Offset isto;

	/** Array index per processing unit. This is the number of
	 * elements of the data array that will be handled by each
	 * processor. In this example there are 16 data elements. If the
	 * example is run on 4 processors, then arrIdxPerPe will be 4. */
	PIO_Offset arrIdxPerPe;

	/* Length of the dimension in data. */
	int dimLen[1];

	/** The ID for the parallel I/O system. It is set by
	 * PIOc_Init_Intracomm(). It references an internal structure
	 * containing the general IO subsystem data and MPI structure. */
	int pio_io_system;

	/** The ncid of the netCDF file created in this example. */
	int pioFileDesc;

	/** The ID of the netCDF variable in the example file. */
	int pioVar;

	/** The I/O description ID as passed back by PIOc_InitDecomp(). */
	int iodescNCells;

	/** A buffer for sample data. */
	int *dataBuffer;

	/** A buffer for reading data back from the file. */
	int *readBuffer;

	/** A 1-D array which holds the decomposition mapping for this
	 * example. */
	PIO_Offset *compdof;

	/** The example file name. */
	char file_name[] = EXAMPLE_FILENAME;     

	/** Used for command line processing. */
	int c;

	/* Parse command line. */
	while ((c = getopt(argc, argv, "v")) != -1)
	    switch (c)
	    {
	    case 'v':
		verbose++;
		break;
	    default:
		break;
	    }

	/* Return code. */
	int ret;

#ifdef TIMING
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif
    
	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);
	/* Check that a valid number of processors was specified. */
	if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	      ntasks == 8 || ntasks == 16))
	    fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
	if (verbose)
	    printf("%d: ParallelIO Library example1 running on %d processors.\n",
		   my_rank, ntasks);

	/* Initialize the ParallelIO library IO system. */
	iotype = PIO_IOTYPE_NETCDF;

	/* keep things simple - 1 iotask per MPI process */    
	niotasks = ntasks; 

	/* Initialize the IO system. */
	if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, stride, optBase,
				       PIO_REARR_SUBSET, &pio_io_system)))
	    ERR(ret);

	/* Finalize the IO system. */
	if ((ret = PIOc_finalize(pio_io_system)))
	    ERR(ret);

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING    
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif    

	return 0;
    }
Example 2
/** @brief Main execution of code.

    Executes the functions to:
    - create a new examplePioClass instance
    - initialize MPI and the ParallelIO libraries
    - create the decomposition for this example
    - create the netCDF output file
    - define the variable in the file
    - write data to the variable in the file using decomposition
    - read the data back from the file using decomposition
    - close the file
    - clean up resources

    The example can be run from the command line (on systems that support it) like this:
    <pre>
    mpiexec -n 4 ./examplePio
    </pre>

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump) for a 4-processor
    run:

    <pre>
    netcdf examplePio_c {
    dimensions:
    x = 16 ;
    variables:
    int foo(x) ;
    data:

    foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
    }
    </pre>
    
    @param [in] argc argument count
    @param [in] argv argument array
    @retval 0 Returned on success.
*/
    int main(int argc, char* argv[])
    {
	/** Set to non-zero to get output to stdout. */
	int verbose = 0;

	/** Zero-based rank of processor. */
	int my_rank;

	/** Number of processors involved in current execution. */
	int ntasks;

	/** Different output flavors. The example file is written (and
	 * then read) four times. The first two flavors,
	 * parallel-netCDF and netCDF serial, both produce a netCDF
	 * classic format file (but with different libraries). The
	 * last two produce netCDF4/HDF5 format files, written with
	 * and without using netCDF-4 parallel I/O. */
	int format[NUM_NETCDF_FLAVORS];

	/** Number of processors that will do IO. In this example we
	 * will do IO from all processors. */
	int niotasks;

	/** Stride in the mpi rank between io tasks. Always 1 in this
	 * example. */
	int ioproc_stride = 1;

	/** Zero based rank of first processor to be used for I/O. */
	int ioproc_start = 0;

	/** The dimension ID. */
	int dimid;

	/** Array index per processing unit. This is the number of
	 * elements of the data array that will be handled by each
	 * processor. In this example there are 16 data elements. If the
	 * example is run on 4 processors, then elements_per_pe will be 4. */
	PIO_Offset elements_per_pe;

	/* Length of the dimensions in the data. This simple example
	 * uses one-dimensional data. The length along that dimension
	 * is DIM_LEN (16). */
	int dim_len[1] = {DIM_LEN};

	/** The ID for the parallel I/O system. It is set by
	 * PIOc_Init_Intracomm(). It references an internal structure
	 * containing the general IO subsystem data and MPI
	 * structure. It is passed to PIOc_finalize() to free
	 * associated resources, after all I/O, but before
	 * MPI_Finalize is called. */
	int iosysid;

	/** The ncid of the netCDF file created in this example. */
	int ncid;

	/** The ID of the netCDF variable in the example file. */
	int varid;

	/** The I/O description ID as passed back by PIOc_InitDecomp()
	 * and freed in PIOc_freedecomp(). */
	int ioid;

	/** A buffer for sample data.  The size of this array will
	 * vary depending on how many processors are involved in the
	 * execution of the example code. Its length will be the same
	 * as elements_per_pe.*/
	int *buffer;

	/** A 1-D array which holds the decomposition mapping for this
	 * example. The size of this array will vary depending on how
	 * many processors are involved in the execution of the
	 * example code. Its length will be the same as
	 * elements_per_pe. */
	PIO_Offset *compdof;

        /** Test filename. */
        char filename[NC_MAX_NAME + 1];

        /** The number of netCDF flavors available in this build. */
        int num_flavors = 0;
            
	/** Used for command line processing. */
	int c;

	/** Return value. */
	int ret;

	/* Parse command line. */
	while ((c = getopt(argc, argv, "v")) != -1)
	    switch (c)
	    {
	    case 'v':
		verbose++;
		break;
	    default:
		break;
	    }

#ifdef TIMING    
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif    
    
	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);

	/* Check that a valid number of processors was specified. */
	if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	      ntasks == 8 || ntasks == 16))
	    fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
	if (verbose)
	    printf("%d: ParallelIO Library example1 running on %d processors.\n",
		   my_rank, ntasks);

	/* keep things simple - 1 iotask per MPI process */    
	niotasks = ntasks;

        /* Turn on logging if available. */
        /* PIOc_set_log_level(4); */

        /* Change error handling to return errors. */
        if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL)))
            return ret;
        
	/* Initialize the PIO IO system. This specifies how
	 * many and which processors are involved in I/O. */
	if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				       ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	    ERR(ret);

	/* Describe the decomposition. This is a 1-based array, so add 1! */
	elements_per_pe = DIM_LEN / ntasks;
	if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	    return PIO_ENOMEM;
	for (int i = 0; i < elements_per_pe; i++)
	    compdof[i] = my_rank * elements_per_pe + i + 1;
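	/* For instance, on the 4-task run shown in the header comment,
	 * elements_per_pe is 4 and rank 1 gets compdof = {5, 6, 7, 8},
	 * the second quarter of the 16-element array (1-based). */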
	
	/* Create the PIO decomposition for this example. */
	if (verbose)
	    printf("rank: %d Creating decomposition...\n", my_rank);
	if ((ret = PIOc_InitDecomp(iosysid, PIO_INT, NDIM, dim_len, (PIO_Offset)elements_per_pe,
				   compdof, &ioid, NULL, NULL, NULL)))
	    ERR(ret);
	free(compdof);

        /* The number of flavors may change with the build parameters. */
#ifdef _PNETCDF
        format[num_flavors++] = PIO_IOTYPE_PNETCDF;
#endif
        format[num_flavors++] = PIO_IOTYPE_NETCDF;
#ifdef _NETCDF4
        format[num_flavors++] = PIO_IOTYPE_NETCDF4C;
        format[num_flavors++] = PIO_IOTYPE_NETCDF4P;
#endif
	
	/* Use PIO to create the example file with each of the
	 * available I/O flavors. */
	for (int fmt = 0; fmt < num_flavors; fmt++) 
	{
            /* Create a filename. */
            sprintf(filename, "example1_%d.nc", fmt);
            
	    /* Create the netCDF output file. */
	    if (verbose)
		printf("rank: %d Creating sample file %s with format %d...\n",
		       my_rank, filename, format[fmt]);
	    if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename,
				       PIO_CLOBBER)))
		ERR(ret);
	
	    /* Define netCDF dimension and variable. */
	    if (verbose)
		printf("rank: %d Defining netCDF metadata...\n", my_rank);
	    if ((ret = PIOc_def_dim(ncid, DIM_NAME, (PIO_Offset)dim_len[0], &dimid)))
		ERR(ret);
	    if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, &dimid, &varid)))
		ERR(ret);
	    if ((ret = PIOc_enddef(ncid)))
		ERR(ret);
	
	    /* Prepare sample data. */
	    if (!(buffer = malloc(elements_per_pe * sizeof(int))))
	        return PIO_ENOMEM;
	    for (int i = 0; i < elements_per_pe; i++)
	        buffer[i] = START_DATA_VAL + my_rank;
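	    /* With a START_DATA_VAL of 42, rank 0 writes 42s, rank 1 writes
	     * 43s, and so on, which matches the ncdump output shown in the
	     * header comment for a 4-task run. */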

	    /* Write data to the file. */
	    if (verbose)
	        printf("rank: %d Writing sample data...\n", my_rank);
	    if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
	    			     buffer, NULL)))
	        ERR(ret);
	    if ((ret = PIOc_sync(ncid)))
	        ERR(ret);

	    /* Free buffer space used in this example. */
	    free(buffer);
	
	    /* Close the netCDF file. */
	    if (verbose)
		printf("rank: %d Closing the sample data file...\n", my_rank);
	    if ((ret = PIOc_closefile(ncid)))
		ERR(ret);
	}
	
	/* Free the PIO decomposition. */
	if (verbose)
	    printf("rank: %d Freeing PIO decomposition...\n", my_rank);
	if ((ret = PIOc_freedecomp(iosysid, ioid)))
	    ERR(ret);
	
	/* Finalize the IO system. */
	if (verbose)
	    printf("rank: %d Freeing PIO resources...\n", my_rank);
	if ((ret = PIOc_finalize(iosysid)))
	    ERR(ret);

	/* Check the output file. */
	if (!my_rank)
	    for (int fmt = 0; fmt < num_flavors; fmt++)
            {
                sprintf(filename, "example1_%d.nc", fmt);
		if ((ret = check_file(ntasks, filename)))
		    ERR(ret);
            }

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING    
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif    

	if (verbose)
	    printf("rank: %d SUCCESS!\n", my_rank);
	return 0;
    }
Example 3
/** Run async tests. */
int
main(int argc, char **argv)
{
    int my_rank; /* Zero-based rank of processor. */
    int ntasks; /* Number of processors involved in current execution. */
    int iosysid[COMPONENT_COUNT]; /* The ID for the parallel I/O system. */
    int flv; /* Index for loop of PIO netcdf flavors. */
    int ret; /* Return code. */

    int flavor[NUM_FLAVORS] = {PIO_IOTYPE_PNETCDF, PIO_IOTYPE_NETCDF,
			       PIO_IOTYPE_NETCDF4C, PIO_IOTYPE_NETCDF4P};

    /* Num procs for IO and computation. */
    int num_procs[NUM_COMBOS][COMPONENT_COUNT + 1] = {{3, 1}, {2, 2}, {1, 3}};

    /* Number of processors that will do IO. */
    int num_io_procs[NUM_COMBOS] = {3, 2, 1};

    /* Initialize test. */
    if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS)))
	ERR(ERR_INIT);
    
    for (int combo = 0; combo < NUM_COMBOS; combo++)
    {
	/* Is the current process a computation task? */
	int comp_task = my_rank < num_io_procs[combo] ? 0 : 1;
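	/* Assuming a 4-task run (each num_procs row sums to 4), combo 0
	 * makes ranks 0-2 IO tasks and rank 3 a computation task. */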
	
	/* Initialize the IO system. */
	if ((ret = PIOc_Init_Async(MPI_COMM_WORLD, num_io_procs[combo], NULL, COMPONENT_COUNT,
				   num_procs[combo], NULL, iosysid)))
	    ERR(ERR_INIT);
	
	for (int c = 0; c < COMPONENT_COUNT; c++)
	    printf("%d iosysid[%d] = %d\n", my_rank, c, iosysid[c]);
	
	/* All the netCDF calls are only executed on the computation
	 * tasks. The IO tasks have not returned from PIOc_Init_Async,
	 * and when they do, they should go straight to finalize. */
	if (comp_task)
	{
	    for (int flv = 0; flv < NUM_FLAVORS; flv++)
	    {
		char filename[NC_MAX_NAME + 1]; /* Test filename. */
		int my_comp_idx = 0; /* Index in iosysid array. */
		
		for (int sample = 0; sample < NUM_SAMPLES; sample++)
		{
		    /* Create a filename. */
		    sprintf(filename, "%s_%s_%d_%d.nc", TEST_NAME, flavor_name(flv), sample, my_comp_idx);
		    
		    /* Create sample file. */
		    printf("%d %s creating file %s\n", my_rank, TEST_NAME, filename);
		    if ((ret = create_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank)))
			ERR(ret);
		    
		    /* Check the file for correctness. */
		    if ((ret = check_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank)))
			ERR(ret);
		}
	    } /* next netcdf flavor */
	    
	    /* Finalize the IO system. Only call this from the computation tasks. */
	    printf("%d %s Freeing PIO resources\n", my_rank, TEST_NAME);
	    for (int c = 0; c < COMPONENT_COUNT; c++)
	    {
		if ((ret = PIOc_finalize(iosysid[c])))
		    ERR(ret);
		printf("%d %s PIOc_finalize completed for iosysid = %d\n", my_rank, TEST_NAME,
		       iosysid[c]);
	    }
	} /* endif comp_task */

	/* Wait for everyone to catch up. */
	printf("%d %s waiting for all processes!\n", my_rank, TEST_NAME);
	MPI_Barrier(MPI_COMM_WORLD);
    } /* next combo */

    /* Finalize test. */
    printf("%d %s finalizing...\n", my_rank, TEST_NAME);
    if ((ret = pio_test_finalize()))
	ERR(ERR_AWFUL);

    printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);

    return 0;
}
Example 4
/* Run Tests for Init_Intercomm. */
int main(int argc, char **argv)
{
    /* Zero-based rank of processor. */
    int my_rank;

    /* Number of processors involved in current execution. */
    int ntasks;

    int num_flavors; /* Number of PIO netCDF flavors in this build. */
    int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */

    /* Names for the output files. */
    char filename[NUM_FLAVORS][NC_MAX_NAME + 1];

    /* The ID for the parallel I/O system. */
    int iosysid[COMPONENT_COUNT];

    /* Return code. */
    int ret;

    MPI_Comm test_comm;

    char too_long_name[PIO_MAX_NAME * 5 + 1];

    /* Create a name that is too long. */
    memset(too_long_name, 74, PIO_MAX_NAME * 5);
    too_long_name[PIO_MAX_NAME * 5] = 0;
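    /* The memset fills the name with 'J' (ASCII 74), so it exceeds
     * PIO_MAX_NAME and should trigger PIO_EINVAL in the rename and
     * attribute calls below. */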

    /* Set up test. */
    if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS,
                              -1, &test_comm)))
        ERR(ERR_INIT);

    /* Figure out iotypes. */
    if ((ret = get_iotypes(&num_flavors, flavor)))
        ERR(ret);

    if (my_rank < TARGET_NTASKS)
    {

        /* How many processors will be used for each computation component. */
        int num_procs[COMPONENT_COUNT] = {2};

        /* Is the current process a computation task? */
        int comp_task = my_rank < 2 ? 0 : 1;

        /* Index of computation task in iosysid array. Varies by rank and
         * does not apply to IO component processes. */
        int my_comp_idx = comp_task ? 0 : -1;
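        /* For example, on a 4-task run, ranks 0 and 1 become IO tasks
         * (my_comp_idx = -1) while ranks 2 and 3 belong to the computation
         * component (my_comp_idx = 0). */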

        /* Initialize the IO system. */
        if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT,
                                   num_procs, NULL, NULL, NULL, PIO_REARR_BOX, iosysid)))
            ERR(ERR_AWFUL);

        /* All the netCDF calls are only executed on the computation
         * tasks. The IO tasks have not returned from PIOc_init_async,
         * and when they do, they should go straight to finalize. */
        if (comp_task)
        {
            for (int fmt = 0; fmt < num_flavors; fmt++)
            {
                int ncid, varid, dimid;
                PIO_Offset start[NDIM], count[NDIM] = {0};
                int data[DIM_LEN];

                /* Create the filename for this flavor. */
                sprintf(filename[fmt], "test_intercomm2_%d.nc", flavor[fmt]);

                /* Create a netCDF file with one dimension and one variable. */
                if ((ret = PIOc_createfile(iosysid[my_comp_idx], &ncid, &flavor[fmt], filename[fmt],
                                           NC_CLOBBER)))
                    ERR(ret);

                /* End define mode, then re-enter it. */
                if ((ret = PIOc_enddef(ncid)))
                    ERR(ret);
                if ((ret = PIOc_redef(ncid)))
                    ERR(ret);

                /* Test the inq_format function. */
                int myformat;
                if (PIOc_inq_format(ncid + TEST_VAL_42, &myformat) != PIO_EBADID)
                    ERR(ERR_WRONG);
                if ((ret = PIOc_inq_format(ncid, &myformat)))
                    ERR(ret);
                if ((flavor[fmt] == PIO_IOTYPE_PNETCDF || flavor[fmt] == PIO_IOTYPE_NETCDF) &&
                    myformat != 1)
                    ERR(ERR_AWFUL);
                else if ((flavor[fmt] == PIO_IOTYPE_NETCDF4C || flavor[fmt] == PIO_IOTYPE_NETCDF4P) &&
                         myformat != 3)
                    ERR(ERR_AWFUL);

                /* Test the inq_type function for atomic types. */
                char type_name[NC_MAX_NAME + 1];
                PIO_Offset type_size;
                nc_type xtype[NUM_TYPES] = {NC_CHAR, NC_BYTE, NC_SHORT, NC_INT, NC_FLOAT, NC_DOUBLE,
                                            NC_UBYTE, NC_USHORT, NC_UINT, NC_INT64, NC_UINT64};
                int type_len[NUM_TYPES] = {1, 1, 2, 4, 4, 8, 1, 2, 4, 8, 8};
                int max_type = flavor[fmt] == PIO_IOTYPE_NETCDF ? NC_DOUBLE : NC_UINT64;
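                /* max_type bounds the loop below: NC_DOUBLE (6) covers the
                 * six classic atomic types, while NC_UINT64 (11) covers all
                 * eleven netCDF-4 atomic types. */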

                /* This should not work. */
                if (PIOc_inq_type(ncid + TEST_VAL_42, xtype[0], type_name, &type_size) != PIO_EBADID)
                    ERR(ERR_WRONG);

                /* These should work. */
                for (int i = 0; i < max_type; i++)
                {
                    if ((ret = PIOc_inq_type(ncid, xtype[i], type_name, &type_size)))
                        ERR(ret);
                    if (type_size != type_len[i])
                        ERR(ERR_AWFUL);
                }

                /* Define a dimension. */
                char dimname2[NC_MAX_NAME + 1];
                if ((ret = PIOc_def_dim(ncid, FIRST_DIM_NAME, DIM_LEN, &dimid)))
                    ERR(ret);
                if ((ret = PIOc_inq_dimname(ncid, 0, dimname2)))
                    ERR(ret);
                if (strcmp(dimname2, FIRST_DIM_NAME))
                    ERR(ERR_WRONG);
                if ((ret = PIOc_rename_dim(ncid, 0, DIM_NAME)))
                    ERR(ret);

                /* These should not work. */
                if (PIOc_rename_dim(ncid + TEST_VAL_42, 0, DIM_NAME) != PIO_EBADID)
                    ERR(ERR_WRONG);
                if (PIOc_rename_dim(ncid, 0, NULL) != PIO_EINVAL)
                    ERR(ERR_WRONG);
                if (PIOc_rename_dim(ncid, 0, too_long_name) != PIO_EINVAL)
                    ERR(ERR_WRONG);

                /* Define a 1-D variable. */
                char varname2[NC_MAX_NAME + 1];
                if ((ret = PIOc_def_var(ncid, FIRST_VAR_NAME, NC_INT, NDIM, &dimid, &varid)))
                    ERR(ret);
                if ((ret = PIOc_inq_varname(ncid, 0, varname2)))
                    ERR(ret);
                if (strcmp(varname2, FIRST_VAR_NAME))
                    ERR(ERR_WRONG);
                if ((ret = PIOc_rename_var(ncid, 0, VAR_NAME)))
                    ERR(ret);

                /* These should not work. */
                if (PIOc_rename_var(ncid + TEST_VAL_42, 0, VAR_NAME) != PIO_EBADID)
                    ERR(ERR_WRONG);
                if (PIOc_rename_var(ncid, 0, NULL) != PIO_EINVAL)
                    ERR(ERR_WRONG);
                if (PIOc_rename_var(ncid, 0, too_long_name) != PIO_EINVAL)
                    ERR(ERR_WRONG);

                /* Add a global attribute. */
                int att_data = ATT_VALUE;
                short short_att_data = ATT_VALUE;
                float float_att_data = ATT_VALUE;
                double double_att_data = ATT_VALUE;
                char attname2[NC_MAX_NAME + 1];

                /* Write an att and rename it. */
                if ((ret = PIOc_put_att_int(ncid, NC_GLOBAL, FIRST_ATT_NAME, NC_INT, 1, &att_data)))
                    ERR(ret);
                if ((ret = PIOc_inq_attname(ncid, NC_GLOBAL, 0, attname2)))
                    ERR(ret);
                if (strcmp(attname2, FIRST_ATT_NAME))
                    ERR(ERR_WRONG);
                if ((ret = PIOc_rename_att(ncid, NC_GLOBAL, FIRST_ATT_NAME, ATT_NAME)))
                    ERR(ret);

                /* These should not work. */
                if (PIOc_inq_attname(ncid + TEST_VAL_42, NC_GLOBAL, 0, attname2) != PIO_EBADID)
                    ERR(ERR_WRONG);
                if (PIOc_rename_att(ncid + TEST_VAL_42, NC_GLOBAL, FIRST_ATT_NAME, ATT_NAME) != PIO_EBADID)
                    ERR(ERR_WRONG);
                if (PIOc_rename_att(ncid, NC_GLOBAL, FIRST_ATT_NAME, NULL) != PIO_EINVAL)
                    ERR(ERR_WRONG);
                if (PIOc_rename_att(ncid, NC_GLOBAL, FIRST_ATT_NAME, too_long_name) != PIO_EINVAL)
                    ERR(ERR_WRONG);
                if (PIOc_del_att(ncid + TEST_VAL_42, NC_GLOBAL, FIRST_ATT_NAME) != PIO_EBADID)
                    ERR(ERR_WRONG);
                if (PIOc_del_att(ncid, NC_GLOBAL, NULL) != PIO_EINVAL)
                    ERR(ERR_WRONG);
                if (PIOc_del_att(ncid, NC_GLOBAL, too_long_name) != PIO_EINVAL)
                    ERR(ERR_WRONG);

                /* Write an att and delete it. */
                if ((ret = PIOc_put_att_int(ncid, NC_GLOBAL, FIRST_ATT_NAME, NC_INT, 1, &att_data)))
                    ERR(ret);
                if ((ret = PIOc_del_att(ncid, NC_GLOBAL, FIRST_ATT_NAME)))
                    ERR(ret);
                /* if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, FIRST_ATT_NAME, NULL, NULL)) != PIO_ENOTATT) */
                /* { */
                /*      printf("ret = %d\n", ret); */
                /*      ERR(ERR_AWFUL); */
                /* } */

                /* Write some atts of different types. */
                if ((ret = PIOc_put_att_short(ncid, NC_GLOBAL, SHORT_ATT_NAME, NC_SHORT, 1, &short_att_data)))
                    ERR(ret);
                if ((ret = PIOc_put_att_float(ncid, NC_GLOBAL, FLOAT_ATT_NAME, NC_FLOAT, 1, &float_att_data)))
                    ERR(ret);
                if ((ret = PIOc_put_att_double(ncid, NC_GLOBAL, DOUBLE_ATT_NAME, NC_DOUBLE, 1, &double_att_data)))
                    ERR(ret);

                /* Check some att types. */
                nc_type myatttype;
                if ((ret = PIOc_inq_atttype(ncid, NC_GLOBAL, SHORT_ATT_NAME, &myatttype)))
                    ERR(ret);
                if (myatttype != NC_SHORT)
                    ERR(ERR_WRONG);
                if ((ret = PIOc_inq_atttype(ncid, NC_GLOBAL, FLOAT_ATT_NAME, &myatttype)))
                    ERR(ret);
                if (myatttype != NC_FLOAT)
                    ERR(ERR_WRONG);
                if ((ret = PIOc_inq_atttype(ncid, NC_GLOBAL, DOUBLE_ATT_NAME, &myatttype)))
                    ERR(ret);
                if (myatttype != NC_DOUBLE)
                    ERR(ERR_WRONG);

                /* End define mode. */
                if ((ret = PIOc_enddef(ncid)))
                    ERR(ret);

                /* Write some data. For the PIOc_put/get functions, all
                 * data must be on compmaster before the function is
                 * called. Only compmaster's arguments are passed to the
                 * async msg handler. All other computation tasks are
                 * ignored. */
                for (int i = 0; i < DIM_LEN; i++)
                    data[i] = i;
                start[0] = 0;
                count[0] = DIM_LEN;
                if ((ret = PIOc_put_vars_tc(ncid, varid, start, count, NULL, NC_INT, data)))
                    ERR(ret);

                /* Close the file. */
                if ((ret = PIOc_closefile(ncid)))
                    ERR(ret);

                /* Check the file for correctness. */
                if ((ret = check_file(iosysid[my_comp_idx], flavor[fmt], filename[fmt], my_rank)))
                    ERR(ret);

                /* Now delete the file. */
                /* if ((ret = PIOc_deletefile(iosysid, filename[fmt]))) */
                /*      ERR(ret); */
                /* if ((ret = PIOc_openfile(iosysid, &ncid, &flavor[fmt], filename[fmt], */
                /*                           NC_NOWRITE)) != PIO_ENFILE) */
                /*      ERR(ERR_AWFUL); */

            } /* next netcdf flavor */

            /* Finalize the IO system. Only call this from the computation tasks. */
            if ((ret = PIOc_finalize(iosysid[my_comp_idx])))
                ERR(ret);
        }
    } /* my_rank < TARGET_NTASKS */

    /* Finalize test. */
    if ((ret = pio_test_finalize(&test_comm)))
        return ERR_AWFUL;

    printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);

    return 0;
}
Example 5
/** Run Tests for NetCDF-4 Functions.
 *
 * @param argc argument count
 * @param argv array of arguments
 */
int
main(int argc, char **argv)
{
    int verbose = 1;
    
    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** Different output flavors. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_nc4_pnetcdf.nc",
							  "test_nc4_classic.nc",
							  "test_nc4_serial4.nc",
							  "test_nc4_parallel4.nc"};
	
    /** Number of processors that will do IO. In this test we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the mpi rank between io tasks. Always 1 in this
     * test. */
    int ioproc_stride = 1;

    /** Number of the aggregator? Always 0 in this test. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Array index per processing unit. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. */
    int iosysid;

    /** The ncid of the netCDF file. */
    int ncid = 0;

    /** The ID of the netCDF variable. */
    int varid;

    /** Storage of netCDF-4 files (contiguous vs. chunked). */
    int storage;

    /** Chunksizes set in the file. */
    PIO_Offset my_chunksize[NDIM];
    
    /** The shuffle filter setting in the netCDF-4 test file. */
    int shuffle;
    
    /** Non-zero if deflate set for the variable in the netCDF-4 test file. */
    int deflate;

    /** The deflate level set for the variable in the netCDF-4 test file. */
    int deflate_level;

    /** Endianness of variable. */
    int endianness;

    /* Size of the var chunk cache. */
    PIO_Offset var_cache_size;

    /* Number of elements in var cache. */
    PIO_Offset var_cache_nelems;

    /* Var cache preemption. */    
    float var_cache_preemption;
    
    /** The I/O description ID. */
    int ioid;

    /** A buffer for sample data. */
    float *buffer;

    /** A buffer for reading data back from the file. */
    int *read_buffer;

    /** The decomposition mapping. */
    PIO_Offset *compdof;

    /** Return code. */
    int ret;

    /** Index for loops. */
    int fmt, d, d1, i;

    /** For setting the chunk cache. */
    PIO_Offset chunk_cache_size = 1024*1024;
    PIO_Offset chunk_cache_nelems = 1024;
    float chunk_cache_preemption = 0.5;

    /* For reading the chunk cache. */
    PIO_Offset chunk_cache_size_in;
    PIO_Offset chunk_cache_nelems_in;
    float chunk_cache_preemption_in;
    
    char varname[NC_MAX_NAME + 1];
    
#ifdef TIMING    
    /* Initialize the GPTL timing library. */
    if ((ret = GPTLinitialize ()))
	return ret;
#endif    
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
    if (verbose)
	printf("%d: ParallelIO Library test_nc4 running on %d processors.\n",
	       my_rank, ntasks);

    /* keep things simple - 1 iotask per MPI process */    
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
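    /* Each task thus owns a contiguous, 1-based slice of the flattened
     * array: rank r maps offsets r * elements_per_pe + 1 through
     * (r + 1) * elements_per_pe. */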
	
    /* Create the PIO decomposition for this test. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with INIT. */
    if ((ret = MPE_Log_event(event_num[END][INIT], 0, "end init")))
	MPIERR(ret);
#endif /* HAVE_MPE */

    /* How many flavors will we be running for? */
    int num_flavors = 0;
    int fmtidx = 0;
#ifdef _PNETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_PNETCDF;
#endif
#ifdef _NETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_NETCDF;
#endif
#ifdef _NETCDF4
    num_flavors += 2;
    format[fmtidx++] = PIO_IOTYPE_NETCDF4C;
    format[fmtidx] = PIO_IOTYPE_NETCDF4P;
#endif
    
    /* Use PIO to create the example file in each of the four
     * available ways. */
    for (fmt = 0; fmt < num_flavors; fmt++) 
    {
#ifdef HAVE_MPE
	/* Log with MPE that we are starting CREATE. */
	if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	if (verbose)
	    printf("rank: %d Setting chunk cache for file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);

	/* Try to set the chunk cache with invalid preemption to check error handling. */
	chunk_cache_preemption = 50.0;
	ret = PIOc_set_chunk_cache(iosysid, format[fmt], chunk_cache_size,
				   chunk_cache_nelems, chunk_cache_preemption);
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if (ret != NC_EINVAL)
		ERR(ERR_AWFUL);
	}
	else
	{
	    if (ret != NC_ENOTNC4)
		ERR(ERR_AWFUL);
	}
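	/* The netCDF-4 iotypes reject the call with NC_EINVAL because the
	 * chunk cache preemption must lie in [0.0, 1.0]; the other iotypes
	 * have no chunk cache and return NC_ENOTNC4. */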

	/* Try to set the chunk cache. */
	chunk_cache_preemption = 0.5;
	ret = PIOc_set_chunk_cache(iosysid, format[fmt], chunk_cache_size,
				   chunk_cache_nelems, chunk_cache_preemption);

	/* Should only have worked for netCDF-4 iotypes. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if (ret != PIO_NOERR)
		ERR(ret);
	}
	else
	{
	    if (ret != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}

	/* Now check the chunk cache. */
	ret = PIOc_get_chunk_cache(iosysid, format[fmt], &chunk_cache_size_in,
				   &chunk_cache_nelems_in, &chunk_cache_preemption_in);

	/* Should only have worked for netCDF-4 iotypes. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    /* Check that there was no error. */
	    if (ret != PIO_NOERR)
		ERR(ret);

	    /* Check that we got the correct values. */
	    if (chunk_cache_size_in != chunk_cache_size || chunk_cache_nelems_in != chunk_cache_nelems ||
		chunk_cache_preemption_in != chunk_cache_preemption)
		ERR(ERR_AWFUL);
	}
	else
	{
	    if (ret != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}

	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);

	/* Set error handling. */
	PIOc_Set_File_Error_Handling(ncid, PIO_BCAST_ERROR);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}
	if (verbose)
	    printf("rank: %d Defining netCDF variable %s, ndims %d\n", my_rank, VAR_NAME, NDIM);
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);

	/* For netCDF-4 files, set the chunksize to improve performance. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if (verbose)
		printf("rank: %d Defining chunksizes\n", my_rank);
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
		ERR(ret);

	    /** Check that the inq_varname function works. */
	    if (verbose)
	    	printf("rank: %d Checking varname\n", my_rank);
	    ret = PIOc_inq_varname(ncid, 0, varname);
	    printf("rank: %d ret: %d varname: %s\n", my_rank, ret, varname);
	    
	    /** Check that the inq_var_chunking function works. */
	    if (verbose)
		printf("rank: %d Checking chunksizes\n");
	    if ((ret = PIOc_inq_var_chunking(ncid, 0, &storage, my_chunksize)))
	    	ERR(ret);
	    if (verbose)
	    {
		printf("rank: %d ret: %d storage: %d\n", my_rank, ret, storage);
		for (d1 = 0; d1 < NDIM; d1++)
		{
		    printf("chunksize[%d]=%d\n", d1, my_chunksize[d1]);
		}
	    }
	    
	    /** Check the answers. */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4C ||
		format[fmt] == PIO_IOTYPE_NETCDF4P)
	    {
		if (storage != NC_CHUNKED)
		    ERR(ERR_AWFUL);
		for (d1 = 0; d1 < NDIM; d1++)
		    if (my_chunksize[d1] != chunksize[d1])
		    	ERR(ERR_AWFUL);
	    }

	    /* Check that the inq_var_deflate functions works. */
	    if ((ret = PIOc_inq_var_deflate(ncid, 0, &shuffle, &deflate, &deflate_level)))
	    	ERR(ret);

	    /** For serial netCDF-4 deflate is turned on by default */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4C)
		if (shuffle || !deflate || deflate_level != 1)
		    ERR(ERR_AWFUL);

	    /* For parallel netCDF-4, no compression available. :-( */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4P)
		if (shuffle || deflate)
		    ERR(ERR_AWFUL);

	    /* Check setting the chunk cache for the variable. */
	    printf("rank: %d PIOc_set_var_chunk_cache...\n", my_rank);
	    if ((ret = PIOc_set_var_chunk_cache(ncid, 0, VAR_CACHE_SIZE, VAR_CACHE_NELEMS,
						VAR_CACHE_PREEMPTION)))
	    	ERR(ret);

	    /* Check getting the chunk cache values for the variable. */
	    printf("rank: %d PIOc_get_var_chunk_cache...\n", my_rank);	    
	    if ((ret = PIOc_get_var_chunk_cache(ncid, 0, &var_cache_size, &var_cache_nelems,
						&var_cache_preemption)))
	    	ERR(ret);
	    PIO_Offset len;
	    if ((ret = PIOc_inq_dimlen(ncid, 0, &len)))
	    	ERR(ret);

	    /* Check that we got expected values. */
	    printf("rank: %d var_cache_size = %d\n", my_rank, var_cache_size);	    
	    if (var_cache_size != VAR_CACHE_SIZE)
		ERR(ERR_AWFUL);
	    if (var_cache_nelems != VAR_CACHE_NELEMS)
		ERR(ERR_AWFUL);
	    if (var_cache_preemption != VAR_CACHE_PREEMPTION)
		ERR(ERR_AWFUL);
	} else {
	    /* Trying to set or inq netCDF-4 settings for non-netCDF-4
	     * files results in the PIO_ENOTNC4 error. */
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_inq_var_chunking(ncid, 0, &storage, my_chunksize)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_inq_var_deflate(ncid, 0, &shuffle, &deflate, &deflate_level))
		!= PIO_ENOTNC4)
	    	ERR(ret);
	    if ((ret = PIOc_def_var_endian(ncid, 0, 1)) != PIO_ENOTNC4)
		ERR(ret);
	    if ((ret = PIOc_inq_var_endian(ncid, 0, &endianness)) != PIO_ENOTNC4)
	    	ERR(ret);
	    if ((ret = PIOc_set_var_chunk_cache(ncid, 0, VAR_CACHE_SIZE, VAR_CACHE_NELEMS,
						VAR_CACHE_PREEMPTION)) != PIO_ENOTNC4)
	    	ERR(ret);
	    if ((ret = PIOc_get_var_chunk_cache(ncid, 0, &var_cache_size, &var_cache_nelems,
						&var_cache_preemption)) != PIO_ENOTNC4)
		ERR(ret);
	    if ((ret = PIOc_set_chunk_cache(iosysid, format[fmt], chunk_cache_size, chunk_cache_nelems,
	    				    chunk_cache_preemption)) != PIO_ENOTNC4)
	    	ERR(ret);
	    if ((ret = PIOc_get_chunk_cache(iosysid, format[fmt], &chunk_cache_size,
	    				    &chunk_cache_nelems, &chunk_cache_preemption)) != PIO_ENOTNC4)
	    	ERR(ret);
	}	    
	
	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);
    }
	
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    
    

    return 0;
}
Example 6
/* Run async tests. */
int main(int argc, char **argv)
{
    int my_rank; /* Zero-based rank of processor. */
    int ntasks; /* Number of processors involved in current execution. */
    int iosysid_world; /* The ID for the parallel I/O system. */
    int even_iosysid; /* The ID for iosystem of even_comm. */
    int overlap_iosysid; /* The ID for iosystem of overlap_comm. */
    MPI_Group world_group; /* An MPI group of world. */
    MPI_Group even_group; /* An MPI group of 0 and 2. */
    MPI_Group overlap_group; /* An MPI group of 0, 1, and 3. */
    MPI_Comm even_comm = MPI_COMM_NULL; /* Communicator for tasks 0, 2 */
    MPI_Comm overlap_comm = MPI_COMM_NULL; /* Communicator for tasks 0, 1, 3. */
    int even_rank = -1, overlap_rank = -1; /* Task's rank in its communicator. */
    int even_size = 0, overlap_size = 0; /* Size of communicator. */
    int num_flavors; /* Number of PIO netCDF flavors in this build. */
    int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
    MPI_Comm test_comm;
    int rearranger[NUM_REARRANGERS] = {PIO_REARR_BOX, PIO_REARR_SUBSET};
    int ret; /* Return code. */

    /* Initialize test. */
    if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS,
                              -1, &test_comm)))
        ERR(ERR_INIT);

    /* Test code runs on TARGET_NTASKS tasks. The left over tasks do
     * nothing. */
    if (my_rank < TARGET_NTASKS)
    {
        /* Figure out iotypes. */
        if ((ret = get_iotypes(&num_flavors, flavor)))
            ERR(ret);

        /* Test with both rearrangers. */
        for (int r = 0; r < NUM_REARRANGERS; r++)
        {
            /* Initialize PIO system on world. */
            if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO4, STRIDE1, BASE0, rearranger[r],
                                           &iosysid_world)))
                ERR(ret);

            /* Set the error handler. */
            if ((ret = PIOc_set_iosystem_error_handling(iosysid_world, PIO_BCAST_ERROR, NULL)))
                ERR(ret);

            /* Get MPI_Group of world comm. */
            if ((ret = MPI_Comm_group(test_comm, &world_group)))
                ERR(ret);

            /* Create a group with tasks 0 and 2. */
            int even_ranges[EVEN_NUM_RANGES][3] = {{0, 2, 2}};
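            /* Each MPI_Group_range_incl triplet is {first, last, stride},
             * so {0, 2, 2} selects world ranks 0 and 2. */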
            if ((ret = MPI_Group_range_incl(world_group, EVEN_NUM_RANGES, even_ranges,
                                            &even_group)))
                ERR(ret);

            /* Create a communicator from the even_group. */
            if ((ret = MPI_Comm_create(test_comm, even_group, &even_comm)))
                ERR(ret);

            /* Learn my rank and the total number of processors in even group. */
            if (even_comm != MPI_COMM_NULL)
            {
                if ((ret = MPI_Comm_rank(even_comm, &even_rank)))
                    MPIERR(ret);
                if ((ret = MPI_Comm_size(even_comm, &even_size)))
                    MPIERR(ret);
            }

            /* Create a group with tasks 0, 1, and 3. */
            int overlap_ranges[OVERLAP_NUM_RANGES][3] = {{0, 0, 1}, {1, 3, 2}};
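            /* The triplets {0, 0, 1} and {1, 3, 2} select rank 0 plus
             * ranks 1 and 3, giving a group of ranks 0, 1, and 3. */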
            if ((ret = MPI_Group_range_incl(world_group, OVERLAP_NUM_RANGES, overlap_ranges,
                                            &overlap_group)))
                ERR(ret);

            /* Create a communicator from the overlap_group. */
            if ((ret = MPI_Comm_create(test_comm, overlap_group, &overlap_comm)))
                ERR(ret);

            /* Learn my rank and the total number of processors in overlap
             * group. */
            if (overlap_comm != MPI_COMM_NULL)
            {
                if ((ret = MPI_Comm_rank(overlap_comm, &overlap_rank)))
                    MPIERR(ret);
                if ((ret = MPI_Comm_size(overlap_comm, &overlap_size)))
                    MPIERR(ret);
            }

            /* Initialize PIO system for even. */
            if (even_comm != MPI_COMM_NULL)
            {
                if ((ret = PIOc_Init_Intracomm(even_comm, NUM_IO1, STRIDE1, BASE1, rearranger[r],
                                               &even_iosysid)))
                    ERR(ret);

                /* These should not work. */
                if (PIOc_set_hint(even_iosysid + TEST_VAL_42, NULL, NULL) != PIO_EBADID)
                    ERR(ERR_WRONG);
                if (PIOc_set_hint(even_iosysid, NULL, NULL) != PIO_EINVAL)
                    ERR(ERR_WRONG);

                /* Set the hint (which will be ignored). */
                if ((ret = PIOc_set_hint(even_iosysid, "hint", "hint_value")))
                    ERR(ret);

                /* Set the error handler. */
                /*PIOc_Set_IOSystem_Error_Handling(even_iosysid, PIO_BCAST_ERROR);*/
                if ((ret = PIOc_set_iosystem_error_handling(even_iosysid, PIO_BCAST_ERROR, NULL)))
                    ERR(ret);
            }

            /* Initialize PIO system for overlap comm. */
            if (overlap_comm != MPI_COMM_NULL)
            {
                if ((ret = PIOc_Init_Intracomm(overlap_comm, NUM_IO2, STRIDE1, BASE1, rearranger[r],
                                               &overlap_iosysid)))
                    ERR(ret);

                /* Set the error handler. */
                PIOc_Set_IOSystem_Error_Handling(overlap_iosysid, PIO_BCAST_ERROR);
            }

            for (int i = 0; i < num_flavors; i++)
            {
                char fname0[PIO_MAX_NAME + 1];
                char fname1[PIO_MAX_NAME + 1];
                char fname2[PIO_MAX_NAME + 1];

                sprintf(fname0, "%s_file_0_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]);
                if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname0, ATTNAME,
                                       DIMNAME, my_rank)))
                    ERR(ret);

                sprintf(fname1, "%s_file_1_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]);
                if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname1, ATTNAME,
                                       DIMNAME, my_rank)))
                    ERR(ret);

                sprintf(fname2, "%s_file_2_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]);
                if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname2, ATTNAME,
                                       DIMNAME, my_rank)))
                    ERR(ret);

                /* Now check the first file from WORLD communicator. */
                int ncid;
                if ((ret = open_and_check_file(test_comm, iosysid_world, flavor[i], &ncid, fname0,
                                               ATTNAME, DIMNAME, 1, my_rank)))
                    ERR(ret);

                /* Now have the even communicators check the files. */
                int ncid2;
                if (even_comm != MPI_COMM_NULL)
                {
                    if ((ret = open_and_check_file(even_comm, even_iosysid, flavor[i], &ncid2,
                                                   fname2, ATTNAME, DIMNAME, 1, my_rank)))
                        ERR(ret);
                    if ((ret = check_file(even_comm, even_iosysid, flavor[i], ncid2, fname2,
                                          ATTNAME, DIMNAME, my_rank)))
                        ERR(ret);
                }

                /* Now have the overlap communicators check the files. */
                int ncid3;
                if (overlap_comm != MPI_COMM_NULL)
                {
                    if ((ret = open_and_check_file(overlap_comm, overlap_iosysid, flavor[i],
                                                   &ncid3, fname1, ATTNAME, DIMNAME, 1, my_rank)))
                        ERR(ret);
                    if ((ret = check_file(overlap_comm, overlap_iosysid, flavor[i], ncid3, fname1,
                                          ATTNAME, DIMNAME, my_rank)))
                        ERR(ret);
                }

                /* Close the still-open files. */
                if (even_comm != MPI_COMM_NULL)
                    if ((ret = PIOc_closefile(ncid2)))
                        ERR(ret);
                if (overlap_comm != MPI_COMM_NULL)
                    if ((ret = PIOc_closefile(ncid3)))
                        ERR(ret);
                if ((ret = PIOc_closefile(ncid)))
                    ERR(ret);

            } /* next iotype */
        
            /* Finalize PIO systems. */
            if (even_comm != MPI_COMM_NULL)
                if ((ret = PIOc_finalize(even_iosysid)))
                    ERR(ret);
            if (overlap_comm != MPI_COMM_NULL)
            {
                if ((ret = PIOc_finalize(overlap_iosysid)))
                    ERR(ret);
            }
            if ((ret = PIOc_finalize(iosysid_world)))
                ERR(ret);

            /* Free MPI resources used by test. */
            if ((ret = MPI_Group_free(&overlap_group)))
                ERR(ret);
            if ((ret = MPI_Group_free(&even_group)))
                ERR(ret);
            if ((ret = MPI_Group_free(&world_group)))
                ERR(ret);
            if (overlap_comm != MPI_COMM_NULL)
                if ((ret = MPI_Comm_free(&overlap_comm)))
                    ERR(ret);
            if (even_comm != MPI_COMM_NULL)
                if ((ret = MPI_Comm_free(&even_comm)))
                    ERR(ret);
        } /* next rearranger */
    } /* my_rank < TARGET_NTASKS */

    /* Finalize test. */
    if ((ret = pio_test_finalize(&test_comm)))
        return ERR_AWFUL;

    printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);

    return 0;
}
Example 7
/* Run tests for darray functions. */
int main(int argc, char **argv)
{
    int my_rank;
    int ntasks;
    int num_flavors; /* Number of PIO netCDF flavors in this build. */
    int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
    MPI_Comm test_comm; /* A communicator for this test. */
    int ret;         /* Return code. */

    /* Initialize test. */
    if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS,
                              MIN_NTASKS, -1, &test_comm)))
        ERR(ERR_INIT);

    if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL)))
        return ret;

    /* Only do something on max_ntasks tasks. */
    if (my_rank < TARGET_NTASKS)
    {
        int iosysid;  /* The ID for the parallel I/O system. */
        int ioproc_stride = 1;    /* Stride in the mpi rank between io tasks. */
        int ioproc_start = 0;     /* Zero based rank of first processor to be used for I/O. */
        int wioid, rioid;
        int maplen = MAPLEN;
        MPI_Offset wcompmap[MAPLEN];
        MPI_Offset rcompmap[MAPLEN];
        int rearranger[NUM_REARRANGERS_TO_TEST] = {PIO_REARR_BOX, PIO_REARR_SUBSET};

        /* Data we will write for each type. */
        signed char byte_data[MAPLEN];
        char char_data[MAPLEN];
        short short_data[MAPLEN];
        int int_data[MAPLEN];
        float float_data[MAPLEN];
        double double_data[MAPLEN];
#ifdef _NETCDF4
        unsigned char ubyte_data[MAPLEN];
        unsigned short ushort_data[MAPLEN];
        unsigned int uint_data[MAPLEN];
        long long int64_data[MAPLEN];
        unsigned long long uint64_data[MAPLEN];
#endif /* _NETCDF4 */

        /* Expected results for each type. */
        signed char byte_expected[MAPLEN];
        char char_expected[MAPLEN];
        short short_expected[MAPLEN];
        int int_expected[MAPLEN];
        float float_expected[MAPLEN];
        double double_expected[MAPLEN];
#ifdef _NETCDF4
        unsigned char ubyte_expected[MAPLEN];
        unsigned short ushort_expected[MAPLEN];
        unsigned int uint_expected[MAPLEN];
        long long int64_expected[MAPLEN];
        unsigned long long uint64_expected[MAPLEN];
#endif /* _NETCDF4 */

        /* Custom fill value for each type. */
        signed char byte_fill = -2;
        char char_fill = 2;
        short short_fill = -2;
        int int_fill = -2;
        float float_fill = -2;
        double double_fill = -2;
#ifdef _NETCDF4
        unsigned char ubyte_fill = 2;
        unsigned short ushort_fill = 2;
        unsigned int uint_fill = 2;
        long long int64_fill = 2;
        unsigned long long uint64_fill = 2;
#endif /* _NETCDF4 */

        /* Default fill value for each type. */
        signed char byte_default_fill = NC_FILL_BYTE;
        char char_default_fill = NC_FILL_CHAR;
        short short_default_fill = NC_FILL_SHORT;
        int int_default_fill = NC_FILL_INT;
        float float_default_fill = NC_FILL_FLOAT;
        double double_default_fill = NC_FILL_DOUBLE;
#ifdef _NETCDF4
        unsigned char ubyte_default_fill = NC_FILL_UBYTE;
        unsigned short ushort_default_fill = NC_FILL_USHORT;
        unsigned int uint_default_fill = NC_FILL_UINT;
        long long int64_default_fill = NC_FILL_INT64;
        unsigned long long uint64_default_fill = NC_FILL_UINT64;
#endif /* _NETCDF4 */

        int ret;      /* Return code. */

        /* Set up the compmaps. Don't forget these are 1-based
         * numbers, like in Fortran! */
        for (int i = 0; i < MAPLEN; i++)
        {
            wcompmap[i] = i % 2 ? my_rank * MAPLEN + i + 1 : 0; /* Even values missing. */
            rcompmap[i] = my_rank * MAPLEN + i + 1;
        }
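        /* A compmap entry of 0 tells PIO that this task supplies no data
         * for that element, so even-indexed entries of the write map come
         * back as fill values; e.g. rank 0's wcompmap begins {0, 2, 0, 4, ...}
         * while its rcompmap begins {1, 2, 3, 4, ...}. */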

        /* Figure out iotypes. */
        if ((ret = get_iotypes(&num_flavors, flavor)))
            ERR(ret);

        /* Test for each rearranger. */
        for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++)
        {
            /* Initialize the PIO IO system. This specifies how
             * many and which processors are involved in I/O. */
            if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO_PROCS, ioproc_stride, ioproc_start,
                                           rearranger[r], &iosysid)))
                return ret;

            /* Test with and without custom fill values. */
            for (int fv = 0; fv < NUM_TEST_CASES_FILLVALUE; fv++)
            {
#ifndef _NETCDF4
#define NUM_TYPES 6
                int test_type[NUM_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE};
#else
#define NUM_TYPES 11
                int test_type[NUM_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE,
                                            PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64};
                
#endif /* _NETCDF4 */

                /* Determine what data to write. Put value of 42 into
                 * array elements that will not get written. Due to
                 * the decomposition, these will be replaced by fill
                 * values. */
                for (int i = 0; i < MAPLEN; i++)
                {
                    byte_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
                    char_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
                    short_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
                    int_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
                    float_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
                    double_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
#ifdef _NETCDF4
                    ubyte_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
                    ushort_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
                    uint_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
                    int64_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
                    uint64_data[i] = i % 2 ? my_rank * MAPLEN + i + 1 : TEST_VAL_42;
#endif /* _NETCDF4 */
                }

                /* Determine what data to expect from the test. For
                 * even values of i, the fill value will be used, and
                 * it may be custom or default fill value. */
                for (int i = 0; i < MAPLEN; i++)
                {
                    byte_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? byte_default_fill : byte_fill);
                    char_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? char_default_fill : char_fill);
                    short_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? short_default_fill : short_fill);
                    int_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? int_default_fill : int_fill);
                    float_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? float_default_fill : float_fill);
                    double_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? double_default_fill : double_fill);
#ifdef _NETCDF4
                    ubyte_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? ubyte_default_fill : ubyte_fill);
                    ushort_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? ushort_default_fill : ushort_fill);
                    uint_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? uint_default_fill : uint_fill);
                    int64_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? int64_default_fill : int64_fill);
                    uint64_expected[i] = i % 2 ? my_rank * MAPLEN + i + 1 : (fv ? uint64_default_fill : uint64_fill);
#endif /* _NETCDF4 */
                }
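                /* For example (again assuming MAPLEN were 4), rank 0
                 * with fv == 0 (custom fill) would write
                 * int_data = {42, 2, 42, 4} and expect to read back
                 * int_expected = {int_fill, 2, int_fill, 4}: odd
                 * elements come back as written, even elements as the
                 * fill value. */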

                /* Test for each available type. */
                for (int t = 0; t < NUM_TYPES; t++)
                {
                    void *expected;
                    void *fill;
                    void *data;
                    int ncid, dimid, varid;
                    char filename[NC_MAX_NAME + 1];

                    switch (test_type[t])
                    {
                    case PIO_BYTE:
                        expected = byte_expected;
                        fill = fv ? &byte_default_fill : &byte_fill;
                        data = byte_data;
                        break;
                    case PIO_CHAR:
                        expected = char_expected;
                        fill = fv ? &char_default_fill : &char_fill;
                        data = char_data;
                        break;
                    case PIO_SHORT:
                        expected = short_expected;
                        fill = fv ? &short_default_fill : &short_fill;
                        data = short_data;
                        break;
                    case PIO_INT:
                        expected = int_expected;
                        fill = fv ? &int_default_fill : &int_fill;
                        data = int_data;
                        break;
                    case PIO_FLOAT:
                        expected = float_expected;
                        fill = fv ? &float_default_fill : &float_fill;
                        data = float_data;
                        break;
                    case PIO_DOUBLE:
                        expected = double_expected;
                        fill = fv ? &double_default_fill : &double_fill;
                        data = double_data;
                        break;
#ifdef _NETCDF4
                    case PIO_UBYTE:
                        expected = ubyte_expected;
                        fill = fv ? &ubyte_default_fill : &ubyte_fill;
                        data = ubyte_data;
                        break;
                    case PIO_USHORT:
                        expected = ushort_expected;
                        fill = fv ? &ushort_default_fill : &ushort_fill;
                        data = ushort_data;
                        break;
                    case PIO_UINT:
                        expected = uint_expected;
                        fill = fv ? &uint_default_fill : &uint_fill;
                        data = uint_data;
                        break;
                    case PIO_INT64:
                        expected = int64_expected;
                        fill = fv ? &int64_default_fill : &int64_fill;
                        data = int64_data;
                        break;
                    case PIO_UINT64:
                        expected = uint64_expected;
                        fill = fv ? &uint64_default_fill : &uint64_fill;
                        data = uint64_data;
                        break;
#endif /* _NETCDF4 */
                    default:
                        return ERR_AWFUL;
                    }

                    /* Initialize decompositions. */
                    if ((ret = PIOc_InitDecomp(iosysid, test_type[t], NDIM1, dim_len, maplen, wcompmap,
                                               &wioid, &rearranger[r], NULL, NULL)))
                        return ret;
                    if ((ret = PIOc_InitDecomp(iosysid, test_type[t], NDIM1, dim_len, maplen, rcompmap,
                                               &rioid, &rearranger[r], NULL, NULL)))
                        return ret;

                    /* Create the test file in each of the available iotypes. */
                    for (int fmt = 0; fmt < num_flavors; fmt++)
                    {
                        PIO_Offset type_size;
                        void *data_in;

                        /* Byte and char types don't work with pnetcdf. */
                        if (flavor[fmt] == PIO_IOTYPE_PNETCDF && (test_type[t] == PIO_BYTE || test_type[t] == PIO_CHAR))
                            continue;

                        /* NetCDF-4 types only work with netCDF-4 formats. */
                        if (test_type[t] > PIO_DOUBLE && flavor[fmt] != PIO_IOTYPE_NETCDF4C &&
                            flavor[fmt] != PIO_IOTYPE_NETCDF4P)
                            continue;

                        /* Put together filename. */
                        sprintf(filename, "%s_iotype_%d_rearr_%d_type_%d.nc", TEST_NAME, flavor[fmt],
                                rearranger[r], test_type[t]);

                        /* Create file. */
                        if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, NC_CLOBBER)))
                            return ret;

                        /* Define metadata. */
                        if ((ret = PIOc_def_dim(ncid, DIM_NAME, dim_len[0], &dimid)))
                            return ret;
                        if ((ret = PIOc_def_var(ncid, VAR_NAME, test_type[t], NDIM1, &dimid, &varid)))
                            return ret;
                        if ((ret = PIOc_put_att(ncid, varid, FILL_VALUE_NAME, test_type[t],
                                                1, fill)))
                            return ret;
                        if ((ret = PIOc_enddef(ncid)))
                            return ret;

                        /* Write some data. */
                        if ((ret = PIOc_write_darray(ncid, varid, wioid, MAPLEN, data, fill)))
                            return ret;
                        if ((ret = PIOc_sync(ncid)))
                            return ret;

                        /* What is size of type? */
                        if ((ret = PIOc_inq_type(ncid, test_type[t], NULL, &type_size)))
                            return ret;

                        /* Allocate space to read data into. */
                        if (!(data_in = malloc(type_size * MAPLEN)))
                            return PIO_ENOMEM;

                        /* Read the data. */
                        if ((ret = PIOc_read_darray(ncid, varid, rioid, MAPLEN, data_in)))
                            return ret;

                        /* Check results. */
                        if (memcmp(data_in, expected, type_size * MAPLEN))
                            return ERR_AWFUL;

                        /* Release storage. */
                        free(data_in);

                        /* Close file. */
                        if ((ret = PIOc_closefile(ncid)))
                            return ret;
                    } /* next iotype */

                    /* Free decompositions. */
                    if ((ret = PIOc_freedecomp(iosysid, wioid)))
                        return ret;
                    if ((ret = PIOc_freedecomp(iosysid, rioid)))
                        return ret;

                } /* next type */
            } /* next fill value test case */
        } /* next rearranger */

        /* Finalize PIO system. */
        if ((ret = PIOc_finalize(iosysid)))
            return ret;

    } /* endif my_rank < TARGET_NTASKS */

    /* Finalize the MPI library. */
    if ((ret = pio_test_finalize(&test_comm)))
        return ret;

    printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);
    return 0;
}
Exemplo n.º 8
0
/* Write, then read, a simple example with darrays.

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump):

    <pre>
netcdf darray_no_async_iotype_1 {
dimensions:
	unlimted = UNLIMITED ; // (2 currently)
	x = 4 ;
	y = 4 ;
variables:
	int foo(unlimted, x, y) ;
data:

 foo =
  42, 42, 42, 42,
  43, 43, 43, 43,
  44, 44, 44, 44,
  45, 45, 45, 45,
  142, 142, 142, 142,
  143, 143, 143, 143,
  144, 144, 144, 144,
  145, 145, 145, 145 ;
}
    </pre>

*/
    int main(int argc, char* argv[])
    {
	int my_rank;  /* Zero-based rank of processor. */
	int ntasks;   /* Number of processors involved in current execution. */
	int ioproc_stride = 1;	    /* Stride in the mpi rank between io tasks. */
	int ioproc_start = 0; 	    /* Rank of first task to be used for I/O. */
        PIO_Offset elements_per_pe; /* Array elements per processing unit. */
	int iosysid;  /* The ID for the parallel I/O system. */	
	int ncid;     /* The ncid of the netCDF file. */
	int dimid[NDIM3];    /* The dimension ID. */
	int varid;    /* The ID of the netCDF varable. */
	int ioid;     /* The I/O description ID. */
        char filename[NC_MAX_NAME + 1]; /* Test filename. */
        int num_flavors = 0;            /* Number of iotypes available in this build. */
	int format[NUM_NETCDF_FLAVORS]; /* Different output flavors. */
	int ret;                        /* Return value. */

#ifdef TIMING
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif

	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);

	/* Check that a valid number of processors was specified. */
	if (ntasks != TARGET_NTASKS)
	    fprintf(stderr, "Number of processors must be %d!\n", TARGET_NTASKS);
        printf("%d: ParallelIO Library darray_no_async example running on %d processors.\n",
               my_rank, ntasks);

        /* Turn on logging. */
        if ((ret = PIOc_set_log_level(LOG_LEVEL)))
            return ret;
        
	/* Initialize the PIO IO system. This specifies how many and
	 * which processors are involved in I/O. */
	if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, 1, ioproc_stride,
				       ioproc_start, PIO_REARR_BOX, &iosysid)))
	    ERR(ret);

	/* Describe the decomposition. */
	elements_per_pe = DIM_LEN_X * DIM_LEN_Y / TARGET_NTASKS;

        /* Allocate and initialize array of decomposition mapping. */
	PIO_Offset compdof[elements_per_pe];
	for (int i = 0; i < elements_per_pe; i++)
	    compdof[i] = my_rank * elements_per_pe + i;
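        /* With the 4 x 4 grid shown in the ncdump output above and
         * TARGET_NTASKS == 4, elements_per_pe is 4, so rank 0 maps
         * elements {0, 1, 2, 3}, rank 1 maps {4, 5, 6, 7}, and so
         * on. This map is zero-based; a later example in this
         * collection adds 1 to build the 1-based map that
         * PIOc_InitDecomp() uses. */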

	/* Create the PIO decomposition for this example. Since this
         * is a variable with an unlimited dimension, we want to
         * create a 2-D decomposition which represents one record. */
        printf("rank: %d Creating decomposition...\n", my_rank);
	if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3 - 1, &dim_len[1], elements_per_pe,
				   compdof, &ioid, 0, NULL, NULL)))
	    ERR(ret);
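        /* Note that NDIM3 - 1 dimensions starting at &dim_len[1] are
         * passed, so the decomposition spans only the fixed x and y
         * dimensions; the unlimited (record) dimension is excluded,
         * and the record to write is selected later with
         * PIOc_setframe(). */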

        /* The number of flavors may change with the build parameters. */
#ifdef _PNETCDF
        format[num_flavors++] = PIO_IOTYPE_PNETCDF;
#endif
        format[num_flavors++] = PIO_IOTYPE_NETCDF;
#ifdef _NETCDF4
        format[num_flavors++] = PIO_IOTYPE_NETCDF4C;
        format[num_flavors++] = PIO_IOTYPE_NETCDF4P;
#endif

	/* Use PIO to create the example file in each of the four
	 * available ways. */
	for (int fmt = 0; fmt < num_flavors; fmt++)
	{
            /* Create a filename. */
            sprintf(filename, "darray_no_async_iotype_%d.nc", format[fmt]);

	    /* Create the netCDF output file. */
            printf("rank: %d Creating sample file %s with format %d...\n",
                   my_rank, filename, format[fmt]);
	    if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename, PIO_CLOBBER)))
		ERR(ret);

	    /* Define netCDF dimension and variable. */
            printf("rank: %d Defining netCDF metadata...\n", my_rank);
            for (int d = 0; d < NDIM3; d++)
                if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d])))
                    ERR(ret);
	    if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM3, dimid, &varid)))
	        ERR(ret);
	    if ((ret = PIOc_enddef(ncid)))
	        ERR(ret);

	    /* Allocate storage for sample data. */
            int buffer[elements_per_pe];

            /* Write each timestep. */
            for (int t = 0; t < NUM_TIMESTEPS; t++)
            {
                /* Create some data for this timestep. */
                for (int i = 0; i < elements_per_pe; i++)
                    buffer[i] = 100 * t + START_DATA_VAL + my_rank;
                
                /* Write data to the file. */
                printf("rank: %d Writing sample data...\n", my_rank);
                if ((ret = PIOc_setframe(ncid, varid, t)))
                    ERR(ret);
                if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, buffer, NULL)))
                    ERR(ret);
            }
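            /* With the data above, t == 0 produces the values 42..45
             * and t == 1 the values 142..145 shown in the ncdump
             * output (assuming START_DATA_VAL is 42, consistent with
             * that output). PIOc_setframe() selects which record of
             * the unlimited dimension each PIOc_write_darray() call
             * fills. */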

            /* This will cause all data to be written to disk. */
            if ((ret = PIOc_sync(ncid)))
	        ERR(ret);

	    /* Close the netCDF file. */
            printf("rank: %d Closing the sample data file...\n", my_rank);
	    if ((ret = PIOc_closefile(ncid)))
		ERR(ret);

            /* Check the output file. */
            /* if ((ret = check_file(iosysid, ntasks, filename, format[fmt], elements_per_pe, */
            /*                       my_rank, ioid))) */
            /*     ERR(ret); */
	}

	/* Free the PIO decomposition. */
        printf("rank: %d Freeing PIO decomposition...\n", my_rank);
	if ((ret = PIOc_freedecomp(iosysid, ioid)))
	    ERR(ret);

	/* Finalize the IO system. */
        printf("rank: %d Freeing PIO resources...\n", my_rank);
	if ((ret = PIOc_finalize(iosysid)))
	    ERR(ret);

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif

        printf("rank: %d SUCCESS!\n", my_rank);
	return 0;
    }
Exemplo n.º 9
0
/* Run async tests. */
int main(int argc, char **argv)
{
    int my_rank; /* Zero-based rank of processor. */
    int ntasks; /* Number of processors involved in current execution. */
    int iosysid[COMPONENT_COUNT]; /* The ID for the parallel I/O system. */
    int num_flavors; /* Number of PIO netCDF flavors in this build. */
    int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
    int ret; /* Return code. */
    MPI_Comm test_comm;

    /* Num procs for computation. */
    int num_procs2[NUM_COMBOS][COMPONENT_COUNT] = {{1}, {2}, {3}};

    /* Number of processors that will do IO. */
    int num_io_procs[NUM_COMBOS] = {3, 2, 1};
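    /* Each combo splits the test communicator between dedicated IO
     * tasks and a single computation component: combo 0 uses 3 IO
     * tasks and 1 computation task, combo 1 uses 2 and 2, combo 2
     * uses 1 and 3 (assuming TARGET_NTASKS is 4, which is what these
     * arrays sum to). */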

    /* Initialize test. */
    if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS,
                              -1, &test_comm)))
        ERR(ERR_INIT);

    /* Test code runs on TARGET_NTASKS tasks. The left over tasks do
     * nothing. */
    if (my_rank < TARGET_NTASKS)
    {
        /* Figure out iotypes. */
        if ((ret = get_iotypes(&num_flavors, flavor)))
            ERR(ret);

        for (int combo = 0; combo < NUM_COMBOS; combo++)
        {
            /* Is the current process a computation task? */
            int comp_task = my_rank < num_io_procs[combo] ? 0 : 1;

            /* Initialize the IO system. */
            if ((ret = PIOc_init_async(test_comm, num_io_procs[combo], NULL, COMPONENT_COUNT,
                                       num_procs2[combo], NULL, NULL, NULL, PIO_REARR_BOX, iosysid)))
                ERR(ERR_INIT);

            /* All the netCDF calls are executed only on the computation
             * tasks. The IO tasks have not yet returned from
             * PIOc_init_async(), and when they do, they should go
             * straight to finalize. */
            if (comp_task)
            {
                for (int flv = 0; flv < num_flavors; flv++)
                {
                    char filename[NC_MAX_NAME + 1]; /* Test filename. */
                    int my_comp_idx = 0; /* Index in iosysid array. */

                    for (int sample = 0; sample < NUM_SAMPLES; sample++)
                    {
                        char iotype_name[NC_MAX_NAME + 1];

                        /* Create a filename. */
                        if ((ret = get_iotype_name(flavor[flv], iotype_name)))
                            return ret;
                        sprintf(filename, "%s_%s_%d_%d.nc", TEST_NAME, iotype_name, sample, my_comp_idx);

                        /* Create sample file. */
                        if ((ret = create_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL)))
                            ERR(ret);

                        /* Check the file for correctness. */
                        if ((ret = check_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL)))
                            ERR(ret);
                    }
                } /* next netcdf flavor */

                /* Finalize the IO system. Only call this from the computation tasks. */
                for (int c = 0; c < COMPONENT_COUNT; c++)
                    if ((ret = PIOc_finalize(iosysid[c])))
                        ERR(ret);
            } /* endif comp_task */

            /* Wait for everyone to catch up. */
            MPI_Barrier(test_comm);
        } /* next combo */
    }/* my_rank < TARGET_NTASKS */

    /* Finalize test. */
    if ((ret = pio_test_finalize(&test_comm)))
        return ERR_AWFUL;

    printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);

    return 0;
}
Exemplo n.º 10
0
/* Write, then read, a simple example with darrays.

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump):

    <pre>
netcdf darray_no_async_iotype_1 {
dimensions:
	unlimted = UNLIMITED ; // (2 currently)
	x = 4 ;
	y = 4 ;
variables:
	int foo(unlimted, x, y) ;
data:

 foo =
  42, 42, 42, 42,
  43, 43, 43, 43,
  44, 44, 44, 44,
  45, 45, 45, 45,
  142, 142, 142, 142,
  143, 143, 143, 143,
  144, 144, 144, 144,
  145, 145, 145, 145 ;
}
    </pre>

*/
    int main(int argc, char* argv[])
    {
	int my_rank;  /* Zero-based rank of processor. */
	int ntasks;   /* Number of processors involved in current execution. */
        int iosysid; /* The ID for the parallel I/O system. */
	/* int ncid;     /\* The ncid of the netCDF file. *\/ */
	/* int dimid[NDIM3];    /\* The dimension ID. *\/ */
	/* int varid;    /\* The ID of the netCDF varable. *\/ */
        /* char filename[NC_MAX_NAME + 1]; /\* Test filename. *\/ */
        /* int num_flavors = 0;            /\* Number of iotypes available in this build. *\/ */
	/* int format[NUM_NETCDF_FLAVORS]; /\* Different output flavors. *\/ */
	int ret;                        /* Return value. */

#ifdef TIMING
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif
        
	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);

	/* Check that a valid number of processors was specified. */
        printf("%d: ParallelIO Library darray_async example running on %d processors.\n",
               my_rank, ntasks);
	if (ntasks != TARGET_NTASKS)
        {
	    fprintf(stderr, "Number of processors must be %d!\n", TARGET_NTASKS);
            return ERR_BAD;
        }

        /* Turn on logging. */
        if ((ret = PIOc_set_log_level(LOG_LEVEL)))
            return ret;

        /* Num procs for computation. */
        int num_procs2[COMPONENT_COUNT] = {4};
        
        /* Is the current process a computation task? */
        int comp_task = my_rank < NUM_IO_TASKS ? 0 : 1;

        /* Initialize the IO system. */
        if ((ret = PIOc_init_async(MPI_COMM_WORLD, NUM_IO_TASKS, NULL, COMPONENT_COUNT,
                                   num_procs2, NULL, NULL, NULL, PIO_REARR_BOX, &iosysid)))
            ERR(ret);


        /* The rest of the code executes on computation tasks only. As
         * PIO functions are called on the computation tasks, the
         * async system will call them on the IO task. When the
         * computation tasks call PIOc_finalize(), the IO task will get
         * a message to shut itself down. */
        if (comp_task)
        {
            /* PIO_Offset elements_per_pe; /\* Array elements per processing unit. *\/ */
            /* int ioid;     /\* The I/O description ID. *\/ */
            
            /* /\* How many elements on each computation task? *\/ */
            /* elements_per_pe = DIM_LEN_X * DIM_LEN_Y / NUM_COMP_TASKS; */

            /* /\* Allocate and initialize array of decomposition mapping. *\/ */
            /* PIO_Offset compdof[elements_per_pe]; */
            /* for (int i = 0; i < elements_per_pe; i++) */
            /*     compdof[i] = my_rank * elements_per_pe + i; */

            /* /\* Create the PIO decomposition for this example. Since */
            /*    this is a variable with an unlimited dimension, we want */
            /*    to create a 2-D composition which represents one */
            /*    record. *\/ */
            /* printf("rank: %d Creating decomposition...\n", my_rank); */
            /* if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3 - 1, &dim_len[1], elements_per_pe, */
            /*                             compdof, &ioid, 0, NULL, NULL))) */
            /*     ERR(ret); */

/*         /\* The number of flavors may change with the build parameters. *\/ */
/* #ifdef _PNETCDF */
/*         format[num_flavors++] = PIO_IOTYPE_PNETCDF; */
/* #endif */
/*         format[num_flavors++] = PIO_IOTYPE_NETCDF; */
/* #ifdef _NETCDF4 */
/*         format[num_flavors++] = PIO_IOTYPE_NETCDF4C; */
/*         format[num_flavors++] = PIO_IOTYPE_NETCDF4P; */
/* #endif */

/* 	/\* Use PIO to create the example file in each of the four */
/* 	 * available ways. *\/ */
/* 	for (int fmt = 0; fmt < num_flavors; fmt++) */
/* 	{ */
/*             /\* Create a filename. *\/ */
/*             sprintf(filename, "darray_no_async_iotype_%d.nc", format[fmt]); */

/* 	    /\* Create the netCDF output file. *\/ */
/*             printf("rank: %d Creating sample file %s with format %d...\n", */
/*                    my_rank, filename, format[fmt]); */
/* 	    if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename, PIO_CLOBBER))) */
/* 		ERR(ret); */

/* 	    /\* Define netCDF dimension and variable. *\/ */
/*             printf("rank: %d Defining netCDF metadata...\n", my_rank); */
/*             for (int d = 0; d < NDIM3; d++) */
/*                 if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d]))) */
/*                     ERR(ret); */
/* 	    if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM3, dimid, &varid))) */
/* 	        ERR(ret); */
/* 	    if ((ret = PIOc_enddef(ncid))) */
/* 	        ERR(ret); */

/* 	    /\* Allocate storage for sample data. *\/ */
/*             int buffer[elements_per_pe]; */

/*             /\* Write each timestep. *\/ */
/*             for (int t = 0; t < NUM_TIMESTEPS; t++) */
/*             { */
/*                 /\* Create some data for this timestep. *\/ */
/*                 for (int i = 0; i < elements_per_pe; i++) */
/*                     buffer[i] = 100 * t + START_DATA_VAL + my_rank; */
                
/*                 /\* Write data to the file. *\/ */
/*                 printf("rank: %d Writing sample data...\n", my_rank); */
/*                 if ((ret = PIOc_setframe(ncid, varid, t))) */
/*                     ERR(ret); */
/*                 if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, buffer, NULL))) */
/*                     ERR(ret); */
/*             } */

/*             /\* This will cause all data to be written to disk. *\/ */
/*             if ((ret = PIOc_sync(ncid))) */
/* 	        ERR(ret); */

/* 	    /\* Close the netCDF file. *\/ */
/*             printf("rank: %d Closing the sample data file...\n", my_rank); */
/* 	    if ((ret = PIOc_closefile(ncid))) */
/* 		ERR(ret); */

/*             /\* Check the output file. *\/ */
/*             /\* if ((ret = check_file(iosysid, ntasks, filename, format[fmt], elements_per_pe, *\/ */
/*             /\*                       my_rank, ioid))) *\/ */
/*             /\*     ERR(ret); *\/ */
/* 	} */

            /* Free the PIO decomposition. */
            /* printf("rank: %d Freeing PIO decomposition...\n", my_rank); */
            /* if ((ret = PIOc_freedecomp(iosysid, ioid))) */
            /*     ERR(ret); */

            /* Finalize the IO system. Only call this from the computation tasks. */
            printf("%d %s Freeing PIO resources\n", my_rank, TEST_NAME);
            if ((ret = PIOc_finalize(iosysid)))
                ERR(ret);
        } /* endif comp_task */

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif

        printf("rank: %d SUCCESS!\n", my_rank);
	return 0;
    }
Exemplo n.º 11
0
/** Main execution of code.

    Executes the functions to:
    - create a new examplePioClass instance
    - initialize MPI and the ParallelIO libraries
    - create the decomposition for this example
    - create the netCDF output file
    - define the variable in the file
    - write data to the variable in the file using decomposition
    - read the data back from the file using decomposition
    - close the file
    - clean up resources

    The example can be run from the command line (on systems that support it) like this:
    <pre>
    mpiexec -n 4 ./examplePio
    </pre>

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump) for a 4-processor
    run:

    <pre>
    netcdf examplePio_c {
    dimensions:
    x = 16 ;
    variables:
    int foo(x) ;
    data:

    foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
    }
    </pre>
    
    @param [in] argc argument count
    @param [in] argv argument array
    @return 0 for success, error code otherwise.
*/
int main(int argc, char* argv[])
{
    /** Set to non-zero to get output to stdout. */
    int verbose = 0;

    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Different output flavors. The example file is written (and
     * then read) four times. The first two flavors, parallel-netcdf
     * and netCDF serial, both produce a netCDF classic format file
     * (but with different libraries). The last two produce
     * netCDF4/HDF5 format files, written with and without using
     * netCDF-4 parallel I/O. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. Two of them (pnetcdf and
     * classic) will be in classic netCDF format, the others
     * (serial4 and parallel4) will be in netCDF-4/HDF5
     * format. All four can be read by the netCDF library, and all
     * will contain the same contents. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"example2_pnetcdf.nc",
							  "example2_classic.nc",
							  "example2_serial4.nc",
							  "example2_parallel4.nc"};
	
    /** Number of processors that will do IO. In this example we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the mpi rank between io tasks. Always 1 in this
     * example. */
    int ioproc_stride = 1;

    /** Number of the aggregator? Always 0 in this example. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Array index per processing unit. This is the number of
     * elements of the data array that will be handled by each
     * processor. In this example there are 16 data elements. If the
     * example is run on 4 processors, then arrIdxPerPe will be 4. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. It is set by
     * PIOc_Init_Intracomm(). It references an internal structure
     * containing the general IO subsystem data and MPI
     * structure. It is passed to PIOc_finalize() to free
     * associated resources, after all I/O, but before
     * MPI_Finalize is called. */
    int iosysid;

    /** The ncid of the netCDF file created in this example. */
    int ncid = 0;

    /** The ID of the netCDF varable in the example file. */
    int varid;

    /** The I/O description ID as passed back by PIOc_InitDecomp()
     * and freed in PIOc_freedecomp(). */
    int ioid;

    /** A buffer for sample data. The size of this array will
     * vary depending on how many processors are involved in the
     * execution of the example code. Its length will be the same
     * as elements_per_pe. */
    float *buffer;

    /** A buffer for reading data back from the file. The size of
     * this array will vary depending on how many processors are
     * involved in the execution of the example code. Its length
     * will be the same as elements_per_pe. */

    /** A 1-D array which holds the decomposition mapping for this
     * example. The size of this array will vary depending on how
     * many processors are involved in the execution of the
     * example code. Its length will be the same as
     * elements_per_pe. */
    PIO_Offset *compdof;

#ifdef HAVE_MPE	
    /** MPE event numbers used to track start and stop of
     * different parts of the program for later display with
     * Jumpshot. */
    int event_num[2][NUM_EVENTS];
#endif /* HAVE_MPE */

    /** Needed for command line processing. */
    int c;

    /* Parse command line. */
    while ((c = getopt(argc, argv, "v")) != -1)
	switch (c)
	{
	case 'v':
	    verbose++;
	    break;
	default:
	    break;
	}

#ifdef TIMING    
    /* Initialize the GPTL timing library. */
    int ret;
    if ((ret = GPTLinitialize ()))
	return ret;
#endif    
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);
    if ((ret = MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
    if (verbose)
	printf("%d: ParallelIO Library example1 running on %d processors.\n",
	       my_rank, ntasks);

#ifdef HAVE_MPE
    /* Initialize MPE logging. */
    if ((ret = MPE_Init_log()))
	ERR(ret);
    if (init_logging(my_rank, event_num))
	ERR(ERR_LOGGING);

    /* Log with MPE that we are starting INIT. */
    if ((ret = MPE_Log_event(event_num[START][INIT], 0, "start init")))
	MPIERR(ret);
#endif /* HAVE_MPE */

    /* keep things simple - 1 iotask per MPI process */    
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (int i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
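    /* For instance (hypothetical sizes), with a 16-element variable
     * on 4 tasks each rank holds 4 elements, and rank 1's map would
     * be {5, 6, 7, 8}: the "+ 1" above makes the map 1-based, as the
     * comment notes PIOc_InitDecomp() expects. */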
	
    /* Create the PIO decomposition for this example. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with INIT. */
    if ((ret = MPE_Log_event(event_num[END][INIT], 0, "end init")))
	MPIERR(ret);
#endif /* HAVE_MPE */
	
    /* Use PIO to create the example file in each of the four
     * available ways. */
    for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++) 
    {
#ifdef HAVE_MPE
	/* Log with MPE that we are starting CREATE. */
	if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (int d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);
	/* For netCDF-4 files, set the chunksize to improve performance. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
		ERR(ret);
	
	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

#ifdef HAVE_MPE
	/* Log with MPE that we are done with CREATE. */
	if ((ret = MPE_Log_event(event_num[END][CREATE_PNETCDF + fmt], 0, "end create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* Allocate space for sample data. */
	if (!(buffer = malloc(elements_per_pe * sizeof(float))))
	    return PIO_ENOMEM;

	/* Write data for each timestep. */
	for (int ts = 0; ts < NUM_TIMESTEPS; ts++) {

#ifdef HAVE_MPE
	    /* Log with MPE that we are starting CALCULATE. */
	    if ((ret = MPE_Log_event(event_num[START][CALCULATE], 0, "start calculate")))
		MPIERR(ret);
#endif /* HAVE_MPE */

	    /* Calculate sample data. Add some math function calls to make this slower. */
	    for (int i = 0; i < elements_per_pe; i++)
		if ((ret = calculate_value(my_rank, ts, &buffer[i])))
		    ERR(ret);

#ifdef HAVE_MPE
	    /* Log with MPE that we are done with CALCULATE. */
	    if ((ret = MPE_Log_event(event_num[END][CALCULATE], 0, "end calculate")))
		MPIERR(ret);
	    /* Log with MPE that we are starting WRITE. */
	    if ((ret = MPE_Log_event(event_num[START][WRITE], 0, "start write")))
		MPIERR(ret);
#endif /* HAVE_MPE */
		
	    /* Write data to the file. */
	    if (verbose)
		printf("rank: %d Writing sample data...\n", my_rank);

	    if ((ret = PIOc_setframe(ncid, varid, ts)))
		ERR(ret);
	    if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
					 buffer, NULL)))
		ERR(ret);
	    if ((ret = PIOc_sync(ncid)))
		ERR(ret);
#ifdef HAVE_MPE
	    /* Log with MPE that we are done with WRITE. */
	    if ((ret = MPE_Log_event(event_num[END][WRITE], 0, "end write")))
		MPIERR(ret);
#endif /* HAVE_MPE */
	}

#ifdef HAVE_MPE
	/* Log with MPE that we are starting CLOSE. */
	if ((ret = MPE_Log_event(event_num[START][CLOSE], 0, "start close")))
	    MPIERR(ret);
#endif /* HAVE_MPE */
		
	/* Free buffer space used in this example. */
	free(buffer);
	
	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);

#ifdef HAVE_MPE
	/* Log with MPE that we are done with CLOSE. */
	if ((ret = MPE_Log_event(event_num[END][CLOSE], 0, "end close")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* After each file is closed, make all processors wait so that
	 * all start creating the next file at the same time. */
	if ((ret = MPI_Barrier(MPI_COMM_WORLD)))
	    MPIERR(ret);
    }
	
#ifdef HAVE_MPE
    /* Log with MPE that we are starting FREE. */
    if ((ret = MPE_Log_event(event_num[START][FREE], 0, "start free")))
	MPIERR(ret);
#endif /* HAVE_MPE */
    
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with FREE. */
    if ((ret = MPE_Log_event(event_num[END][FREE], 0, "end free")))
	MPIERR(ret);
    /* Log with MPE that we are starting READ. */
    if ((ret = MPE_Log_event(event_num[START][READ], 0, "start read")))
	MPIERR(ret);
#endif /* HAVE_MPE */
    
    /* Check the output file. */
    /* if (!my_rank) */
    /*     for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++)  */
    /* 	if ((ret = check_file(ntasks, filename[fmt]))) */
    /* 	    ERR(ret); */

#ifdef HAVE_MPE
    /* Log with MPE that we are done with READ. */
    if ((ret = MPE_Log_event(event_num[END][READ], 0, "end read")))
	MPIERR(ret);
#endif /* HAVE_MPE */

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    

    if (verbose)
	printf("rank: %d SUCCESS!\n", my_rank);
    return 0;
}