Example No. 1
/** Check the dimension names.
 *
 * @param my_rank rank of process
 * @param ncid ncid of open netCDF file
 * @param verbose non-zero to print debug output
 *
 * @returns 0 for success, error code otherwise. */
int
check_dim_names(int my_rank, int ncid, int verbose)
{
    char dim_name[NC_MAX_NAME + 1];
    char zero_dim_name[NC_MAX_NAME + 1];
    int ret;

    for (int d = 0; d < NDIM; d++)
    {
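	/* Pre-fill the buffer with a sentinel so a failure to write
	 * the real name is detectable. */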
	strcpy(dim_name, "11111111111111111111111111111111");
	if ((ret = PIOc_inq_dimname(ncid, d, dim_name)))
	    return ret;
	if (verbose)
	    printf("my_rank %d dim %d name %s\n", my_rank, d, dim_name);

	/* Did other ranks get the same name? */
	if (!my_rank)
	    strcpy(zero_dim_name, dim_name);
	/* if (verbose) */
	/*     printf("rank %d dim_name %s zero_dim_name %s\n", my_rank, dim_name, zero_dim_name); */
	/* Broadcast the whole buffer so the count matches on every
	 * rank, even when a rank read a different name. */
	if ((ret = MPI_Bcast(zero_dim_name, NC_MAX_NAME + 1, MPI_CHAR, 0,
			     MPI_COMM_WORLD)))
	    MPIERR(ret);
	if (strcmp(dim_name, zero_dim_name))
	    return ERR_AWFUL;
    }
    return 0;
}
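The broadcast-and-compare idiom above (also used by the attribute and variable name checks below) stands on its own. Here is a minimal, self-contained sketch of the same idea in plain MPI; the function name check_string_consistency, the MAX_NAME limit, and the use of MPI_COMM_WORLD are illustrative assumptions rather than part of the PIO test suite. Broadcasting the full fixed-size buffer keeps the MPI_Bcast count identical on every rank even when the local strings disagree, which is exactly the case the check is meant to detect.

#include <mpi.h>
#include <stdio.h>
#include <string.h>

#define MAX_NAME 64

/* Return 0 if every rank's name matches rank 0's copy, 1 otherwise.
 * Assumes name is at most MAX_NAME characters long. */
static int
check_string_consistency(const char *name, MPI_Comm comm)
{
    char zero_name[MAX_NAME + 1] = "";
    int my_rank;

    MPI_Comm_rank(comm, &my_rank);
    if (!my_rank)
        strncpy(zero_name, name, MAX_NAME);

    /* Fixed count, so all ranks participate identically. */
    MPI_Bcast(zero_name, MAX_NAME + 1, MPI_CHAR, 0, comm);
    return strcmp(name, zero_name) ? 1 : 0;
}

int
main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    if (check_string_consistency("x_dim", MPI_COMM_WORLD))
        fprintf(stderr, "name mismatch across ranks\n");
    MPI_Finalize();
    return 0;
}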
Example No. 2
/**
 * Run all the tests.
 *
 * @param iosysid the IO system ID.
 * @param num_flavors number of available iotypes in the build.
 * @param flavor pointer to array of the available iotypes.
 * @param my_rank rank of this task.
 * @param rearranger the rearranger to use (PIO_REARR_BOX or
 * PIO_REARR_SUBSET).
 * @param test_comm the communicator the test is running on.
 * @returns 0 for success, error code otherwise.
 */
int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank,
                    int rearranger, MPI_Comm test_comm)
{
    int ioid;
    int my_test_size;
    int ret; /* Return code. */

    if ((ret = MPI_Comm_size(test_comm, &my_test_size)))
        MPIERR(ret);

    /* Decompose the data over the tasks. */
    if ((ret = create_decomposition_3d(TARGET_NTASKS, my_rank, iosysid, &ioid)))
        return ret;

    /* Test decomposition read/write. */
    if ((ret = test_decomp_read_write(iosysid, ioid, num_flavors, flavor, my_rank,
                                      rearranger, test_comm)))
        return ret;

    /* Test with/without providing a fill value to PIOc_write_darray(). */
    for (int provide_fill = 0; provide_fill < NUM_TEST_CASES_FILLVALUE; provide_fill++)
    {
        /* Run a simple darray test. */
        if ((ret = test_darray(iosysid, ioid, num_flavors, flavor, my_rank, provide_fill)))
            return ret;
    }

    /* Free the PIO decomposition. */
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
        ERR(ret);

    return PIO_NOERR;
}
Example No. 3
/** Check the attribute name.
 *
 * @param my_rank rank of process
 * @param ncid ncid of open netCDF file
 * @param verbose non-zero to print debug output
 *
 * @returns 0 for success, error code otherwise. */
int
check_att_name(int my_rank, int ncid, int verbose)
{
    char att_name[NC_MAX_NAME + 1];
    char zero_att_name[NC_MAX_NAME + 1];
    int ret;

    strcpy(att_name, "11111111111111111111111111111111");
    if ((ret = PIOc_inq_attname(ncid, NC_GLOBAL, 0, att_name)))
	return ret;
    if (verbose)
	printf("my_rank %d att name %s\n", my_rank, att_name);

    /* Did every rank get a name of the same length? */
/*    if (strlen(att_name) != strlen(ATT_NAME))
      return ERR_AWFUL;*/
    if (!my_rank)
    	strcpy(zero_att_name, att_name);
    if ((ret = MPI_Bcast(zero_att_name, NC_MAX_NAME + 1, MPI_CHAR, 0,
			 MPI_COMM_WORLD)))
	MPIERR(ret);
    if (strcmp(att_name, zero_att_name))
    	return ERR_AWFUL;
    return 0;
}
Example No. 4
/** Check the variable name.
 *
 * @param my_rank rank of process
 * @param ncid ncid of open netCDF file
 * @param verbose non-zero to print debug output
 *
 * @returns 0 for success, error code otherwise. */
int
check_var_name(int my_rank, int ncid, int verbose)
{
    char var_name[NC_MAX_NAME + 1];
    char zero_var_name[NC_MAX_NAME + 1];
    int ret;

    strcpy(var_name, "11111111111111111111111111111111");
    if ((ret = PIOc_inq_varname(ncid, 0, var_name)))
	return ret;
    if (verbose)
	printf("my_rank %d var name %s\n", my_rank, var_name);

    /* Did other ranks get the same name? */
    if (!my_rank)
	strcpy(zero_var_name, var_name);
    if ((ret = MPI_Bcast(zero_var_name, NC_MAX_NAME + 1, MPI_CHAR, 0,
			 MPI_COMM_WORLD)))
	MPIERR(ret);
    if (strcmp(var_name, zero_var_name))
	return ERR_AWFUL;
    return 0;
}
Example No. 5
int
main(int argc, char* argv[]) 
{
   int n, my_rank;
   int array_of_subsizes[NDIMS], array_of_starts[NDIMS], array_of_sizes[NDIMS];
   int size = 4;
   int sqrtn;
   int ln;
   MPI_Datatype filetype, memtype;
   MPI_File fh;
   char hdr[128];
   int header_bytes;
   unsigned char *cur;
   char name[128];
   int resultlen;
   int ret;
   int i, j;

   /* Initialize MPI. */
   MPI_Init(&argc, &argv);
   MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

   /* Learn my rank and the total number of processors. */
   MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
   MPI_Comm_size(MPI_COMM_WORLD, &n);

   /* Speak! */
   MPI_Get_processor_name(name, &resultlen);
   printf("process %d running on %s\n", my_rank, name);

   /* Set up our values. */
   sqrtn = (int)sqrt(n);
   ln = size/sqrtn;
   printf("n = %d, sqrtn = %d, ln = %d storage = %d\n", n, sqrtn, ln, (ln + 2) * (ln + 2));

   /* Allocate storage. */
   if (!(cur = calloc((ln + 2) * (ln + 2), 1)))
      return ERR;

   /* Initialize data. */
   for (i = 1; i < ln + 1; i++)
      for (j = 1; j < ln + 1; j++)
	 cur[i * (ln + 2) + j] = my_rank;

   /* Create a subarray type for the file. */
   array_of_sizes[0] = array_of_sizes[1] = size;
   array_of_subsizes[0] = array_of_subsizes[1] = ln;
   array_of_starts[0] = my_rank/sqrtn * ln;
   array_of_starts[1] = (my_rank % sqrtn) * ln;
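   /* Worked example (illustrative): with size = 4 and n = 4 ranks,
    * sqrtn = 2 and ln = 2, so rank 1 computes starts of (0, 2) -- the
    * top-right 2x2 block of the 4x4 file array. */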
   if ((ret = MPI_Type_create_subarray(NDIMS, array_of_sizes, array_of_subsizes, array_of_starts, MPI_ORDER_C, MPI_BYTE, &filetype)))
      MPIERR(ret);
   if ((ret = MPI_Type_commit(&filetype)))
      MPIERR(ret);

   /* Create a subarray type for memory. */
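   /* The local array is (ln + 2) x (ln + 2): an ln x ln block of data
    * surrounded by a one-cell halo. Starting the subarray at (1, 1)
    * writes only the interior and skips the halo. */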
   array_of_sizes[0] = array_of_sizes[1] = ln + 2;
   array_of_subsizes[0] = array_of_subsizes[1] = ln;
   array_of_starts[0] = array_of_starts[1] = 1;
   if ((ret = MPI_Type_create_subarray(NDIMS, array_of_sizes, array_of_subsizes, array_of_starts, MPI_ORDER_C, MPI_BYTE, &memtype)))
      MPIERR(ret);
   if ((ret = MPI_Type_commit(&memtype)))
      MPIERR(ret);

   MPI_File_delete(FILE_NAME, MPI_INFO_NULL);
   if ((ret = MPI_File_open(MPI_COMM_WORLD, FILE_NAME, MPI_MODE_CREATE|MPI_MODE_RDWR, 
        MPI_INFO_NULL, &fh)))
      MPIERR(ret);

   /* Create the PGM header ("P5" = binary graymap); every rank
    * writes the same bytes collectively at the start of the file. */
   sprintf(hdr, "P5\n%d %d\n255\n", size, size);
   header_bytes = strlen(hdr);
   if ((ret = MPI_File_write_all(fh, hdr, header_bytes, MPI_BYTE, MPI_STATUS_IGNORE)))
      MPIERR(ret);
	 
   /* Set the file view to translate our memory data into the file's data layout. */
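   /* The displacement of header_bytes places every rank's subarray
    * after the header; "native" means data is stored exactly as it
    * is in memory, with no conversion. */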
   MPI_File_set_view(fh, header_bytes, MPI_BYTE, filetype, "native", MPI_INFO_NULL);

   /* Write the output. */
   MPI_File_write(fh, cur, 1, memtype, MPI_STATUS_IGNORE);

   if ((ret = MPI_File_close(&fh)))
      MPIERR(ret);

   MPI_Finalize();
   return 0;
}
Example No. 6
/** Run Tests for NetCDF-4 Functions.
 *
 * @param argc argument count
 * @param argv array of arguments
 * @returns 0 for success, error code otherwise.
 */
int
main(int argc, char **argv)
{
    int verbose = 1;
    
    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Different output flavors. The example file is written (and
     * then read) four times. The first two flavors,
     * parallel-netcdf, and netCDF serial, both produce a netCDF
     * classic format file (but with different libraries). The
     * last two produce netCDF4/HDF5 format files, written with
     * and without using netCDF-4 parallel I/O. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. Two of them (pnetcdf and
     * classic) will be in classic netCDF format, the others
     * (serial4 and parallel4) will be in netCDF-4/HDF5
     * format. All four can be read by the netCDF library, and all
     * will contain the same contents. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_nc4_pnetcdf.nc",
							  "test_nc4_classic.nc",
							  "test_nc4_serial4.nc",
							  "test_nc4_parallel4.nc"};
	
    /** Number of processors that will do IO. In this example we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the MPI rank between IO tasks. Always 1 in this
     * example. */
    int ioproc_stride = 1;

    /** Number of aggregators to use. Always 0 in this example. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Array index per processing unit. This is the number of
     * elements of the data array that will be handled by each
     * processor. In this example there are 16 data elements. If the
     * example is run on 4 processors, then elements_per_pe will be 4. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. It is set by
     * PIOc_Init_Intracomm(). It references an internal structure
     * containing the general IO subsystem data and MPI
     * structure. It is passed to PIOc_finalize() to free
     * associated resources, after all I/O, but before
     * MPI_Finalize is called. */
    int iosysid;

    /** The ncid of the netCDF file created in this example. */
    int ncid = 0;

    /** The ID of the netCDF variable in the example file. */
    int varid;

    /** The I/O description ID as passed back by PIOc_InitDecomp()
     * and freed in PIOc_freedecomp(). */
    int ioid;

    /** A buffer for sample data.  The size of this array will
     * vary depending on how many processors are involved in the
     * execution of the example code. Its length will be the same
     * as elements_per_pe.*/
    float *buffer;

    /** A buffer for reading data back from the file. The size of
     * this array will vary depending on how many processors are
     * involved in the execution of the example code. Its length
     * will be the same as elements_per_pe.*/
    int *read_buffer;

    /** A 1-D array which holds the decomposition mapping for this
     * example. The size of this array will vary depending on how
     * many processors are involved in the execution of the
     * example code. Its length will be the same as
     * elements_per_pe. */
    PIO_Offset *compdof;

    /** Return code. */
    int ret;
    
#ifdef TIMING    
    /* Initialize the GPTL timing library. */
    if ((ret = GPTLinitialize ()))
	return ret;
#endif    
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);
    if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
    if (verbose)
	printf("%d: ParallelIO Library example1 running on %d processors.\n",
	       my_rank, ntasks);

    /* keep things simple - 1 iotask per MPI process */    
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (int i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
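    /* Worked example (illustrative): if X_DIM_LEN * Y_DIM_LEN were 16
     * and ntasks were 4, each task would get 4 elements and rank 1's
     * compdof would be {5, 6, 7, 8}. The computation assumes ntasks
     * evenly divides the number of elements. */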
	
    /* Create the PIO decomposition for this test. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with INIT. */
    if ((ret = MPE_Log_event(event_num[END][INIT], 0, "end init")))
	MPIERR(ret);
#endif /* HAVE_MPE */
	
    /* Use PIO to create the example file in each of the four
     * available ways. */
    for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++) 
    {
#ifdef HAVE_MPE
	/* Log with MPE that we are starting CREATE. */
	if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (int d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);

	/* For netCDF-4 files, set the chunksize to improve performance. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
		ERR(ret);

	    /* Check that the inq_var_chunking function works. */
	    int storage;
	    size_t my_chunksize[NDIM];
	    if ((ret = PIOc_inq_var_chunking(ncid, 0, &storage, my_chunksize)))
	    	ERR(ret);
	    
	    /* For serial netCDF-4, only processor rank 0 gets the answers. */
	    if ((format[fmt] == PIO_IOTYPE_NETCDF4C && !my_rank) ||
		format[fmt] == PIO_IOTYPE_NETCDF4P)
	    {
		if (storage != NC_CHUNKED)
		    ERR(ERR_AWFUL);
		for (int d = 0; d < NDIM; d++)
		    if (my_chunksize[d] != chunksize[d])
		    	ERR(ERR_AWFUL);
	    }

	    /* Check that the inq_var_deflate function works. */
	    int shuffle;
	    int deflate;
	    int deflate_level;
	    if ((ret = PIOc_inq_var_deflate(ncid, 0, &shuffle, &deflate, &deflate_level)))
	    	ERR(ret);

	    /* For serial netCDF-4, only processor rank 0 gets the
	     * answers. Also, deflate is turned on by default. */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4C && !my_rank)
		if (shuffle || !deflate || deflate_level != 1)
		    ERR(ERR_AWFUL);

	    /* For parallel netCDF, no compression available. :-( */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4P)
		if (shuffle || deflate)
		    ERR(ERR_AWFUL);

	} else {
	    /* Trying to set chunking for non-netCDF-4 files results
	     * in the PIO_ENOTNC4 error. */
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}	    
	
	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);
    }
	
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    
    
    return 0;
}
Example No. 7
/** Run tests for netCDF dimension, attribute, and variable names.
 *
 * @param argc argument count
 * @param argv array of arguments
 * @returns 0 for success, error code otherwise.
 */
int
main(int argc, char **argv)
{
    int verbose = 1;
    
    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** Different output flavors. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_names_pnetcdf.nc",
							  "test_names_classic.nc",
							  "test_names_serial4.nc",
							  "test_names_parallel4.nc"};
	
    /** Number of processors that will do IO. In this test we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the MPI rank between IO tasks. Always 1 in this
     * test. */
    int ioproc_stride = 1;

    /** Number of aggregators to use. Always 0 in this test. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Array index per processing unit. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. */
    int iosysid;

    /** The ncid of the netCDF file. */
    int ncid = 0;

    /** The ID of the netCDF variable. */
    int varid;

    /** Storage of netCDF-4 files (contiguous vs. chunked). */
    int storage;

    /** Chunksizes set in the file. */
    size_t my_chunksize[NDIM];
    
    /** The shuffle filter setting in the netCDF-4 test file. */
    int shuffle;
    
    /** Non-zero if deflate set for the variable in the netCDF-4 test file. */
    int deflate;

    /** The deflate level set for the variable in the netCDF-4 test file. */
    int deflate_level;

    /** Non-zero if fletcher32 filter is used for variable. */
    int fletcher32;

    /** Endianness of variable. */
    int endianness;

    /* Size of the file chunk cache. */
    size_t chunk_cache_size;

    /* Number of elements in file cache. */
    size_t nelems;

    /* File cache preemption. */
    float preemption;

    /* Size of the var chunk cache. */
    size_t var_cache_size;

    /* Number of elements in var cache. */
    size_t var_cache_nelems;

    /* Var cache preemption. */    
    float var_cache_preemption;
    
    /** The I/O description ID. */
    int ioid;

    /** A buffer for sample data. */
    float *buffer;

    /** A buffer for reading data back from the file. */
    int *read_buffer;

    /** The decomposition mapping. */
    PIO_Offset *compdof;

    /** Return code. */
    int ret;

    /** Index for loops. */
    int fmt, d, d1, i;
    
#ifdef TIMING    
    /* Initialize the GPTL timing library. */
    if ((ret = GPTLinitialize ()))
	return ret;
#endif    
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
    if (verbose)
	printf("%d: ParallelIO Library example1 running on %d processors.\n",
	       my_rank, ntasks);

    /* keep things simple - 1 iotask per MPI process */    
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
	
    /* Create the PIO decomposition for this test. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

    /* Determine how many iotypes are available in this build. */
    int num_flavors = 0;
    int fmtidx = 0;
#ifdef _PNETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_PNETCDF;
#endif
#ifdef _NETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_NETCDF;
#endif
#ifdef _NETCDF4
    num_flavors += 2;
    format[fmtidx++] = PIO_IOTYPE_NETCDF4C;
    format[fmtidx] = PIO_IOTYPE_NETCDF4P;
#endif
    
    /* Use PIO to create the example file with each of the available
     * iotypes. */
    for (fmt = 0; fmt < num_flavors; fmt++) 
    {
	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}

	/* Check the dimension names. */
	if ((ret = check_dim_names(my_rank, ncid, verbose)))
	    ERR(ret);

	/* Define a global attribute. */
	int att_val = 42;
	if ((ret = PIOc_put_att_int(ncid, NC_GLOBAL, ATT_NAME, NC_INT, 1, &att_val)))
	    ERR(ret);

	/* Check the attribute name. */
	if ((ret = check_att_name(my_rank, ncid, verbose)))
	    ERR(ret);

	/* Define a variable. */
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);

	/* Check the variable name. */
	if ((ret = check_var_name(my_rank, ncid, verbose)))
	    ERR(ret);

	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);

	/* Put a barrier here to make verbose output look better. */
	if ((ret = MPI_Barrier(MPI_COMM_WORLD)))
	    MPIERR(ret);

    }
	
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    
    

    return 0;
}
Example No. 8
/** @brief Main execution of code.

    Executes the functions to:
    - create a new examplePioClass instance
    - initialize MPI and the ParallelIO libraries
    - create the decomposition for this example
    - create the netCDF output file
    - define the variable in the file
    - write data to the variable in the file using decomposition
    - read the data back from the file using decomposition
    - close the file
    - clean up resources

    The example can be run from the command line (on systems that support it) like this:
    <pre>
    mpiexec -n 4 ./examplePio
    </pre>

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump) for a 4-processor
    run:

    <pre>
    netcdf examplePio_c {
    dimensions:
    x = 16 ;
    variables:
    int foo(x) ;
    data:

    foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
    }
    </pre>
    
    @param [in] argc argument count
    @param [in] argv argument array
    @returns 0 for success, error code otherwise.
*/
    int main(int argc, char* argv[])
    {
	/** Set to non-zero to get output to stdout. */
	int verbose = 0;

	/** Zero-based rank of processor. */
	int my_rank;

	/** Number of processors involved in current execution. */
	int ntasks;

	/** Different output flavors. The example file is written (and
	 * then read) four times. The first two flavors,
	 * parallel-netcdf, and netCDF serial, both produce a netCDF
	 * classic format file (but with different libraries). The
	 * last two produce netCDF4/HDF5 format files, written with
	 * and without using netCDF-4 parallel I/O. */
	int format[NUM_NETCDF_FLAVORS];

	/** Number of processors that will do IO. In this example we
	 * will do IO from all processors. */
	int niotasks;

	/** Stride in the MPI rank between IO tasks. Always 1 in this
	 * example. */
	int ioproc_stride = 1;

	/** Zero based rank of first processor to be used for I/O. */
	int ioproc_start = 0;

	/** The dimension ID. */
	int dimid;

	/** Array index per processing unit. This is the number of
	 * elements of the data array that will be handled by each
	 * processor. In this example there are 16 data elements. If the
	 * example is run on 4 processors, then elements_per_pe will be 4. */
	PIO_Offset elements_per_pe;

	/* Length of the dimensions in the data. This simple example
	 * uses one-dimensional data. The length along that dimension
	 * is DIM_LEN (16). */
	int dim_len[1] = {DIM_LEN};

	/** The ID for the parallel I/O system. It is set by
	 * PIOc_Init_Intracomm(). It references an internal structure
	 * containing the general IO subsystem data and MPI
	 * structure. It is passed to PIOc_finalize() to free
	 * associated resources, after all I/O, but before
	 * MPI_Finalize is called. */
	int iosysid;

	/** The ncid of the netCDF file created in this example. */
	int ncid;

	/** The ID of the netCDF variable in the example file. */
	int varid;

	/** The I/O description ID as passed back by PIOc_InitDecomp()
	 * and freed in PIOc_freedecomp(). */
	int ioid;

	/** A buffer for sample data.  The size of this array will
	 * vary depending on how many processors are involved in the
	 * execution of the example code. Its length will be the same
	 * as elements_per_pe.*/
	int *buffer;

	/** A 1-D array which holds the decomposition mapping for this
	 * example. The size of this array will vary depending on how
	 * many processors are involved in the execution of the
	 * example code. Its length will be the same as
	 * elements_per_pe. */
	PIO_Offset *compdof;

        /** Test filename. */
        char filename[NC_MAX_NAME + 1];

        /** The number of netCDF flavors available in this build. */
        int num_flavors = 0;
            
	/** Used for command line processing. */
	int c;

	/** Return value. */
	int ret;

	/* Parse command line. */
	while ((c = getopt(argc, argv, "v")) != -1)
	    switch (c)
	    {
	    case 'v':
		verbose++;
		break;
	    default:
		break;
	    }

#ifdef TIMING    
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif    
    
	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);

	/* Check that a valid number of processors was specified. */
	if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	      ntasks == 8 || ntasks == 16))
	    fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
	if (verbose)
	    printf("%d: ParallelIO Library example1 running on %d processors.\n",
		   my_rank, ntasks);

	/* keep things simple - 1 iotask per MPI process */    
	niotasks = ntasks;

        /* Turn on logging if available. */
        /* PIOc_set_log_level(4); */

        /* Change error handling to return errors. */
        if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL)))
            return ret;
        
	/* Initialize the PIO IO system. This specifies how
	 * many and which processors are involved in I/O. */
	if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				       ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	    ERR(ret);

	/* Describe the decomposition. This is a 1-based array, so add 1! */
	elements_per_pe = DIM_LEN / ntasks;
	if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	    return PIO_ENOMEM;
	for (int i = 0; i < elements_per_pe; i++)
	    compdof[i] = my_rank * elements_per_pe + i + 1;
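	/* Illustrative: with DIM_LEN = 16 and 4 tasks, rank 2 holds
	 * elements {9, 10, 11, 12} of this 1-based mapping. */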
	
	/* Create the PIO decomposition for this example. */
	if (verbose)
	    printf("rank: %d Creating decomposition...\n", my_rank);
	if ((ret = PIOc_InitDecomp(iosysid, PIO_INT, NDIM, dim_len, (PIO_Offset)elements_per_pe,
				   compdof, &ioid, NULL, NULL, NULL)))
	    ERR(ret);
	free(compdof);

        /* The number of flavors may change with the build parameters. */
#ifdef _PNETCDF
        format[num_flavors++] = PIO_IOTYPE_PNETCDF;
#endif
        format[num_flavors++] = PIO_IOTYPE_NETCDF;
#ifdef _NETCDF4
        format[num_flavors++] = PIO_IOTYPE_NETCDF4C;
        format[num_flavors++] = PIO_IOTYPE_NETCDF4P;
#endif
	
	/* Use PIO to create the example file with each of the available
	 * iotypes. */
	for (int fmt = 0; fmt < num_flavors; fmt++) 
	{
            /* Create a filename. */
            sprintf(filename, "example1_%d.nc", fmt);
            
	    /* Create the netCDF output file. */
	    if (verbose)
		printf("rank: %d Creating sample file %s with format %d...\n",
		       my_rank, filename, format[fmt]);
	    if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename,
				       PIO_CLOBBER)))
		ERR(ret);
	
	    /* Define netCDF dimension and variable. */
	    if (verbose)
		printf("rank: %d Defining netCDF metadata...\n", my_rank);
	    if ((ret = PIOc_def_dim(ncid, DIM_NAME, (PIO_Offset)dim_len[0], &dimid)))
		ERR(ret);
	    if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, &dimid, &varid)))
		ERR(ret);
	    if ((ret = PIOc_enddef(ncid)))
		ERR(ret);
	
	    /* Prepare sample data. */
	    if (!(buffer = malloc(elements_per_pe * sizeof(int))))
	        return PIO_ENOMEM;
	    for (int i = 0; i < elements_per_pe; i++)
	        buffer[i] = START_DATA_VAL + my_rank;
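	    /* Assuming START_DATA_VAL is 42 (as the ncdump listing in
	     * the header comment suggests), rank 0 writes 42s, rank 1
	     * writes 43s, and so on. */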

	    /* Write data to the file. */
	    if (verbose)
	        printf("rank: %d Writing sample data...\n", my_rank);
	    if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
	    			     buffer, NULL)))
	        ERR(ret);
	    if ((ret = PIOc_sync(ncid)))
	        ERR(ret);

	    /* Free buffer space used in this example. */
	    free(buffer);
	
	    /* Close the netCDF file. */
	    if (verbose)
		printf("rank: %d Closing the sample data file...\n", my_rank);
	    if ((ret = PIOc_closefile(ncid)))
		ERR(ret);
	}
	
	/* Free the PIO decomposition. */
	if (verbose)
	    printf("rank: %d Freeing PIO decomposition...\n", my_rank);
	if ((ret = PIOc_freedecomp(iosysid, ioid)))
	    ERR(ret);
	
	/* Finalize the IO system. */
	if (verbose)
	    printf("rank: %d Freeing PIO resources...\n", my_rank);
	if ((ret = PIOc_finalize(iosysid)))
	    ERR(ret);

	/* Check the output file. */
	if (!my_rank)
	    for (int fmt = 0; fmt < num_flavors; fmt++)
            {
                sprintf(filename, "example1_%d.nc", fmt);
		if ((ret = check_file(ntasks, filename)))
		    ERR(ret);
            }

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING    
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif    

	if (verbose)
	    printf("rank: %d SUCCESS!\n", my_rank);
	return 0;
    }
Example No. 9
/** Main execution of code.

    Executes the functions to:
    - create a new examplePioClass instance
    - initialize MPI and the ParallelIO libraries
    - create the decomposition for this example
    - create the netCDF output file
    - define the variable in the file
    - write data to the variable in the file using decomposition
    - read the data back from the file using decomposition
    - close the file
    - clean up resources

    The example can be run from the command line (on systems that support it) like this:
    <pre>
    mpiexec -n 4 ./examplePio
    </pre>

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump) for a 4-processor
    run:

    <pre>
    netcdf examplePio_c {
    dimensions:
    x = 16 ;
    variables:
    int foo(x) ;
    data:

    foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
    }
    </pre>
    
    @param [in] argc argument count
    @param [in] argv argument array
    @returns 0 for success, error code otherwise.
*/
int main(int argc, char* argv[])
{
    /** Set to non-zero to get output to stdout. */
    int verbose = 0;

    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Different output flavors. The example file is written (and
     * then read) four times. The first two flavors,
     * parallel-netcdf, and netCDF serial, both produce a netCDF
     * classic format file (but with different libraries). The
     * last two produce netCDF4/HDF5 format files, written with
     * and without using netCDF-4 parallel I/O. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. Two of them (pnetcdf and
     * classic) will be in classic netCDF format, the others
     * (serial4 and parallel4) will be in netCDF-4/HDF5
     * format. All four can be read by the netCDF library, and all
     * will contain the same contents. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"example2_pnetcdf.nc",
							  "example2_classic.nc",
							  "example2_serial4.nc",
							  "example2_parallel4.nc"};
	
    /** Number of processors that will do IO. In this example we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the MPI rank between IO tasks. Always 1 in this
     * example. */
    int ioproc_stride = 1;

    /** Number of aggregators to use. Always 0 in this example. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Array index per processing unit. This is the number of
     * elements of the data array that will be handled by each
     * processor. In this example there are 16 data elements. If the
     * example is run on 4 processors, then elements_per_pe will be 4. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. It is set by
     * PIOc_Init_Intracomm(). It references an internal structure
     * containing the general IO subsystem data and MPI
     * structure. It is passed to PIOc_finalize() to free
     * associated resources, after all I/O, but before
     * MPI_Finalize is called. */
    int iosysid;

    /** The ncid of the netCDF file created in this example. */
    int ncid = 0;

    /** The ID of the netCDF variable in the example file. */
    int varid;

    /** The I/O description ID as passed back by PIOc_InitDecomp()
     * and freed in PIOc_freedecomp(). */
    int ioid;

    /** A buffer for sample data.  The size of this array will
     * vary depending on how many processors are involved in the
     * execution of the example code. Its length will be the same
     * as elements_per_pe.*/
    float *buffer;

    /** A buffer for reading data back from the file. The size of
     * this array will vary depending on how many processors are
     * involved in the execution of the example code. Its length
     * will be the same as elements_per_pe.*/
    int *read_buffer;

    /** A 1-D array which holds the decomposition mapping for this
     * example. The size of this array will vary depending on how
     * many processors are involved in the execution of the
     * example code. Its length will be the same as
     * elements_per_pe. */
    PIO_Offset *compdof;

#ifdef HAVE_MPE	
    /** MPE event numbers used to track start and stop of
     * different parts of the program for later display with
     * Jumpshot. */
    int event_num[2][NUM_EVENTS];
#endif /* HAVE_MPE */

    /** Needed for command line processing. */
    int c;

    /* Parse command line. */
    while ((c = getopt(argc, argv, "v")) != -1)
	switch (c)
	{
	case 'v':
	    verbose++;
	    break;
	default:
	    break;
	}

    /** Return code. */
    int ret;

#ifdef TIMING
    /* Initialize the GPTL timing library. */
    if ((ret = GPTLinitialize ()))
	return ret;
#endif
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);
    if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
    if (verbose)
	printf("%d: ParallelIO Library example1 running on %d processors.\n",
	       my_rank, ntasks);

#ifdef HAVE_MPE
    /* Initialize MPE logging. */
    if ((ret = MPE_Init_log()))
	ERR(ret);
    if (init_logging(my_rank, event_num))
	ERR(ERR_LOGGING);

    /* Log with MPE that we are starting INIT. */
    if ((ret = MPE_Log_event(event_num[START][INIT], 0, "start init")))
	MPIERR(ret);
#endif /* HAVE_MPE */

    /* keep things simple - 1 iotask per MPI process */    
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (int i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
	
    /* Create the PIO decomposition for this example. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with INIT. */
    if ((ret = MPE_Log_event(event_num[END][INIT], 0, "end init")))
	MPIERR(ret);
#endif /* HAVE_MPE */
	
    /* Use PIO to create the example file in each of the four
     * available ways. */
    for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++) 
    {
#ifdef HAVE_MPE
	/* Log with MPE that we are starting CREATE. */
	if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (int d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);
	/* For netCDF-4 files, set the chunksize to improve performance. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
		ERR(ret);
	
	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

#ifdef HAVE_MPE
	/* Log with MPE that we are done with CREATE. */
	if ((ret = MPE_Log_event(event_num[END][CREATE_PNETCDF + fmt], 0, "end create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* Allocate space for sample data. */
	if (!(buffer = malloc(elements_per_pe * sizeof(float))))
	    return PIO_ENOMEM;

	/* Write data for each timestep. */
	for (int ts = 0; ts < NUM_TIMESTEPS; ts++) {

#ifdef HAVE_MPE
	    /* Log with MPE that we are starting CALCULATE. */
	    if ((ret = MPE_Log_event(event_num[START][CALCULATE], 0, "start calculate")))
		MPIERR(ret);
#endif /* HAVE_MPE */

	    /* Calculate sample data. Add some math function calls to make this slower. */
	    for (int i = 0; i < elements_per_pe; i++)
		if ((ret = calculate_value(my_rank, ts, &buffer[i])))
		    ERR(ret);

#ifdef HAVE_MPE
	    /* Log with MPE that we are done with CALCULATE. */
	    if ((ret = MPE_Log_event(event_num[END][CALCULATE], 0, "end calculate")))
		MPIERR(ret);
	    /* Log with MPE that we are starting WRITE. */
	    if ((ret = MPE_Log_event(event_num[START][WRITE], 0, "start write")))
		MPIERR(ret);
#endif /* HAVE_MPE */
		
	    /* Write data to the file. */
	    if (verbose)
		printf("rank: %d Writing sample data...\n", my_rank);

	    if ((ret = PIOc_setframe(ncid, varid, ts)))
		ERR(ret);
	    if ((ret = PIOc_write_darray(ncid, varid, ioid, (PIO_Offset)elements_per_pe,
					 buffer, NULL)))
		ERR(ret);
	    if ((ret = PIOc_sync(ncid)))
		ERR(ret);
#ifdef HAVE_MPE
	    /* Log with MPE that we are done with WRITE. */
	    if ((ret = MPE_Log_event(event_num[END][WRITE], 0, "end write")))
		MPIERR(ret);
#endif /* HAVE_MPE */
	}

#ifdef HAVE_MPE
	/* Log with MPE that we are starting CLOSE. */
	if ((ret = MPE_Log_event(event_num[START][CLOSE], 0, "start close")))
	    MPIERR(ret);
#endif /* HAVE_MPE */
		
	/* Free buffer space used in this example. */
	free(buffer);
	
	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);

#ifdef HAVE_MPE
	/* Log with MPE that we are done with CLOSE. */
	if ((ret = MPE_Log_event(event_num[END][CLOSE], 0, "end close")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	/* After each file is closed, make all processors wait so that
	 * all start creating the next file at the same time. */
	if ((ret = MPI_Barrier(MPI_COMM_WORLD)))
	    MPIERR(ret);
    }
	
#ifdef HAVE_MPE
    /* Log with MPE that we are starting FREE. */
    if ((ret = MPE_Log_event(event_num[START][FREE], 0, "start free")))
	MPIERR(ret);
#endif /* HAVE_MPE */
    
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with FREE. */
    if ((ret = MPE_Log_event(event_num[END][FREE], 0, "end free")))
	MPIERR(ret);
    /* Log with MPE that we are starting READ. */
    if ((ret = MPE_Log_event(event_num[START][READ], 0, "start read")))
	MPIERR(ret);
#endif /* HAVE_MPE */
    
    /* Check the output file. */
    /* if (!my_rank) */
    /*     for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++)  */
    /* 	if ((ret = check_file(ntasks, filename[fmt]))) */
    /* 	    ERR(ret); */

#ifdef HAVE_MPE
    /* Log with MPE that we are done with READ. */
    if ((ret = MPE_Log_event(event_num[END][READ], 0, "end read")))
	MPIERR(ret);
#endif /* HAVE_MPE */

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    

    if (verbose)
	printf("rank: %d SUCCESS!\n", my_rank);
    return 0;
}
Example No. 10
/** @brief Main execution of code.

    Executes the functions to:
    - create a new examplePioClass instance
    - initialize MPI and the ParallelIO libraries
    - create the decomposition for this example
    - create the netCDF output file
    - define the variable in the file
    - write data to the variable in the file using decomposition
    - read the data back from the file using decomposition
    - close the file
    - clean up resources

    The example can be run from the command line (on systems that support it) like this:
    <pre>
    mpiexec -n 4 ./examplePio
    </pre>

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump) for a 4-processor
    run:

    <pre>
    netcdf examplePio_c {
    dimensions:
    x = 16 ;
    variables:
    int foo(x) ;
    data:

    foo = 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45 ;
    }
    </pre>
    
    @param [in] argc argument count
    @param [in] argv argument array
    @returns 0 for success, error code otherwise.
*/
    int main(int argc, char* argv[])
    {
	/** Set to non-zero to get output to stdout. */
	int verbose = 0;

	/** Zero-based rank of processor. */
	int my_rank;

	/** Number of processors involved in current execution. */
	int ntasks;

	/** Number of processors that will do IO. In this example we
	 * will do IO from all processors. */
	int niotasks;

	/** Stride in the MPI rank between IO tasks. Always 1 in this
	 * example. */
	int stride = 1;

	/** Number of aggregators to use. Always 0 in this example. */
	int numAggregator = 0;

	/** Rank of the first processor to be used for I/O (the base). */
	int optBase = 1;

	/** Specifies the flavor of netCDF output format. */
	int iotype;

	/** The dimension ID. */
	int pioDimId;

	/** Start index (declared but unused in this example). */
	PIO_Offset ista;

	/** Stop index (declared but unused in this example). */
	PIO_Offset isto;

	/** Array index per processing unit. This is the number of
	 * elements of the data array that will be handled by each
	 * processor. In this example there are 16 data elements. If the
	 * example is run on 4 processors, then arrIdxPerPe will be 4. */
	PIO_Offset arrIdxPerPe;

	/* Length of the dimension in data. */
	int dimLen[1];

	/** The ID for the parallel I/O system. It is set by
	 * PIOc_Init_Intracomm(). It references an internal structure
	 * containing the general IO subsystem data and MPI structure. */
	int pio_io_system;

	/** The ncid of the netCDF file created in this example. */
	int pioFileDesc;

	/** The ID of the netCDF variable in the example file. */
	int pioVar;

	/** The I/O description ID as passed back by PIOc_InitDecomp(). */
	int iodescNCells;

	/** A buffer for sample data. */
	int *dataBuffer;

	/** A buffer for reading data back from the file. */
	int *readBuffer;

	/** A 1-D array which holds the decomposition mapping for this
	 * example. */
	PIO_Offset *compdof;

	/** The example file name. */
	char file_name[] = EXAMPLE_FILENAME;     

	/** Used for command line processing. */
	int c;

	/* Parse command line. */
	while ((c = getopt(argc, argv, "v")) != -1)
	    switch (c)
	    {
	    case 'v':
		verbose++;
		break;
	    default:
		break;
	    }

	/** Return code. */
	int ret;

#ifdef TIMING
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif
    
	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);
	/* Check that a valid number of processors was specified. */
	if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	      ntasks == 8 || ntasks == 16))
	    fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
	if (verbose)
	    printf("%d: ParallelIO Library example1 running on %d processors.\n",
		   my_rank, ntasks);

	/* Initialize the ParallelIO library IO system. */
	iotype = PIO_IOTYPE_NETCDF;

	/* keep things simple - 1 iotask per MPI process */    
	niotasks = ntasks; 

	/* Initialize the IO system. */
	if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, stride, optBase,
				       PIO_REARR_SUBSET, &pio_io_system)))
	    ERR(ret);

	/* Finalize the IO system. */
	if ((ret = PIOc_finalize(pio_io_system)))
	    ERR(ret);

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING    
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif    

	return 0;
    }
Example No. 11
/* Run tests with multiple concurrent PIO iosystems. */
int main(int argc, char **argv)
{
    int my_rank; /* Zero-based rank of processor. */
    int ntasks; /* Number of processors involved in current execution. */
    int iosysid_world; /* The ID for the parallel I/O system. */
    int even_iosysid; /* The ID for iosystem of even_comm. */
    int overlap_iosysid; /* The ID for iosystem of overlap_comm. */
    MPI_Group world_group; /* An MPI group of world. */
    MPI_Group even_group; /* An MPI group of 0 and 2. */
    MPI_Group overlap_group; /* An MPI group of 0, 1, and 3. */
    MPI_Comm even_comm = MPI_COMM_NULL; /* Communicator for tasks 0 and 2. */
    MPI_Comm overlap_comm = MPI_COMM_NULL; /* Communicator for tasks 0, 1, and 3. */
    int even_rank = -1, overlap_rank = -1; /* Task rank in each communicator. */
    int even_size = 0, overlap_size = 0; /* Size of each communicator. */
    int num_flavors; /* Number of PIO netCDF flavors in this build. */
    int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
    MPI_Comm test_comm;
    int rearranger[NUM_REARRANGERS] = {PIO_REARR_BOX, PIO_REARR_SUBSET};
    int ret; /* Return code. */

    /* Initialize test. */
    if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS,
                              -1, &test_comm)))
        ERR(ERR_INIT);

    /* Test code runs on TARGET_NTASKS tasks. The left over tasks do
     * nothing. */
    if (my_rank < TARGET_NTASKS)
    {
        /* Figure out iotypes. */
        if ((ret = get_iotypes(&num_flavors, flavor)))
            ERR(ret);

        /* Test with both rearrangers. */
        for (int r = 0; r < NUM_REARRANGERS; r++)
        {
            /* Initialize PIO system on world. */
            if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO4, STRIDE1, BASE0, rearranger[r],
                                           &iosysid_world)))
                ERR(ret);

            /* Set the error handler. */
            if ((ret = PIOc_set_iosystem_error_handling(iosysid_world, PIO_BCAST_ERROR, NULL)))
                ERR(ret);

            /* Get MPI_Group of world comm. */
            if ((ret = MPI_Comm_group(test_comm, &world_group)))
                ERR(ret);

            /* Create a group with tasks 0 and 2. */
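            /* Each range triplet is {first, last, stride}, so
             * {0, 2, 2} selects world ranks 0 and 2. */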
            int even_ranges[EVEN_NUM_RANGES][3] = {{0, 2, 2}};
            if ((ret = MPI_Group_range_incl(world_group, EVEN_NUM_RANGES, even_ranges,
                                            &even_group)))
                ERR(ret);

            /* Create a communicator from the even_group. */
            if ((ret = MPI_Comm_create(test_comm, even_group, &even_comm)))
                ERR(ret);

            /* Learn my rank and the total number of processors in even group. */
            if (even_comm != MPI_COMM_NULL)
            {
                if ((ret = MPI_Comm_rank(even_comm, &even_rank)))
                    MPIERR(ret);
                if ((ret = MPI_Comm_size(even_comm, &even_size)))
                    MPIERR(ret);
            }

            /* Create a group with tasks 0, 1, and 3. */
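            /* {0, 0, 1} selects rank 0; {1, 3, 2} selects ranks 1
             * and 3. */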
            int overlap_ranges[OVERLAP_NUM_RANGES][3] = {{0, 0, 1}, {1, 3, 2}};
            if ((ret = MPI_Group_range_incl(world_group, OVERLAP_NUM_RANGES, overlap_ranges,
                                            &overlap_group)))
                ERR(ret);

            /* Create a communicator from the overlap_group. */
            if ((ret = MPI_Comm_create(test_comm, overlap_group, &overlap_comm)))
                ERR(ret);

            /* Learn my rank and the total number of processors in overlap
             * group. */
            if (overlap_comm != MPI_COMM_NULL)
            {
                if ((ret = MPI_Comm_rank(overlap_comm, &overlap_rank)))
                    MPIERR(ret);
                if ((ret = MPI_Comm_size(overlap_comm, &overlap_size)))
                    MPIERR(ret);
            }

            /* Initialize PIO system for even. */
            if (even_comm != MPI_COMM_NULL)
            {
                if ((ret = PIOc_Init_Intracomm(even_comm, NUM_IO1, STRIDE1, BASE1, rearranger[r],
                                               &even_iosysid)))
                    ERR(ret);

                /* These calls should fail with the expected error codes. */
                if (PIOc_set_hint(even_iosysid + TEST_VAL_42, NULL, NULL) != PIO_EBADID)
                    ERR(ERR_WRONG);
                if (PIOc_set_hint(even_iosysid, NULL, NULL) != PIO_EINVAL)
                    ERR(ERR_WRONG);

                /* Set the hint (which will be ignored). */
                if ((ret = PIOc_set_hint(even_iosysid, "hint", "hint_value")))
                    ERR(ret);

                /* Set the error handler. */
                /*PIOc_Set_IOSystem_Error_Handling(even_iosysid, PIO_BCAST_ERROR);*/
                if ((ret = PIOc_set_iosystem_error_handling(even_iosysid, PIO_BCAST_ERROR, NULL)))
                    ERR(ret);
            }

            /* Initialize PIO system for overlap comm. */
            if (overlap_comm != MPI_COMM_NULL)
            {
                if ((ret = PIOc_Init_Intracomm(overlap_comm, NUM_IO2, STRIDE1, BASE1, rearranger[r],
                                               &overlap_iosysid)))
                    ERR(ret);

                /* Set the error handler. */
                PIOc_Set_IOSystem_Error_Handling(overlap_iosysid, PIO_BCAST_ERROR);
            }

            for (int i = 0; i < num_flavors; i++)
            {
                char fname0[PIO_MAX_NAME + 1];
                char fname1[PIO_MAX_NAME + 1];
                char fname2[PIO_MAX_NAME + 1];

                sprintf(fname0, "%s_file_0_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]);
                if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname0, ATTNAME,
                                       DIMNAME, my_rank)))
                    ERR(ret);

                sprintf(fname1, "%s_file_1_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]);
                if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname1, ATTNAME,
                                       DIMNAME, my_rank)))
                    ERR(ret);

                sprintf(fname2, "%s_file_2_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]);
                if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname2, ATTNAME,
                                       DIMNAME, my_rank)))
                    ERR(ret);

                /* Now check the first file from WORLD communicator. */
                int ncid;
                if ((ret = open_and_check_file(test_comm, iosysid_world, flavor[i], &ncid, fname0,
                                               ATTNAME, DIMNAME, 1, my_rank)))
                    ERR(ret);

                /* Now have the even communicators check the files. */
                int ncid2;
                if (even_comm != MPI_COMM_NULL)
                {
                    if ((ret = open_and_check_file(even_comm, even_iosysid, flavor[i], &ncid2,
                                                   fname2, ATTNAME, DIMNAME, 1, my_rank)))
                        ERR(ret);
                    if ((ret = check_file(even_comm, even_iosysid, flavor[i], ncid2, fname2,
                                          ATTNAME, DIMNAME, my_rank)))
                        ERR(ret);
                }

                /* Now have the overlap communicators check the files. */
                int ncid3;
                if (overlap_comm != MPI_COMM_NULL)
                {
                    if ((ret = open_and_check_file(overlap_comm, overlap_iosysid, flavor[i],
                                                   &ncid3, fname1, ATTNAME, DIMNAME, 1, my_rank)))
                        ERR(ret);
                    if ((ret = check_file(overlap_comm, overlap_iosysid, flavor[i], ncid3, fname1,
                                          ATTNAME, DIMNAME, my_rank)))
                        ERR(ret);
                }

                /* Close the still-open files. */
                if (even_comm != MPI_COMM_NULL)
                    if ((ret = PIOc_closefile(ncid2)))
                        ERR(ret);
                if (overlap_comm != MPI_COMM_NULL)
                    if ((ret = PIOc_closefile(ncid3)))
                        ERR(ret);
                if ((ret = PIOc_closefile(ncid)))
                    ERR(ret);

            } /* next iotype */
        
            /* Finalize PIO systems. */
            if (even_comm != MPI_COMM_NULL)
                if ((ret = PIOc_finalize(even_iosysid)))
                    ERR(ret);
            if (overlap_comm != MPI_COMM_NULL)
            {
                if ((ret = PIOc_finalize(overlap_iosysid)))
                    ERR(ret);
            }
            if ((ret = PIOc_finalize(iosysid_world)))
                ERR(ret);

            /* Free MPI resources used by the test. */
            if ((ret = MPI_Group_free(&overlap_group)))
                ERR(ret);
            if ((ret = MPI_Group_free(&even_group)))
                ERR(ret);
            if ((ret = MPI_Group_free(&world_group)))
                ERR(ret);
            if (overlap_comm != MPI_COMM_NULL)
                if ((ret = MPI_Comm_free(&overlap_comm)))
                    ERR(ret);
            if (even_comm != MPI_COMM_NULL)
                if ((ret = MPI_Comm_free(&even_comm)))
                    ERR(ret);
        } /* next rearranger */
    } /* my_rank < TARGET_NTASKS */

    /* Finalize test. */
    if ((ret = pio_test_finalize(&test_comm)))
        return ERR_AWFUL;

    printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);

    return 0;
}
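
For reference, the even and overlap communicators tested above are derived
from MPI_COMM_WORLD with MPI groups. A minimal sketch (the exact ranks are
assumptions; the real test builds the groups earlier, outside this excerpt):

#include <mpi.h>

/* Sketch: build an "even" comm (assumed ranks 0 and 2) and an
 * "overlap" comm (assumed ranks 1-3) from MPI_COMM_WORLD. Ranks not
 * included in a group get MPI_COMM_NULL, which is why every even_comm
 * and overlap_comm operation above is guarded by a NULL check. */
int build_test_comms(MPI_Group *world_group, MPI_Group *even_group,
                     MPI_Comm *even_comm, MPI_Group *overlap_group,
                     MPI_Comm *overlap_comm)
{
    int even_ranks[2] = {0, 2};
    int overlap_ranks[3] = {1, 2, 3};
    int ret;

    if ((ret = MPI_Comm_group(MPI_COMM_WORLD, world_group)))
        return ret;
    if ((ret = MPI_Group_incl(*world_group, 2, even_ranks, even_group)))
        return ret;
    if ((ret = MPI_Comm_create(MPI_COMM_WORLD, *even_group, even_comm)))
        return ret;
    if ((ret = MPI_Group_incl(*world_group, 3, overlap_ranks, overlap_group)))
        return ret;
    return MPI_Comm_create(MPI_COMM_WORLD, *overlap_group, overlap_comm);
}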
Example #12
/** Run Tests for NetCDF-4 Functions.
 *
 * @param argc argument count
 * @param argv array of arguments
 * @returns 0 for success, error code otherwise.
 */
int
main(int argc, char **argv)
{
    int verbose = 1;
    
    /** Zero-based rank of processor. */
    int my_rank;

    /** Number of processors involved in current execution. */
    int ntasks;

    /** Specifies the flavor of netCDF output format. */
    int iotype;

    /** Different output flavors. */
    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, 
				      PIO_IOTYPE_NETCDF,
				      PIO_IOTYPE_NETCDF4C,
				      PIO_IOTYPE_NETCDF4P};

    /** Names for the output files. */
    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_nc4_pnetcdf.nc",
							  "test_nc4_classic.nc",
							  "test_nc4_serial4.nc",
							  "test_nc4_parallel4.nc"};
	
    /** Number of processors that will do IO. In this test we
     * will do IO from all processors. */
    int niotasks;

    /** Stride in the MPI rank between IO tasks. Always 1 in this
     * test. */
    int ioproc_stride = 1;

    /** Number of aggregators to use. Always 0 in this test. */
    int numAggregator = 0;

    /** Zero based rank of first processor to be used for I/O. */
    int ioproc_start = 0;

    /** The dimension IDs. */
    int dimids[NDIM];

    /** Number of array elements per processing unit. */
    PIO_Offset elements_per_pe;

    /** The ID for the parallel I/O system. */
    int iosysid;

    /** The ncid of the netCDF file. */
    int ncid = 0;

    /** The ID of the netCDF variable. */
    int varid;

    /** Storage of netCDF-4 files (contiguous vs. chunked). */
    int storage;

    /** Chunksizes set in the file. */
    PIO_Offset my_chunksize[NDIM];
    
    /** The shuffle filter setting in the netCDF-4 test file. */
    int shuffle;
    
    /** Non-zero if deflate set for the variable in the netCDF-4 test file. */
    int deflate;

    /** The deflate level set for the variable in the netCDF-4 test file. */
    int deflate_level;

    /** Endianness of variable. */
    int endianness;

    /* Size of the var chunk cache. */
    PIO_Offset var_cache_size;

    /* Number of elements in var cache. */
    PIO_Offset var_cache_nelems;

    /* Var cache preemption. */    
    float var_cache_preemption;
    
    /** The I/O description ID. */
    int ioid;

    /** A buffer for sample data. */
    float *buffer;

    /** A buffer for reading data back from the file. */
    int *read_buffer;

    /** The decomposition mapping. */
    PIO_Offset *compdof;

    /** Return code. */
    int ret;

    /** Index for loops. */
    int fmt, d, d1, i;

    /** For setting the chunk cache. */
    PIO_Offset chunk_cache_size = 1024*1024;
    PIO_Offset chunk_cache_nelems = 1024;
    float chunk_cache_preemption = 0.5;

    /* For reading the chunk cache. */
    PIO_Offset chunk_cache_size_in;
    PIO_Offset chunk_cache_nelems_in;
    float chunk_cache_preemption_in;
    
    char varname[15];
    
#ifdef TIMING    
    /* Initialize the GPTL timing library. */
    if ((ret = GPTLinitialize ()))
	return ret;
#endif    
    
    /* Initialize MPI. */
    if ((ret = MPI_Init(&argc, &argv)))
	MPIERR(ret);

    /* Learn my rank and the total number of processors. */
    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	MPIERR(ret);
    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	MPIERR(ret);

    /* Check that a valid number of processors was specified. */
    if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 ||
	  ntasks == 8 || ntasks == 16))
    {
	fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n");
	return ERR_AWFUL;
    }
    if (verbose)
	printf("%d: ParallelIO Library test_nc4 running on %d processors.\n",
	       my_rank, ntasks);

    /* Keep things simple: one IO task per MPI process. */
    niotasks = ntasks; 

    /* Initialize the PIO IO system. This specifies how
     * many and which processors are involved in I/O. */
    if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride,
				   ioproc_start, PIO_REARR_SUBSET, &iosysid)))
	ERR(ret);

    /* Describe the decomposition. This is a 1-based array, so add 1! */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks;
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
	return PIO_ENOMEM;
    for (i = 0; i < elements_per_pe; i++) {
	compdof[i] = my_rank * elements_per_pe + i + 1;
    }
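    /* For example, on a 4 x 4 grid with 4 tasks (an illustrative
     * assumption; the test's grid may differ), rank 0 would own
     * compdof {1, 2, 3, 4} and rank 1 {5, 6, 7, 8}. The 1-based
     * numbering distinguishes PIOc_InitDecomp() from the 0-based
     * PIOc_init_decomp(). */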
	
    /* Create the PIO decomposition for this test. */
    if (verbose)
	printf("rank: %d Creating decomposition...\n", my_rank);
    if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe,
			       compdof, &ioid, NULL, NULL, NULL)))
	ERR(ret);
    free(compdof);

#ifdef HAVE_MPE
    /* Log with MPE that we are done with INIT. */
    if ((ret = MPE_Log_event(event_num[END][INIT], 0, "end init")))
	MPIERR(ret);
#endif /* HAVE_MPE */

    /* How many flavors will we be running for? */
    int num_flavors = 0;
    int fmtidx = 0;
#ifdef _PNETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_PNETCDF;
#endif
#ifdef _NETCDF
    num_flavors++;
    format[fmtidx++] = PIO_IOTYPE_NETCDF;
#endif
#ifdef _NETCDF4
    num_flavors += 2;
    format[fmtidx++] = PIO_IOTYPE_NETCDF4C;
    format[fmtidx] = PIO_IOTYPE_NETCDF4P;
#endif
    
    /* Use PIO to create the example file in each of the four
     * available ways. */
    for (fmt = 0; fmt < num_flavors; fmt++) 
    {
#ifdef HAVE_MPE
	/* Log with MPE that we are starting CREATE. */
	if ((ret = MPE_Log_event(event_num[START][CREATE_PNETCDF+fmt], 0, "start create")))
	    MPIERR(ret);
#endif /* HAVE_MPE */

	if (verbose)
	    printf("rank: %d Setting chunk cache for file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);

	/* Try to set the chunk cache with invalid preemption to check error handling. */
	chunk_cache_preemption = 50.0;
	ret = PIOc_set_chunk_cache(iosysid, format[fmt], chunk_cache_size,
				   chunk_cache_nelems, chunk_cache_preemption);
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if (ret != PIO_EINVAL)
		ERR(ERR_AWFUL);
	}
	else
	{
	    if (ret != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}

	/* Try to set the chunk cache. */
	chunk_cache_preemption = 0.5;
	ret = PIOc_set_chunk_cache(iosysid, format[fmt], chunk_cache_size,
				   chunk_cache_nelems, chunk_cache_preemption);

	/* Should only have worked for netCDF-4 iotypes. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if (ret != PIO_NOERR)
		ERR(ret);
	}
	else
	{
	    if (ret != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}

	/* Now check the chunk cache. */
	ret = PIOc_get_chunk_cache(iosysid, format[fmt], &chunk_cache_size_in,
				   &chunk_cache_nelems_in, &chunk_cache_preemption_in);

	/* Should only have worked for netCDF-4 iotypes. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    /* Check that there was no error. */
	    if (ret != PIO_NOERR)
		ERR(ret);

	    /* Check that we got the correct values. */
	    if (chunk_cache_size_in != chunk_cache_size || chunk_cache_nelems_in != chunk_cache_nelems ||
		chunk_cache_preemption_in != chunk_cache_preemption)
		ERR(ERR_AWFUL);
	}
	else
	{
	    if (ret != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	}

	/* Create the netCDF output file. */
	if (verbose)
	    printf("rank: %d Creating sample file %s with format %d...\n",
		   my_rank, filename[fmt], format[fmt]);
	if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt],
				   PIO_CLOBBER)))
	    ERR(ret);

	/* Set error handling. */
	PIOc_Set_File_Error_Handling(ncid, PIO_BCAST_ERROR);
	
	/* Define netCDF dimensions and variable. */
	if (verbose)
	    printf("rank: %d Defining netCDF metadata...\n", my_rank);
	for (d = 0; d < NDIM; d++) {
	    if (verbose)
		printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank,
		       dim_name[d], dim_len[d]);
	    if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
		ERR(ret);
	}
	if (verbose)
	    printf("rank: %d Defining netCDF variable %s, ndims %d\n", my_rank, VAR_NAME, NDIM);
	if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid)))
	    ERR(ret);

	/* For netCDF-4 files, set the chunksize to improve performance. */
	if (format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P)
	{
	    if (verbose)
		printf("rank: %d Defining chunksizes\n", my_rank);
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)))
		ERR(ret);

	    /* Check that the inq_varname function works. */
	    if (verbose)
	    	printf("rank: %d Checking varname\n", my_rank);
	    if ((ret = PIOc_inq_varname(ncid, 0, varname)))
		ERR(ret);
	    if (verbose)
		printf("rank: %d varname: %s\n", my_rank, varname);
	    
	    /* Check that the inq_var_chunking function works. */
	    if (verbose)
		printf("rank: %d Checking chunksizes\n", my_rank);
	    if ((ret = PIOc_inq_var_chunking(ncid, 0, &storage, my_chunksize)))
	    	ERR(ret);
	    if (verbose)
	    {
		printf("rank: %d ret: %d storage: %d\n", my_rank, ret, storage);
		for (d1 = 0; d1 < NDIM; d1++)
		{
		    printf("chunksize[%d]=%d\n", d1, my_chunksize[d1]);
		}
	    }
	    
	    /* Check the answers. */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4C ||
		format[fmt] == PIO_IOTYPE_NETCDF4P)
	    {
		if (storage != NC_CHUNKED)
		    ERR(ERR_AWFUL);
		for (d1 = 0; d1 < NDIM; d1++)
		    if (my_chunksize[d1] != chunksize[d1])
		    	ERR(ERR_AWFUL);
	    }

	    /* Check that the inq_var_deflate function works. */
	    if ((ret = PIOc_inq_var_deflate(ncid, 0, &shuffle, &deflate, &deflate_level)))
	    	ERR(ret);

	    /* For serial netCDF-4, deflate is turned on by default. */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4C)
		if (shuffle || !deflate || deflate_level != 1)
		    ERR(ERR_AWFUL);

	    /* For parallel netCDF-4, no compression is available. :-( */
	    if (format[fmt] == PIO_IOTYPE_NETCDF4P)
		if (shuffle || deflate)
		    ERR(ERR_AWFUL);

	    /* Check setting the chunk cache for the variable. */
	    printf("rank: %d PIOc_set_var_chunk_cache...\n", my_rank);
	    if ((ret = PIOc_set_var_chunk_cache(ncid, 0, VAR_CACHE_SIZE, VAR_CACHE_NELEMS,
						VAR_CACHE_PREEMPTION)))
	    	ERR(ret);

	    /* Check getting the chunk cache values for the variable. */
	    printf("rank: %d PIOc_get_var_chunk_cache...\n", my_rank);	    
	    if ((ret = PIOc_get_var_chunk_cache(ncid, 0, &var_cache_size, &var_cache_nelems,
						&var_cache_preemption)))
	    	ERR(ret);
	    PIO_Offset len;
	    if ((ret = PIOc_inq_dimlen(ncid, 0, &len)))
	    	ERR(ret);

	    /* Check that we got expected values. */
	    printf("rank: %d var_cache_size = %d\n", my_rank, var_cache_size);	    
	    if (var_cache_size != VAR_CACHE_SIZE)
		ERR(ERR_AWFUL);
	    if (var_cache_nelems != VAR_CACHE_NELEMS)
		ERR(ERR_AWFUL);
	    if (var_cache_preemption != VAR_CACHE_PREEMPTION)
		ERR(ERR_AWFUL);
	} else {
	    /* Trying to set or inq netCDF-4 settings for non-netCDF-4
	     * files results in the PIO_ENOTNC4 error. */
	    if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_inq_var_chunking(ncid, 0, &storage, my_chunksize)) != PIO_ENOTNC4)
		ERR(ERR_AWFUL);
	    if ((ret = PIOc_inq_var_deflate(ncid, 0, &shuffle, &deflate, &deflate_level))
		!= PIO_ENOTNC4)
	    	ERR(ret);
	    if ((ret = PIOc_def_var_endian(ncid, 0, 1)) != PIO_ENOTNC4)
		ERR(ret);
	    if ((ret = PIOc_inq_var_endian(ncid, 0, &endianness)) != PIO_ENOTNC4)
	    	ERR(ret);
	    if ((ret = PIOc_set_var_chunk_cache(ncid, 0, VAR_CACHE_SIZE, VAR_CACHE_NELEMS,
						VAR_CACHE_PREEMPTION)) != PIO_ENOTNC4)
	    	ERR(ret);
	    if ((ret = PIOc_get_var_chunk_cache(ncid, 0, &var_cache_size, &var_cache_nelems,
						&var_cache_preemption)) != PIO_ENOTNC4)
		ERR(ret);
	    if ((ret = PIOc_set_chunk_cache(iosysid, format[fmt], chunk_cache_size, chunk_cache_nelems,
	    				    chunk_cache_preemption)) != PIO_ENOTNC4)
	    	ERR(ret);
	    if ((ret = PIOc_get_chunk_cache(iosysid, format[fmt], &chunk_cache_size,
	    				    &chunk_cache_nelems, &chunk_cache_preemption)) != PIO_ENOTNC4)
	    	ERR(ret);
	}	    
	
	if ((ret = PIOc_enddef(ncid)))
	    ERR(ret);

	/* Close the netCDF file. */
	if (verbose)
	    printf("rank: %d Closing the sample data file...\n", my_rank);
	if ((ret = PIOc_closefile(ncid)))
	    ERR(ret);
    }
	
    /* Free the PIO decomposition. */
    if (verbose)
	printf("rank: %d Freeing PIO decomposition...\n", my_rank);
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
	ERR(ret);
	
    /* Finalize the IO system. */
    if (verbose)
	printf("rank: %d Freeing PIO resources...\n", my_rank);
    if ((ret = PIOc_finalize(iosysid)))
	ERR(ret);

    /* Finalize the MPI library. */
    MPI_Finalize();

#ifdef TIMING    
    /* Finalize the GPTL timing library. */
    if ((ret = GPTLfinalize ()))
	return ret;
#endif    
    

    return 0;
}
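
As a sanity check outside PIO, the chunk settings written by this test can be
read back with the plain netCDF-4 API. A minimal sketch (the filename and an
NDIM of 3 are assumptions carried over from the test above):

#include <stdio.h>
#include <netcdf.h>

/* Sketch: verify chunked storage in the serial netCDF-4 output file. */
int check_chunking_with_netcdf(void)
{
    int ncid, storage, ret;
    size_t chunks[3]; /* assumes NDIM == 3 */

    if ((ret = nc_open("test_nc4_serial4.nc", NC_NOWRITE, &ncid)))
        return ret;
    if ((ret = nc_inq_var_chunking(ncid, 0, &storage, chunks)))
        return ret;
    printf("storage: %s chunks: %zu %zu %zu\n",
           storage == NC_CHUNKED ? "chunked" : "contiguous",
           chunks[0], chunks[1], chunks[2]);
    return nc_close(ncid);
}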
Example #13
/* Write, then read, a simple example with darrays.

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump):

    <pre>
netcdf darray_no_async_iotype_1 {
dimensions:
	unlimted = UNLIMITED ; // (2 currently)
	x = 4 ;
	y = 4 ;
variables:
	int foo(unlimted, x, y) ;
data:

 foo =
  42, 42, 42, 42,
  43, 43, 43, 43,
  44, 44, 44, 44,
  45, 45, 45, 45,
  142, 142, 142, 142,
  143, 143, 143, 143,
  144, 144, 144, 144,
  145, 145, 145, 145 ;
}
    </pre>

*/
    int main(int argc, char* argv[])
    {
	int my_rank;  /* Zero-based rank of processor. */
	int ntasks;   /* Number of processors involved in current execution. */
	int ioproc_stride = 1;	    /* Stride in the MPI rank between IO tasks. */
	int ioproc_start = 0; 	    /* Rank of first task to be used for I/O. */
        PIO_Offset elements_per_pe; /* Array elements per processing unit. */
	int iosysid;  /* The ID for the parallel I/O system. */	
	int ncid;     /* The ncid of the netCDF file. */
	int dimid[NDIM3];    /* The dimension ID. */
	int varid;    /* The ID of the netCDF variable. */
	int ioid;     /* The I/O description ID. */
        char filename[NC_MAX_NAME + 1]; /* Test filename. */
        int num_flavors = 0;            /* Number of iotypes available in this build. */
	int format[NUM_NETCDF_FLAVORS]; /* Different output flavors. */
	int ret;                        /* Return value. */

#ifdef TIMING
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif

	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);

	/* Check that a valid number of processors was specified. */
	if (ntasks != TARGET_NTASKS)
	{
	    fprintf(stderr, "Number of processors must be %d!\n", TARGET_NTASKS);
	    return ERR_BAD;
	}
        printf("%d: ParallelIO Library darray_no_async example running on %d processors.\n",
               my_rank, ntasks);

        /* Turn on logging. */
        if ((ret = PIOc_set_log_level(LOG_LEVEL)))
            return ret;
        
	/* Initialize the PIO IO system. This specifies how many and
	 * which processors are involved in I/O. */
	if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, 1, ioproc_stride,
				       ioproc_start, PIO_REARR_BOX, &iosysid)))
	    ERR(ret);

	/* Describe the decomposition. */
	elements_per_pe = DIM_LEN_X * DIM_LEN_Y / TARGET_NTASKS;

        /* Allocate and initialize array of decomposition mapping. */
	PIO_Offset compdof[elements_per_pe];
	for (int i = 0; i < elements_per_pe; i++)
	    compdof[i] = my_rank * elements_per_pe + i;
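
	/* With the 4 x 4 grid and 4 tasks shown in the ncdump output
	 * above, elements_per_pe is 4 and rank 1 owns compdof
	 * {4, 5, 6, 7}, i.e. the second row of each record (this map
	 * is 0-based, unlike the 1-based map of PIOc_InitDecomp()). */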

	/* Create the PIO decomposition for this example. Since this
         * is a variable with an unlimited dimension, we want to
         * create a 2-D decomposition which represents one record. */
        printf("rank: %d Creating decomposition...\n", my_rank);
	if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3 - 1, &dim_len[1], elements_per_pe,
				   compdof, &ioid, 0, NULL, NULL)))
	    ERR(ret);

        /* The number of flavors may change with the build parameters. */
#ifdef _PNETCDF
        format[num_flavors++] = PIO_IOTYPE_PNETCDF;
#endif
        format[num_flavors++] = PIO_IOTYPE_NETCDF;
#ifdef _NETCDF4
        format[num_flavors++] = PIO_IOTYPE_NETCDF4C;
        format[num_flavors++] = PIO_IOTYPE_NETCDF4P;
#endif

	/* Use PIO to create the example file in each of the four
	 * available ways. */
	for (int fmt = 0; fmt < num_flavors; fmt++)
	{
            /* Create a filename. */
            sprintf(filename, "darray_no_async_iotype_%d.nc", format[fmt]);

	    /* Create the netCDF output file. */
            printf("rank: %d Creating sample file %s with format %d...\n",
                   my_rank, filename, format[fmt]);
	    if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename, PIO_CLOBBER)))
		ERR(ret);

	    /* Define netCDF dimension and variable. */
            printf("rank: %d Defining netCDF metadata...\n", my_rank);
            for (int d = 0; d < NDIM3; d++)
                if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d])))
                    ERR(ret);
	    if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM3, dimid, &varid)))
	        ERR(ret);
	    if ((ret = PIOc_enddef(ncid)))
	        ERR(ret);

	    /* Allocate storage for sample data. */
            int buffer[elements_per_pe];

            /* Write each timestep. */
            for (int t = 0; t < NUM_TIMESTEPS; t++)
            {
                /* Create some data for this timestep. */
                for (int i = 0; i < elements_per_pe; i++)
                    buffer[i] = 100 * t + START_DATA_VAL + my_rank;
                
                /* Write data to the file. */
                printf("rank: %d Writing sample data...\n", my_rank);
                if ((ret = PIOc_setframe(ncid, varid, t)))
                    ERR(ret);
                if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, buffer, NULL)))
                    ERR(ret);
            }

            /* This forces all data to be written to disk. */
            if ((ret = PIOc_sync(ncid)))
	        ERR(ret);

	    /* Close the netCDF file. */
            printf("rank: %d Closing the sample data file...\n", my_rank);
	    if ((ret = PIOc_closefile(ncid)))
		ERR(ret);

            /* Check the output file. */
            /* if ((ret = check_file(iosysid, ntasks, filename, format[fmt], elements_per_pe, */
            /*                       my_rank, ioid))) */
            /*     ERR(ret); */
	}

	/* Free the PIO decomposition. */
        printf("rank: %d Freeing PIO decomposition...\n", my_rank);
	if ((ret = PIOc_freedecomp(iosysid, ioid)))
	    ERR(ret);

	/* Finalize the IO system. */
        printf("rank: %d Freeing PIO resources...\n", my_rank);
	if ((ret = PIOc_finalize(iosysid)))
	    ERR(ret);

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif

        printf("rank: %d SUCCESS!\n", my_rank);
	return 0;
    }
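
The check_file() call above is commented out; a minimal read-back sketch (an
assumption about its shape, not the original helper) using the same
decomposition could look like:

/* Sketch: reopen the file and verify each record against the values
 * written above. Assumes VAR_NAME, NUM_TIMESTEPS, and START_DATA_VAL
 * from the surrounding example. */
int check_darray_file(int iosysid, int iotype, const char *fname,
                      int ioid, PIO_Offset elements_per_pe, int my_rank)
{
    int ncid, varid, ret;

    if ((ret = PIOc_openfile(iosysid, &ncid, &iotype, fname, PIO_NOWRITE)))
        return ret;
    if ((ret = PIOc_inq_varid(ncid, VAR_NAME, &varid)))
        return ret;
    for (int t = 0; t < NUM_TIMESTEPS; t++)
    {
        int buffer[elements_per_pe];
        if ((ret = PIOc_setframe(ncid, varid, t)))
            return ret;
        if ((ret = PIOc_read_darray(ncid, varid, ioid, elements_per_pe, buffer)))
            return ret;
        for (int i = 0; i < elements_per_pe; i++)
            if (buffer[i] != 100 * t + START_DATA_VAL + my_rank)
                return -1; /* data mismatch */
    }
    return PIOc_closefile(ncid);
}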
Example #14
/* Write, then read, a simple example with darrays, using the async IO system.

    The sample file created by this program is a small netCDF file. It
    has the following contents (as shown by ncdump):

    <pre>
netcdf darray_no_async_iotype_1 {
dimensions:
	unlimted = UNLIMITED ; // (2 currently)
	x = 4 ;
	y = 4 ;
variables:
	int foo(unlimted, x, y) ;
data:

 foo =
  42, 42, 42, 42,
  43, 43, 43, 43,
  44, 44, 44, 44,
  45, 45, 45, 45,
  142, 142, 142, 142,
  143, 143, 143, 143,
  144, 144, 144, 144,
  145, 145, 145, 145 ;
}
    </pre>

*/
    int main(int argc, char* argv[])
    {
	int my_rank;  /* Zero-based rank of processor. */
	int ntasks;   /* Number of processors involved in current execution. */
        int iosysid; /* The ID for the parallel I/O system. */
	/* int ncid;     /\* The ncid of the netCDF file. *\/ */
	/* int dimid[NDIM3];    /\* The dimension ID. *\/ */
	/* int varid;    /\* The ID of the netCDF variable. *\/ */
        /* char filename[NC_MAX_NAME + 1]; /\* Test filename. *\/ */
        /* int num_flavors = 0;            /\* Number of iotypes available in this build. *\/ */
	/* int format[NUM_NETCDF_FLAVORS]; /\* Different output flavors. *\/ */
	int ret;                        /* Return value. */

#ifdef TIMING
	/* Initialize the GPTL timing library. */
	if ((ret = GPTLinitialize ()))
	    return ret;
#endif
        
	/* Initialize MPI. */
	if ((ret = MPI_Init(&argc, &argv)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN)))
	    MPIERR(ret);

	/* Learn my rank and the total number of processors. */
	if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
	    MPIERR(ret);
	if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
	    MPIERR(ret);

	/* Check that a valid number of processors was specified. */
        printf("%d: ParallelIO Library darray_async example running on %d processors.\n",
               my_rank, ntasks);
	if (ntasks != TARGET_NTASKS)
        {
	    fprintf(stderr, "Number of processors must be %d!\n", TARGET_NTASKS);
            return ERR_BAD;
        }

        /* Turn on logging. */
        if ((ret = PIOc_set_log_level(LOG_LEVEL)))
            return ret;

        /* Number of processors in the computation component. */
        int num_procs2[COMPONENT_COUNT] = {4};
        
        /* Is the current process a computation task? */
        int comp_task = my_rank < NUM_IO_TASKS ? 0 : 1;

        /* Initialize the IO system. */
        if ((ret = PIOc_init_async(MPI_COMM_WORLD, NUM_IO_TASKS, NULL, COMPONENT_COUNT,
                                   num_procs2, NULL, NULL, NULL, PIO_REARR_BOX, &iosysid)))
            ERR(ret);
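
        /* With NUM_IO_TASKS assumed to be 1, rank 0 becomes the
         * dedicated IO task and ranks 1-4 form the single computation
         * component; PIOc_init_async() returns immediately only on
         * the computation tasks. */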


        /* The rest of the code executes on computation tasks only. As
         * PIO functions are called on the computation tasks, the
         * async system will call them on the IO task. When the
         * computation tasks call PIOc_finalize(), the IO task will get
         * a message to shut itself down. */
        if (comp_task)
        {
            /* PIO_Offset elements_per_pe; /\* Array elements per processing unit. *\/ */
            /* int ioid;     /\* The I/O description ID. *\/ */
            
            /* /\* How many elements on each computation task? *\/ */
            /* elements_per_pe = DIM_LEN_X * DIM_LEN_Y / NUM_COMP_TASKS; */

            /* /\* Allocate and initialize array of decomposition mapping. *\/ */
            /* PIO_Offset compdof[elements_per_pe]; */
            /* for (int i = 0; i < elements_per_pe; i++) */
            /*     compdof[i] = my_rank * elements_per_pe + i; */

            /* /\* Create the PIO decomposition for this example. Since */
            /*    this is a variable with an unlimited dimension, we want */
            /*    to create a 2-D decomposition which represents one */
            /*    record. *\/ */
            /* printf("rank: %d Creating decomposition...\n", my_rank); */
            /* if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3 - 1, &dim_len[1], elements_per_pe, */
            /*                             compdof, &ioid, 0, NULL, NULL))) */
            /*     ERR(ret); */

/*         /\* The number of flavors may change with the build parameters. *\/ */
/* #ifdef _PNETCDF */
/*         format[num_flavors++] = PIO_IOTYPE_PNETCDF; */
/* #endif */
/*         format[num_flavors++] = PIO_IOTYPE_NETCDF; */
/* #ifdef _NETCDF4 */
/*         format[num_flavors++] = PIO_IOTYPE_NETCDF4C; */
/*         format[num_flavors++] = PIO_IOTYPE_NETCDF4P; */
/* #endif */

/* 	/\* Use PIO to create the example file in each of the four */
/* 	 * available ways. *\/ */
/* 	for (int fmt = 0; fmt < num_flavors; fmt++) */
/* 	{ */
/*             /\* Create a filename. *\/ */
/*             sprintf(filename, "darray_no_async_iotype_%d.nc", format[fmt]); */

/* 	    /\* Create the netCDF output file. *\/ */
/*             printf("rank: %d Creating sample file %s with format %d...\n", */
/*                    my_rank, filename, format[fmt]); */
/* 	    if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename, PIO_CLOBBER))) */
/* 		ERR(ret); */

/* 	    /\* Define netCDF dimension and variable. *\/ */
/*             printf("rank: %d Defining netCDF metadata...\n", my_rank); */
/*             for (int d = 0; d < NDIM3; d++) */
/*                 if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d]))) */
/*                     ERR(ret); */
/* 	    if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM3, dimid, &varid))) */
/* 	        ERR(ret); */
/* 	    if ((ret = PIOc_enddef(ncid))) */
/* 	        ERR(ret); */

/* 	    /\* Allocate storage for sample data. *\/ */
/*             int buffer[elements_per_pe]; */

/*             /\* Write each timestep. *\/ */
/*             for (int t = 0; t < NUM_TIMESTEPS; t++) */
/*             { */
/*                 /\* Create some data for this timestep. *\/ */
/*                 for (int i = 0; i < elements_per_pe; i++) */
/*                     buffer[i] = 100 * t + START_DATA_VAL + my_rank; */
                
/*                 /\* Write data to the file. *\/ */
/*                 printf("rank: %d Writing sample data...\n", my_rank); */
/*                 if ((ret = PIOc_setframe(ncid, varid, t))) */
/*                     ERR(ret); */
/*                 if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, buffer, NULL))) */
/*                     ERR(ret); */
/*             } */

/*             /\* This will cause all data to be written to disk. *\/ */
/*             if ((ret = PIOc_sync(ncid))) */
/* 	        ERR(ret); */

/* 	    /\* Close the netCDF file. *\/ */
/*             printf("rank: %d Closing the sample data file...\n", my_rank); */
/* 	    if ((ret = PIOc_closefile(ncid))) */
/* 		ERR(ret); */

/*             /\* Check the output file. *\/ */
/*             /\* if ((ret = check_file(iosysid, ntasks, filename, format[fmt], elements_per_pe, *\/ */
/*             /\*                       my_rank, ioid))) *\/ */
/*             /\*     ERR(ret); *\/ */
/* 	} */

            /* Free the PIO decomposition. */
            /* printf("rank: %d Freeing PIO decomposition...\n", my_rank); */
            /* if ((ret = PIOc_freedecomp(iosysid, ioid))) */
            /*     ERR(ret); */

            /* Finalize the IO system. Only call this from the computation tasks. */
            printf("%d %s Freeing PIO resources\n", my_rank, TEST_NAME);
            if ((ret = PIOc_finalize(iosysid)))
                ERR(ret);
        } /* endif comp_task */

	/* Finalize the MPI library. */
	MPI_Finalize();

#ifdef TIMING
	/* Finalize the GPTL timing library. */
	if ((ret = GPTLfinalize ()))
	    return ret;
#endif

        printf("rank: %d SUCCESS!\n", my_rank);
	return 0;
    }