Example #1
void sdatio_collective(struct sdatio_file * sfile, char * variable_name){
#ifdef PARALLEL
  int retval;
  struct sdatio_variable * svar = sdatio_find_variable(sfile, variable_name);
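  /* Request collective access for this variable: every rank in the file's communicator must
     then take part in each read or write of it. */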
  if ((retval = nc_var_par_access(sfile->nc_file_id, svar->nc_id, NC_COLLECTIVE))) ERR(retval);
#endif
}
Example #2
/* type = 1 for integer, 2 for real, 3 for character */
void ex_compress_variable(int exoid, int varid, int type)
{
#if NC_HAS_HDF5

  struct ex_file_item *file = ex_find_file_item(exoid);

  if (!file) {
    char errmsg[MAX_ERR_LENGTH];
    exerrval = EX_BADFILEID;
    snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: unknown file id %d for ex_compress_variable().",
             exoid);
    ex_err("ex_compress_variable", errmsg, exerrval);
  }
  else {
    int deflate_level = file->compression_level;
    int compress      = 1;
    int shuffle       = file->shuffle;
    if (!file->is_parallel && deflate_level > 0 && (file->file_type == 2 || file->file_type == 3)) {
      nc_def_var_deflate(exoid, varid, shuffle, compress, deflate_level);
    }
#if defined(PARALLEL_AWARE_EXODUS)
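    /* For parallel MPI-IO files, non-character variables are written with collective access. */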
    if (type != 3 && file->is_parallel && file->is_mpiio) {
      nc_var_par_access(exoid, varid, NC_COLLECTIVE);
    }
#endif
  }
#endif
}
Example #3
void sdatio_independent(struct sdatio_file * sfile, char * variable_name){
#ifdef PARALLEL
  int retval;
  struct sdatio_variable * svar = sdatio_find_variable(sfile, variable_name);
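  /* Independent access: this rank may read or write the variable without coordinating with the others. */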
  if ((retval = nc_var_par_access(sfile->nc_file_id, svar->nc_id, NC_INDEPENDENT))) ERR(retval);
#endif
}
Example #4
    void CONetCDF4::writeData(const boost::multi_array<DataType, ndim> & _data,
                              const std::string                        & _varname,
                              std::size_t                                _record,
                              bool                                       _collective,
                              const std::vector<std::size_t>           * _start,
                              const std::vector<std::size_t>           * _count)
 {
    int grpid = this->getCurrentGroup();
    int varid = this->getVariable(_varname);
    std::size_t _array_size = 1;
    std::vector<std::size_t> _sstart, _scount;
    
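    // Select the per-variable parallel access mode requested by the caller: collective or independent.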
    if (this->comm_server && _collective)
       CheckError(nc_var_par_access(grpid, varid, NC_COLLECTIVE));
    if (this->comm_server && !_collective)
       CheckError(nc_var_par_access(grpid, varid, NC_INDEPENDENT));
    
    this->getWriteDataInfos
       (_varname, _record, _array_size,  _sstart, _scount, _start, _count);
    this->writeData_(grpid, varid, _sstart, _scount, _data.data());
 }
Example #5
/*!
This function requests that netCDF change the way read/write operations are performed on a
variable with the given ncid: collectively or independently.
\param [in] ncid Group id (or file id)
\param [in] varId Id of the variable
\param [in] access Access mode, either NC_COLLECTIVE or NC_INDEPENDENT
\return Status code
*/
int CNetCdfInterface::varParAccess(int ncid, int varId, int access)
{
  int status = nc_var_par_access(ncid, varId, access);
  if (NC_NOERR != status)
  {
    StdString errormsg(nc_strerror(status));
    StdStringStream sstr;

    sstr << "Error when calling function nc_var_par_access(ncid, varId, access)" << std::endl;
    sstr << errormsg << std::endl;
    sstr << "Unable to change read/write option of the variable with id: " << varId << std::endl;
    StdString e = sstr.str();
    throw CNetCdfException(e);
  }

  return status;
}
Example #6
/* test extending variables */
int test_pio_extend(int flag){
    int rank, procs;
    int ncFile;
    int ncDimPart;
    int ncDimVrtx;
    int ncVarVrtx;
    int dimsVrtx[2];
    size_t start[2];
    size_t count[2];
    int vertices[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &procs);

    /* Create netcdf file */
    if (nc_create_par("test.nc", NC_NETCDF4 | NC_MPIIO, MPI_COMM_WORLD, MPI_INFO_NULL, &ncFile)) ERR;

    /* Create netcdf dimensions */
    if (nc_def_dim(ncFile, "partitions", procs, &ncDimPart)) ERR;
    if (nc_def_dim(ncFile, "vertices", NC_UNLIMITED, &ncDimVrtx)) ERR;

    /* Create netcdf variables */
    dimsVrtx[0] = ncDimPart;
    dimsVrtx[1] = ncDimVrtx;
    if (nc_def_var(ncFile, "vertex", NC_INT, 2, dimsVrtx, &ncVarVrtx)) ERR;

    /* Start writing data */
    if (nc_enddef(ncFile)) ERR;

    /* Set access mode */
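    /* "flag" is NC_COLLECTIVE or NC_INDEPENDENT and is supplied by the caller of this test. */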
    if (nc_var_par_access(ncFile, ncVarVrtx, flag)) ERR;

    /* Write vertices */
    start[0] = rank;
    start[1] = 0;
    count[0] = 1;
    count[1] = rank;
    if (nc_put_vara_int(ncFile, ncVarVrtx, start, count, vertices)) ERR;

    /* Close netcdf file */
    if (nc_close(ncFile)) ERR;

   return 0;
}
Example #7
/* Both read and write will be tested */
int test_pio(int flag)
{
   /* MPI stuff. */
   int mpi_size, mpi_rank;
   MPI_Comm comm = MPI_COMM_WORLD;
   MPI_Info info = MPI_INFO_NULL;

   /* Netcdf-4 stuff. */
   int ncid;
   int nvid,uvid;
   int rvid;
   unsigned m,k,j,i;

   /* two dimensional integer data test */
   int dimids[NDIMS1];
   size_t start[NDIMS1];
   size_t count[NDIMS1];

   int *data;
   int *tempdata;
   int *rdata;
   int *temprdata;

   /* four dimensional integer data test,
      time dimension is unlimited.*/
   int  dimuids[NDIMS2];
   size_t ustart[NDIMS2];
   size_t ucount[NDIMS2];

   int *udata;
   int *tempudata;
   int *rudata;
   int *temprudata;

   /* Initialize MPI. */
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

   /* Create a parallel netcdf-4 file. */
   if (nc_create_par(file_name, facc_type, comm, info, &ncid)) ERR;

   /* The first case is two dimensional variables, no unlimited dimension */

   /* Create two dimensions. */
   if (nc_def_dim(ncid, "d1", DIMSIZE2, dimids)) ERR;
   if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;

   /* Create one var. */
   if (nc_def_var(ncid, "v1", NC_INT, NDIMS1, dimids, &nvid)) ERR;

   if (nc_enddef(ncid)) ERR;

   /* Set up slab for this process. */
   start[0] = 0;
   start[1] = mpi_rank * DIMSIZE/mpi_size;
   count[0] = DIMSIZE2;
   count[1] = DIMSIZE/mpi_size;

   /* start parallel netcdf4 */
   if (nc_var_par_access(ncid, nvid, flag)) ERR;

   if (!(data = malloc(sizeof(int)*count[1]*count[0]))) ERR;
   tempdata = data;
   for (j = 0; j < count[0]; j++){
      for (i = 0; i < count[1]; i++)
      {
	 *tempdata = mpi_rank * (j + 1);
	 tempdata++;
      }
   }

   /* Write two dimensional integer data */
   if (nc_put_vara_int(ncid, nvid, start, count, data)) ERR;
   free(data);

   /* Case 2: create four dimensional integer data,
      one dimension is unlimited. */

   /* Create four dimensions. */
   if (nc_def_dim(ncid, "ud1", NC_UNLIMITED, dimuids)) ERR;
   if (nc_def_dim(ncid, "ud2", DIMSIZE3, &dimuids[1])) ERR;
   if (nc_def_dim(ncid, "ud3", DIMSIZE2, &dimuids[2])) ERR;
   if (nc_def_dim(ncid, "ud4", DIMSIZE, &dimuids[3])) ERR;

   /* Create one var. */
   if (nc_def_var(ncid, "uv1", NC_INT, NDIMS2, dimuids, &uvid)) ERR;

   if (nc_enddef(ncid)) ERR;

   /* Set up selection parameters */
   ustart[0] = 0;
   ustart[1] = 0;
   ustart[2] = 0;
   ustart[3] = DIMSIZE*mpi_rank/mpi_size;
   ucount[0] = TIMELEN;
   ucount[1] = DIMSIZE3;
   ucount[2] = DIMSIZE2;
   ucount[3] = DIMSIZE/mpi_size;

   /* Access parallel */
   if (nc_var_par_access(ncid, uvid, flag)) ERR;

   /* Create phony data. */
   if (!(udata = malloc(ucount[0]*ucount[1]*ucount[2]*ucount[3]*sizeof(int)))) ERR;
   tempudata = udata;
   for( m=0; m<ucount[0];m++)
      for( k=0; k<ucount[1];k++)
	 for (j=0; j<ucount[2];j++)
	    for (i=0; i<ucount[3]; i++)
	    {
	       *tempudata = (1+mpi_rank)*2*(j+1)*(k+1)*(m+1);
	       tempudata++;
	    }

   /* Write slabs of phony data. */
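   /* With independent access this write must fail with NC_ECANTEXTEND, because it would
      extend the unlimited dimension; with collective access it succeeds. */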
   if (NC_INDEPENDENT == flag) {
       int res;
       res = nc_put_vara_int(ncid, uvid, ustart, ucount, udata);
       if(res != NC_ECANTEXTEND) ERR;
   }
   else {
       if (nc_put_vara_int(ncid, uvid, ustart, ucount, udata)) ERR;
   }
   free(udata);

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   if (nc_open_par(file_name, facc_type_open, comm, info, &ncid)) ERR;

   /* Case 1: read two-dimensional variables, no unlimited dimension */
   /* Set up slab for this process. */
   start[0] = 0;
   start[1] = mpi_rank * DIMSIZE/mpi_size;
   count[0] = DIMSIZE2;
   count[1] = DIMSIZE/mpi_size;

   if (nc_inq_varid(ncid, "v1", &rvid)) ERR;

   if (nc_var_par_access(ncid, rvid, flag)) ERR;

   if (!(rdata = malloc(sizeof(int)*count[1]*count[0]))) ERR;
   if (nc_get_vara_int(ncid, rvid, start, count, rdata)) ERR;

   temprdata = rdata;
   for (j=0; j<count[0];j++){
      for (i=0; i<count[1]; i++){
	 if(*temprdata != mpi_rank*(j+1))
	 {
	    ERR_RET;
	    break;
	 }
	 temprdata++;
      }
   }
   free(rdata);

   /* Case 2: read four dimensional data, one dimension is unlimited. */

   /* set up selection parameters */
   ustart[0] = 0;
   ustart[1] = 0;
   ustart[2] = 0;
   ustart[3] = DIMSIZE*mpi_rank/mpi_size;
   ucount[0] = TIMELEN;
   ucount[1] = DIMSIZE3;
   ucount[2] = DIMSIZE2;
   ucount[3] = DIMSIZE/mpi_size;

   /* Inquire about the variable */
   /* (NOTE: This variable isn't written out when access is independent) */
   if (NC_INDEPENDENT != flag) {
       if (nc_inq_varid(ncid, "uv1", &rvid)) ERR;

       /* Set parallel access */
       if (nc_var_par_access(ncid, rvid, flag)) ERR;

       if (!(rudata = malloc(ucount[0]*ucount[1]*ucount[2]*ucount[3]*sizeof(int)))) ERR;
       temprudata = rudata;

       /* Read data */
       if (nc_get_vara_int(ncid, rvid, ustart, ucount, rudata)) ERR;

       for(m = 0; m < ucount[0]; m++)
          for(k = 0; k < ucount[1]; k++)
             for(j = 0; j < ucount[2]; j++)
                for(i = 0; i < ucount[3]; i++)
                {
                   if(*temprudata != (1+mpi_rank)*2*(j+1)*(k+1)*(m+1))
                      ERR_RET;
                   temprudata++;
                }

       free(rudata);
   }

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   return 0;
}
Example #8
///
/// PIO interface to nc_function
///
/// This routine is called collectively by all tasks in the communicator ios.union_comm.
///
/// Refer to the <A HREF="http://www.unidata.ucar.edu/software/netcdf/docs/netcdf_documentation.html"> netcdf documentation. </A>
///
int PIO_function()
{
  int ierr;
  int msg;
  int mpierr;
  iosystem_desc_t *ios;
  file_desc_t *file;
  var_desc_t *vdesc;
  PIO_Offset usage;
  int *request;

  ierr = PIO_NOERR;

  file = pio_get_file_from_id(ncid);
  if(file == NULL)
    return PIO_EBADID;
  ios = file->iosystem;
  msg = 0;

  if(ios->async_interface && ! ios->ioproc){
    if(ios->compmaster)
      mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm);
    mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm);
  }


  if(ios->ioproc){
    switch(file->iotype){
#ifdef _NETCDF
#ifdef _NETCDF4
    case PIO_IOTYPE_NETCDF4P:
      ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE);
      ierr = nc_function();
      break;
    case PIO_IOTYPE_NETCDF4C:
#endif
    case PIO_IOTYPE_NETCDF:
      if(ios->io_rank==0){
	ierr = nc_function();
      }
      break;
#endif
#ifdef _PNETCDF
    case PIO_IOTYPE_PNETCDF:
      vdesc = file->varlist + varid;

      if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){
	vdesc->request = realloc(vdesc->request,
				 sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK));
      }
      request = vdesc->request+vdesc->nreqs;

      if(ios->io_rank==0){
	ierr = ncmpi_function();
      }else{
	*request = PIO_REQ_NULL;
      }
      vdesc->nreqs++;
      flush_output_buffer(file, false, 0);
      break;
#endif
    default:
      ierr = iotype_error(file->iotype,__FILE__,__LINE__);
    }
  }

  ierr = check_netcdf(file, ierr, __FILE__,__LINE__);

  return ierr;
}
Example #9
/* This function creates a file with 10 2D variables, no unlimited
 * dimension. */
int test_pio_2d(size_t cache_size, int facc_type, int access_flag, MPI_Comm comm, 
		MPI_Info info, int mpi_size, int mpi_rank, 
		size_t *chunk_size) 
{
   double starttime, endtime, write_time = 0, bandwidth = 0;
   int ncid;
   int dimids[NDIMS1];
   size_t start[NDIMS1], count[NDIMS1];
   float *data;
   char file_name[NC_MAX_NAME + 1];
   char var_name1[NUMVARS][NC_MAX_NAME + 1] = {"GWa", "JAd", "TJe", "JMa", "JMo", 
					       "JQA", "AJa", "MVB", "WHH", "JTy"};
   int varid1[NUMVARS];
   size_t nelems_in;
   float preemption_in;
   int j, i, t;

   /* Create some data. */
   if (!(data = malloc(sizeof(float) * DIMSIZE2 * DIMSIZE1 / mpi_size)))
      return -2;
   for (j = 0; j < DIMSIZE2; j++)
      for (i = 0; i < DIMSIZE1 / mpi_size; i++)
	 data[j * DIMSIZE1 / mpi_size + i] = (float)mpi_rank * (j + 1);

   /* Get the file name. */
   sprintf(file_name, "%s/%s", TEMP_LARGE, FILENAME);

   /* Set the cache size. */
   if (nc_get_chunk_cache(NULL, &nelems_in, &preemption_in)) ERR;
   if (nc_set_chunk_cache(cache_size, nelems_in, preemption_in)) ERR;

   for (t = 0; t < NUM_TRIES; t++)
   {
      /* Create a netcdf-4 file, opened for parallel I/O. */
      if (nc_create_par(file_name, facc_type|NC_NETCDF4, comm, 
			info, &ncid)) ERR;

      /* Create two dimensions. */
      if (nc_def_dim(ncid, "d1", DIMSIZE2, &dimids[0])) ERR;
      if (nc_def_dim(ncid, "d2", DIMSIZE1, &dimids[1])) ERR;

      /* Create our variables. */
      for (i = 0; i < NUMVARS; i++)
      {
	 if (nc_def_var(ncid, var_name1[i], NC_INT, NDIMS1,
			dimids, &varid1[i])) ERR;
	 if (chunk_size[0])
	    if (nc_def_var_chunking(ncid, varid1[i], 0, chunk_size)) ERR;
      }

      if (nc_enddef(ncid)) ERR;

      /* Set up slab for this process. */
      start[0] = 0;
      start[1] = mpi_rank * DIMSIZE1/mpi_size;
      count[0] = DIMSIZE2;
      count[1] = DIMSIZE1 / mpi_size;

      /* start parallel netcdf4 */
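      /* access_flag (NC_COLLECTIVE or NC_INDEPENDENT) is applied to every variable before the timed writes. */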
      for (i = 0; i < NUMVARS; i++) 
	 if (nc_var_par_access(ncid, varid1[i], access_flag)) ERR;

      starttime = MPI_Wtime();

      /* Write two dimensional float data */
      for (i = 0; i < NUMVARS; i++)
	 if (nc_put_vara_float(ncid, varid1[i], start, count, data)) ERR;

      /* Close the netcdf file. */
      if (nc_close(ncid)) ERR;

      endtime = MPI_Wtime();
      if (!mpi_rank) 
      {
	 bandwidth += ((sizeof(float) * DIMSIZE1 * DIMSIZE2 * NUMVARS) / 
		       ((endtime - starttime) * 1024 * 1024)) / NUM_TRIES;
	 write_time += (endtime - starttime) / NUM_TRIES;
      }
   }
   free(data);
   if (!mpi_rank) 
   {
      char chunk_string[NC_MAX_NAME + 1] = "";

      /* What was our chunking? */
      if (chunk_size[0])
	 sprintf(chunk_string, "%dx%d    ", (int)chunk_size[0], (int)chunk_size[1]);
      else
	 strcat(chunk_string, "contiguous");

      /* Print the results. */
      printf("%d\t\t%s\t%s\t%d\t\t%dx%d\t\t%s\t%f\t\t%f\t\t\t%d\n", mpi_size, 
	     (facc_type == NC_MPIIO ? "MPI-IO   " : "MPI-POSIX"), 
	     (access_flag == NC_INDEPENDENT ? "independent" : "collective"), 
	     (int)cache_size/MEGABYTE, DIMSIZE1, DIMSIZE2, chunk_string, 
	     write_time, bandwidth, NUM_TRIES); 
   }

   /* Delete this file. */
   remove(file_name); 

   return 0;
}
Example #10
/* Case 2: create four dimensional data; the first dimension plays the role of
   time and is written one record at a time. */
int test_pio_4d(size_t cache_size, int facc_type, int access_flag, MPI_Comm comm, 
		MPI_Info info, int mpi_size, int mpi_rank, size_t *chunk_size) 
{
   int ncid, dimuids[NDIMS2], varid2[NUMVARS];
   size_t ustart[NDIMS2], ucount[NDIMS2];
   float *udata, *tempudata;
   char file_name[NC_MAX_NAME + 1];
   char var_name2[NUMVARS][NC_MAX_NAME + 1] = {"JKP", "ZTa", "MFi", "FPi", "JBu", 
					       "ALi", "AJo", "USG", "RBH", "JAG"};
   double starttime, endtime, write_time = 0, bandwidth = 0;
   size_t nelems_in;
   float preemption_in;
   int k, j, i, t;

   udata = malloc(DIMSIZE3 * DIMSIZE2 * DIMSIZE1 / mpi_size * sizeof(float));

   /* Create phony data. */
   tempudata = udata;
   for(k = 0; k < DIMSIZE3; k++)
      for(j = 0; j < DIMSIZE2; j++)
	 for(i = 0; i < DIMSIZE1 / mpi_size; i++)
	 {
	    *tempudata = (float)(1 + mpi_rank) * 2 * (j + 1) * (k + 1);
	    tempudata++;
	 }

   /* Get the file name. */
   sprintf(file_name, "%s/%s", TEMP_LARGE, FILENAME);
      
   /* Set the cache size. */
   if (nc_get_chunk_cache(NULL, &nelems_in, &preemption_in)) ERR;
   if (nc_set_chunk_cache(cache_size, nelems_in, preemption_in)) ERR;

   for (t = 0; t < NUM_TRIES; t++)
   {
      /* Create a netcdf-4 file. */
      if (nc_create_par(file_name, facc_type|NC_NETCDF4, comm, info, 
			&ncid)) ERR;

      /* Create four dimensions. */
      if (nc_def_dim(ncid, "ud1", TIMELEN, dimuids)) ERR;
      if (nc_def_dim(ncid, "ud2", DIMSIZE3, &dimuids[1])) ERR;
      if (nc_def_dim(ncid, "ud3", DIMSIZE2, &dimuids[2])) ERR;
      if (nc_def_dim(ncid, "ud4", DIMSIZE1, &dimuids[3])) ERR;

      /* Create 10 variables. */
      for (i = 0; i < NUMVARS; i++)
	 if (nc_def_var(ncid, var_name2[i], NC_INT, NDIMS2,
			dimuids, &varid2[i])) ERR;

      if (nc_enddef(ncid)) ERR;
 
      /* Set up selection parameters */
      ustart[0] = 0;
      ustart[1] = 0;
      ustart[2] = 0;
      ustart[3] = DIMSIZE1 * mpi_rank / mpi_size;
      ucount[0] = 1;
      ucount[1] = DIMSIZE3;
      ucount[2] = DIMSIZE2;
      ucount[3] = DIMSIZE1 / mpi_size;

      /* Access parallel */
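      /* Apply the requested access mode (collective or independent) to all ten variables. */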
      for (i = 0; i < NUMVARS; i++)
	 if (nc_var_par_access(ncid, varid2[i], access_flag)) ERR;

      starttime = MPI_Wtime();

      /* Write slabs of phony data. */
      for(ustart[0] = 0; ustart[0] < TIMELEN; ustart[0]++)
	 for (i = 0; i < NUMVARS; i++)
	    if (nc_put_vara_float(ncid, varid2[i], ustart, ucount, udata)) ERR;

      /* Close the netcdf file. */
      if (nc_close(ncid)) ERR;

      endtime = MPI_Wtime();
      if (!mpi_rank)
      {
	 write_time += (endtime - starttime) / NUM_TRIES;
	 bandwidth += (sizeof(float) * TIMELEN * DIMSIZE1 * DIMSIZE2 * DIMSIZE3 * NUMVARS) / 
	    ((endtime - starttime) * 1024 * 1024 * NUM_TRIES);
      }
   }
   free(udata);
   if (!mpi_rank)
   {
      char chunk_string[NC_MAX_NAME + 1] = "";

      /* What was our chunking? */
      if (chunk_size[0])
	 sprintf(chunk_string, "%dx%dx%dx%d", (int)chunk_size[0], (int)chunk_size[1], 
		 (int)chunk_size[2], (int)chunk_size[3]);
      else
	 strcat(chunk_string, "contiguous");

      /* Print our results. */
      printf("%d\t\t%s\t%s\t%d\t\t%dx%dx%dx%d\t%s\t%f\t\t%f\t\t\t%d\n", mpi_size, 
	     (facc_type == NC_MPIIO ? "MPI-IO   " : "MPI-POSIX"), 
	     (access_flag == NC_INDEPENDENT ? "independent" : "collective"), 
	     (int)cache_size / MEGABYTE, TIMELEN, DIMSIZE3, DIMSIZE2, DIMSIZE1, chunk_string, write_time, 
	     bandwidth, NUM_TRIES); 
   }

   /* Delete this file. */
   remove(file_name); 

   return 0;
}
Example #11
int
main(int argc, char **argv)
{
    /* MPI stuff. */
    int mpi_namelen;		
    char mpi_name[MPI_MAX_PROCESSOR_NAME];
    int mpi_size, mpi_rank;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    /* Netcdf-4 stuff. */
    int ncid, v1id, dimids[NDIMS];
    size_t start[NDIMS], count[NDIMS];

    int data[DIMSIZE * DIMSIZE], i, res;
    int slab_data[DIMSIZE * DIMSIZE / 4]; /* one slab */
    char file_name[NC_MAX_NAME + 1];

#ifdef USE_MPE
    int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close;
#endif /* USE_MPE */

    /* Initialize MPI. */
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Get_processor_name(mpi_name, &mpi_namelen);
    /*printf("mpi_name: %s size: %d rank: %d\n", mpi_name, 
      mpi_size, mpi_rank);*/

#ifdef USE_MPE
    MPE_Init_log();
    s_init = MPE_Log_get_event_number(); 
    e_init = MPE_Log_get_event_number(); 
    s_define = MPE_Log_get_event_number(); 
    e_define = MPE_Log_get_event_number(); 
    s_write = MPE_Log_get_event_number(); 
    e_write = MPE_Log_get_event_number(); 
    s_close = MPE_Log_get_event_number(); 
    e_close = MPE_Log_get_event_number(); 
    MPE_Describe_state(s_init, e_init, "Init", "red");
    MPE_Describe_state(s_define, e_define, "Define", "yellow");
    MPE_Describe_state(s_write, e_write, "Write", "green");
    MPE_Describe_state(s_close, e_close, "Close", "purple");
    MPE_Start_log();
    MPE_Log_event(s_init, 0, "start init");
#endif /* USE_MPE */

    if (mpi_rank == 1)
    {
       printf("\n*** tst_parallel testing very basic parallel access.\n");
       printf("*** tst_parallel testing whether we can create file for parallel access and write to it...");
    }

    /* Create phony data. We're going to write a 24x24 array of ints,
       in 4 sets of 144. */
    /*printf("mpi_rank*QTR_DATA=%d (mpi_rank+1)*QTR_DATA-1=%d\n",
      mpi_rank*QTR_DATA, (mpi_rank+1)*QTR_DATA);*/
    for (i = mpi_rank * QTR_DATA; i < (mpi_rank + 1) * QTR_DATA; i++)
       data[i] = mpi_rank;
    for (i = 0; i < DIMSIZE * DIMSIZE / 4; i++)
       slab_data[i] = mpi_rank;

#ifdef USE_MPE
    MPE_Log_event(e_init, 0, "end init");
    MPE_Log_event(s_define, 0, "start define file");
#endif /* USE_MPE */

    /* Create a parallel netcdf-4 file. */
    /*nc_set_log_level(3);*/
    sprintf(file_name, "%s/%s", TEMP_LARGE, FILE);
    if ((res = nc_create_par(file_name, NC_NETCDF4|NC_MPIIO, comm, 
			     info, &ncid))) ERR;

    /* Create three dimensions. */
    if (nc_def_dim(ncid, "d1", DIMSIZE, dimids)) ERR;
    if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;
    if (nc_def_dim(ncid, "d3", NUM_SLABS, &dimids[2])) ERR;

    /* Create one var. */
    if ((res = nc_def_var(ncid, "v1", NC_INT, NDIMS, dimids, &v1id))) ERR;

    /* Write metadata to file. */
    if ((res = nc_enddef(ncid))) ERR;

#ifdef USE_MPE
    MPE_Log_event(e_define, 0, "end define file");
    if (mpi_rank)
       sleep(mpi_rank);
#endif /* USE_MPE */

    /* Set up slab for this process. */
    start[0] = mpi_rank * DIMSIZE/mpi_size;
    start[1] = 0;
    count[0] = DIMSIZE/mpi_size;
    count[1] = DIMSIZE;
    count[2] = 1;
    /*printf("mpi_rank=%d start[0]=%d start[1]=%d count[0]=%d count[1]=%d\n",
      mpi_rank, start[0], start[1], count[0], count[1]);*/

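    /* Collective access: every rank takes part in each slab write below; the independent
       alternative is kept commented out for reference. */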
    if (nc_var_par_access(ncid, v1id, NC_COLLECTIVE)) ERR;
/*    if (nc_var_par_access(ncid, v1id, NC_INDEPENDENT)) ERR;*/

    for (start[2] = 0; start[2] < NUM_SLABS; start[2]++)
    {
#ifdef USE_MPE
       MPE_Log_event(s_write, 0, "start write slab");
#endif /* USE_MPE */

       /* Write slabs of phony data. */
       if (nc_put_vara_int(ncid, v1id, start, count, slab_data)) ERR;
#ifdef USE_MPE
       MPE_Log_event(e_write, 0, "end write file");
#endif /* USE_MPE */
    }

#ifdef USE_MPE
    MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

    /* Close the netcdf file. */
    if ((res = nc_close(ncid)))	ERR;
    
#ifdef USE_MPE
    MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

    /* Delete this large file. */
    remove(file_name); 

    /* Shut down MPI. */
    MPI_Finalize();

    if (mpi_rank == 1)
    {
       SUMMARIZE_ERR;
       FINAL_RESULTS;
    }
    return 0;
}
Example #12
void sdatio_create_variable(struct sdatio_file * sfile,
                            int variable_type,
                            char * variable_name,
                            char * dimension_list,
                            char * description,
                            char * units){
  int ndims;
  int nunlim;
  struct sdatio_variable  * svar;
  int retval;
  /*int * dimension_ids;*/

  /*dimension_ids = (int **)malloc(sizeof(int*));*/


  /*printf("dimension_list is %s\n", dimension_list);*/
  svar = (struct sdatio_variable *) malloc(sizeof(struct sdatio_variable));

  /* Set variable name*/
  svar->name = (char *)malloc(sizeof(char)*(strlen(variable_name)+1));
  strcpy(svar->name, variable_name);

  /*ndims = strlen(dimension_list);*/
  ndims = sdatio_ndims_from_string(sfile, dimension_list);
  svar->ndims = ndims;
  DEBUG_MESS("ndims = %d for variable %s\n", ndims, variable_name);

  sdatio_get_dimension_ids(sfile, dimension_list, svar);
  /*svar->dimension_ids = dimension_ids;*/


  sdatio_recommence_definitions(sfile);
  /*if (sfile->is_parallel){}*/
  /*else {*/
    if ((retval = nc_def_var(sfile->nc_file_id, variable_name, sdatio_netcdf_variable_type(variable_type), ndims, svar->dimension_ids, &(svar->nc_id)))) ERR(retval);
    if ((retval = nc_put_att_text(sfile->nc_file_id, svar->nc_id, "description", strlen(description), description))) ERR(retval);
    if ((retval = nc_put_att_text(sfile->nc_file_id, svar->nc_id, "units", strlen(units), units))) ERR(retval);
    /*}*/
  switch (variable_type){
    case SDATIO_INT:
      svar->type_size = sizeof(int);
      break;
    case SDATIO_FLOAT:
      svar->type_size = sizeof(float);
      break;
    case SDATIO_DOUBLE:
      svar->type_size = sizeof(double);
      break;
    case SDATIO_CHAR:
      svar->type_size = sizeof(char);
      break;
    default:
      printf("Unknown type in sdatio_create_variable\n");
      abort();
  }

  sdatio_end_definitions(sfile);
  
  svar->type = variable_type;
  svar->dimension_list = (char *)malloc(sizeof(char)*(strlen(dimension_list)+1));
  strcpy(svar->dimension_list, dimension_list);

  svar->manual_starts=(int*)malloc(sizeof(int)*ndims);
  svar->manual_counts=(int*)malloc(sizeof(int)*ndims);
  svar->manual_offsets=(int*)malloc(sizeof(int)*ndims);
  int i;

  for (i=0;i<ndims;i++){
    svar->manual_starts[i]=-1;
    svar->manual_counts[i]=-1;
    svar->manual_offsets[i]=-1;
  }

  DEBUG_MESS("Starting sdatio_append_variable\n");

  sdatio_append_variable(sfile, svar);

  DEBUG_MESS("Ending sdatio_append_variable\n");

#ifdef PARALLEL
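  /* Variables with an unlimited (record) dimension are switched to collective access, since
     extending a record dimension in parallel generally requires collective I/O. */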
  if (sfile->is_parallel){
    sdatio_number_of_unlimited_dimensions(sfile, variable_name, &nunlim);
    if (nunlim > 0)
      if ((retval = nc_var_par_access(sfile->nc_file_id, svar->nc_id, NC_COLLECTIVE))) ERR(retval);
  }
#endif
  
}
Example #13
/* Has to be at the bottom because it uses many
 * other functions */
void sdatio_open_file(struct sdatio_file * sfile )  {
  /*printf("called\n");*/
  int retval;
  int ndims, nvars;
  int i, j;
  struct sdatio_dimension * sdim;
  struct sdatio_variable * svar;
  size_t lengthp;
  int nunlimdims;
  int *unlimdims;
  int is_unlimited;
  char * name_tmp;
  int * vardimids;
  char * dimension_list;
  nc_type vartype;
  int vartypeint;
  int dummy;
  int nunlim;

  DEBUG_MESS("starting sdatio_open_file\n");

  retval = 0;

  if (sfile->is_open){
    printf("ERROR: The supplied sdatio_file struct corresponds to an already open file.\n");
    abort();
  }

  /* We make the choice to always open the file in readwrite mode*/
  sfile->mode = sfile->mode|NC_WRITE;

  DEBUG_MESS("sdatio_open_file opening file\n");
  /* Open the file*/
  if (sfile->is_parallel) {
#ifdef PARALLEL 
    if ((retval = nc_open_par(sfile->name, sfile->mode, *(sfile->communicator), MPI_INFO_NULL,  &(sfile->nc_file_id)))) ERR(retval);
#else
    printf("sdatio was built without --enable-parallel, sdatio_create_file will not work for parallel files\n");
    abort();
#endif
  }
  else {
    if ((retval = nc_open(sfile->name, sfile->mode, &(sfile->nc_file_id)))) ERR(retval);
  }
  DEBUG_MESS("sdatio_open_file opened file\n");
  /*Initialize object file data*/
  sfile->is_open = 1;
  sfile->n_dimensions = 0;
  sfile->n_variables = 0;
  sfile->data_written = 0;
  /* Get number of dimensions in the file*/
  if ((retval = nc_inq_ndims(sfile->nc_file_id, &ndims))) ERR(retval);
  /* Allocate some temp storage */
  name_tmp = (char*)malloc(sizeof(char)*(NC_MAX_NAME+1));
  /* Get a list of unlimited dimensions*/
  if ((retval = nc_inq_unlimdims(sfile->nc_file_id, &nunlimdims, NULL))) ERR(retval);
  unlimdims = (int*)malloc(sizeof(int)*nunlimdims);
  if ((retval = nc_inq_unlimdims(sfile->nc_file_id, &nunlimdims, unlimdims))) ERR(retval);
  /* Add each dimension to the sfile object*/
  for (i=0; i<ndims; i++){
    if ((retval = nc_inq_dim(sfile->nc_file_id, i, name_tmp, &lengthp))) ERR(retval);
    sdim = (struct sdatio_dimension *) malloc(sizeof(struct sdatio_dimension));
    /*if ((retval = nc_def_dim(sfile->nc_file_id, dimension_name, size, &(sdim->nc_id)))) ERR(retval);*/
    /*}*/
    /*sdatio_end_definitions(sfile);*/
    is_unlimited = 0;
    for(j=0; j<nunlimdims; j++) if (unlimdims[j] == i) is_unlimited = 1;
    if (is_unlimited) {
      sdim->size = SDATIO_UNLIMITED;
      /* We choose the first write to unlimited variables to be the last
       * existing record. Users can easily move to the next by
       * calling sdatio_increment_start before writing anything */
      sdim->start = lengthp-1;
    }
    else {
      sdim->size = lengthp;
      sdim->start = 0;
    }
    if (strlen(name_tmp)>1){
      sfile->has_long_dim_names = 1;
      /*printf("Dimension names can only be one character long!\n");*/
      /*abort();*/
    }
    sdim->nc_id = i;
    sdim->name = (char *)malloc(sizeof(char)*(strlen(name_tmp)+1));
    strcpy(sdim->name, name_tmp);
    sdatio_append_dimension(sfile, sdim);
  }
  DEBUG_MESS("Finished reading dimensions\n");
  /* Get the number of variables in the file*/
  if ((retval = nc_inq_nvars(sfile->nc_file_id, &nvars))) ERR(retval);
  /* Add each variable to the sfile object*/
  for (i=0; i<nvars; i++){
    if ((retval = nc_inq_varndims(sfile->nc_file_id, i, &ndims))) ERR(retval);
    vardimids = (int*)malloc(sizeof(int)*ndims);
    if ((retval = nc_inq_var(sfile->nc_file_id, i, name_tmp, &vartype,
                            &ndims, vardimids, &dummy))) ERR(retval);
    vartypeint = vartype;
    vartypeint = sdatio_sdatio_variable_type(vartypeint);
    svar = (struct sdatio_variable *) malloc(sizeof(struct sdatio_variable));

    /*Set variable id*/
    svar->nc_id = i;

    /* Set variable name*/
    svar->name = (char *)malloc(sizeof(char)*(strlen(name_tmp)+1));
    strcpy(svar->name, name_tmp);

    /*ndims = strlen(dimension_list);*/
    svar->ndims = ndims;
    DEBUG_MESS("ndims = %d for variable %s\n", ndims, name_tmp);

    /* Set the dimension_ids*/
    svar->dimension_ids = vardimids;

    /*sdatio_get_dimension_ids(sfile, dimension_list, svar);*/
    sdatio_get_dimension_list(sfile, vardimids, svar);
    /*svar->dimension_ids = dimension_ids;*/


    DEBUG_MESS("Setting type for variable %s\n", svar->name);
    switch (vartypeint){
      case SDATIO_INT:
        svar->type_size = sizeof(int);
        break;
      case SDATIO_FLOAT:
        svar->type_size = sizeof(float);
        break;
      case SDATIO_DOUBLE:
        svar->type_size = sizeof(double);
        break;
      case SDATIO_CHAR:
        svar->type_size = sizeof(char);
        break;
      default:
        printf("Unknown type in sdatio_create_variable\n");
        abort();
    }

    
    svar->type = vartypeint;

    DEBUG_MESS("Allocating manual starts and counts for variable %s; ndims %d\n", svar->name, ndims);
    svar->manual_starts=(int*)malloc(sizeof(int)*ndims);
    svar->manual_counts=(int*)malloc(sizeof(int)*ndims);
    svar->manual_offsets=(int*)malloc(sizeof(int)*ndims);

    DEBUG_MESS("Setting manual starts and counts for variable %s\n", svar->name);
    for (j=0;j<ndims;j++){
      svar->manual_starts[j]=-1;
      svar->manual_counts[j]=-1;
      svar->manual_offsets[j]=-1;
    }

    DEBUG_MESS("Starting sdatio_append_variable\n");

    sdatio_append_variable(sfile, svar);

    DEBUG_MESS("Ending sdatio_append_variable\n");

#ifdef PARALLEL
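    /* As in sdatio_create_variable: record (unlimited-dimension) variables use collective access in parallel mode. */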
    if (sfile->is_parallel){
      sdatio_number_of_unlimited_dimensions(sfile, svar->name, &nunlim);
      if (nunlim > 0)
        if ((retval = nc_var_par_access(sfile->nc_file_id, svar->nc_id, NC_COLLECTIVE))) ERR(retval);
    }
#endif
    /*vartypeint = */
  }
  DEBUG_MESS("Finished reading variables\n");
  
  free(unlimdims);
  free(name_tmp);
}
Example #14
void seissol::LoopStatistics::writeSamples() {
  std::string loopStatFile = utils::Env::get<std::string>("SEISSOL_LOOP_STAT_PREFIX", "");
  if (!loopStatFile.empty()) {
#if defined(USE_NETCDF) && defined(USE_MPI)
    unsigned nRegions = m_times.size();
    for (unsigned region = 0; region < nRegions; ++region) {
      std::ofstream file;
      std::stringstream ss;
      ss << loopStatFile << m_regions[region] << ".nc";
      std::string fileName = ss.str();
      
      int nSamples = m_times[region].size();
      int sampleOffset;
      MPI_Scan(&nSamples, &sampleOffset, 1, MPI_INT, MPI_SUM, seissol::MPI::mpi.comm());
      
      int ncid, stat;
      stat = nc_create_par(fileName.c_str(), NC_MPIIO | NC_CLOBBER | NC_NETCDF4, seissol::MPI::mpi.comm(), MPI_INFO_NULL, &ncid); check_err(stat,__LINE__,__FILE__);
      
      int sampledim, rankdim, sampletyp, offsetid, sampleid;
      
      stat = nc_def_dim(ncid, "rank", 1+seissol::MPI::mpi.size(), &rankdim);             check_err(stat,__LINE__,__FILE__);
      stat = nc_def_dim(ncid, "sample", NC_UNLIMITED, &sampledim); check_err(stat,__LINE__,__FILE__);
      
      stat = nc_def_compound(ncid, sizeof(Sample), "Sample", &sampletyp); check_err(stat,__LINE__,__FILE__);
      {
        stat = nc_insert_compound(ncid, sampletyp, "time", NC_COMPOUND_OFFSET(Sample,time), NC_DOUBLE);   check_err(stat,__LINE__,__FILE__);
        stat = nc_insert_compound(ncid, sampletyp, "loopLength", NC_COMPOUND_OFFSET(Sample,numIters), NC_UINT); check_err(stat,__LINE__,__FILE__);
      }
      
      stat = nc_def_var(ncid, "offset", NC_INT,   1, &rankdim,   &offsetid); check_err(stat,__LINE__,__FILE__);
      stat = nc_def_var(ncid, "sample", sampletyp, 1, &sampledim, &sampleid); check_err(stat,__LINE__,__FILE__);
      
      stat = nc_enddef(ncid); check_err(stat,__LINE__,__FILE__);
      
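      // Both variables are written collectively: every rank calls nc_put_vara* below,
      // even though the counts differ from rank to rank.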
      stat = nc_var_par_access(ncid, offsetid, NC_COLLECTIVE); check_err(stat,__LINE__,__FILE__);
      stat = nc_var_par_access(ncid, sampleid, NC_COLLECTIVE); check_err(stat,__LINE__,__FILE__);
  
      size_t start, count;
      int offsetData[2];
      if (seissol::MPI::mpi.rank() == 0) {
        start = 0;
        count = 2;        
        offsetData[0] = 0;
      } else {
        start = 1+seissol::MPI::mpi.rank();
        count = 1;
      }
      offsetData[count-1] = sampleOffset;
      stat = nc_put_vara_int(ncid, offsetid, &start, &count, offsetData);  check_err(stat,__LINE__,__FILE__);
      
      start = sampleOffset-nSamples;
      count = nSamples;
      stat = nc_put_vara(ncid, sampleid, &start, &count, m_times[region].data());  check_err(stat,__LINE__,__FILE__);      
      
      stat = nc_close(ncid); check_err(stat,__LINE__,__FILE__);
    }
#else
    logWarning(seissol::MPI::mpi.rank()) << "Writing loop statistics requires NetCDF and MPI.";
#endif
  }
}
Example #15
int main(int argc, char* argv[])
{
    int i, j, rank, nprocs, ncid, cmode, varid[NVARS], dimid[2], *buf;
    int err = 0;
    char str[32];
    size_t start[2], count[2];
    MPI_Comm comm=MPI_COMM_SELF;
    MPI_Info info=MPI_INFO_NULL;

    printf("\n*** Testing bug fix with changing pnetcdf variable offsets...");

    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (nprocs > 1 && rank == 0)
        printf("This test program is intended to run on ONE process\n");
    if (rank > 0) goto fn_exit;

    /* first, use PnetCDF to create a file with default header/variable alignment */
#ifdef DISABLE_PNETCDF_ALIGNMENT
    MPI_Info_create(&info);
    MPI_Info_set(info, "nc_header_align_size", "1");
    MPI_Info_set(info, "nc_var_align_size",    "1");
#endif

    cmode = NC_PNETCDF | NC_CLOBBER;
    if (nc_create_par(FILENAME, cmode, comm, info, &ncid)) ERR_RET;

    /* define dimension */
    if (nc_def_dim(ncid, "Y", NC_UNLIMITED, &dimid[0])) ERR;
    if (nc_def_dim(ncid, "X", NX,           &dimid[1])) ERR;

    /* Odd numbers are fixed variables, even numbers are record variables */
    for (i=0; i<NVARS; i++) {
        if (i%2) {
            sprintf(str,"fixed_var_%d",i);
            if (nc_def_var(ncid, str, NC_INT, 1, dimid+1, &varid[i])) ERR;
        }
        else {
            sprintf(str,"record_var_%d",i);
            if (nc_def_var(ncid, str, NC_INT, 2, dimid, &varid[i])) ERR;
        }
    }
    if (nc_enddef(ncid)) ERR;

    for (i=0; i<NVARS; i++) {
        /* Note NC_INDEPENDENT is the default */
        if (nc_var_par_access(ncid, varid[i], NC_INDEPENDENT)) ERR;
    }

    /* write all variables */
    buf = (int*) malloc(NX * sizeof(int));
    for (i=0; i<NVARS; i++) {
        for (j=0; j<NX; j++) buf[j] = i*10 + j;
        if (i%2) {
            start[0] = 0; count[0] = NX;
            if (nc_put_vara_int(ncid, varid[i], start, count, buf)) ERR;
        }
        else {
            start[0] = 0; start[1] = 0;
            count[0] = 1; count[1] = NX;
            if (nc_put_vara_int(ncid, varid[i], start, count, buf)) ERR;
        }
    }
    if (nc_close(ncid)) ERR;
    if (info != MPI_INFO_NULL) MPI_Info_free(&info);

    /* re-open the file with netCDF (parallel) and enter define mode */
    if (nc_open_par(FILENAME, NC_WRITE|NC_PNETCDF, comm, info, &ncid)) ERR_RET;
    if (nc_redef(ncid)) ERR;

    /* add attributes to make header grow */
    for (i=0; i<NVARS; i++) {
        sprintf(str, "annotation_for_var_%d",i);
        if (nc_put_att_text(ncid, varid[i], "text_attr", strlen(str), str)) ERR;
    }
    if (nc_enddef(ncid)) ERR;

    /* read variables and check their contents */
    for (i=0; i<NVARS; i++) {
        for (j=0; j<NX; j++) buf[j] = -1;
        if (i%2) {
            start[0] = 0; count[0] = NX;
            if (nc_get_var_int(ncid, varid[i], buf)) ERR;
            for (j=0; j<NX; j++)
                if (buf[j] != i*10 + j)
                    printf("unexpected read value var i=%d buf[j=%d]=%d should be %d\n",i,j,buf[j],i*10+j);
        }
        else {
            start[0] = 0; start[1] = 0;
            count[0] = 1; count[1] = NX;
            if (nc_get_vara_int(ncid, varid[i], start, count, buf)) ERR;
            for (j=0; j<NX; j++)
                if (buf[j] != i*10+j)
                    printf("unexpected read value var i=%d buf[j=%d]=%d should be %d\n",i,j,buf[j],i*10+j);
        }
    }
    if (nc_close(ncid)) ERR;

fn_exit:
    MPI_Finalize();
    SUMMARIZE_ERR;
    FINAL_RESULTS;
    return 0;
}
Example #16
int test_pio_attr(int flag)
{
   /* MPI stuff. */
   int mpi_size, mpi_rank;
   MPI_Comm comm = MPI_COMM_WORLD;
   MPI_Info info = MPI_INFO_NULL;

   /* Netcdf-4 stuff. */
   int ncid;
   int nvid;
   int j, i;

   double rh_range[2];
   static char title[] = "parallel attr to netCDF";
   nc_type    st_type,vr_type;
   size_t     vr_len,st_len;
   size_t     orivr_len;
   double *vr_val;
   char   *st_val;

   /* two dimensional integer data*/
   int dimids[NDIMS1];
   size_t start[NDIMS1];
   size_t count[NDIMS1];
   int *data;
   int *tempdata;

   /* Initialize MPI. */
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

   /* Create a parallel netcdf-4 file. */
/*    nc_set_log_level(NC_TURN_OFF_LOGGING); */
   /*    nc_set_log_level(3);*/

   if (nc_create_par(file_name, facc_type, comm, info, &ncid)) ERR;

   /* Create a 2-D variable so that an attribute can be added. */
   if (nc_def_dim(ncid, "d1", DIMSIZE2, dimids)) ERR;
   if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;

   /* Create one var. */
   if (nc_def_var(ncid, "v1", NC_INT, NDIMS1, dimids, &nvid)) ERR;

   orivr_len = 2;
   rh_range[0] = 1.0;
   rh_range[1] = 1000.0;

   /* Write attributes of a variable */

   if (nc_put_att_double (ncid, nvid, "valid_range", NC_DOUBLE,
			  orivr_len, rh_range)) ERR;

   if (nc_put_att_text (ncid, nvid, "title", strlen(title),
			title)) ERR;

   /* Write global attributes */
   if (nc_put_att_double (ncid, NC_GLOBAL, "g_valid_range", NC_DOUBLE,
			  orivr_len, rh_range)) ERR;
   if (nc_put_att_text (ncid, NC_GLOBAL, "g_title", strlen(title), title)) ERR;

   if (nc_enddef(ncid)) ERR;

   /* Set up slab for this process. */
   start[0] = 0;
   start[1] = mpi_rank * DIMSIZE/mpi_size;
   count[0] = DIMSIZE2;
   count[1] = DIMSIZE/mpi_size;

   /* Access parallel */
   if (nc_var_par_access(ncid, nvid, flag)) ERR;

   /* Allocating data */
   data      = malloc(sizeof(int)*count[1]*count[0]);
   tempdata  = data;
   for(j = 0; j < count[0]; j++)
      for (i = 0; i < count[1]; i++)
      {
	 *tempdata = mpi_rank * (j + 1);
	 tempdata++;
      }

   if (nc_put_vara_int(ncid, nvid, start, count, data)) ERR;
   free(data);

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   /* Read attributes */
   if (nc_open_par(file_name, facc_type_open, comm, info, &ncid)) ERR;

   /* Set up slab for this process. */
   start[0] = 0;
   start[1] = mpi_rank * DIMSIZE/mpi_size;
   count[0] = DIMSIZE2;
   count[1] = DIMSIZE/mpi_size;

   /* Inquire the variable */
   if (nc_inq_varid(ncid, "v1", &nvid)) ERR;

   /* Access parallel */
   if (nc_var_par_access(ncid, nvid, flag)) ERR;

   /* Inquire the attribute */
   if (nc_inq_att (ncid, nvid, "valid_range", &vr_type, &vr_len)) ERR;

   /* check stuff */
   if(vr_type != NC_DOUBLE || vr_len != orivr_len) ERR;

   vr_val = (double *) malloc(vr_len * sizeof(double));

   /* Get variable attribute values */
   if (nc_get_att_double(ncid, nvid, "valid_range", vr_val)) ERR;

   /* Check variable attribute value */
   for(i = 0; i < vr_len; i++)
      if (vr_val[i] != rh_range[i])
	 ERR_RET;
   free(vr_val);

   /* Inquire the global attribute */
   if (nc_inq_att (ncid, NC_GLOBAL, "g_valid_range", &vr_type, &vr_len)) ERR;

   /* Check stuff. */
   if(vr_type != NC_DOUBLE || vr_len != orivr_len) ERR;

   /* Obtain global attribute value */
   vr_val = (double *) malloc(vr_len * sizeof(double));
   if (nc_get_att_double(ncid, NC_GLOBAL, "g_valid_range", vr_val)) ERR;

   /* Check global attribute value */
   for(i = 0; i < vr_len; i++)
      if (vr_val[i] != rh_range[i]) ERR_RET;
   free(vr_val);

   /* Inquire the string attribute of a variable */
   if (nc_inq_att (ncid, nvid, "title", &st_type, &st_len)) ERR;

   /* check string attribute length */
   if(st_len != strlen(title)) ERR_RET;

   /* Check string attribute type */
   if(st_type != NC_CHAR) ERR_RET;

   /* Allocate memory for string attribute */
   st_val = (char *) malloc(st_len * (sizeof(char)));

   /* Obtain variable string attribute value */
   if (nc_get_att_text(ncid, nvid,"title", st_val)) ERR;

   /*check string value */
   if(strncmp(st_val,title,st_len)) {
      free(st_val);
      ERR_RET;
   }
   free(st_val);

   /* Inquire the global attribute */
   if (nc_inq_att (ncid, NC_GLOBAL, "g_title", &st_type, &st_len)) ERR;

   /* check attribute length*/
   if(st_len != strlen(title)) ERR_RET;

   /*check attribute type*/
   if(st_type != NC_CHAR) ERR_RET;

   /* obtain global string attribute value */
   st_val = (char*)malloc(st_len*sizeof(char));
   if (nc_get_att_text(ncid, NC_GLOBAL,"g_title", st_val)) ERR;

   /* check attribute value */
   if(strncmp(st_val,title,st_len)){
      free(st_val);
      ERR_RET;
   }
   free(st_val);

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   return 0;
}
Example #17
/* test different hyperslab settings */
int test_pio_hyper(int flag){

   /* MPI stuff. */
   int mpi_size, mpi_rank;
   int res = NC_NOERR;
   MPI_Comm comm = MPI_COMM_WORLD;
   MPI_Info info = MPI_INFO_NULL;

   /* Netcdf-4 stuff. */
   int ncid;
   int nvid;
   int rvid;
   int j, i;

   /* two dimensional integer data test */
   int dimids[NDIMS1];
   size_t start[NDIMS1], count[NDIMS1];
   int *data;
   int *tempdata;
   int *rdata;
   int *temprdata;
   int count_atom;


   /* Initialize MPI. */
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

   if(mpi_size == 1) return 0;

   /* Create a parallel netcdf-4 file. */
/*      nc_set_log_level(NC_TURN_OFF_LOGGING); */
/*      nc_set_log_level(4);*/

   if (nc_create_par(file_name, facc_type, comm, info, &ncid)) ERR;

   /* The case is two dimensional variables, no unlimited dimension */

   /* Create two dimensions. */
   if (nc_def_dim(ncid, "d1", DIMSIZE2, dimids)) ERR;
   if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;

   /* Create one var. */
   if (nc_def_var(ncid, "v1", NC_INT, NDIMS1, dimids, &nvid)) ERR;

   if (nc_enddef(ncid)) ERR;


   /* hyperslab illustration for 3-processor case

      --------
      |aaaacccc|
      |aaaacccc|
      |bbbb    |
      |bbbb    |
      --------
   */

   /* odd number of processors should be treated differently */
   if(mpi_size%2 != 0) {

      count_atom = DIMSIZE*2/(mpi_size+1);
      if(mpi_rank <= mpi_size/2) {
         start[0] = 0;
         start[1] = mpi_rank*count_atom;
         count[0] = DIMSIZE2/2;
         count[1] = count_atom;
      }
      else {
         start[0] = DIMSIZE2/2;
         start[1] = (mpi_rank-mpi_size/2-1)*count_atom;
         count[0] = DIMSIZE2/2;
         count[1] = count_atom;
      }
   }
   else  {

      count_atom = DIMSIZE*2/mpi_size;
      if(mpi_rank < mpi_size/2) {
         start[0] = 0;
         start[1] = mpi_rank*count_atom;
         count[0] = DIMSIZE2/2;
         count[1] = count_atom;
      }
      else {
         start[0] = DIMSIZE2/2;
         start[1] = (mpi_rank-mpi_size/2)*count_atom;
         count[0] = DIMSIZE2/2;
         count[1] = count_atom;
      }
   }

   if (nc_var_par_access(ncid, nvid, flag)) ERR;
   data      = malloc(sizeof(int)*count[1]*count[0]);
   tempdata  = data;
   for (j=0; j<count[0];j++){
      for (i=0; i<count[1]; i++){
	 *tempdata = mpi_rank*(j+1);
	 tempdata ++;
      }
   }


   if (nc_put_vara_int(ncid, nvid, start, count, data)) ERR;
   free(data);

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   if (nc_open_par(file_name, facc_type_open, comm, info, &ncid)) ERR;

   /* Inquire the variable */
   if (nc_inq_varid(ncid, "v1", &rvid)) ERR;

   if (nc_var_par_access(ncid, rvid, flag)) ERR;

   rdata      = malloc(sizeof(int)*count[1]*count[0]);
   /* Read the data with the same slab settings */
   if (nc_get_vara_int(ncid, rvid, start, count, rdata)) ERR;

   temprdata = rdata;
   for (j=0; j<count[0];j++){
      for (i=0; i<count[1]; i++){
	 if(*temprdata != mpi_rank*(j+1))
	 {
	    res = -1;
	    break;
	 }
	 temprdata++;
      }
   }

   free(rdata);
   if(res == -1) ERR_RET;

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   return 0;
}
Example #18
int main(int argc, char* argv[])
{
    //init MPI
    MPI_Init( &argc, &argv);
    int rank, size;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank);
    MPI_Comm_size( MPI_COMM_WORLD, &size);
    //create a grid and some data
    if( size != 4){ std::cerr << "Please run with 4 threads!\n"; return -1;}
    double Tmax=2.*M_PI;
    double NT = 10;
    double dt = Tmax/NT;
    dg::Grid1d<double> gx( 0, 2.*M_PI, 3, 10);
    dg::Grid1d<double> gy( 0, 2.*M_PI, 3, 10);
    dg::Grid1d<double> gz( 0, 2.*M_PI, 1, 20);
    dg::Grid3d<double> g( gx, gy, gz);
    std::string hello = "Hello world\n";
    thrust::host_vector<double> data = dg::evaluate( function, g);

    //create NetCDF File
    int ncid;
    file::NC_Error_Handle err;
    MPI_Info info = MPI_INFO_NULL;
    err = nc_create_par( "testmpi.nc", NC_NETCDF4|NC_MPIIO|NC_CLOBBER, MPI_COMM_WORLD, info, &ncid); 
    err = nc_put_att_text( ncid, NC_GLOBAL, "input", hello.size(), hello.data());

    int dimids[4], tvarID;
    err = file::define_dimensions( ncid, dimids, &tvarID, g);
    int dataID;
    err = nc_def_var( ncid, "data", NC_DOUBLE, 4, dimids, &dataID);

    /* Write metadata to file. */
    err = nc_enddef(ncid);

    //err = nc_enddef( ncid);
    size_t start[4] = {0, rank*g.Nz()/size, 0, 0};
    size_t count[4] = {1, g.Nz()/size, g.Ny()*g.n(), g.Nx()*g.n()};
    if( rank==0) std::cout<< "Write from "<< start[0]<< " "<<start[1]<<" "<<start[2]<<" "<<start[3]<<std::endl;
    if( rank==0) std::cout<< "Number of elements "<<count[0]<< " "<<count[1]<<" "<<count[2]<<" "<<count[3]<<std::endl;
    err = nc_var_par_access(ncid, dataID , NC_COLLECTIVE);
    err = nc_var_par_access(ncid, tvarID , NC_COLLECTIVE);
    size_t Tcount=1, Tstart=0;
    double time = 0;
    //err = nc_close(ncid);
    for(unsigned i=0; i<=NT; i++)
    {
        if(rank==0)std::cout<<"Write timestep "<<i<<"\n";
        //err = nc_open_par( "testmpi.nc", NC_WRITE|NC_MPIIO, MPI_COMM_WORLD, info, &ncid); //doesn't work I don't know why
        time = i*dt;
        Tstart = i;
        data = dg::evaluate( function, g);
        dg::blas1::scal( data, cos( time));
        start[0] = i;
        //write dataset (one timeslice)
        err = nc_put_vara_double( ncid, dataID, start, count, data.data() + start[1]*count[2]*count[3]);
        err = nc_put_vara_double( ncid, tvarID, &Tstart, &Tcount, &time);
        //err = nc_close(ncid);
    }
    err = nc_close(ncid);
    MPI_Finalize();
    return 0;
}
Example #19
/* Extract a variable from a netCDF file and write it to another netCDF file in parallel. The
 * function creates a new netCDF file if the output file does not exist. If the output file exists
 * and the output variable doesn't exist in it, the function creates a new variable or aborts. */
int extract_unary(char *var,char *varOut,char **outDims,int outDimNum,size_t *begins,int beginNum,size_t *ends,int endNum,ptrdiff_t *strides,int strideNum,char *fileIn,char* fileOut){
	int mpi_size,mpi_rank;
	MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
	MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);

	MPI_Comm comm=MPI_COMM_WORLD;
	MPI_Info info=MPI_INFO_NULL;
	
	int ncid,vlid,ndims,dimids[NC_MAX_VAR_DIMS],nattrs;
	int ncidout,vlidout;
	int i,res;
	char varName[NC_MAX_NAME+1];
	nc_type vtype;	

	/* 
		open input file
	*/
	if((res=nc_open_par(fileIn,NC_MPIIO,comm,info,&ncid))){
		printf("Cannot open input file %s\n",fileIn);
		BAIL(res);
	}
	if((res=nc_inq_varid(ncid,var,&vlid))){
		printf("Cannot query variable %s in input file %s\n",var,fileIn);
		BAIL(res);	
	}
	if((res=nc_inq_var(ncid,vlid,varName,&vtype,&ndims,dimids,&nattrs))){
		printf("Cannot query dimension info of varialbe %s in input file %s\n",var,fileIn);
		BAIL(res);
	}


	char **dimsName=(char **)malloc(sizeof(char *)*ndims);
	size_t *shape=(size_t *)malloc(sizeof(size_t)*ndims);


	for(i=0;i<ndims;++i){
		dimsName[i]=(char *)malloc(NC_MAX_NAME+1);
		nc_inq_dim(ncid,dimids[i],dimsName[i],&shape[i]);//get dimsName and dimsLen
	}

	int is_strides_null=0;
	int is_begins_null=0;
	int is_ends_null=0;
	
	/* initialize begins ends and strides*/
	init_begins_ends_strides(ndims,&begins,&beginNum,&is_begins_null,&ends,&endNum,&is_ends_null,&strides,&strideNum,&is_strides_null,shape);
	
	/* deal with -d and -r arguments */
	dim_opts_handler(&global_attr,dimids,ncid,ndims,begins,ends,strides);
	
	/* check begins ends and strides */
	if(-1==check_begins_ends_strides(ndims,begins,beginNum,ends,endNum,strides,strideNum,shape,dimsName)){return -1;}


	/* set independent parallel access for input file */
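	/* Independent access lets each rank read its share of the hyperslab without synchronizing with the others. */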
	if((res=nc_var_par_access(ncid,vlid,NC_INDEPENDENT))){
		printf("Cannot set parallel access for variable %s in input file %s\n",var,fileIn);
		BAIL(res);
	}
	/*
		create output file and define dims and var
	*/
	int *dimidsOut=(int *)malloc(sizeof(int)*ndims);
	if(-1==	open_output_file_and_define_var(fileOut,&ncidout,&vlidout,dimidsOut,global_attr.append,ndims,vtype,dimsName,begins,ends,strides,shape,var,varOut,outDims,outDimNum)){return -1;}
	
	
	/* define attributes */
	copy_attrs(ncid, vlid, ncidout, vlidout, nattrs);
	add_cmd_attr(ncidout,vlidout,global_attr.argc,global_attr.argv,mpi_size);
	if((res=nc_enddef(ncidout))){
		printf("End define netcdf id %d in output file %s failed\n",ncidout,fileOut);
		BAIL(res);
	}

	if((res=nc_var_par_access(ncidout,vlidout,NC_INDEPENDENT))) {
		printf("Cannot set parallel access for variable %s in output file %s\n",var,fileOut);
		BAIL(res);
	}
	extract_unary_selector(mpi_rank,mpi_size,ncid,vlid,ncidout,vlidout,ndims,vtype,shape,begins,ends,strides,0L,NULL);// select a suitable parallel function

	/* free resources */
	if(is_strides_null)
		free(strides);
	if(is_begins_null)
		free(begins);
	if(is_ends_null)
		free(ends);
	for(i=0;i<ndims;++i){
		free(dimsName[i]);
	}
	free(dimsName);
	free(dimidsOut);
	free(shape);

	/*close file*/
	if((res=nc_close(ncid))){
		printf("Close input file %s failed\n",fileIn);
		BAIL(res);
	}
	if((res=nc_close(ncidout))){
		printf("Close output file %s failed\n",fileOut);
		BAIL(res);
	}
	return 0;
}
Example #20
int
main(int argc, char **argv)
{
    /* MPI stuff. */
    int mpi_namelen;		
    char mpi_name[MPI_MAX_PROCESSOR_NAME];
    int mpi_size, mpi_rank;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
    double start_time = 0, total_time;

    /* Netcdf-4 stuff. */
    int ncid, varid, dimids[NDIMS];
    size_t start[NDIMS] = {0, 0, 0};
    size_t count[NDIMS] = {1, DIMSIZE, DIMSIZE};
    int data[DIMSIZE * DIMSIZE], data_in[DIMSIZE * DIMSIZE];
    int j, i;
    
    char file_name[NC_MAX_NAME + 1];
    int ndims_in, nvars_in, natts_in, unlimdimid_in;

#ifdef USE_MPE
    int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close, s_open, e_open, s_read, e_read;
#endif /* USE_MPE */

    /* Initialize MPI. */
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Get_processor_name(mpi_name, &mpi_namelen);
    /*printf("mpi_name: %s size: %d rank: %d\n", mpi_name, mpi_size, mpi_rank);*/

    /* Must be able to evenly divide my slabs between processors. */
    if (NUM_SLABS % mpi_size != 0)
    {
       if (!mpi_rank) printf("NUM_SLABS (%d) is not evenly divisible by mpi_size(%d)\n", 
                             NUM_SLABS, mpi_size);
       ERR;
    }

#ifdef USE_MPE
    MPE_Init_log();
    s_init = MPE_Log_get_event_number(); 
    e_init = MPE_Log_get_event_number(); 
    s_define = MPE_Log_get_event_number(); 
    e_define = MPE_Log_get_event_number(); 
    s_write = MPE_Log_get_event_number(); 
    e_write = MPE_Log_get_event_number(); 
    s_close = MPE_Log_get_event_number(); 
    e_close = MPE_Log_get_event_number(); 
    s_open = MPE_Log_get_event_number(); 
    e_open = MPE_Log_get_event_number(); 
    MPE_Describe_state(s_init, e_init, "Init", "red");
    MPE_Describe_state(s_define, e_define, "Define", "yellow");
    MPE_Describe_state(s_write, e_write, "Write", "green");
    MPE_Describe_state(s_close, e_close, "Close", "purple");
    MPE_Describe_state(s_open, e_open, "Open", "blue");
    MPE_Start_log();
    MPE_Log_event(s_init, 0, "start init");
#endif /* USE_MPE */

/*     if (!mpi_rank) */
/*     { */
/*        printf("\n*** Testing parallel I/O some more.\n"); */
/*        printf("*** writing a %d x %d x %d file from %d processors...\n",  */
/*               NUM_SLABS, DIMSIZE, DIMSIZE, mpi_size); */
/*     } */

    /* We will write the same slab over and over. */
    for (i = 0; i < DIMSIZE * DIMSIZE; i++)
       data[i] = mpi_rank;

#ifdef USE_MPE
    MPE_Log_event(e_init, 0, "end init");
    MPE_Log_event(s_define, 0, "start define file");
#endif /* USE_MPE */

    /* Create a parallel netcdf-4 file. */
    sprintf(file_name, "%s/%s", TEMP_LARGE, FILE_NAME);
    if (nc_create_par(file_name, NC_PNETCDF, comm, info, &ncid)) ERR;

    /* A global attribute holds the number of processors that created
     * the file. */
    if (nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size)) ERR;

    /* Create three dimensions. */
    if (nc_def_dim(ncid, DIM1_NAME, NUM_SLABS, dimids)) ERR;
    if (nc_def_dim(ncid, DIM2_NAME, DIMSIZE, &dimids[1])) ERR;
    if (nc_def_dim(ncid, DIM3_NAME, DIMSIZE, &dimids[2])) ERR;

    /* Create one var. */
    if (nc_def_var(ncid, VAR_NAME, NC_INT, NDIMS, dimids, &varid)) ERR;

    /* Write metadata to file. */
    if (nc_enddef(ncid)) ERR;

#ifdef USE_MPE
    MPE_Log_event(e_define, 0, "end define file");
    if (mpi_rank)
       sleep(mpi_rank);
#endif /* USE_MPE */

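    /* Independent access: each rank writes its own slabs without synchronizing with the
       others; the collective variant is left commented out below for comparison. */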
/*    if (nc_var_par_access(ncid, varid, NC_COLLECTIVE)) ERR;*/
    if (nc_var_par_access(ncid, varid, NC_INDEPENDENT)) ERR;

    if (!mpi_rank)
       start_time = MPI_Wtime();

    /* Write all the slabs this process is responsible for. */
    for (i = 0; i < NUM_SLABS / mpi_size; i++)
    {
       start[0] = NUM_SLABS / mpi_size * mpi_rank + i;

#ifdef USE_MPE
       MPE_Log_event(s_write, 0, "start write slab");
#endif /* USE_MPE */

       /* Write one slab of data. */
       if (nc_put_vara_int(ncid, varid, start, count, data)) ERR;

#ifdef USE_MPE
       MPE_Log_event(e_write, 0, "end write file");
#endif /* USE_MPE */
    }

    if (!mpi_rank)
    {
       total_time = MPI_Wtime() - start_time;
/*       printf("num_proc\ttime(s)\n");*/
       printf("%d\t%g\t%g\n", mpi_size, total_time, DIMSIZE * DIMSIZE * NUM_SLABS * sizeof(int) / total_time);
    }

#ifdef USE_MPE
    MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

    /* Close the netcdf file. */
    if (nc_close(ncid))	ERR;
    
#ifdef USE_MPE
    MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

    /* Reopen the file and check it. */
    if (nc_open_par(file_name, NC_NOWRITE, comm, info, &ncid)) ERR;
    if (nc_inq(ncid, &ndims_in, &nvars_in, &natts_in, &unlimdimid_in)) ERR;
    if (ndims_in != NDIMS || nvars_in != 1 || natts_in != 1 || 
        unlimdimid_in != -1) ERR;

    /* Read all the slabs this process is responsible for. */
    for (i = 0; i < NUM_SLABS / mpi_size; i++)
    {
       start[0] = NUM_SLABS / mpi_size * mpi_rank + i;

#ifdef USE_MPE
       MPE_Log_event(s_read, 0, "start read slab");
#endif /* USE_MPE */

       /* Read one slab of data. */
       if (nc_get_vara_int(ncid, varid, start, count, data_in)) ERR;
       
       /* Check data. */
       for (j = 0; j < DIMSIZE * DIMSIZE; j++)
	  if (data_in[j] != mpi_rank) 
	  {
	     ERR;
	     break;
	  }

#ifdef USE_MPE
       MPE_Log_event(e_read, 0, "end read file");
#endif /* USE_MPE */
    }

#ifdef USE_MPE
    MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

    /* Close the netcdf file. */
    if (nc_close(ncid))	ERR;
    
#ifdef USE_MPE
    MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

    /* Delete this large file. */
    remove(file_name); 

    /* Shut down MPI. */
    MPI_Finalize();

/*     if (!mpi_rank) */
/*     { */
/*        SUMMARIZE_ERR; */
/*        FINAL_RESULTS; */
/*     } */
    return total_err;
}
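
A minimal, standalone sketch of the per-rank slab decomposition used in the write and read loops above, assuming the same NUM_SLABS x DIMSIZE x DIMSIZE layout and an mpi_size that divides NUM_SLABS evenly; the helper name slab_start_count and the 4-rank loop in main() are illustrative, not part of the test.

#include <stddef.h>
#include <stdio.h>

#define NUM_SLABS 16   /* assumed to match the test's layout */
#define DIMSIZE   16

/* Each rank owns NUM_SLABS / mpi_size consecutive slabs along dimension 0;
 * slab i of a given rank starts at rank * (NUM_SLABS / mpi_size) + i and
 * always covers one full DIMSIZE x DIMSIZE plane. */
static void slab_start_count(int mpi_rank, int mpi_size, int i,
                             size_t start[3], size_t count[3])
{
   start[0] = (size_t)(NUM_SLABS / mpi_size * mpi_rank + i);
   start[1] = 0;
   start[2] = 0;
   count[0] = 1;
   count[1] = DIMSIZE;
   count[2] = DIMSIZE;
}

int main(void)
{
   size_t start[3], count[3];
   int nranks = 4;   /* hypothetical run size; must divide NUM_SLABS */
   int rank, i;

   for (rank = 0; rank < nranks; rank++)
      for (i = 0; i < NUM_SLABS / nranks; i++)
      {
         slab_start_count(rank, nranks, i, start, count);
         printf("rank %d slab %d -> start[0] = %zu, count = {%zu, %zu, %zu}\n",
                rank, i, start[0], count[0], count[1], count[2]);
      }
   return 0;
}

Each rank touches a disjoint block of slabs, which is what makes the independent access mode used above safe.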
Example #21
0
int
main(int argc, char **argv)
{
   int mpi_namelen;
   char mpi_name[MPI_MAX_PROCESSOR_NAME];
   int mpi_size, mpi_rank;
   MPI_Comm comm = MPI_COMM_WORLD;
   MPI_Info info = MPI_INFO_NULL;
   double start_time = 0, total_time;
   int mpi_size_in;
#define NUM_TEST_TYPES 11
   nc_type test_type[NUM_TEST_TYPES] = {NC_BYTE, NC_CHAR, NC_SHORT, NC_INT, NC_FLOAT, NC_DOUBLE,
                                        NC_UBYTE, NC_USHORT, NC_UINT, NC_INT64, NC_UINT64};
   int tt, fv;
   int j, i, k, ret;

   /* Initialize MPI. */
   MPI_Init(&argc,&argv);
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
   MPI_Get_processor_name(mpi_name, &mpi_namelen);

   /* Must be able to evenly divide my slabs between processors. */
   if (NUM_SLABS % mpi_size)
   {
      if (!mpi_rank)
         printf("NUM_SLABS (%d) is not evenly divisible by mpi_size(%d)\n",
                NUM_SLABS, mpi_size);
      ERR;
   }

   if (!mpi_rank)
      printf("\n*** Testing parallel I/O some more.\n");

   /* Test for different fill value settings. */
   for (fv = 0; fv < NUM_FILL_TEST_RUNS; fv++)
   {
      /* Test for different netCDF types. */
      for (tt = 0; tt < NUM_TEST_TYPES; tt++)
      {
         char file_name[NC_MAX_NAME + 1];
         int fill_mode_in;
         void *data, *data_in;
         void *fill_value, *fill_value_in;
         size_t type_size;
         size_t write_start[NDIMS] = {0, 0, 1};
         size_t write_count[NDIMS] = {1, DIMSIZE, DIMSIZE - 1};
         size_t read_start[NDIMS] = {0, 0, 0};
         size_t read_count[NDIMS] = {1, DIMSIZE, DIMSIZE};
         int ncid, varid, dimids[NDIMS];
         int ndims_in, nvars_in, natts_in, unlimdimid_in;

         /* Fill values to be expected. */
         signed char byte_expected_fill_value;
         unsigned char char_expected_fill_value;
         short short_expected_fill_value;
         int int_expected_fill_value;
         float float_expected_fill_value;
         double double_expected_fill_value;
         unsigned char ubyte_expected_fill_value;
         unsigned short ushort_expected_fill_value;
         unsigned int uint_expected_fill_value;
         long long int int64_expected_fill_value;
         unsigned long long int uint64_expected_fill_value;

         /* Fill values used when writing. */
         signed char byte_fill_value = -TEST_VAL_42;
         unsigned char char_fill_value = 'x';
         short short_fill_value = TEST_VAL_42 * 100;
         int int_fill_value = TEST_VAL_42 * 1000;
         float float_fill_value = TEST_VAL_42 * 1000;
         double double_fill_value = TEST_VAL_42 * 1000;
         unsigned char ubyte_fill_value = TEST_VAL_42;
         unsigned short ushort_fill_value = TEST_VAL_42 * 100;
         unsigned int uint_fill_value = TEST_VAL_42 * 1000;
         long long int int64_fill_value = TEST_VAL_42 * 1000;
         unsigned long long int uint64_fill_value = TEST_VAL_42 * 1000;

         /* Fill values read in. */
         signed char byte_fill_value_in;
         unsigned char char_fill_value_in;
         short short_fill_value_in;
         int int_fill_value_in;
         float float_fill_value_in;
         double double_fill_value_in;
         unsigned char ubyte_fill_value_in;
         unsigned short ushort_fill_value_in;
         unsigned int uint_fill_value_in;
         long long int int64_fill_value_in;
         unsigned long long int uint64_fill_value_in;

         /* Data to write and read. */
         signed char byte_data[DIMSIZE * DIMSIZE], byte_data_in[DIMSIZE * DIMSIZE];
         unsigned char char_data[DIMSIZE * DIMSIZE], char_data_in[DIMSIZE * DIMSIZE];
         short short_data[DIMSIZE * DIMSIZE], short_data_in[DIMSIZE * DIMSIZE];
         int int_data[DIMSIZE * DIMSIZE], int_data_in[DIMSIZE * DIMSIZE];
         float float_data[DIMSIZE * DIMSIZE], float_data_in[DIMSIZE * DIMSIZE];
         double double_data[DIMSIZE * DIMSIZE], double_data_in[DIMSIZE * DIMSIZE];
         unsigned char ubyte_data[DIMSIZE * DIMSIZE], ubyte_data_in[DIMSIZE * DIMSIZE];
         unsigned short ushort_data[DIMSIZE * DIMSIZE], ushort_data_in[DIMSIZE * DIMSIZE];
         unsigned int uint_data[DIMSIZE * DIMSIZE], uint_data_in[DIMSIZE * DIMSIZE];
         long long int int64_data[DIMSIZE * DIMSIZE], int64_data_in[DIMSIZE * DIMSIZE];
         unsigned long long int uint64_data[DIMSIZE * DIMSIZE], uint64_data_in[DIMSIZE * DIMSIZE];

         if (!mpi_rank)
            printf("*** writing a %d x %d x %d file from %d processors for fill value test %d type %d...\n",
                   NUM_SLABS, DIMSIZE, DIMSIZE, mpi_size, fv, test_type[tt]);

         /* Initialize test data. */
         switch(test_type[tt])
         {
         case NC_BYTE:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               byte_data[i] = mpi_rank;
            data = byte_data;
            data_in = byte_data_in;
            byte_expected_fill_value = fv ? byte_fill_value : NC_FILL_BYTE;
            fill_value = &byte_expected_fill_value;
            fill_value_in = &byte_fill_value_in;
            break;
         case NC_CHAR:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               char_data[i] = mpi_rank;
            data = char_data;
            data_in = char_data_in;
            char_expected_fill_value = fv ? char_fill_value : NC_FILL_CHAR;
            fill_value = &char_expected_fill_value;
            fill_value_in = &char_fill_value_in;
            break;
         case NC_SHORT:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               short_data[i] = mpi_rank;
            data = short_data;
            data_in = short_data_in;
            short_expected_fill_value = fv ? short_fill_value : NC_FILL_SHORT;
            fill_value = &short_expected_fill_value;
            fill_value_in = &short_fill_value_in;
            break;
         case NC_INT:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               int_data[i] = mpi_rank;
            data = int_data;
            data_in = int_data_in;
            int_expected_fill_value = fv ? int_fill_value : NC_FILL_INT;
            fill_value = &int_expected_fill_value;
            fill_value_in = &int_fill_value_in;
            break;
         case NC_FLOAT:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               float_data[i] = mpi_rank;
            data = float_data;
            data_in = float_data_in;
            float_expected_fill_value = fv ? float_fill_value : NC_FILL_FLOAT;
            fill_value = &float_expected_fill_value;
            fill_value_in = &float_fill_value_in;
            break;
         case NC_DOUBLE:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               double_data[i] = mpi_rank;
            data = double_data;
            data_in = double_data_in;
            double_expected_fill_value = fv ? double_fill_value : NC_FILL_DOUBLE;
            fill_value = &double_expected_fill_value;
            fill_value_in = &double_fill_value_in;
            break;
         case NC_UBYTE:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               ubyte_data[i] = mpi_rank;
            data = ubyte_data;
            data_in = ubyte_data_in;
            ubyte_expected_fill_value = fv ? ubyte_fill_value : NC_FILL_UBYTE;
            fill_value = &ubyte_expected_fill_value;
            fill_value_in = &ubyte_fill_value_in;
            break;
         case NC_USHORT:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               ushort_data[i] = mpi_rank;
            data = ushort_data;
            data_in = ushort_data_in;
            ushort_expected_fill_value = fv ? ushort_fill_value : NC_FILL_USHORT;
            fill_value = &ushort_expected_fill_value;
            fill_value_in = &ushort_fill_value_in;
            break;
         case NC_UINT:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               uint_data[i] = mpi_rank;
            data = uint_data;
            data_in = uint_data_in;
            uint_expected_fill_value = fv ? uint_fill_value : NC_FILL_UINT;
            fill_value = &uint_expected_fill_value;
            fill_value_in = &uint_fill_value_in;
            break;
         case NC_INT64:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               int64_data[i] = mpi_rank;
            data = int64_data;
            data_in = int64_data_in;
            int64_expected_fill_value = fv ? int64_fill_value : NC_FILL_INT64;
            fill_value = &int64_expected_fill_value;
            fill_value_in = &int64_fill_value_in;
            break;
         case NC_UINT64:
            for (i = 0; i < DIMSIZE * DIMSIZE; i++)
               uint64_data[i] = mpi_rank;
            data = uint64_data;
            data_in = uint64_data_in;
            uint64_expected_fill_value = fv ? uint64_fill_value : NC_FILL_UINT64;
            fill_value = &uint64_expected_fill_value;
            fill_value_in = &uint64_fill_value_in;
            break;
         }

         /* Create a file name. */
         sprintf(file_name, "%s_type_%d_fv_%d.nc", TEST_NAME, test_type[tt], fv);

         /* Create a parallel netcdf-4 file. */
         if (nc_create_par(file_name, NC_NETCDF4, comm, info, &ncid)) ERR;

         /* Get the type len. */
         if (nc_inq_type(ncid, test_type[tt], NULL, &type_size)) ERR;

         /* A global attribute holds the number of processors that created
          * the file. */
         if (nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size)) ERR;

         /* Create three dimensions. */
         if (nc_def_dim(ncid, DIM1_NAME, NUM_SLABS, dimids)) ERR;
         if (nc_def_dim(ncid, DIM2_NAME, DIMSIZE, &dimids[1])) ERR;
         if (nc_def_dim(ncid, DIM3_NAME, DIMSIZE, &dimids[2])) ERR;

         /* Create one var. */
         if (nc_def_var(ncid, VAR_NAME, test_type[tt], NDIMS, dimids, &varid)) ERR;
         if (nc_put_att_int(ncid, varid, "var_num_processors", NC_INT, 1, &mpi_size)) ERR;
         if (fv == 1)
         {
            if (nc_def_var_fill(ncid, varid, NC_FILL, fill_value)) ERR;
            if (nc_inq_var_fill(ncid, varid, &fill_mode_in, fill_value_in)) ERR;
            if (fill_mode_in != NC_FILL) ERR;
            if (memcmp(fill_value_in, fill_value, type_size)) ERR;
         }
         else if (fv == 2)
         {
            if (nc_def_var_fill(ncid, varid, NC_NOFILL, NULL)) ERR;
            if (nc_inq_var_fill(ncid, varid, &fill_mode_in, NULL)) ERR;
            if (!fill_mode_in) ERR; /* fill_mode_in should be NC_NOFILL, which is non-zero */
         }

         /* Write metadata to file. */
         if (nc_enddef(ncid)) ERR;

         /* Change access mode to collective, then back to independent. */
         if (nc_var_par_access(ncid, varid, NC_COLLECTIVE)) ERR;
         if (nc_var_par_access(ncid, varid, NC_INDEPENDENT)) ERR;
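         /* Both calls should succeed once the file is out of define mode;
          * the writes below then proceed in independent mode, so each rank
          * issues its nc_put_vara() calls without waiting on the others. */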

         if (!mpi_rank)
            start_time = MPI_Wtime();

         /* Write all the slabs this process is responsible for. */
         for (i = 0; i < NUM_SLABS / mpi_size; i++)
         {
            write_start[0] = NUM_SLABS / mpi_size * mpi_rank + i;

            /* Write one slab of data. Because write_start[2] is 1 and
             * write_count[2] is DIMSIZE - 1, the k == 0 element of each row
             * is never written and keeps the fill value (with a DIMSIZE of
             * 16, every 16th value). */
            if (nc_put_vara(ncid, varid, write_start, write_count, data)) ERR;
         }

         /* On rank 0, keep track of time. */
         if (!mpi_rank)
         {
            total_time = MPI_Wtime() - start_time;
            printf("%d\t%g\t%g\n", mpi_size, total_time, DIMSIZE * DIMSIZE * NUM_SLABS *
                   sizeof(int) / total_time);
         }

         /* Close the netcdf file. */
         if (nc_close(ncid)) ERR;

         /* Reopen the file and check it. */
         if ((ret = nc_open_par(file_name, NC_NOWRITE, comm, info, &ncid))) ERR;
         if (nc_inq(ncid, &ndims_in, &nvars_in, &natts_in, &unlimdimid_in)) ERR;
         if (ndims_in != NDIMS || nvars_in != 1 || natts_in != 1 ||
             unlimdimid_in != -1) ERR;

         /* Check the attributes. */
         if (nc_get_att_int(ncid, NC_GLOBAL, "num_processors", &mpi_size_in)) ERR;
         if (mpi_size_in != mpi_size) ERR;
         if (nc_get_att_int(ncid, 0, "var_num_processors", &mpi_size_in)) ERR;
         if (mpi_size_in != mpi_size) ERR;
         if (fv == 1)
         {
            if (nc_inq_var_fill(ncid, varid, &fill_mode_in, fill_value_in)) ERR;
            if (fill_mode_in != NC_FILL) ERR;
            if (memcmp(fill_value_in, fill_value, type_size)) ERR;
         }

         /* Read all the slabs this process is responsible for. */
         for (i = 0; i < NUM_SLABS / mpi_size; i++)
         {
            read_start[0] = NUM_SLABS / mpi_size * mpi_rank + i;
            /* printf("mpi_rank %d i %d read_start[0] %ld\n", mpi_rank, i, read_start[0]); */

            /* Read one slab of data. */
            if (nc_get_vara(ncid, varid, read_start, read_count, data_in)) ERR;

            /* Check data.  For the third fill value test, fill is
             * turned off. So don't bother testing the values where k
             * is zero. */
            /* printf("mpi_rank %d fv %d i %d j %d k %d int_data_in[j * k] %d int_expected_fill_value %d " */
            /*        "expected_value %d\n", mpi_rank, fv, i, j, k, int_data_in[j * k], */
            /*        int_expected_fill_value, expected_value); */
            switch (test_type[tt])
            {
            case NC_BYTE:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (byte_data_in[j * DIMSIZE + k] != (signed char)(k ? mpi_rank : byte_expected_fill_value)) ERR;
               break;
            case NC_SHORT:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (short_data_in[j * DIMSIZE + k] != (short)(k ? mpi_rank : short_expected_fill_value)) ERR;
               break;
            case NC_INT:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (int_data_in[j * DIMSIZE + k] != (int)(k ? mpi_rank : int_expected_fill_value)) ERR;
               break;
            case NC_FLOAT:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (float_data_in[j * DIMSIZE + k] != (float)(k ? mpi_rank : float_expected_fill_value)) ERR;
               break;
            case NC_DOUBLE:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (double_data_in[j * DIMSIZE + k] != (double)(k ? mpi_rank : double_expected_fill_value)) ERR;
               break;
            case NC_UBYTE:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (ubyte_data_in[j * DIMSIZE + k] != (unsigned char)(k ? mpi_rank : ubyte_expected_fill_value)) ERR;
               break;
            case NC_USHORT:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (ushort_data_in[j * DIMSIZE + k] != (unsigned short)(k ? mpi_rank : ushort_expected_fill_value)) ERR;
               break;
            case NC_UINT:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (uint_data_in[j * DIMSIZE + k] != (unsigned int)(k ? mpi_rank : uint_expected_fill_value)) ERR;
               break;
            case NC_INT64:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (int64_data_in[j * DIMSIZE + k] != (long long int)(k ? mpi_rank : int64_expected_fill_value)) ERR;
               break;
            case NC_UINT64:
               for (j = 0; j < DIMSIZE; j++)
                  for (k = 0; k < DIMSIZE; k++)
                     if (fv < 2 || k)
                        if (uint64_data_in[j * DIMSIZE + k] != (unsigned long long int)(k ? mpi_rank : uint64_expected_fill_value)) ERR;
               break;
            }
         } /* next slab */

         /* Close the netcdf file. */
         if (nc_close(ncid))  ERR;

         if (!mpi_rank)
            SUMMARIZE_ERR;
      } /* next test type */
   } /* next fill value test run */

   /* Shut down MPI. */
   MPI_Finalize();

   if (!mpi_rank)
      FINAL_RESULTS;
   return 0;
}
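
A serial sketch of the fill-value round trip that the loop above exercises for every type, assuming a netCDF build with netCDF-4 support; the file name fill_sketch.nc and the variable names are illustrative.

#include <stdio.h>
#include <netcdf.h>

/* Define a custom fill value on an NC_INT variable, read it back with
 * nc_inq_var_fill(), and report what the library stored. */
int main(void)
{
   int ncid, dimid, varid, fill_mode;
   int fill = 42 * 1000, fill_in;

   if (nc_create("fill_sketch.nc", NC_NETCDF4 | NC_CLOBBER, &ncid)) return 1;
   if (nc_def_dim(ncid, "x", 16, &dimid)) return 1;
   if (nc_def_var(ncid, "data", NC_INT, 1, &dimid, &varid)) return 1;

   /* The fill mode and value must be set while still in define mode. */
   if (nc_def_var_fill(ncid, varid, NC_FILL, &fill)) return 1;
   if (nc_enddef(ncid)) return 1;

   if (nc_inq_var_fill(ncid, varid, &fill_mode, &fill_in)) return 1;
   printf("fill mode %d, fill value %d\n", fill_mode, fill_in);

   return nc_close(ncid) ? 1 : 0;
}

Setting the fill value has to happen before nc_enddef(); afterwards nc_inq_var_fill() only reads it back.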
Example #22
0
int ex_open_par_int(const char *path, int mode, int *comp_ws, int *io_ws, float *version,
                    MPI_Comm comm, MPI_Info info, int run_version)
{
  int     exoid;
  int     status, stat_att, stat_dim;
  nc_type att_type = NC_NAT;
  size_t  att_len  = 0;
  int     nc_mode  = 0;
  int     old_fill;
  int     file_wordsize;
  int     dim_str_name;
  int     int64_status = 0;
  int     is_hdf5      = 0;
  int     is_pnetcdf   = 0;
  int     in_redef     = 0;

  char errmsg[MAX_ERR_LENGTH];

  EX_FUNC_ENTER();

  /* set error handling mode to no messages, non-fatal errors */
  ex_opts(exoptval); /* call required to set ncopts first time through */

  if (run_version != EX_API_VERS_NODOT && warning_output == 0) {
    int run_version_major = run_version / 100;
    int run_version_minor = run_version % 100;
    int lib_version_major = EX_API_VERS_NODOT / 100;
    int lib_version_minor = EX_API_VERS_NODOT % 100;
    fprintf(stderr,
            "EXODUS: Warning: This code was compiled with exodus "
            "version %d.%02d,\n          but was linked with exodus "
            "library version %d.%02d\n          This is probably an "
            "error in the build process of this code.\n",
            run_version_major, run_version_minor, lib_version_major, lib_version_minor);
    warning_output = 1;
  }

  if ((mode & EX_READ) && (mode & EX_WRITE)) {
    snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: Cannot specify both EX_READ and EX_WRITE");
    ex_err(__func__, errmsg, EX_BADFILEMODE);
    EX_FUNC_LEAVE(EX_FATAL);
  }

  if (mode & EX_WRITE) {
    nc_mode = (NC_WRITE | NC_MPIIO);
  }
  else {
    nc_mode = (NC_NOWRITE | NC_SHARE | NC_MPIIO);
  }
  if ((status = nc_open_par(path, nc_mode, comm, info, &exoid)) != NC_NOERR) {
    /* It is possible that the user is trying to open a netcdf4
       file, but the netcdf4 capabilities aren't available in the
       netcdf linked to this library. Note that we can't just use a
       compile-time define since we could be using a shareable
       netcdf library, so the netcdf4 capabilities aren't known
       until runtime...

       Later versions of netcdf-4.X have a function that can be
       queried to determine whether the library being used was
       compiled with --enable-netcdf4, but not everyone is using that
       version yet, so we may have to do some guessing...

       For now, query the beginning of the file to see whether it is an
       HDF-5 file.  If it is, assume the open failure is due to the netcdf
       library not having netcdf4 features enabled, unless we have the
       define that shows they are enabled, in which case assume some other
       error...
    */
    int type = 0;
    ex_check_file_type(path, &type);

    if (type == 5) {
#if NC_HAS_HDF5
      fprintf(stderr,
              "EXODUS: ERROR: Attempting to open the netcdf-4 "
              "file:\n\t'%s'\n\t failed. The netcdf library supports "
              "netcdf-4 so there must be a filesystem or some other "
              "issue \n",
              path);
#else
      /* This is an hdf5 (netcdf4) file. If NC_HAS_HDF5 is not defined,
         then we either don't have hdf5 support in this netcdf version,
         OR this is an older netcdf version that doesn't provide that define.

         In either case, we don't have enough information, so we
         assume that the netcdf doesn't have netcdf4 capabilities
         enabled.  Tell the user...
      */
      fprintf(stderr,
              "EXODUS: ERROR: Attempting to open the netcdf-4 "
              "file:\n\t'%s'\n\tEither the netcdf library does not "
              "support netcdf-4 or there is a filesystem or some "
              "other issue \n",
              path);
#endif
    }
    else if (type == 4) {
#if defined(NC_64BIT_DATA)
      fprintf(stderr,
              "EXODUS: ERROR: Attempting to open the CDF5 "
              "file:\n\t'%s'\n\t failed. The netcdf library supports "
              "CDF5-type files so there must be a filesystem or some other "
              "issue \n",
              path);
#else
      /* This is a CDF5 (64BIT_DATA) file. If NC_64BIT_DATA is not defined,
         then we either don't have cdf5 support in this netcdf version,
         OR this is an older netcdf version that doesn't provide that define.

         In either case, we don't have enough information, so we
         assume that the netcdf doesn't have cdf5 capabilities
         enabled.  Tell the user...
      */
      fprintf(stderr,
              "EXODUS: ERROR: Attempting to open the CDF5 "
              "file:\n\t'%s'\n\tEither the netcdf library does not "
              "support CDF5 or there is a filesystem or some "
              "other issue \n",
              path);

#endif
    }
    else if (type == 1 || type == 2) {
#if NC_HAS_PNETCDF
      fprintf(stderr,
              "EXODUS: ERROR: Attempting to open the classic NetCDF "
              "file:\n\t'%s'\n\t failed. The netcdf library supports "
              "PNetCDF files as required for parallel readinf of this "
              "file type, so there must be a filesystem or some other "
              "issue \n",
              path);
#else
      /* This is a normal NetCDF format file; for parallel reading, the PNetCDF
         library is required, but it is not compiled into this version.
      */
      fprintf(stderr,
              "EXODUS: ERROR: Attempting to open the NetCDF "
              "file:\n\t'%s'\n\tThe NetCDF library was not "
              "built with PNetCDF support as required for parallel access to this file.\n",
              path);

#endif
    }

    snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: failed to open %s of type %d read only", path, type);
    ex_err(__func__, errmsg, status);
    EX_FUNC_LEAVE(EX_FATAL);
  }

  /* File opened correctly */
  int type = 0;
  ex_check_file_type(path, &type);
  if (type == 5) {
    is_hdf5 = 1;
  }
  else if (type == 1 || type == 2 || type == 4) {
    is_pnetcdf = 1;
  }

  if (mode & EX_WRITE) { /* Appending */
    /* turn off automatic filling of netCDF variables */
    if (is_pnetcdf) {
      if ((status = nc_redef(exoid)) != NC_NOERR) {
        snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: failed to put file id %d into define mode", exoid);
        ex_err(__func__, errmsg, status);
        EX_FUNC_LEAVE(EX_FATAL);
      }
      in_redef = 1;
    }

    if ((status = nc_set_fill(exoid, NC_NOFILL, &old_fill)) != NC_NOERR) {
      snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: failed to set nofill mode in file id %d", exoid);
      ex_err(__func__, errmsg, status);
      EX_FUNC_LEAVE(EX_FATAL);
    }

    stat_att = nc_inq_att(exoid, NC_GLOBAL, ATT_MAX_NAME_LENGTH, &att_type, &att_len);
    stat_dim = nc_inq_dimid(exoid, DIM_STR_NAME, &dim_str_name);
    if (stat_att != NC_NOERR || stat_dim != NC_NOERR) {
      if (!in_redef) {
        if ((status = nc_redef(exoid)) != NC_NOERR) {
          snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: failed to put file id %d into define mode",
                   exoid);
          ex_err(__func__, errmsg, status);
          EX_FUNC_LEAVE(EX_FATAL);
        }
        in_redef = 1;
      }
      if (stat_att != NC_NOERR) {
        int max_so_far = 32;
        nc_put_att_int(exoid, NC_GLOBAL, ATT_MAX_NAME_LENGTH, NC_INT, 1, &max_so_far);
      }

      /* If the DIM_STR_NAME dimension does not exist on the database, we need to
       * add it now. */
      if (stat_dim != NC_NOERR) {
        /* Not found; set to default value of 32+1. */
        int max_name = ex_default_max_name_length < 32 ? 32 : ex_default_max_name_length;
        nc_def_dim(exoid, DIM_STR_NAME, max_name + 1, &dim_str_name);
      }
    }

    if (in_redef) {
      if ((status = nc_enddef(exoid)) != NC_NOERR) {
        snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: failed to complete definition in file id %d",
                 exoid);
        ex_err(__func__, errmsg, status);
        EX_FUNC_LEAVE(EX_FATAL);
      }
      in_redef = 0;
    }

    /* If this is a parallel execution and we are appending, then we
     * need to set the parallel access method for all transient variables
     * to NC_COLLECTIVE, since they will be extended.
     */
    int ndims;    /* number of dimensions */
    int nvars;    /* number of variables */
    int ngatts;   /* number of global attributes */
    int recdimid; /* id of unlimited dimension */

    int varid;

    /* Determine number of variables on the database... */
    nc_inq(exoid, &ndims, &nvars, &ngatts, &recdimid);

    for (varid = 0; varid < nvars; varid++) {
      struct ncvar var;
      nc_inq_var(exoid, varid, var.name, &var.type, &var.ndims, var.dims, &var.natts);

      if ((strcmp(var.name, VAR_GLO_VAR) == 0) || (strncmp(var.name, "vals_elset_var", 14) == 0) ||
          (strncmp(var.name, "vals_sset_var", 13) == 0) ||
          (strncmp(var.name, "vals_fset_var", 13) == 0) ||
          (strncmp(var.name, "vals_eset_var", 13) == 0) ||
          (strncmp(var.name, "vals_nset_var", 13) == 0) ||
          (strncmp(var.name, "vals_nod_var", 12) == 0) ||
          (strncmp(var.name, "vals_edge_var", 13) == 0) ||
          (strncmp(var.name, "vals_face_var", 13) == 0) ||
          (strncmp(var.name, "vals_elem_var", 13) == 0) ||
          (strcmp(var.name, VAR_WHOLE_TIME) == 0)) {
        nc_var_par_access(exoid, varid, NC_COLLECTIVE);
      }
    }
  } /* End of (mode & EX_WRITE) */

  /* determine version of EXODUS file, and the word size of
   * floating point and integer values stored in the file
   */

  if ((status = nc_get_att_float(exoid, NC_GLOBAL, ATT_VERSION, version)) != NC_NOERR) {
    snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: failed to get database version for file id: %d",
             exoid);
    ex_err(__func__, errmsg, status);
    EX_FUNC_LEAVE(EX_FATAL);
  }

  /* check ExodusII file version - old version 1.x files are not supported */
  if (*version < 2.0) {
    snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: Unsupported file version %.2f in file id: %d",
             *version, exoid);
    ex_err(__func__, errmsg, EX_BADPARAM);
    EX_FUNC_LEAVE(EX_FATAL);
  }

  if (nc_get_att_int(exoid, NC_GLOBAL, ATT_FLT_WORDSIZE, &file_wordsize) !=
      NC_NOERR) { /* try old (prior to db version 2.02) attribute name */
    if ((status = nc_get_att_int(exoid, NC_GLOBAL, ATT_FLT_WORDSIZE_BLANK, &file_wordsize)) !=
        NC_NOERR) {
      snprintf(errmsg, MAX_ERR_LENGTH, "ERROR: failed to get file wordsize from file id: %d",
               exoid);
      ex_err(__func__, errmsg, status);
      EX_FUNC_LEAVE(EX_FATAL);
    }
  }

  /* See if the int64 status attribute exists and, if so, what data is stored
   * as int64.  Older files don't have the attribute, so it is not an error if
   * it is missing.
   */
  if (nc_get_att_int(exoid, NC_GLOBAL, ATT_INT64_STATUS, &int64_status) != NC_NOERR) {
    int64_status = 0; /* Just in case it gets munged by a failed get_att_int call */
  }

  /* Merge in API int64 status flags as specified by caller of function... */
  int64_status |= (mode & EX_ALL_INT64_API);

  /* Verify that there is not an existing file_item struct for this
     exoid.  This could happen (and has) when an application calls
     ex_open(), but then closes the file using nc_close() and then reopens
     the file.  NetCDF will possibly reuse the exoid, which results in
     internal corruption in the exodus data structures, since exodus does
     not know that the file was closed and a new file possibly opened for
     this exoid.
  */
  if (ex_find_file_item(exoid) != NULL) {
    snprintf(errmsg, MAX_ERR_LENGTH,
             "ERROR: There is an existing file already using the file "
             "id %d which was also assigned to file %s.\n\tWas "
             "nc_close() called instead of ex_close() on an open Exodus "
             "file?\n",
             exoid, path);
    ex_err(__func__, errmsg, EX_BADFILEID);
    nc_close(exoid);
    EX_FUNC_LEAVE(EX_FATAL);
  }

  /* initialize floating point and integer size conversion. */
  if (ex_conv_ini(exoid, comp_ws, io_ws, file_wordsize, int64_status, 1, is_hdf5, is_pnetcdf) !=
      EX_NOERR) {
    snprintf(errmsg, MAX_ERR_LENGTH,
             "ERROR: failed to initialize conversion routines in file id %d", exoid);
    ex_err(__func__, errmsg, EX_LASTERR);
    EX_FUNC_LEAVE(EX_FATAL);
  }

  EX_FUNC_LEAVE(exoid);
}
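
A minimal usage sketch for the parallel open path above, assuming an MPI program, an Exodus library built with parallel support, and an existing file name mesh.exo (illustrative).

#include <mpi.h>
#include <stdio.h>
#include "exodusII.h"
#include "exodusII_par.h"

/* Open an existing Exodus file in parallel for reading, report its
 * database version, and close it again. */
int main(int argc, char **argv)
{
   int   comp_ws = 0, io_ws = 0;   /* 0: let the library choose/query word sizes */
   float version;
   int   exoid;

   MPI_Init(&argc, &argv);

   exoid = ex_open_par("mesh.exo", EX_READ, &comp_ws, &io_ws, &version,
                       MPI_COMM_WORLD, MPI_INFO_NULL);
   if (exoid < 0)
      fprintf(stderr, "ex_open_par failed\n");
   else {
      printf("opened Exodus file, database version %.2f\n", version);
      ex_close(exoid);
   }

   MPI_Finalize();
   return 0;
}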