Example #1
/*
 * Create the appropriate File access property list
 */
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type,
                     hbool_t use_gpfs)
{
    hid_t ret_pl = -1;
    herr_t ret;                 /* generic return value */
    int mpi_rank;		/* mpi variables */

    /* need the rank for error checking macros */
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    ret_pl = H5Pcreate (H5P_FILE_ACCESS);
    VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");

    if (l_facc_type == FACC_DEFAULT)
        return (ret_pl);

    if (l_facc_type == FACC_MPIO) {
        /* set Parallel access with communicator */
        ret = H5Pset_fapl_mpio(ret_pl, comm, info);
        VRFY((ret >= 0), "");
        return(ret_pl);
    }

    if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
        hid_t mpio_pl;

        mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
        VRFY((mpio_pl >= 0), "");
        /* set Parallel access with communicator */
        ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
        VRFY((ret >= 0), "");

        /* setup file access template */
        ret_pl = H5Pcreate (H5P_FILE_ACCESS);
        VRFY((ret_pl >= 0), "");
        /* use the MPIO plist for both the metadata and raw-data halves of the split driver */
        ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
        VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
        H5Pclose(mpio_pl);
        return(ret_pl);
    }

    if (l_facc_type == FACC_MPIPOSIX) {
        /* set Parallel access with communicator */
        ret = H5Pset_fapl_mpiposix(ret_pl, comm, use_gpfs);
        VRFY((ret >= 0), "H5Pset_fapl_mpiposix succeeded");
        return(ret_pl);
    }

    /* unknown file access types */
    return (ret_pl);
}
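
A minimal usage sketch for the helper above, assuming the FACC_* flags, the VRFY macro, and an initialized MPI environment from the surrounding test harness; the file name is illustrative:

hid_t fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL,
                                  FACC_MPIO, 0 /* use_gpfs */);
hid_t fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
VRFY((fid >= 0), "H5Fcreate succeeded");
H5Pclose(fapl);
/* ... collective reads and writes on fid ... */
H5Fclose(fid);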
Example #2
  void hdf_archive::set_access_plist(bool use_collective, Communicate* comm)
  {
    access_id=H5P_DEFAULT;
    if(comm && comm->size()>1) //for parallel communicator
    {
      if(use_collective)
      {
#if defined(H5_HAVE_PARALLEL) && defined(ENABLE_PHDF5)
        MPI_Info info=MPI_INFO_NULL;
        access_id = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(access_id,comm->getMPI(),info);
        xfer_plist = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(xfer_plist,H5FD_MPIO_COLLECTIVE);
#else
        use_collective=false;//cannot use collective
#endif
      }
      Mode.set(IS_PARALLEL,use_collective);
      //NOIO is true if this task does not need to participate in I/O
      Mode.set(NOIO,comm->rank()&&!use_collective);
    }
    else
    {
      Mode.set(IS_PARALLEL,false);
      Mode.set(NOIO,false);
    }
  }
Example #3
PetscErrorCode  PetscViewerFileSetName_HDF5(PetscViewer viewer, const char name[])
{
  PetscViewer_HDF5 *hdf5 = (PetscViewer_HDF5*) viewer->data;
#if defined(PETSC_HAVE_H5PSET_FAPL_MPIO)
  MPI_Info          info = MPI_INFO_NULL;
#endif
  hid_t             plist_id;
  herr_t            herr;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  ierr = PetscStrallocpy(name, &hdf5->filename);CHKERRQ(ierr);
  /* Set up file access property list with parallel I/O access */
  plist_id = H5Pcreate(H5P_FILE_ACCESS);
#if defined(PETSC_HAVE_H5PSET_FAPL_MPIO)
  herr = H5Pset_fapl_mpio(plist_id, PetscObjectComm((PetscObject)viewer), info);CHKERRQ(herr);
#endif
  /* Create or open the file collectively */
  switch (hdf5->btype) {
  case FILE_MODE_READ:
    hdf5->file_id = H5Fopen(name, H5F_ACC_RDONLY, plist_id);
    break;
  case FILE_MODE_APPEND:
    hdf5->file_id = H5Fopen(name, H5F_ACC_RDWR, plist_id);
    break;
  case FILE_MODE_WRITE:
    hdf5->file_id = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
    break;
  default:
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER, "Must call PetscViewerFileSetMode() before PetscViewerFileSetName()");
  }
  if (hdf5->file_id < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB, "H5Fcreate/H5Fopen failed for %s", name);
  H5Pclose(plist_id);
  PetscFunctionReturn(0);
}
Example #4
File: io.c Project: darien0/cow
void _io_domain_commit(cow_domain *d)
{
#if (COW_HDF5)
  for (int n=0; n<d->n_dims; ++n) {
    d->L_nint_h5[n] = d->L_nint[n]; // Selection size, target and destination
    d->L_ntot_h5[n] = d->L_ntot[n]; // Memory space total size
    d->L_strt_h5[n] = d->L_strt[n]; // Memory space selection start
    d->G_ntot_h5[n] = d->G_ntot[n]; // Global space total size
    d->G_strt_h5[n] = d->G_strt[n]; // Global space selection start
  }
  // Here we create the following property lists:
  //
  // file access property list   ........ for the call to H5Fopen
  // dset creation property list ........ for the call to H5Dcreate
  // dset transfer property list ........ for the call to H5Dwrite
  // ---------------------------------------------------------------------------
  d->fapl = H5Pcreate(H5P_FILE_ACCESS);
  d->dcpl = H5Pcreate(H5P_DATASET_CREATE);
  d->dxpl = H5Pcreate(H5P_DATASET_XFER);
#if (COW_HDF5_MPI && COW_MPI)
  if (cow_mpirunning()) {
    H5Pset_fapl_mpio(d->fapl, d->mpi_cart, MPI_INFO_NULL);
  }
#endif // COW_HDF5_MPI && COW_MPI
#endif // COW_HDF5
}
Example #5
OHDF5mpipp::OHDF5mpipp(std::string filename, int buf_size, nestio::Logger_type logger_type)
: buf_size(buf_size), RANK(2), logger_type(logger_type)
{
	//Init HDF5 file
	MPI_Comm_size(MPI_COMM_WORLD, &clientscount);
	
	hid_t fapl_id;
	fapl_id = H5Pcreate(H5P_FILE_ACCESS);
	H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);
    /* Create a new file. If the file exists, its contents will be overwritten. */
	file = H5Fcreate (filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
	
	
	MPI_Comm_rank (MPI_COMM_WORLD, &own_id);
	
	int num_threads=omp_get_max_threads();
	if (logger_type == nestio::Buffered || logger_type == nestio::Collective) {
	  
	  buffer_multi = new oHDF5Buffer;
	  buffer_spike = new oHDF5Buffer;
	  //for (int i=0; i<num_threads;i++) {
	  buffer_multi->extend(buf_size*num_threads);
	  buffer_spike->extend(buf_size*num_threads);
	  //}
	}
	
	H5Pclose(fapl_id);
}
Example #6
hid_t seissol::checkpoint::h5::Fault::initFile(int odd, const char* filename)
{
	hid_t h5file;

	if (loaded()) {
		// Open the file
		h5file = open(filename, false);
		checkH5Err(h5file);

		// Fault writer
		m_h5timestepFault[odd] = H5Aopen(h5file, "timestep_fault", H5P_DEFAULT);
		checkH5Err(m_h5timestepFault[odd]);

		// Data
		for (unsigned int i = 0; i < NUM_VARIABLES; i++) {
			m_h5data[odd][i] = H5Dopen(h5file, VAR_NAMES[i], H5P_DEFAULT);
			checkH5Err(m_h5data[odd][i]);
		}
	} else {
		// Create the file
		hid_t h5plist = H5Pcreate(H5P_FILE_ACCESS);
		checkH5Err(h5plist);
		checkH5Err(H5Pset_libver_bounds(h5plist, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST));
#ifdef USE_MPI
		checkH5Err(H5Pset_fapl_mpio(h5plist, comm(), MPI_INFO_NULL));
#endif // USE_MPI

		h5file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, h5plist);
		checkH5Err(h5file);
		checkH5Err(H5Pclose(h5plist));

		// Create scalar dataspace for attributes
		hid_t h5spaceScalar = H5Screate(H5S_SCALAR);
		checkH5Err(h5spaceScalar);

		// Fault writer
		m_h5timestepFault[odd] = H5Acreate(h5file, "timestep_fault",
				H5T_STD_I32LE, h5spaceScalar, H5P_DEFAULT, H5P_DEFAULT);
		checkH5Err(m_h5timestepFault[odd]);
		int t = 0;
		checkH5Err(H5Awrite(m_h5timestepFault[odd], H5T_NATIVE_INT, &t));

		checkH5Err(H5Sclose(h5spaceScalar));

		// Variables
		for (unsigned int i = 0; i < NUM_VARIABLES; i++) {
			h5plist = H5Pcreate(H5P_DATASET_CREATE);
			checkH5Err(h5plist);
			checkH5Err(H5Pset_layout(h5plist, H5D_CONTIGUOUS));
			checkH5Err(H5Pset_alloc_time(h5plist, H5D_ALLOC_TIME_EARLY));
			m_h5data[odd][i] = H5Dcreate(h5file, VAR_NAMES[i], H5T_IEEE_F64LE, m_h5fSpaceData,
				H5P_DEFAULT, h5plist, H5P_DEFAULT);
			checkH5Err(m_h5data[odd][i]);
			checkH5Err(H5Pclose(h5plist));
		}
	}

	return h5file;
}
Example #7
/* ------- begin -------------------------- readConvergence.c  --- */
void readConvergence(void) {
  /* This is a self-contained function to read the convergence matrix,
     written by RH. */
  const char routineName[] = "readConvergence";
  char *atmosID;
  int ncid, ncid_mpi, nx, ny;
  size_t attr_size;
  hid_t plist;
  H5T_class_t type_class;

  mpi.rh_converged = matrix_int(mpi.nx, mpi.ny);

  /* --- Open the inputdata file --- */
  if (( plist = H5Pcreate(H5P_FILE_ACCESS )) < 0) HERR(routineName);
  if (( H5Pset_fapl_mpio(plist, mpi.comm, mpi.info) ) < 0) HERR(routineName);
  if (( ncid = H5Fopen(INPUTDATA_FILE, H5F_ACC_RDWR, plist) ) < 0)
    HERR(routineName);
  if (( H5Pclose(plist) ) < 0) HERR(routineName);
  /* Get ncid of the MPI group */
  if (( ncid_mpi = H5Gopen(ncid, "mpi", H5P_DEFAULT) ) < 0) HERR(routineName);

  /* --- Consistency checks --- */
  /* Check that atmosID is the same */
  if (( H5LTget_attribute_info(ncid, "/", "atmosID", NULL, &type_class,
                               &attr_size) ) < 0) HERR(routineName);
  atmosID = (char *) malloc(attr_size + 1);
  if (( H5LTget_attribute_string(ncid, "/", "atmosID", atmosID) ) < 0)
    HERR(routineName);
  if (!strstr(atmosID, atmos.ID)) {
    sprintf(messageStr,
       "Indata file was calculated for different atmosphere (%s) than current",
	     atmosID);
    Error(WARNING, routineName, messageStr);
    }
  free(atmosID);
  /* Check that dimension sizes match */
  if (( H5LTget_attribute_int(ncid, "/", "nx", &nx) ) < 0) HERR(routineName);
  if (nx != mpi.nx) {
    sprintf(messageStr,
	    "Number of x points mismatch: expected %d, found %d.",
	    mpi.nx, (int)nx);
    Error(WARNING, routineName, messageStr);
  }
  if (( H5LTget_attribute_int(ncid, "/", "ny", &ny) ) < 0) HERR(routineName);
  if (ny != mpi.ny) {
    sprintf(messageStr,
	    "Number of y points mismatch: expected %d, found %d.",
	    mpi.ny, (int)ny);
    Error(WARNING, routineName, messageStr);
  }
  /* --- Read variable --- */
  if (( H5LTread_dataset_int(ncid_mpi, CONV_NAME,
                             mpi.rh_converged[0]) ) < 0) HERR(routineName);
  /* --- Close inputdata file --- */
  if (( H5Gclose(ncid_mpi) ) < 0) HERR(routineName);
  if (( H5Fclose(ncid) ) < 0) HERR(routineName);
  return;
}
Example #8
/*
 * parse the command line options
 */
static int
parse_options(int argc, char **argv)
{
    while (--argc) {
        if (**(++argv) != '-') {
            break;
        } else {
            switch (*(*argv + 1)) {
                case 'v':
                    if (*((*argv + 1) + 1))
                        ParseTestVerbosity((*argv + 1) + 1);
                    else
                        SetTestVerbosity(VERBO_MED);
                    break;
                case 'f':
                    if (--argc < 1) {
                        nerrors++;
                        return (1);
                    }
                    if (**(++argv) == '-') {
                        nerrors++;
                        return (1);
                    }
                    paraprefix = *argv;
                    break;
                case 'h':   /* print help message--return with nerrors set */
                    return (1);
                default:
                    nerrors++;
                    return (1);
            }
        }
    } /* while */

    /* compose the test filenames */
    {
        int i, n;
        hid_t plist;

        plist = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(plist, MPI_COMM_WORLD, MPI_INFO_NULL);
        n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1;  /* exclude the NULL */

        for (i = 0; i < n; i++)
            if (h5_fixname(FILENAME[i], plist, filenames[i], sizeof(filenames[i])) == NULL) {
                printf("h5_fixname failed\n");
                nerrors++;
                return (1);
            }
        H5Pclose(plist);
        if (VERBOSE_MED) {
            printf("Test filenames are:\n");
            for (i = 0; i < n; i++)
                printf("    %s\n", filenames[i]);
        }
    }

    return(0);
}
Example #9
void BigArray<T>::loadNC(const std::string &fileName)
{
    dataFileName = fileName;

    connection.disconnect();
    connection = releaseSignal().connect(boost::bind(&gurls::BigArray<T>::close, this));

    std::string errorString = "Error opening file " + fileName + ":";


    // Set up file access property list with parallel I/O access
    plist_id = H5Pcreate(H5P_FILE_ACCESS);
    if(plist_id == -1)
        throw gException(errorString);

    herr_t status;

#ifdef USE_MPIIO
    status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
#else
    status = H5Pset_fapl_mpiposix(plist_id, MPI_COMM_WORLD, false);
#endif
    CHECK_HDF5_ERR(status, errorString)

    // Open the file collectively and release the property list identifier.
    file_id = H5Fopen(fileName.c_str(), H5F_ACC_RDWR, plist_id);
    CHECK_HDF5_ERR(file_id, errorString)

    status = H5Pclose(plist_id);
    CHECK_HDF5_ERR(status, errorString)

    dset_id =  H5Dopen(file_id, "mat", H5P_DEFAULT);
    CHECK_HDF5_ERR(dset_id, errorString)

    hid_t filespace = H5Dget_space( dset_id );
    CHECK_HDF5_ERR(filespace, errorString)

    hsize_t dims[2], maxDims[2];
    status = H5Sget_simple_extent_dims(filespace, dims, maxDims);
    CHECK_HDF5_ERR(status, errorString)

    status = H5Sclose(filespace);
    CHECK_HDF5_ERR(status, errorString)

    this->numrows = static_cast<unsigned long>(dims[1]);
    this->numcols = static_cast<unsigned long>(dims[0]);

    // Create property list for independent dataset transfers.
    plist_id = H5Pcreate(H5P_DATASET_XFER);
    if(plist_id == -1)
        throw gException(errorString);

    status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
    CHECK_HDF5_ERR(status, errorString)

}
Example #10
void pHdf5IoDataModel::fileOpen(const QString &file_name, FileMode mode)
{
    //proceed only if the file is not already open; otherwise warn the user below
    if(!d->file_is_open) {

        //error out if the communicator has not been set
        if(d->comm==nullptr) {
            dtkError() << __func__ << "communicator not set";
        }

        // H5P_FILE_ACCESS applies to H5Fcreate and H5Fopen
        d->prop_list_id = H5Pcreate(H5P_FILE_ACCESS);

        MPI_Info info = MPI_INFO_NULL;
        MPI_Comm comm = *static_cast<MPI_Comm *>(d->comm->data());
        H5Pset_fapl_mpio(d->prop_list_id, comm, info);

        switch (mode) {
        case dtkIoDataModel::Trunc:
            d->file_id = H5Fcreate (file_name.toUtf8().constData(), H5F_ACC_TRUNC,
                                   H5P_DEFAULT, d->prop_list_id);

            break;
        case dtkIoDataModel::NotExisting:
            d->file_id = H5Fcreate(file_name.toUtf8().constData(), H5F_ACC_EXCL,
                                   H5P_DEFAULT, d->prop_list_id);
            break;
        case dtkIoDataModel::ReadOnly:
            d->file_id = H5Fopen(file_name.toUtf8().constData(), H5F_ACC_RDONLY,
                                 d->prop_list_id);
            break;
        case dtkIoDataModel::ReadWrite:
            d->file_id = H5Fopen(file_name.toUtf8().constData(), H5F_ACC_RDWR,
                                 d->prop_list_id);
            break;
        default:
            dtkError() << "unsupported fileMode";
        };

        //close the property list for file
        H5Pclose(d->prop_list_id);
        if(d->file_id<0) {
            dtkError() << "error in fileOpen for file_name " << file_name;
        }
        else {
            //if the file opened correctly, create a property list to collectively write datasets
            d->file_is_open = true;
            d->prop_list_id = H5Pcreate(H5P_DATASET_XFER);
            H5Pset_dxpl_mpio(d->prop_list_id, H5FD_MPIO_COLLECTIVE);
        }
    }
    else {
        qDebug() << "File" << file_name << "is already open, please close it before opening a new one";
    }
}
Example #11
hid_t ASDF_create_new_file(const char *filename, MPI_Comm comm) {
  hid_t plist_id, file_id;

  CHK_H5(plist_id = H5Pcreate(H5P_FILE_ACCESS));
  CHK_H5(H5Pset_fapl_mpio(plist_id, comm, MPI_INFO_NULL));
  /* Create the file collectively.*/
  CHK_H5(file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id));
  CHK_H5(H5Pclose(plist_id));

  return file_id;
}
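
A hedged usage sketch for ASDF_create_new_file; the file name is illustrative, and every rank in the communicator is assumed to call it collectively:

hid_t fid = ASDF_create_new_file("waveforms.h5", MPI_COMM_WORLD);
/* ... collective dataset creation and writes ... */
CHK_H5(H5Fclose(fid));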
Example #12
/*-------------------------------------------------------------------------
 * Function:	main
 *
 * Purpose:	Part 2 of a two-part H5Fflush() test.
 *
 * Return:	Success:	0
 *
 *		Failure:	1
 *
 * Programmer:	Robb Matzke
 *              Friday, October 23, 1998
 *
 * Modifications:
 * 		Leon Arber
 * 		Sept. 26, 2006, expanded to check for the case where the file was not flushed.
 *
 *-------------------------------------------------------------------------
 */
int
main(int argc, char* argv[])
{
    hid_t fapl1, fapl2;
    H5E_auto2_t func;

    char	name[1024];
    const char *envval = NULL;

    int mpi_size, mpi_rank;
    MPI_Comm comm  = MPI_COMM_WORLD;
    MPI_Info info  = MPI_INFO_NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &mpi_size);
    MPI_Comm_rank(comm, &mpi_rank);

    fapl1 = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl1, comm, info);

    fapl2 = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl2, comm, info);


    if(mpi_rank == 0)
	TESTING("H5Fflush (part2 with flush)");

    /* Don't run this test using the core or split file drivers */
    envval = HDgetenv("HDF5_DRIVER");
    if (envval == NULL)
        envval = "nomatch";
    if (HDstrcmp(envval, "core") && HDstrcmp(envval, "split")) {
	/* Check the case where the file was flushed */
	h5_fixname(FILENAME[0], fapl1, name, sizeof name);
	if(check_file(name, fapl1))
	{
	    H5_FAILED()
	    goto error;
	}
	else if(mpi_rank == 0)
Example #13
/*
 * test file access by a communicator besides COMM_WORLD.
 * Split COMM_WORLD into two: one (even_comm) contains the original
 * processes of even ranks.  The other (odd_comm) contains the original
 * processes of odd ranks.  Processes in even_comm create a file, then
 * close it, using even_comm.  Processes in odd_comm just do a barrier
 * using odd_comm.  Then they all do a barrier using COMM_WORLD.
 * If the file creation and close do not take the correct collective
 * action according to the communicator argument, the processes will
 * freeze up sooner or later due to mismatched barriers.
 */
void
test_split_comm_access(char filenames[][PATH_MAX])
{
    MPI_Comm comm;
    MPI_Info info = MPI_INFO_NULL;
    int color, mrc;
    int newrank, newprocs;
    hid_t fid;			/* file IDs */
    hid_t acc_tpl;		/* File access properties */
    herr_t ret;			/* generic return value */

    if (verbose)
	printf("Independent write test on file %s %s\n",
	    filenames[0], filenames[1]);

    color = mpi_rank%2;
    mrc = MPI_Comm_split (MPI_COMM_WORLD, color, mpi_rank, &comm);
    assert(mrc==MPI_SUCCESS);
    MPI_Comm_size(comm,&newprocs);
    MPI_Comm_rank(comm,&newrank);

    if (color){
	/* odd-rank processes */
	mrc = MPI_Barrier(comm);
	assert(mrc==MPI_SUCCESS);
    }else{
	/* even-rank processes */
	/* setup file access template */
	acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
	assert(acc_tpl != FAIL);

	/* set Parallel access with communicator */
	ret = H5Pset_fapl_mpio(acc_tpl, comm, info);
	assert(ret != FAIL);

	/* create the file collectively */
	fid=H5Fcreate(filenames[color],H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl);
	assert(fid != FAIL);
	MESG("H5Fcreate succeed");

	/* Release file-access template */
	ret=H5Pclose(acc_tpl);
	assert(ret != FAIL);

	ret=H5Fclose(fid);
	assert(ret != FAIL);
    }
    if (mpi_rank == 0){
	mrc = MPI_File_delete(filenames[color], info);
	assert(mrc==MPI_SUCCESS);
    }
}
Example #14
/**
* \brief Opens an HDF file at the given path
* \param[in]  path    Path to the HDF file to open
* \param[out] file_id Pointer to an hid_t
* \returns Status code
* \retval 1 Failure
* \retval 0 Success
* \sa ch5_close
*/
int ch5_open(const char *path, hid_t *file_id) {
  hid_t plist = H5P_DEFAULT;
#ifdef HAVE_MPIIO
  plist = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist, MPI_COMM_WORLD, MPI_INFO_NULL);
#endif
  hid_t id = H5Fopen(path, H5F_ACC_RDWR, plist);
#ifdef HAVE_MPIIO
  H5Pclose(plist);
#endif
  if (id < 0) return 1;
  *file_id = id;
  return 0;
}
Example #15
/**
* \brief Creates a new HDF file at the given path
* \note Will overwrite/truncate an existing file at the given path
* \param[in]  path    Path to the HDF file to create
* \param[out] file_id Pointer to an hid_t
* \returns Status code
* \retval 1 Failure
* \retval 0 Success
* \sa ch5_close
*/
int ch5_create(const char *path, hid_t *file_id) {
  hid_t plist = H5P_DEFAULT;
#ifdef HAVE_MPIIO
  plist = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist, MPI_COMM_WORLD, MPI_INFO_NULL);
#endif
  hid_t id = H5Fcreate(path, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
#ifdef HAVE_MPIIO
  H5Pclose(plist);
#endif
  if (id < 0) return 1;
  *file_id = id;
  ch5_meta_set_name( *file_id, path );
  return 0;
}
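
A hedged usage sketch for ch5_create and ch5_open; "mesh.h5" is an illustrative path, and ch5_close (referenced in the doc comments above) is assumed to be declared elsewhere in the library:

hid_t file;
if (ch5_create("mesh.h5", &file) != 0) {
  fprintf(stderr, "could not create mesh.h5\n");
  return 1;
}
/* ... write datasets against file ... */
H5Fclose(file); /* or the library's own ch5_close */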
Example #16
op_set op_decl_set_hdf5(char const *file, char const *name)
{
  //create new communicator
  int my_rank, comm_size;
  MPI_Comm_dup(MPI_COMM_WORLD, &OP_MPI_HDF5_WORLD);
  MPI_Comm_rank(OP_MPI_HDF5_WORLD, &my_rank);
  MPI_Comm_size(OP_MPI_HDF5_WORLD, &comm_size);

  //MPI variables
  MPI_Info info  = MPI_INFO_NULL;

  //HDF5 APIs definitions
  hid_t       file_id; //file identifier
  hid_t plist_id;  //property list identifier
  hid_t dset_id; //dataset identifier

  //Set up file access property list with parallel I/O access
  plist_id = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist_id, OP_MPI_HDF5_WORLD, info);

  file_id = H5Fopen(file, H5F_ACC_RDONLY, plist_id );
  H5Pclose(plist_id);

  //Open the dataset with default properties.
  dset_id = H5Dopen(file_id, name, H5P_DEFAULT);

  //Create property list for collective dataset read.
  plist_id = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);

  int g_size = 0;
  //read data
  H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, plist_id, &g_size);

  H5Pclose(plist_id);
  H5Dclose(dset_id);

  H5Fclose(file_id);

  //calculate local size of set for this mpi process
  int l_size = compute_local_size (g_size, comm_size, my_rank);
  MPI_Comm_free(&OP_MPI_HDF5_WORLD);

  return op_decl_set(l_size,  name);
}
Example #17
  int mpi_mesh_grdecl::open_file_mpi_hdf5(const char *file_name)
  {
    //Set up file access property list with parallel I/O access
    hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); // Creates a new property list as an instance of the file access property list class.

    //Each process of the MPI communicator creates an access template and sets it up with MPI parallel access information. This is done with the H5Pcreate call to obtain the file access property list and the H5Pset_fapl_mpio call to set up parallel I/O access.
    H5Pset_fapl_mpio(plist_id, mpi_comm, mpi_info);

    // Open the file read-only through the parallel access property list, then release the list identifier
    hid_t file_id = H5Fopen (file_name, H5F_ACC_RDONLY, plist_id); // H5F_ACC_RDONLY - Allow read-only access to file.
    H5Pclose (plist_id);
    if (file_id < 0)
      {
        printf("Error: Can't open file %s!\n", file_name);
        return -1;
      }
    return file_id;
  }
Example #18
/*----------------------------------------------------------------------------
 * Name:        h5pset_fapl_mpio_c
 * Purpose:     Call H5Pset_fapl_mpio to set mode for parallel I/O and the user
 *              supplied communicator and info object
 * Inputs:      prp_id - property list identifier
 *              comm   - MPI communicator
 *              info   - MPI info object
 * Returns:     0 on success, -1 on failure
 * Programmer:  Elena Pourmal
 *              Thursday, October 26, 2000
 * Modifications:
 *---------------------------------------------------------------------------*/
int_f
nh5pset_fapl_mpio_c(hid_t_f *prp_id, int_f* comm, int_f* info)
{
     int ret_value = -1;
     hid_t c_prp_id;
     herr_t ret;
     MPI_Comm c_comm;
     MPI_Info c_info;
     c_comm = MPI_Comm_f2c(*comm);
     c_info = MPI_Info_f2c(*info);

     /*
      * Call the H5Pset_fapl_mpio function.
      */
     c_prp_id = *prp_id;
     ret = H5Pset_fapl_mpio(c_prp_id, c_comm, c_info);
     if (ret < 0) return ret_value;
     ret_value = 0;
     return ret_value;
}
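
For context, a small sketch of the standard MPI handle conversions this shim relies on; the round trip below is illustrative and not part of the original source:

/* C handle -> Fortran integer handle -> C handle, as the wrapper assumes. */
MPI_Fint f_comm = MPI_Comm_c2f(MPI_COMM_WORLD);
MPI_Comm c_comm = MPI_Comm_f2c(f_comm);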
Example #19
void saveParticleComp_Int(int *data,char *fileName,char *dataName,int totalCnt,int cnt,int offSet)
{
  int i,j,k;
  int myrank, nTasks;
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm_size(MPI_COMM_WORLD, &nTasks);

  hid_t file_id,dset_id,plist_id,tic_id;
  herr_t status;
  hid_t total_file_space,subfilespace,filespace,memspace,ticspace;
  hsize_t dimsf[1],count[1],offset[1];

  plist_id=H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist_id,MPI_COMM_WORLD,MPI_INFO_NULL);
//    H5Pset_fclose_degree(plist_id,H5F_CLOSE_SEMI);
//    MPI_Barrier(MPI_COMM_WORLD);

  file_id=H5Fopen(fileName,H5F_ACC_RDWR,plist_id);
  H5Pclose(plist_id);

  dimsf[0]=totalCnt;
  filespace=H5Screate_simple(1,dimsf,NULL);

  count[0]=cnt;
  offset[0]=offSet;
  memspace=H5Screate_simple(1,count,NULL);

  dset_id=H5Dcreate2(file_id,dataName,H5T_NATIVE_INT,filespace,H5P_DEFAULT,H5P_DEFAULT,H5P_DEFAULT);
  subfilespace=H5Dget_space(dset_id);
  H5Sselect_hyperslab(subfilespace,H5S_SELECT_SET,offset,NULL,count,NULL);
  plist_id=H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist_id,H5FD_MPIO_INDEPENDENT);
  status = H5Dwrite(dset_id, H5T_NATIVE_INT,memspace,subfilespace,plist_id,data);
  H5Pclose(plist_id);
  H5Sclose(subfilespace);
  H5Dclose(dset_id);

  H5Sclose(memspace);
  H5Sclose(filespace);
  H5Fclose(file_id);
}
Example #20
	/*!
	 *
	 * \brief Write a set of particle positions and properties into HDF5
	 *
	 * \tparam Pos Vector of positions type
	 * \tparam Prp Vector of properties type
	 * \tparam prp list of properties to output
	 *
	 * \param pos Vector with the positions
	 * \param prp Vector with the properties
	 * \param stop size of the vector to output
	 *
	 */
	template<typename VPos, typename VPrp, int ... prp > bool write(const std::string & file, openfpm::vector<VPos> & v_pos, openfpm::vector<VPrp> & v_prp, size_t stop)
	{
		Vcluster & v_cl = create_vcluster();

		// Create an HDF5 file in parallel

		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
		H5Pset_fapl_mpio(plist_id, v_cl.getMPIComm(), MPI_INFO_NULL);
		file_id = H5Fcreate(file.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
		H5Pclose(plist_id);

		// Single coordinate positional vector
		openfpm::vector<typename VPos::coord_type> x_n;
		x_n.resize(stop);

		//for each component, fill x_n
		for (size_t i = 0 ; i < VPos::dims ; i++)
		{
			//
			for (size_t j = 0 ; j < stop ; j++)
				x_n.get(j) = v_pos.template get<0>(j)[i];

			std::stringstream str;
			str << "x" << i;

			HDF5CreateDataSet<typename VPos::coord_type>(file_id,str.str(),x_n.getPointer(),stop*sizeof(typename VPos::coord_type));
		}

		// Now we write the properties

		typedef typename to_boost_vmpl<prp ... >::type v_prp_seq;
		H5_prop_out<openfpm::vector<VPrp>,has_attributes<VPrp>::value> f(file_id,v_prp,stop);

		boost::mpl::for_each_ref<v_prp_seq>(f);

		H5Fclose(file_id);

		return true;
	}
Example #21
void restoreData(float *data,char *fileName,char *dataName,int totalCnt,int cnt,int *cntOffSet)
{
  hid_t file_id,dset_id,plist_id,group_id;
  herr_t status;
  hid_t subfilespace,filespace,memspace;
  hsize_t dimsf[1],count[1],offset[1];

  int myrank, nTasks;    
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm_size(MPI_COMM_WORLD, &nTasks);

  plist_id=H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist_id,MPI_COMM_WORLD,MPI_INFO_NULL);
//  H5Pset_fclose_degree(plist_id,H5F_CLOSE_SEMI);
//  MPI_Barrier(MPI_COMM_WORLD);

  file_id=H5Fopen(fileName,H5F_ACC_RDWR,plist_id);
  H5Pclose(plist_id);
  dimsf[0]=totalCnt;     
  filespace=H5Screate_simple(1,dimsf,NULL);

  count[0]=cnt;
  offset[0]=cntOffSet[myrank];
  memspace=H5Screate_simple(1,count,NULL);

  dset_id=H5Dopen2(file_id,dataName,H5P_DEFAULT);
  subfilespace=H5Dget_space(dset_id);
  H5Sselect_hyperslab(subfilespace,H5S_SELECT_SET,offset,NULL,count,NULL);
  plist_id=H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist_id,H5FD_MPIO_COLLECTIVE);
  status = H5Dread(dset_id, H5T_NATIVE_FLOAT,memspace,subfilespace,plist_id,data);
  H5Pclose(plist_id);
  H5Sclose(subfilespace);
  H5Dclose(dset_id);

  H5Sclose(memspace);
  H5Sclose(filespace);
  H5Fclose(file_id);
}
Example #22
void LifeV::HDF5IO::openFile (const std::string& fileName,
                              const commPtr_Type& comm,
                              const bool& existing)
{
    hid_t plistId;
    MPI_Comm mpiComm = comm->Comm();
    MPI_Info info = MPI_INFO_NULL;

    // Set up file access property list with parallel I/O access
    plistId = H5Pcreate (H5P_FILE_ACCESS);
    H5Pset_fapl_mpio (plistId, mpiComm, info);

    // Create/open a file collectively and release property list identifier.
    if (existing)
    {
        M_fileId = H5Fopen (fileName.c_str(), H5F_ACC_RDONLY, plistId);
    }
    else
    {
        M_fileId = H5Fcreate (fileName.c_str(), H5F_ACC_TRUNC,
                              H5P_DEFAULT, plistId);
    }
    H5Pclose (plistId);
}
Example #23
void PHDF5fileClass::CreatePHDF5file(double *L, int *dglob, int *dlocl, bool bp){

  hid_t   acc_t;
  hid_t   Ldataspace;
  hid_t   Ldataset;
  hid_t   ndataspace;
  hid_t   ndataset;
  herr_t  status;
  hsize_t d[1];

  /* ---------------------------------------------- */
  /* 0- Initialize some of the class parameters     */
  /* ---------------------------------------------- */

  bparticles  = bp;
  for (int i=0; i<ndim; i++){
    LxLyLz[i]    = L[i];
    dim[i]       = (hsize_t)dglob[i];
    chdim[i]     = (hsize_t)dlocl[i];
  }

  /* ----------------------------------------------------- */
  /* 1- Set the access template for the parallel HDF5 file */
  /* ----------------------------------------------------- */

  acc_t = H5Pcreate(H5P_FILE_ACCESS);

  /* --------------------------------------- */
  /* 2- Tell HDF5 that we want to use MPI-IO */
  /* --------------------------------------- */

  H5Pset_fapl_mpio(acc_t, comm, MPI_INFO_NULL);

  /* ------------------------------------------------------- */
  /* 3- Load file identifier and release the access template */
  /* ------------------------------------------------------- */

  file_id = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, acc_t);
  H5Pclose(acc_t);

  /* -------------------- */
  /* 4- Set up the groups */
  /* -------------------- */

  for (int i=0; i<ngrp; i++){
    if (!(grpnames[i]=="Particles"&&!bparticles)){
      hid_t grp;
      grp = H5Gcreate2(file_id, grpnames[i].c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
      H5Gclose(grp);
    }
  }

  /* -------------------------------------- */
  /* Create and fill the Parameters dataset */
  /* -------------------------------------- */

  d[0] = 3;

  status = H5LTmake_dataset(file_id, "/Parameters/LxLyLz", 1, d, H5T_NATIVE_DOUBLE, LxLyLz);
  status = H5LTmake_dataset(file_id, "/Parameters/ncell" , 1, d, H5T_NATIVE_INT   , dglob);

}
Example #24
File: hdf5.c Project: leobago/fti
/*-------------------------------------------------------------------------*/
int FTI_RecoverHDF5(FTIT_configuration* FTI_Conf, FTIT_execution* FTI_Exec, FTIT_checkpoint* FTI_Ckpt,
                   FTIT_dataset* FTI_Data)
{
    char str[FTI_BUFS], fn[FTI_BUFS];
    snprintf(fn, FTI_BUFS, "%s/%s", FTI_Ckpt[FTI_Exec->ckptLvel].dir, FTI_Exec->meta[FTI_Exec->ckptLvel].ckptFile);
    if( FTI_Exec->h5SingleFile ) {
        snprintf( fn, FTI_BUFS, "%s/%s-ID%08d.h5", FTI_Conf->h5SingleFileDir, FTI_Conf->h5SingleFilePrefix, FTI_Exec->ckptID );
    }

    sprintf(str, "Trying to load FTI checkpoint file (%s)...", fn);
    FTI_Print(str, FTI_DBUG);
    
    hid_t file_id;
    
    //Open hdf5 file
    if( FTI_Exec->h5SingleFile ) { 
        hid_t plid = H5Pcreate( H5P_FILE_ACCESS );
        H5Pset_fapl_mpio( plid, FTI_COMM_WORLD, MPI_INFO_NULL );
        file_id = H5Fopen( fn, H5F_ACC_RDONLY, plid );
        H5Pclose( plid );
    } else {
        file_id = H5Fopen(fn, H5F_ACC_RDONLY, H5P_DEFAULT);
    }
    if (file_id < 0) {
        FTI_Print("Could not open FTI checkpoint file.", FTI_EROR);
        return FTI_NREC;
    }
    FTI_Exec->H5groups[0]->h5groupID = file_id;
    FTIT_H5Group* rootGroup = FTI_Exec->H5groups[0];

    int i;
    for (i = 0; i < FTI_Exec->H5groups[0]->childrenNo; i++) {
        FTI_OpenGroup(FTI_Exec->H5groups[rootGroup->childrenID[i]], file_id, FTI_Exec->H5groups);
    }
    
    for (i = 0; i < FTI_Exec->nbVar; i++) {
        FTI_CreateComplexType(FTI_Data[i].type, FTI_Exec->FTI_Type);
    }
    
    if( FTI_Exec->h5SingleFile ) { 
        FTI_OpenGlobalDatasets( FTI_Exec );
    }

    for (i = 0; i < FTI_Exec->nbVar; i++) {
        herr_t res;
        if( FTI_Exec->h5SingleFile ) { 
            res = FTI_ReadSharedFileData( FTI_Data[i] );
        } else {
            res = FTI_ReadHDF5Var(&FTI_Data[i]);
        }
        if (res < 0) {
            FTI_Print("Could not read FTI checkpoint file.", FTI_EROR);
            int j;
            for (j = 0; j < FTI_Exec->H5groups[0]->childrenNo; j++) {
                FTI_CloseGroup(FTI_Exec->H5groups[rootGroup->childrenID[j]], FTI_Exec->H5groups);
            }
            H5Fclose(file_id);
            return FTI_NREC;
        }
    }
    for (i = 0; i < FTI_Exec->nbVar; i++) {
        FTI_CloseComplexType(FTI_Data[i].type, FTI_Exec->FTI_Type);
    }

    int j;
    for (j = 0; j < FTI_Exec->H5groups[0]->childrenNo; j++) {
        FTI_CloseGroup(FTI_Exec->H5groups[rootGroup->childrenID[j]], FTI_Exec->H5groups);
    }
    
    if( FTI_Exec->h5SingleFile ) { 
        FTI_CloseGlobalDatasets( FTI_Exec );
    }

    FTI_Exec->H5groups[0]->h5groupID = -1;
    if (H5Fclose(file_id) < 0) {
        FTI_Print("Could not close FTI checkpoint file.", FTI_EROR);
        return FTI_NREC;
    }
    FTI_Exec->reco = 0;
    return FTI_SCES;
}
Example #25
File: hdf5.c Project: leobago/fti
/*-------------------------------------------------------------------------*/
int FTI_WriteHDF5(FTIT_configuration* FTI_Conf, FTIT_execution* FTI_Exec,
        FTIT_topology* FTI_Topo, FTIT_checkpoint* FTI_Ckpt,
        FTIT_dataset* FTI_Data)
{
    FTI_Print("I/O mode: HDF5.", FTI_DBUG);
    char str[FTI_BUFS], fn[FTI_BUFS];
    int level = FTI_Exec->ckptLvel;
    if (level == 4 && FTI_Ckpt[4].isInline) { //If inline L4 save directly to global directory
        snprintf(fn, FTI_BUFS, "%s/%s", FTI_Conf->gTmpDir, FTI_Exec->meta[0].ckptFile);
    }
    else {
        snprintf(fn, FTI_BUFS, "%s/%s", FTI_Conf->lTmpDir, FTI_Exec->meta[0].ckptFile);
    }
    if( FTI_Exec->h5SingleFile ) {
        snprintf( fn, FTI_BUFS, "%s/%s-ID%08d.h5", FTI_Conf->h5SingleFileDir, FTI_Conf->h5SingleFilePrefix, FTI_Exec->ckptID );
    }

    hid_t file_id;
    
    //Creating new hdf5 file
    if( FTI_Exec->h5SingleFile ) { 
        hid_t plid = H5Pcreate( H5P_FILE_ACCESS );
        H5Pset_fapl_mpio(plid, FTI_COMM_WORLD, MPI_INFO_NULL);
        file_id = H5Fcreate(fn, H5F_ACC_TRUNC, H5P_DEFAULT, plid);       
        H5Pclose( plid );
    } else {
        file_id = H5Fcreate(fn, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    }
    if (file_id < 0) {
        sprintf(str, "FTI checkpoint file (%s) could not be opened.", fn);
        FTI_Print(str, FTI_EROR);

        return FTI_NSCS;
    }
    FTI_Exec->H5groups[0]->h5groupID = file_id;
    FTIT_H5Group* rootGroup = FTI_Exec->H5groups[0];

    int i;
    for (i = 0; i < rootGroup->childrenNo; i++) {
        FTI_CreateGroup(FTI_Exec->H5groups[rootGroup->childrenID[i]], file_id, FTI_Exec->H5groups);
    }

    // write data into ckpt file

    // create datatypes
    for (i = 0; i < FTI_Exec->nbVar; i++) {
        int toCommit = 0;
        if (FTI_Data[i].type->h5datatype < 0) {
            toCommit = 1;
        }
        sprintf(str, "Calling CreateComplexType [%d] with hid_t %ld", FTI_Data[i].type->id, (long)FTI_Data[i].type->h5datatype);
        FTI_Print(str, FTI_DBUG);
        FTI_CreateComplexType(FTI_Data[i].type, FTI_Exec->FTI_Type);
        if (toCommit == 1) {
            char name[FTI_BUFS];
            if (FTI_Data[i].type->structure == NULL) {
                //this is the array of bytes with no name
                sprintf(name, "Type%d", FTI_Data[i].type->id);
            } else {
                strncpy(name, FTI_Data[i].type->structure->name, FTI_BUFS);
            }
            herr_t res = H5Tcommit(FTI_Data[i].type->h5group->h5groupID, name, FTI_Data[i].type->h5datatype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
            if (res < 0) {
                sprintf(str, "Datatype #%d could not be commited", FTI_Data[i].id);
                FTI_Print(str, FTI_EROR);
                int j;
                for (j = 0; j < FTI_Exec->H5groups[0]->childrenNo; j++) {
                    FTI_CloseGroup(FTI_Exec->H5groups[rootGroup->childrenID[j]], FTI_Exec->H5groups);
                }
                H5Fclose(file_id);
                return FTI_NSCS;
            }
        }
    }

    if( FTI_Exec->h5SingleFile ) { 
        FTI_CreateGlobalDatasets( FTI_Exec );
    }

    // write data
    for (i = 0; i < FTI_Exec->nbVar; i++) {
        int res;
        if( FTI_Exec->h5SingleFile ) { 
            res = FTI_WriteSharedFileData( FTI_Data[i] );
        } else {
            res = FTI_WriteHDF5Var(&FTI_Data[i]); 
        }
        if ( res != FTI_SCES ) {
            sprintf(str, "Dataset #%d could not be written", FTI_Data[i].id);
            FTI_Print(str, FTI_EROR);
            int j;
            for (j = 0; j < FTI_Exec->H5groups[0]->childrenNo; j++) {
                FTI_CloseGroup(FTI_Exec->H5groups[rootGroup->childrenID[j]], FTI_Exec->H5groups);
            }
            H5Fclose(file_id);
            return FTI_NSCS;
        }
    }
    
    for (i = 0; i < FTI_Exec->nbVar; i++) {
        FTI_CloseComplexType(FTI_Data[i].type, FTI_Exec->FTI_Type);
    }

    int j;
    for (j = 0; j < FTI_Exec->H5groups[0]->childrenNo; j++) {
        FTI_CloseGroup(FTI_Exec->H5groups[rootGroup->childrenID[j]], FTI_Exec->H5groups);
    }
    
    if( FTI_Exec->h5SingleFile ) { 
        FTI_CloseGlobalDatasets( FTI_Exec );
    }

    // close file
    FTI_Exec->H5groups[0]->h5groupID = -1;
    if (H5Fclose(file_id) < 0) {
        FTI_Print("FTI checkpoint file could not be closed.", FTI_EROR);
        return FTI_NSCS;
    }


    return FTI_SCES;
}
Example #26
/*
 * Open a file through the HDF5 interface.
 */
static void *HDF5_Open(char *testFileName, IOR_param_t * param)
{
        hid_t accessPropList, createPropList;
        hsize_t memStart[NUM_DIMS],
            dataSetDims[NUM_DIMS],
            memStride[NUM_DIMS],
            memCount[NUM_DIMS], memBlock[NUM_DIMS], memDataSpaceDims[NUM_DIMS];
        int tasksPerDataSet;
        unsigned fd_mode = (unsigned)0;
        hid_t *fd;
        MPI_Comm comm;
        MPI_Info mpiHints = MPI_INFO_NULL;

        fd = (hid_t *) malloc(sizeof(hid_t));
        if (fd == NULL)
                ERR("malloc() failed");
        /*
         * HDF5 uses different flags than those for POSIX/MPIIO
         */
        if (param->open == WRITE) {     /* WRITE flags */
                param->openFlags = IOR_TRUNC;
        } else {                /* READ or check WRITE/READ flags */
                param->openFlags = IOR_RDONLY;
        }

        /* set IOR file flags to HDF5 flags */
        /* -- file open flags -- */
        if (param->openFlags & IOR_RDONLY) {
                fd_mode |= H5F_ACC_RDONLY;
        }
        if (param->openFlags & IOR_WRONLY) {
                fprintf(stdout, "File write only not implemented in HDF5\n");
        }
        if (param->openFlags & IOR_RDWR) {
                fd_mode |= H5F_ACC_RDWR;
        }
        if (param->openFlags & IOR_APPEND) {
                fprintf(stdout, "File append not implemented in HDF5\n");
        }
        if (param->openFlags & IOR_CREAT) {
                fd_mode |= H5F_ACC_CREAT;
        }
        if (param->openFlags & IOR_EXCL) {
                fd_mode |= H5F_ACC_EXCL;
        }
        if (param->openFlags & IOR_TRUNC) {
                fd_mode |= H5F_ACC_TRUNC;
        }
        if (param->openFlags & IOR_DIRECT) {
                fprintf(stdout, "O_DIRECT not implemented in HDF5\n");
        }

        /* set up file creation property list */
        createPropList = H5Pcreate(H5P_FILE_CREATE);
        HDF5_CHECK(createPropList, "cannot create file creation property list");
        /* set size of offset and length used to address HDF5 objects */
        HDF5_CHECK(H5Pset_sizes
                   (createPropList, sizeof(hsize_t), sizeof(hsize_t)),
                   "cannot set property list properly");

        /* set up file access property list */
        accessPropList = H5Pcreate(H5P_FILE_ACCESS);
        HDF5_CHECK(accessPropList, "cannot create file access property list");

        /*
         * someday the HDF5 implementation will allow subsets of MPI_COMM_WORLD
         */
        /* store MPI communicator info for the file access property list */
        if (param->filePerProc) {
                comm = MPI_COMM_SELF;
        } else {
                comm = testComm;
        }

        SetHints(&mpiHints, param->hintsFileName);
        /*
         * note that with MP_HINTS_FILTERED=no, all key/value pairs will
         * be in the info object.  The info object that is attached to
         * the file during MPI_File_open() will only contain those pairs
         * deemed valid by the implementation.
         */
        /* show hints passed to file */
        if (rank == 0 && param->showHints) {
                fprintf(stdout, "\nhints passed to access property list {\n");
                ShowHints(&mpiHints);
                fprintf(stdout, "}\n");
        }
        HDF5_CHECK(H5Pset_fapl_mpio(accessPropList, comm, mpiHints),
                   "cannot set file access property list");

        /* set alignment */
        HDF5_CHECK(H5Pset_alignment(accessPropList, param->setAlignment,
                                    param->setAlignment),
                   "cannot set alignment");

        /* open file */
        if (param->open == WRITE) {     /* WRITE */
                *fd = H5Fcreate(testFileName, fd_mode,
                                createPropList, accessPropList);
                HDF5_CHECK(*fd, "cannot create file");
        } else {                /* READ or CHECK */
                *fd = H5Fopen(testFileName, fd_mode, accessPropList);
                HDF5_CHECK(*fd, "cannot open file");
        }

        /* show hints actually attached to file handle */
        if (param->showHints || (1) /* WEL - this needs fixing */ ) {
                if (rank == 0
                    && (param->showHints) /* WEL - this needs fixing */ ) {
                        WARN("showHints not working for HDF5");
                }
        } else {
                MPI_Info mpiHintsCheck = MPI_INFO_NULL;
                hid_t apl;
                apl = H5Fget_access_plist(*fd);
                HDF5_CHECK(H5Pget_fapl_mpio(apl, &comm, &mpiHintsCheck),
                           "cannot get info object through HDF5");
                if (rank == 0) {
                        fprintf(stdout,
                                "\nhints returned from opened file (HDF5) {\n");
                        ShowHints(&mpiHintsCheck);
                        fprintf(stdout, "}\n");
                        if (1 == 1) {   /* request the MPIIO file handle and its hints */
                                MPI_File *fd_mpiio;
                                HDF5_CHECK(H5Fget_vfd_handle
                                           (*fd, apl, (void **)&fd_mpiio),
                                           "cannot get MPIIO file handle");
                                MPI_CHECK(MPI_File_get_info
                                          (*fd_mpiio, &mpiHintsCheck),
                                          "cannot get info object through MPIIO");
                                fprintf(stdout,
                                        "\nhints returned from opened file (MPIIO) {\n");
                                ShowHints(&mpiHintsCheck);
                                fprintf(stdout, "}\n");
                        }
                }
                MPI_CHECK(MPI_Barrier(testComm), "barrier error");
        }

        /* this is necessary for resetting various parameters
           needed for reopening and checking the file */
        newlyOpenedFile = TRUE;

        HDF5_CHECK(H5Pclose(createPropList),
                   "cannot close creation property list");
        HDF5_CHECK(H5Pclose(accessPropList),
                   "cannot close access property list");

        /* create property list for serial/parallel access */
        xferPropList = H5Pcreate(H5P_DATASET_XFER);
        HDF5_CHECK(xferPropList, "cannot create transfer property list");

        /* set data transfer mode */
        if (param->collective) {
                HDF5_CHECK(H5Pset_dxpl_mpio(xferPropList, H5FD_MPIO_COLLECTIVE),
                           "cannot set collective data transfer mode");
        } else {
                HDF5_CHECK(H5Pset_dxpl_mpio
                           (xferPropList, H5FD_MPIO_INDEPENDENT),
                           "cannot set independent data transfer mode");
        }

        /* set up memory data space for transfer */
        memStart[0] = (hsize_t) 0;
        memCount[0] = (hsize_t) 1;
        memStride[0] = (hsize_t) (param->transferSize / sizeof(IOR_size_t));
        memBlock[0] = (hsize_t) (param->transferSize / sizeof(IOR_size_t));
        memDataSpaceDims[0] = (hsize_t) param->transferSize;
        memDataSpace = H5Screate_simple(NUM_DIMS, memDataSpaceDims, NULL);
        HDF5_CHECK(memDataSpace, "cannot create simple memory data space");

        /* define hyperslab for memory data space */
        HDF5_CHECK(H5Sselect_hyperslab(memDataSpace, H5S_SELECT_SET,
                                       memStart, memStride, memCount,
                                       memBlock), "cannot create hyperslab");

        /* set up parameters for fpp or different dataset count */
        if (param->filePerProc) {
                tasksPerDataSet = 1;
        } else {
                if (param->individualDataSets) {
                        /* each task in segment has single data set */
                        tasksPerDataSet = 1;
                } else {
                        /* share single data set across all tasks in segment */
                        tasksPerDataSet = param->numTasks;
                }
        }
        dataSetDims[0] = (hsize_t) ((param->blockSize / sizeof(IOR_size_t))
                                    * tasksPerDataSet);

        /* create a simple data space containing information on size
           and shape of data set, and open it for access */
        dataSpace = H5Screate_simple(NUM_DIMS, dataSetDims, NULL);
        HDF5_CHECK(dataSpace, "cannot create simple data space");

        return (fd);
}
Example #27
void _io_write_prim_h5mpi(const char *fname, const char **pnames, const double *data)
// -----------------------------------------------------------------------------
// This function uses a collective MPI-IO procedure to write the contents of
// 'data' to the HDF5 file named 'fname', which is assumed to have been created
// already. The dataset with name 'dname', which is being written to, must not
// exist already. Chunking is enabled as per the module-wide ChunkSize variable,
// and is disabled by default. Recommended chunk size is local subdomain
// size. This will result in optimized read/write on the same decomposition
// layout, but poor performance for different access patterns, for example the
// slabs used by cluster-FFT functions.
//
//                                   WARNING!
//
// All processors must define the same chunk size, the behavior of this function
// is not defined otherwise. This implies that chunking should be disabled when
// running on a strange number of cores, and subdomain sizes are non-uniform.
// -----------------------------------------------------------------------------
{
  hsize_t ndp1 = n_dims + 1;
  hsize_t *a_nint = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *l_ntot = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *l_strt = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *stride = (hsize_t*) malloc(ndp1*sizeof(hsize_t));

  int i;
  for (i=0; i<n_dims; ++i) {
    a_nint[i] = A_nint[i]; // Selection size, target and destination
    l_ntot[i] = L_ntot[i]; // Memory space total size
    l_strt[i] = L_strt[i]; // Memory space selection start
    stride[i] = 1;
  }
  a_nint[ndp1 - 1] = 1;
  l_ntot[ndp1 - 1] = n_prim;
  stride[ndp1 - 1] = n_prim;

  // Here we create the following property lists:
  //
  // file access property list   ........ for the call to H5Fopen
  // dset creation property list ........ for the call to H5Dcreate
  // dset transfer property list ........ for the call to H5Dwrite
  // ---------------------------------------------------------------------------
  hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
  hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
  hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

  // Here we define collective (MPI) access to the file with alignment
  // properties optimized for the local file system, according to DiskBlockSize.
  // ---------------------------------------------------------------------------
  if (EnableChunking) {
    H5Pset_chunk(dcpl, n_dims, ChunkSize);
  }
  if (EnableAlignment) {
    H5Pset_alignment(fapl, AlignThreshold, DiskBlockSize);
  }

  H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
  H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

  hid_t file = H5Fopen(fname, H5F_ACC_RDWR, fapl);
  const int overwrite = H5Lexists(file, "prim", H5P_DEFAULT);
  hid_t prim = overwrite ? H5Gopen(file, "prim", H5P_DEFAULT) :
    H5Gcreate(file, "prim", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  hid_t mspc = H5Screate_simple(ndp1  , l_ntot, NULL);
  hid_t fspc = H5Screate_simple(n_dims, G_ntot, NULL);
  // Call signature to H5Sselect_hyperslab is (start, stride, count, chunk)
  // ---------------------------------------------------------------------------
  const clock_t start_all = clock();

  for (i=0; i<n_prim; ++i) {

    hid_t dset = overwrite ? H5Dopen(prim, pnames[i], H5P_DEFAULT) : 
      H5Dcreate(prim, pnames[i], H5T_NATIVE_DOUBLE, fspc,
		H5P_DEFAULT, dcpl, H5P_DEFAULT);

    l_strt[ndp1 - 1] = i;
    H5Sselect_hyperslab(mspc, H5S_SELECT_SET, l_strt, stride, a_nint, NULL);
    H5Sselect_hyperslab(fspc, H5S_SELECT_SET, G_strt,   NULL, A_nint, NULL);
    H5Dwrite(dset, H5T_NATIVE_DOUBLE, mspc, fspc, dxpl, data);
    H5Dclose(dset);
  }
  if (iolog) {
    const double sec = (double)(clock() - start_all) / CLOCKS_PER_SEC;
    fprintf(iolog, "[h5mpi] write to %s took %f minutes\n", fname, sec/60.0);
    fflush(iolog);
  }

  free(a_nint);
  free(l_ntot);
  free(l_strt);
  free(stride);

  // Always close the hid_t handles in the reverse order they were opened in.
  // ---------------------------------------------------------------------------
  H5Sclose(fspc);
  H5Sclose(mspc);
  H5Gclose(prim);
  H5Fclose(file);
  H5Pclose(dxpl);
  H5Pclose(dcpl);
  H5Pclose(fapl);
}
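
A hedged sketch of the chunking knob described in the header comment above, with a made-up uniform chunk shape standing in for the local subdomain size:

/* Illustrative only: every rank must use the same chunk dimensions. */
hsize_t chunk[3] = {16, 16, 16};
hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
H5Pset_chunk(dcpl, 3, chunk);
/* ... pass dcpl to H5Dcreate, then H5Pclose(dcpl) ... */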
Example #28
/*-------------------------------------------------------------------------
 * Function:    main
 *
 * Purpose:    Part 1 of a two-part H5Fflush() test.
 *
 * Return:    Success:    0
 *
 *        Failure:    1
 *
 * Programmer:    Robb Matzke
 *              Friday, October 23, 1998
 *
 * Modifications:
 *         Leon Arber
 *         Sept. 26, 2006, expand test to check for failure if H5Fflush is not called.
 *
 *
 *-------------------------------------------------------------------------
 */
int
main(int argc, char* argv[])
{
    hid_t file1, file2, fapl;
    MPI_File    *mpifh_p = NULL;
    char    name[1024];
    const char  *envval = NULL;
    int mpi_size, mpi_rank;
    MPI_Comm comm  = MPI_COMM_WORLD;
    MPI_Info info  = MPI_INFO_NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &mpi_size);
    MPI_Comm_rank(comm, &mpi_rank);

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, comm, info);

    if(mpi_rank == 0)
        TESTING("H5Fflush (part1)");
    envval = HDgetenv("HDF5_DRIVER");
    if(envval == NULL)
        envval = "nomatch";
    if(HDstrcmp(envval, "split")) {
        /* Create the file */
        h5_fixname(FILENAME[0], fapl, name, sizeof name);
        file1 = create_file(name, fapl);
        /* Flush and exit without closing the library */
        if(H5Fflush(file1, H5F_SCOPE_GLOBAL) < 0) goto error;

        /* Create the other file which will not be flushed */
        h5_fixname(FILENAME[1], fapl, name, sizeof name);
        file2 = create_file(name, fapl);

        if(mpi_rank == 0)
            PASSED();
        fflush(stdout);
        fflush(stderr);
    } /* end if */
    else {
        SKIPPED();
        puts("    Test not compatible with current Virtual File Driver");
    } /* end else */

    /*
     * Some systems like AIX do not like files left open when MPI_Finalize
     * is called.  So we need to get the MPI file handles and close them by
     * hand.  The _exit is still needed to stop atexit processing in some
     * systems.  Note that the MPIO VFD returns the address of the
     * file handle in the VFD struct because MPI_File_close wants to modify
     * the file-handle variable.
     */

    /* close file1 */
    if(H5Fget_vfd_handle(file1, fapl, (void **)&mpifh_p) < 0) {
        printf("H5Fget_vfd_handle for file1 failed\n");
        goto error;
    } /* end if */
    if(MPI_File_close(mpifh_p) != MPI_SUCCESS) {
        printf("MPI_File_close for file1 failed\n");
        goto error;
    } /* end if */
    /* close file2 */
    if(H5Fget_vfd_handle(file2, fapl, (void **)&mpifh_p) < 0) {
        printf("H5Fget_vfd_handle for file2 failed\n");
        goto error;
    } /* end if */
    if(MPI_File_close(mpifh_p) != MPI_SUCCESS) {
        printf("MPI_File_close for file2 failed\n");
        goto error;
    } /* end if */

    fflush(stdout);
    fflush(stderr);
    HD_exit(0);

error:
    fflush(stdout);
    fflush(stderr);
    HD_exit(1);
}
Example #29
/*-------------------------------------------------------------------------
 * Function:    test_fapl_mpio_dup
 *
 * Purpose:     Test if fapl_mpio property list keeps a duplicate of the
 * 		communicator and INFO objects given when set; and returns
 * 		duplicates of its components when H5Pget_fapl_mpio is called.
 *
 * Return:      Success:        None
 *
 *              Failure:        Abort
 *
 * Programmer:  Albert Cheng
 *              January 9, 2003
 *
 * Modifications:
 *-------------------------------------------------------------------------
 */
void
test_fapl_mpio_dup(void)
{
    int mpi_size, mpi_rank;
    MPI_Comm comm, comm_tmp;
    int mpi_size_old, mpi_rank_old;
    int mpi_size_tmp, mpi_rank_tmp;
    MPI_Info info = MPI_INFO_NULL;
    MPI_Info info_tmp = MPI_INFO_NULL;
    int mrc;			/* MPI return value */
    hid_t acc_pl;		/* File access properties */
    herr_t ret;			/* hdf5 return value */
    int nkeys, nkeys_tmp;

    if (VERBOSE_MED)
	printf("Verify fapl_mpio duplicates communicator and INFO objects\n");

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
    if (VERBOSE_MED)
	printf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);

    /* Create a new communicator that has the same processes as MPI_COMM_WORLD.
     * Use MPI_Comm_split because it is simpler than MPI_Comm_create
     */
    mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_split");
    MPI_Comm_size(comm,&mpi_size_old);
    MPI_Comm_rank(comm,&mpi_rank_old);
    if (VERBOSE_MED)
	printf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old);

    /* create a new INFO object with some trivial information. */
    mrc = MPI_Info_create(&info);
    VRFY((mrc==MPI_SUCCESS), "MPI_Info_create");
    mrc = MPI_Info_set(info, "hdf_info_name", "XYZ");
    VRFY((mrc==MPI_SUCCESS), "MPI_Info_set");
    if (MPI_INFO_NULL != info){
	mrc=MPI_Info_get_nkeys(info, &nkeys);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
    }
    if (VERBOSE_MED)
	h5_dump_info_object(info);

    acc_pl = H5Pcreate (H5P_FILE_ACCESS);
    VRFY((acc_pl >= 0), "H5P_FILE_ACCESS");

    ret = H5Pset_fapl_mpio(acc_pl, comm, info);
    VRFY((ret >= 0), "");

    /* Case 1:
     * Free the created communicator and INFO object.
     * Check if the access property list is still valid and can return
     * valid communicator and INFO object.
     */
    mrc = MPI_Comm_free(&comm);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
    if (MPI_INFO_NULL!=info){
	mrc = MPI_Info_free(&info);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
    }

    ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
    VRFY((ret >= 0), "H5Pget_fapl_mpio");
    MPI_Comm_size(comm_tmp,&mpi_size_tmp);
    MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
    if (VERBOSE_MED)
	printf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
	mpi_rank_tmp, mpi_size_tmp);
    VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size");
    VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank");
    if (MPI_INFO_NULL != info_tmp){
	mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
	VRFY((nkeys_tmp==nkeys), "new and old nkeys equal");
    }
    if (VERBOSE_MED)
	h5_dump_info_object(info_tmp);

    /* Case 2:
     * Free the retrieved communicator and INFO object.
     * Check if the access property list is still valid and can return
     * valid communicator and INFO object.
     * Also verify the NULL argument option.
     */
    mrc = MPI_Comm_free(&comm_tmp);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
    if (MPI_INFO_NULL!=info_tmp){
	mrc = MPI_Info_free(&info_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
    }

    /* check NULL argument options. */
    ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, NULL);
    VRFY((ret >= 0), "H5Pget_fapl_mpio Comm only");
    mrc = MPI_Comm_free(&comm_tmp);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");

    ret = H5Pget_fapl_mpio(acc_pl, NULL, &info_tmp);
    VRFY((ret >= 0), "H5Pget_fapl_mpio Info only");
    if (MPI_INFO_NULL!=info_tmp){
	mrc = MPI_Info_free(&info_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
    }

    ret = H5Pget_fapl_mpio(acc_pl, NULL, NULL);
    VRFY((ret >= 0), "H5Pget_fapl_mpio neither");

    /* now get both and check validity too. */
    /* Do not free the returned objects; they are used in the next case. */
    ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
    VRFY((ret >= 0), "H5Pget_fapl_mpio");
    MPI_Comm_size(comm_tmp,&mpi_size_tmp);
    MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
    if (VERBOSE_MED)
	printf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
	mpi_rank_tmp, mpi_size_tmp);
    VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size");
    VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank");
    if (MPI_INFO_NULL != info_tmp){
	mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
	VRFY((nkeys_tmp==nkeys), "new and old nkeys equal");
    }
    if (VERBOSE_MED)
	h5_dump_info_object(info_tmp);

    /* Case 3:
     * Close the property list and verify the retrieved communicator and INFO
     * object are still valid.
     */
    H5Pclose(acc_pl);
    MPI_Comm_size(comm_tmp,&mpi_size_tmp);
    MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
    if (VERBOSE_MED)
	printf("After Property list closed: rank/size of comm are %d/%d\n",
	mpi_rank_tmp, mpi_size_tmp);
    if (MPI_INFO_NULL != info_tmp){
	mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
    }
    if (VERBOSE_MED)
	h5_dump_info_object(info_tmp);

    /* clean up */
    mrc = MPI_Comm_free(&comm_tmp);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
    if (MPI_INFO_NULL!=info_tmp){
	mrc = MPI_Info_free(&info_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
    }
}
Example #30
hid_t seissol::checkpoint::h5::Wavefield::initFile(int odd, const char* filename)
{
	hid_t h5file;

	if (loaded()) {
		// Open the old file
		h5file = open(filename, false);
		checkH5Err(h5file);

		// Time
		m_h5time[odd] = H5Aopen(h5file, "time", H5P_DEFAULT);
		checkH5Err(m_h5time[odd]);

		// Wavefield writer
		m_h5timestepWavefield[odd] = H5Aopen(h5file, "timestep_wavefield", H5P_DEFAULT);
		checkH5Err(m_h5timestepWavefield[odd]);

		// Data
		m_h5data[odd] = H5Dopen(h5file, "values", H5P_DEFAULT);
		checkH5Err(m_h5data[odd]);
	} else {
		// Create the file
		hid_t h5plist = H5Pcreate(H5P_FILE_ACCESS);
		checkH5Err(h5plist);
		checkH5Err(H5Pset_libver_bounds(h5plist, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST));
		hsize_t align = utils::Env::get<hsize_t>("SEISSOL_CHECKPOINT_ALIGNMENT", 0);
		if (align > 0)
			checkH5Err(H5Pset_alignment(h5plist, 1, align));
#ifdef USE_MPI
		MPIInfo info;
		checkH5Err(H5Pset_fapl_mpio(h5plist, seissol::MPI::mpi.comm(), info.get()));
#endif // USE_MPI

		h5file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, h5plist);
		checkH5Err(h5file);
		checkH5Err(H5Pclose(h5plist));

		// Create scalar dataspace for attributes
		hid_t h5spaceScalar = H5Screate(H5S_SCALAR);
		checkH5Err(h5spaceScalar);

		// Time
		m_h5time[odd] = H5Acreate(h5file, "time", H5T_IEEE_F64LE, h5spaceScalar,
				H5P_DEFAULT, H5P_DEFAULT);
		checkH5Err(m_h5time[odd]);

		// Partitions
		hid_t h5partitions = H5Acreate(h5file, "partitions", H5T_STD_I32LE, h5spaceScalar,
				H5P_DEFAULT, H5P_DEFAULT);
		checkH5Err(h5partitions);
		int p = partitions();
		checkH5Err(H5Awrite(h5partitions, H5T_NATIVE_INT, &p));
		checkH5Err(H5Aclose(h5partitions));

		// Wavefield writer
		m_h5timestepWavefield[odd] = H5Acreate(h5file, "timestep_wavefield",
				H5T_STD_I32LE, h5spaceScalar, H5P_DEFAULT, H5P_DEFAULT);
		checkH5Err(m_h5timestepWavefield[odd]);
		int t = 0;
		checkH5Err(H5Awrite(m_h5timestepWavefield[odd], H5T_NATIVE_INT, &t));

		checkH5Err(H5Sclose(h5spaceScalar));

		// Variable
		h5plist = H5Pcreate(H5P_DATASET_CREATE);
		checkH5Err(h5plist);
		checkH5Err(H5Pset_layout(h5plist, H5D_CONTIGUOUS));
		checkH5Err(H5Pset_alloc_time(h5plist, H5D_ALLOC_TIME_EARLY));
		m_h5data[odd] = H5Dcreate(h5file, "values", H5T_IEEE_F64LE, m_h5fSpaceData,
				H5P_DEFAULT, h5plist, H5P_DEFAULT);
		checkH5Err(m_h5data[odd]);
		checkH5Err(H5Pclose(h5plist));
	}

	return h5file;
}