Example #1
void OHDF5mpipp::registerHDF5DataSet(HDF5DataSet& dataset, char* name)
{
  //hsize_t dimsext[2] = {1,1}; 
  //dataset.memspace = H5Screate_simple (RANK, dimsext, NULL);
  
  int chunk_size = buf_size/dataset.sizeof_entry;
  
  std::cout << "chunk_size=" << chunk_size << std::endl;
  std::cout << "dataset.all_window_size=" << dataset.all_window_size << std::endl;
  
  hsize_t maxdims[2]={H5S_UNLIMITED,1};
  hsize_t dims[2]={dataset.all_window_size, 1};
  hsize_t chunk_dims[2]={5*chunk_size,1};			// numberOfValues is too small
  /* Create the data space with unlimited dimensions. */
  
  dataset.plist_id = H5Pcreate(H5P_DATASET_XFER);
  if (logger_type==nestio::Standard || logger_type==nestio::Buffered)
    H5Pset_dxpl_mpio(dataset.plist_id, H5FD_MPIO_INDEPENDENT);
  else
    H5Pset_dxpl_mpio(dataset.plist_id, H5FD_MPIO_COLLECTIVE);
  
  //hid_t filespace=H5Screate_simple (RANK, dims, maxdims);
  dataset.filespace=H5Screate_simple (RANK, dims, maxdims);
  
  /* Modify dataset creation properties, i.e. enable chunking  */
  
  hid_t prop=H5Pcreate (H5P_DATASET_CREATE);
  status = H5Pset_chunk (prop, RANK, chunk_dims);
  /*
     * Create the compound datatype for the file.  Because the standard
     * types we are using for the file may have different sizes than
     * the corresponding native types, we must manually calculate the
     * offset of each member.
     */

  hid_t filetype = H5Tcreate (H5T_COMPOUND, 3*8+dataset.max_numberOfValues*8);
  status = H5Tinsert (filetype, "id", 0, H5T_STD_I64BE);
  status = H5Tinsert (filetype, "neuron id", 8, H5T_STD_I64BE);
  status = H5Tinsert (filetype, "timestamp", 16, H5T_STD_I64BE);
  for (int i=0; i<dataset.max_numberOfValues; i++) {
    std::stringstream ss;
    ss << "V" << i;
    status = H5Tinsert (filetype, ss.str().c_str(), 24+i*8, H5T_IEEE_F64BE); //third argument: offset
  }

  /* Create a new dataset within the file using chunk 
      creation properties.  */
  
  std::cout << "H5Dcreate2 name=" << name << " max_numberOfValues=" << dataset.max_numberOfValues << std::endl;

  dataset.dset_id=H5Dcreate2 (file, name, filetype, dataset.filespace,
	    H5P_DEFAULT, prop, H5P_DEFAULT);
  
  status = H5Pclose(prop);
  status = H5Tclose(filetype);
  //status = H5Sclose (filespace);
}
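The routine above only creates the chunked, extendible dataset and its MPI transfer property list; the actual writes happen elsewhere. A minimal sketch of how such a plist is typically used later to append records; old_size, n_entries, memtype, and buffer are illustrative names, not from the original source:

  /* Sketch: extend the unlimited dimension, select the new slab, and write
   * through the transfer property list created in registerHDF5DataSet(). */
  hsize_t new_size[2] = {old_size + n_entries, 1};
  H5Dset_extent(dataset.dset_id, new_size);

  hid_t fspace = H5Dget_space(dataset.dset_id);
  hsize_t start[2] = {old_size, 0};
  hsize_t count[2] = {n_entries, 1};
  H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);

  hid_t mspace = H5Screate_simple(2, count, NULL);
  H5Dwrite(dataset.dset_id, memtype, mspace, fspace, dataset.plist_id, buffer);
  H5Sclose(mspace);
  H5Sclose(fspace);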
Example #2
        DCParallelDataSet(const std::string name) :
        DCDataSet(name)
        {
            dsetWriteProperties = H5Pcreate(H5P_DATASET_XFER);
            H5Pset_dxpl_mpio(dsetWriteProperties, H5FD_MPIO_COLLECTIVE);

            dsetReadProperties = H5Pcreate(H5P_DATASET_XFER);
            H5Pset_dxpl_mpio(dsetReadProperties, H5FD_MPIO_COLLECTIVE);

            checkExistence = false;
        }
Example #3
/****if* H5FDmpiof/h5pset_dxpl_mpio_c
 * NAME
 *        h5pset_dxpl_mpio_c
 * PURPOSE
 *     Call H5Pset_dxpl_mpio to set the transfer mode of the dataset
 *              transfer property list
 * INPUTS
 *      prp_id - property list identifier
 *              data_xfer_mode - transfer mode
 * RETURNS
 *     0 on success, -1 on failure
 * AUTHOR
 *  Elena Pourmal
 *              Thursday, October 26, 2000
 * HISTORY
 *
 * SOURCE
*/
int_f
nh5pset_dxpl_mpio_c(hid_t_f *prp_id, int_f* data_xfer_mode)
/******/
{
     int ret_value = -1;
     hid_t c_prp_id;
     herr_t ret;
     H5FD_mpio_xfer_t c_data_xfer_mode;
/*
     switch (*data_xfer_mode) {

        case H5FD_MPIO_INDEPENDENT_F:
             c_data_xfer_mode = H5FD_MPIO_INDEPENDENT;
             break;

        case H5FD_MPIO_COLLECTIVE_F:
             c_data_xfer_mode = H5FD_MPIO_COLLECTIVE;
             break;
        default:
          return ret_value;
      }
*/
     c_data_xfer_mode = (H5FD_mpio_xfer_t)*data_xfer_mode;
     /*
      * Call H5Pset_dxpl_mpio function.
      */
     c_prp_id = *prp_id;
     ret = H5Pset_dxpl_mpio(c_prp_id, c_data_xfer_mode);
     if (ret < 0) return ret_value;
     ret_value = 0;
     return ret_value;
}
Example #4
  void hdf_archive::set_access_plist(bool use_collective, Communicate* comm)
  {
    access_id=H5P_DEFAULT;
    if(comm && comm->size()>1) //for parallel communicator
    {
      if(use_collective)
      {
#if defined(H5_HAVE_PARALLEL) && defined(ENABLE_PHDF5)
        MPI_Info info=MPI_INFO_NULL;
        access_id = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(access_id,comm->getMPI(),info);
        xfer_plist = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(xfer_plist,H5FD_MPIO_COLLECTIVE);
#else
        use_collective=false;//cannot use collective
#endif
      }
      Mode.set(IS_PARALLEL,use_collective);
      //NOIO is true if this task does not need to participate in I/O
      Mode.set(NOIO,comm->rank()&&!use_collective);
    }
    else
    {
      Mode.set(IS_PARALLEL,false);
      Mode.set(NOIO,false);
    }
  }
Example #5
static void flash_tune_plist(hid_t * dxfer_template)
{
   int ierr;
   *dxfer_template = H5Pcreate(H5P_DATASET_XFER);
  /* ---------------------------------------------------------------------
      platform dependent code goes here -- the access template must be
      tuned for a particular filesystem blocksize.  some of these 
      numbers are guesses / experiments, others come from the file system
      documentation.

      The sieve_buf_size should be a multiple of the disk block size
     ---------------------------------------------------------------------- */

  /* H5Pset_preserve now obsolete, does nothing */
#ifdef IBM
  ierr = H5Pset_dxpl_mpio(*dxfer_template, H5FD_MPIO_INDEPENDENT);
  ierr = H5Pset_btree_ratios(*dxfer_template, 0.334, 0.333, 0.333); 

  /* for best performance, set this to 0 */
  ierr = H5Pset_preserve(*dxfer_template, 0u);  
#endif

#ifdef TFLOPS
  ierr = H5Pset_dxpl_mpio(*dxfer_template, H5FD_MPIO_COLLECTIVE);
  ierr = H5Pset_preserve(*dxfer_template, 0u);  
#endif

#ifdef CHIBA
  ierr = H5Pset_dxpl_mpio(*dxfer_template, H5FD_MPIO_INDEPENDENT);
  ierr = H5Pset_preserve(*dxfer_template, 0u);  
#endif

#ifdef BGL
  ierr = H5Pset_dxpl_mpio(*dxfer_template, H5FD_MPIO_COLLECTIVE);
  /* TODO: need tuning options for BGL */
#endif

#ifdef SGI
  ierr = H5Pset_dxpl_mpio(*dxfer_template, H5FD_MPIO_INDEPENDENT);
  ierr = H5Pset_preserve(*dxfer_template, 0u);  
#endif

  /* ----------------------------------------------------------------------
      end of platform dependent data-transfer property list settings
     ---------------------------------------------------------------------- */
}
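A hedged usage sketch for the tuner above: the caller owns the returned transfer template, passes it to the I/O call, and releases it afterwards (dset_id, memspace, filespace, and buf are placeholders, not from the FLASH source):

  hid_t dxfer_template;
  flash_tune_plist(&dxfer_template);
  /* ... set up dset_id, memspace, filespace, and buf ... */
  H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxfer_template, buf);
  H5Pclose(dxfer_template);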
Example #6
File: io.c Project: darien0/cow
void cow_domain_setcollective(cow_domain *d, int mode)
{
#if (COW_HDF5 && COW_HDF5_MPI)
  if (mode && !cow_mpirunning()) {
    printf("[%s] requested collective without MPI running: "
	   "revert to independent\n", MODULE);
    mode = 0;
  }
  if (mode) {
    printf("[%s] setting HDF5 io mode to collective\n", MODULE);
    H5Pset_dxpl_mpio(d->dxpl, H5FD_MPIO_COLLECTIVE);
  }
  else {
    printf("[%s] setting HDF5 io mode to independent\n", MODULE);
    H5Pset_dxpl_mpio(d->dxpl, H5FD_MPIO_INDEPENDENT);
  }
#endif
}
Example #7
void BigArray<T>::loadNC(const std::string &fileName)
{
    dataFileName = fileName;

    connection.disconnect();
    connection = releaseSignal().connect(boost::bind(&gurls::BigArray<T>::close, this));

    std::string errorString = "Error opening file " + fileName + ":";


    // Set up file access property list with parallel I/O access
    plist_id = H5Pcreate(H5P_FILE_ACCESS);
    if(plist_id == -1)
        throw gException(errorString);

    herr_t status;

#ifdef USE_MPIIO
    status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
#else
    status = H5Pset_fapl_mpiposix(plist_id, MPI_COMM_WORLD, false);
#endif
    CHECK_HDF5_ERR(status, errorString)

    // Create a new file collectively and release property list identifier.
    file_id = H5Fopen(fileName.c_str(), H5F_ACC_RDWR, plist_id);
    CHECK_HDF5_ERR(file_id, errorString)

    status = H5Pclose(plist_id);
    CHECK_HDF5_ERR(status, errorString)

    dset_id =  H5Dopen(file_id, "mat", H5P_DEFAULT);
    CHECK_HDF5_ERR(dset_id, errorString)

    hid_t filespace = H5Dget_space( dset_id );
    CHECK_HDF5_ERR(filespace, errorString)

    hsize_t dims[2], maxDims[2];
    status = H5Sget_simple_extent_dims(filespace, dims, maxDims);
    CHECK_HDF5_ERR(status, errorString)

    status = H5Sclose(filespace);
    CHECK_HDF5_ERR(status, errorString)

    this->numrows = static_cast<unsigned long>(dims[1]);
    this->numcols = static_cast<unsigned long>(dims[0]);

    // Create property list for collective dataset write.
    plist_id = H5Pcreate(H5P_DATASET_XFER);
    if(plist_id == -1)
        throw gException(errorString);

    status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
    CHECK_HDF5_ERR(status, errorString)

}
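loadNC() deliberately leaves dset_id and the independent transfer list plist_id open so later element accesses can reuse them. A minimal sketch of such a partial read under that assumption; col_index and buffer are illustrative:

    // Sketch: read one column of the open "mat" dataset with the stored plist_id.
    hsize_t start[2] = {col_index, 0};
    hsize_t count[2] = {1, this->numrows};
    hid_t fspace = H5Dget_space(dset_id);
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);
    hid_t mspace = H5Screate_simple(2, count, NULL);
    H5Dread(dset_id, getHdfType<T>(), mspace, fspace, plist_id, buffer);
    H5Sclose(mspace);
    H5Sclose(fspace);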
Example #8
void pHdf5IoDataModel::fileOpen(const QString &file_name, FileMode mode)
{
    //only open the file if it is not already open; otherwise tell the user
    if(!d->file_is_open) {

        //error out if the communicator has not been set
        if(d->comm==nullptr) {
            dtkError() << __func__ << "communicator not set";
        }

        // H5P_FILE_ACCESS applies to H5Fcreate and H5Fopen
        d->prop_list_id = H5Pcreate(H5P_FILE_ACCESS);

        MPI_Info info = MPI_INFO_NULL;
        MPI_Comm comm = *static_cast<MPI_Comm *>(d->comm->data());
        H5Pset_fapl_mpio(d->prop_list_id, comm, info);

        switch (mode) {
        case dtkIoDataModel::Trunc:
            d->file_id = H5Fcreate (file_name.toUtf8().constData(), H5F_ACC_TRUNC,
                                   H5P_DEFAULT, d->prop_list_id);

            break;
        case dtkIoDataModel::NotExisting:
            d->file_id = H5Fcreate(file_name.toUtf8().constData(), H5F_ACC_EXCL,
                                   H5P_DEFAULT, d->prop_list_id);
            break;
        case dtkIoDataModel::ReadOnly:
            d->file_id = H5Fopen(file_name.toUtf8().constData(), H5F_ACC_RDONLY,
                                 d->prop_list_id);
            break;
        case dtkIoDataModel::ReadWrite:
            d->file_id = H5Fopen(file_name.toUtf8().constData(), H5F_ACC_RDWR,
                                 d->prop_list_id);
            break;
        default:
            dtkError() << "unsupported fileMode";
        };

        //close the property list for file
        H5Pclose(d->prop_list_id);
        if(d->file_id<0) {
            dtkError() << "error in fileOpen for file_name " << file_name;
        }
        else {
            //if the file opened correctly, create a property list to collectively write datasets
            d->file_is_open = true;
            d->prop_list_id = H5Pcreate(H5P_DATASET_XFER);
            H5Pset_dxpl_mpio(d->prop_list_id, H5FD_MPIO_COLLECTIVE);
        }
    }
    else {
        qDebug() << "File" << file_name << "is already open, please close it before opening a new one";
    }
}
Example #9
void LifeV::HDF5IO::openTable (const std::string& tableName,
                               hsize_t tableDimensions[])
{
    tableHandle& currentTable = M_tableList[tableName];

#ifdef H5_USE_16_API
    currentTable.dataset = H5Dopen (M_fileId, tableName.c_str());
#else
    currentTable.dataset = H5Dopen (M_fileId, tableName.c_str(), H5P_DEFAULT);
#endif
    currentTable.filespace = H5Dget_space (currentTable.dataset);
    H5Sget_simple_extent_dims (currentTable.filespace, tableDimensions, NULL);
    currentTable.plist = H5Pcreate (H5P_DATASET_XFER);
    H5Pset_dxpl_mpio (currentTable.plist, H5FD_MPIO_COLLECTIVE);
}
Example #10
/*-------------------------------------------------------------------------
 * Function:    create_file
 *
 * Purpose:    Creates file used in part 1 of the test
 *
 * Return:    Success:    0
 *
 *        Failure:    1
 *
 * Programmer:    Leon Arber
 *              Sept. 26, 2006
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static hid_t
create_file(char* name, hid_t fapl)
{
    hid_t    file, dcpl, space, dset, groups, grp, plist;
    hsize_t    ds_size[2] = {100, 100};
    hsize_t    ch_size[2] = {5, 5};
    hsize_t    i, j;



    if((file=H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) goto error;

    /* Create a chunked dataset */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
    if(H5Pset_chunk(dcpl, 2, ch_size) < 0) goto error;
    if((space = H5Screate_simple(2, ds_size, NULL)) < 0) goto error;
    if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_FLOAT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        goto error;

    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);


    /* Write some data */
    for(i = 0; i < ds_size[0]; i++) {
    /*
    * The extra cast in the following statement is a bug workaround
    * for the Win32 version 5.0 compiler.
    * 1998-11-06 ptl
    */
    for(j = 0; j < ds_size[1]; j++)
        the_data[i][j] = (double)(hssize_t)i/(hssize_t)(j+1);
    }
    if(H5Dwrite(dset, H5T_NATIVE_DOUBLE, space, space, plist, the_data) < 0) goto error;

    /* Create some groups */
    if((groups = H5Gcreate2(file, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
    for(i = 0; i < 100; i++) {
        sprintf(name, "grp%02u", (unsigned)i);
        if((grp = H5Gcreate2(groups, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
        if(H5Gclose(grp) < 0) goto error;
    }

    return file;

error:
    HD_exit(1);
}
Example #11
op_set op_decl_set_hdf5(char const *file, char const *name)
{
  //create new communicator
  int my_rank, comm_size;
  MPI_Comm_dup(MPI_COMM_WORLD, &OP_MPI_HDF5_WORLD);
  MPI_Comm_rank(OP_MPI_HDF5_WORLD, &my_rank);
  MPI_Comm_size(OP_MPI_HDF5_WORLD, &comm_size);

  //MPI variables
  MPI_Info info  = MPI_INFO_NULL;

  //HDF5 APIs definitions
  hid_t       file_id; //file identifier
  hid_t plist_id;  //property list identifier
  hid_t dset_id; //dataset identifier

  //Set up file access property list with parallel I/O access
  plist_id = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist_id, OP_MPI_HDF5_WORLD, info);

  file_id = H5Fopen(file, H5F_ACC_RDONLY, plist_id );
  H5Pclose(plist_id);

  //Open the dataset with default properties
  dset_id = H5Dopen(file_id, name, H5P_DEFAULT);

  //Create property list for collective dataset read.
  plist_id = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);

  int g_size = 0;
  //read data
  H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, plist_id, &g_size);

  H5Pclose(plist_id);
  H5Dclose(dset_id);

  H5Fclose(file_id);

  //calculate local size of set for this mpi process
  int l_size = compute_local_size (g_size, comm_size, my_rank);
  MPI_Comm_free(&OP_MPI_HDF5_WORLD);

  return op_decl_set(l_size,  name);
}
Example #12
void LifeV::HDF5IO::createTable (const std::string& tableName,
                                 hid_t& fileDataType,
                                 hsize_t tableDimensions[])
{
    tableHandle& currentTable = M_tableList[tableName];

    currentTable.filespace = H5Screate_simple (2, tableDimensions,
                                               tableDimensions);
#ifdef H5_USE_16_API
    currentTable.dataset = H5Dcreate (M_fileId, tableName.c_str(), fileDataType,
                                      currentTable.filespace, H5P_DEFAULT);
#else
    currentTable.dataset = H5Dcreate (M_fileId, tableName.c_str(), fileDataType,
                                      currentTable.filespace, H5P_DEFAULT,
                                      H5P_DEFAULT, H5P_DEFAULT);
#endif
    currentTable.plist = H5Pcreate (H5P_DATASET_XFER);
    H5Pset_dxpl_mpio (currentTable.plist, H5FD_MPIO_COLLECTIVE);
}
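createTable() and openTable() (Example #9) only prepare the handles stored in M_tableList; a write through them would look roughly like this sketch, where offset, localDims, and buffer are assumptions rather than LifeV code:

    // Sketch: collective write into a table prepared by createTable().
    tableHandle& t = M_tableList[tableName];
    hid_t memspace = H5Screate_simple (2, localDims, NULL);
    H5Sselect_hyperslab (t.filespace, H5S_SELECT_SET, offset, NULL, localDims, NULL);
    H5Dwrite (t.dataset, fileDataType, memspace, t.filespace, t.plist, buffer);
    H5Sclose (memspace);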
Example #13
void saveParticleComp_Int(int *data,char *fileName,char *dataName,int totalCnt,int cnt,int offSet)
{
  int i,j,k;
  int myrank, nTasks;
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm_size(MPI_COMM_WORLD, &nTasks);

  hid_t file_id,dset_id,plist_id,tic_id;
  herr_t status;
  hid_t total_file_space,subfilespace,filespace,memspace,ticspace;
  hsize_t dimsf[1],count[1],offset[1];

  plist_id=H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist_id,MPI_COMM_WORLD,MPI_INFO_NULL);
//    H5Pset_fclose_degree(plist_id,H5F_CLOSE_SEMI);
//    MPI_Barrier(MPI_COMM_WORLD);

  file_id=H5Fopen(fileName,H5F_ACC_RDWR,plist_id);
  H5Pclose(plist_id);

  dimsf[0]=totalCnt;
  filespace=H5Screate_simple(1,dimsf,NULL);

  count[0]=cnt;
  offset[0]=offSet;
  memspace=H5Screate_simple(1,count,NULL);

  dset_id=H5Dcreate2(file_id,dataName,H5T_NATIVE_INT,filespace,H5P_DEFAULT,H5P_DEFAULT,H5P_DEFAULT);
  subfilespace=H5Dget_space(dset_id);
  H5Sselect_hyperslab(subfilespace,H5S_SELECT_SET,offset,NULL,count,NULL);
  plist_id=H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist_id,H5FD_MPIO_INDEPENDENT);
  status = H5Dwrite(dset_id, H5T_NATIVE_INT,memspace,subfilespace,plist_id,data);
  H5Pclose(plist_id);
  H5Sclose(subfilespace);
  H5Dclose(dset_id);

  H5Sclose(memspace);
  H5Sclose(filespace);
  H5Fclose(file_id);
}
Example #14
void restoreData(float *data,char *fileName,char *dataName,int totalCnt,int cnt,int *cntOffSet)
{
  hid_t file_id,dset_id,plist_id,group_id;
  herr_t status;
  hid_t subfilespace,filespace,memspace;
  hsize_t dimsf[1],count[1],offset[1];

  int myrank, nTasks;    
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm_size(MPI_COMM_WORLD, &nTasks);

  plist_id=H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist_id,MPI_COMM_WORLD,MPI_INFO_NULL);
//  H5Pset_fclose_degree(plist_id,H5F_CLOSE_SEMI);
//  MPI_Barrier(MPI_COMM_WORLD);

  file_id=H5Fopen(fileName,H5F_ACC_RDWR,plist_id);
  H5Pclose(plist_id);
  dimsf[0]=totalCnt;     
  filespace=H5Screate_simple(1,dimsf,NULL);

  count[0]=cnt;
  offset[0]=cntOffSet[myrank];
  memspace=H5Screate_simple(1,count,NULL);

  dset_id=H5Dopen2(file_id,dataName,H5P_DEFAULT);
  subfilespace=H5Dget_space(dset_id);
  H5Sselect_hyperslab(subfilespace,H5S_SELECT_SET,offset,NULL,count,NULL);
  plist_id=H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(plist_id,H5FD_MPIO_COLLECTIVE);
  status = H5Dread(dset_id, H5T_NATIVE_FLOAT,memspace,subfilespace,plist_id,data);
  H5Pclose(plist_id);
  H5Sclose(subfilespace);
  H5Dclose(dset_id);

  H5Sclose(memspace);
  H5Sclose(filespace);
  H5Fclose(file_id);
}
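Examples #13 and #14 differ only in the transfer mode they select (independent write vs. collective read). A small hedged helper capturing that single decision, not part of either original file:

/* Sketch: build a dataset-transfer property list for either MPI-IO mode.
   The caller must H5Pclose() the returned identifier. */
hid_t make_dxpl(int collective)
{
  hid_t p = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(p, collective ? H5FD_MPIO_COLLECTIVE : H5FD_MPIO_INDEPENDENT);
  return p;
}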
Example #15
int PHDF5fileClass::WritePHDF5dataset(string grpname, string datasetname, double ***data, int nx, int ny, int nz){

  /* -------------------------- */
  /* Local variables and arrays */
  /* -------------------------- */

  string dname;
  double *buffer;

  hid_t const h5type = H5T_NATIVE_DOUBLE;

  hid_t glob_dspace;
  hid_t locl_dspace;
  hid_t dataset_prop;
  hid_t dataset;
  hid_t dataspace;
  hid_t dataset_xfer;

  /* --------------------------------- */
  /* Check that dimensions are correct */
  /* --------------------------------- */

  if (!bparticles && grpname == "Particles"){
    cout << " WARNING(phdf5): Particle data is not going to be written because the 'bparticles' flag is currently set to FALSE" << endl;
    return (2);
  }

  for (int i=0; i<ndim; i++){
    if (dim[i]%chdim[i]!=0){
      cout << " ERROR(phdf5): Grid size is not a multiple of the chunk size in the " << i << " dimension." << endl;
      cout << "         Glob: " << dim[0] << " " << dim[1] << " " << dim[2] << endl;
      cout << "         Locl: " << chdim[0] << " " << chdim[1] << " " << chdim[2] << endl;
      return 1;
    }
  }

  /* ----------------------- */
  /* Copy raw data to buffer */
  /* ----------------------- */

  if (nx!=chdim[0] || ny!=chdim[1] || nz!=chdim[2]){
    cout << " ERROR(phdf5): data size is not equal to HDF5 chunk size " << endl;
    return 1;
  }

  buffer = new double[nx*ny*nz];
  int l = 0;
  for (int i = 0; i < nx; i++)
    for (int j = 0; j < ny; j++)
      for (int k = 0; k < nz; k++)
        buffer[l++] = data[i][j][k];

  /* -------------------------------------------------------- */
  /* 5- Set the stride, count and block values for each chunk */
  /*    And set the offset for each chunk                     */
  /* -------------------------------------------------------- */

  hsize_t *stride = new hsize_t[ndim];
  hsize_t *count  = new hsize_t[ndim];
  hsize_t *block  = new hsize_t[ndim];
  hsize_t *offset = new hsize_t[ndim];

  for (int i=0; i<ndim; i++){
    stride[i] = 1;
    count[i]  = 1;
    block[i]  = chdim[i];
    offset[i] = mpicoord[i]*chdim[i];
  }

  /* ---------------------------------- */
  /* 6- Create data spaces for our data */
  /* ---------------------------------- */

  glob_dspace = H5Screate_simple(ndim, dim,   NULL);
  locl_dspace = H5Screate_simple(ndim, chdim, NULL);

  /* --------------------------------------- */
  /* 7- Create the dataset for the HDF5 file */
  /* --------------------------------------- */

  dataset_prop = H5Pcreate(H5P_DATASET_CREATE);

  H5Pset_chunk(dataset_prop, ndim, chdim);

  dname   = "/"+grpname+"/"+datasetname;
  dataset = H5Dcreate2(file_id, dname.c_str(), h5type, glob_dspace, H5P_DEFAULT, dataset_prop, H5P_DEFAULT);

  H5Pclose(dataset_prop);

  dataspace = H5Dget_space(dataset);
  H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);

  /* --------------------------------- */
  /* 8- Set the parallel transfer mode */
  /* --------------------------------- */

  dataset_xfer = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(dataset_xfer, H5FD_MPIO_COLLECTIVE);

  /* ---------------------------- */
  /* 9- Write data to the dataset */
  /* ---------------------------- */

  H5Dwrite(dataset, h5type, locl_dspace, dataspace, dataset_xfer, buffer);

  delete [] buffer;

  /* ------------------------------------------------------ */
  /* Close dataset related variables created with H5*create */
  /* ------------------------------------------------------ */

  H5Pclose(dataset_xfer);
  H5Dclose(dataset);
  H5Sclose(locl_dspace);

  /* ---------------------------------------------------- */
  /* Close the remaining variables created with H5*create */
  /* ---------------------------------------------------- */

  H5Sclose(glob_dspace);
  H5Sclose(dataspace);

  delete [] stride;
  delete [] count;
  delete [] block;
  delete [] offset;

  return (0);
}
Example #16
int
main (int argc, char **argv)
{
    /*
     * HDF5 APIs definitions
     */ 	
    hid_t       file_id, dset_id;         /* file and dataset identifiers */
    hid_t       filespace, memspace;      /* file and memory dataspace identifiers */
    hsize_t     dimsf[2];                 /* dataset dimensions */
    int         *data;                    /* pointer to data buffer to write */
    hsize_t	count[2];	          /* hyperslab selection parameters */
    hsize_t	offset[2];
    hid_t	plist_id;                 /* property list identifier */
    int         i;
    herr_t	status;

    /*
     * MPI variables
     */
    int mpi_size, mpi_rank;
    MPI_Comm comm  = MPI_COMM_WORLD;
    MPI_Info info  = MPI_INFO_NULL;

    /*
     * Initialize MPI
     */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &mpi_size);
    MPI_Comm_rank(comm, &mpi_rank);  
 
    /* 
     * Set up file access property list with parallel I/O access
     */
     plist_id = H5Pcreate(H5P_FILE_ACCESS);
     H5Pset_fapl_mpio(plist_id, comm, info);

    /*
     * Create a new file collectively and release property list identifier.
     */
    file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
    H5Pclose(plist_id);
   

    /*
     * Create the dataspace for the dataset.
     */
    dimsf[0] = NX;
    dimsf[1] = NY;
    filespace = H5Screate_simple(RANK, dimsf, NULL); 

    /*
     * Create the dataset with default properties and close filespace.
     */
    dset_id = H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_INT, filespace,
			H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Sclose(filespace);

    /* 
     * Each process defines dataset in memory and writes it to the hyperslab
     * in the file.
     */
    count[0] = dimsf[0]/mpi_size;
    count[1] = dimsf[1];
    offset[0] = mpi_rank * count[0];
    offset[1] = 0;
    memspace = H5Screate_simple(RANK, count, NULL);

    /*
     * Select hyperslab in the file.
     */
    filespace = H5Dget_space(dset_id);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);

    /*
     * Initialize data buffer 
     */
    data = (int *) malloc(sizeof(int)*count[0]*count[1]);
    for (i=0; i < count[0]*count[1]; i++) {
        data[i] = mpi_rank + 10;
    }

    /*
     * Create property list for collective dataset write.
     */
    plist_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
    
    status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace,
		      plist_id, data);
    free(data);

    /*
     * Close/release resources.
     */
    H5Dclose(dset_id);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Pclose(plist_id);
    H5Fclose(file_id);
 
    MPI_Finalize();

    return 0;
}     
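A read-back of the same dataset follows the identical pattern; a minimal sketch, assuming the file, dataset, and dataspaces from the program above are reopened (or still open):

    plist_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
    status = H5Dread(dset_id, H5T_NATIVE_INT, memspace, filespace,
                     plist_id, data);
    H5Pclose(plist_id);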
Example #17
/*
 * Open a file through the HDF5 interface.
 */
static void *HDF5_Open(char *testFileName, IOR_param_t * param)
{
        hid_t accessPropList, createPropList;
        hsize_t memStart[NUM_DIMS],
            dataSetDims[NUM_DIMS],
            memStride[NUM_DIMS],
            memCount[NUM_DIMS], memBlock[NUM_DIMS], memDataSpaceDims[NUM_DIMS];
        int tasksPerDataSet;
        unsigned fd_mode = (unsigned)0;
        hid_t *fd;
        MPI_Comm comm;
        MPI_Info mpiHints = MPI_INFO_NULL;

        fd = (hid_t *) malloc(sizeof(hid_t));
        if (fd == NULL)
                ERR("malloc() failed");
        /*
         * HDF5 uses different flags than those for POSIX/MPIIO
         */
        if (param->open == WRITE) {     /* WRITE flags */
                param->openFlags = IOR_TRUNC;
        } else {                /* READ or check WRITE/READ flags */
                param->openFlags = IOR_RDONLY;
        }

        /* set IOR file flags to HDF5 flags */
        /* -- file open flags -- */
        if (param->openFlags & IOR_RDONLY) {
                fd_mode |= H5F_ACC_RDONLY;
        }
        if (param->openFlags & IOR_WRONLY) {
                fprintf(stdout, "File write only not implemented in HDF5\n");
        }
        if (param->openFlags & IOR_RDWR) {
                fd_mode |= H5F_ACC_RDWR;
        }
        if (param->openFlags & IOR_APPEND) {
                fprintf(stdout, "File append not implemented in HDF5\n");
        }
        if (param->openFlags & IOR_CREAT) {
                fd_mode |= H5F_ACC_CREAT;
        }
        if (param->openFlags & IOR_EXCL) {
                fd_mode |= H5F_ACC_EXCL;
        }
        if (param->openFlags & IOR_TRUNC) {
                fd_mode |= H5F_ACC_TRUNC;
        }
        if (param->openFlags & IOR_DIRECT) {
                fprintf(stdout, "O_DIRECT not implemented in HDF5\n");
        }

        /* set up file creation property list */
        createPropList = H5Pcreate(H5P_FILE_CREATE);
        HDF5_CHECK(createPropList, "cannot create file creation property list");
        /* set size of offset and length used to address HDF5 objects */
        HDF5_CHECK(H5Pset_sizes
                   (createPropList, sizeof(hsize_t), sizeof(hsize_t)),
                   "cannot set property list properly");

        /* set up file access property list */
        accessPropList = H5Pcreate(H5P_FILE_ACCESS);
        HDF5_CHECK(accessPropList, "cannot create file access property list");

        /*
         * someday HDF5 implementation will allow subsets of MPI_COMM_WORLD
         */
        /* store MPI communicator info for the file access property list */
        if (param->filePerProc) {
                comm = MPI_COMM_SELF;
        } else {
                comm = testComm;
        }

        SetHints(&mpiHints, param->hintsFileName);
        /*
         * note that with MP_HINTS_FILTERED=no, all key/value pairs will
         * be in the info object.  The info object that is attached to
         * the file during MPI_File_open() will only contain those pairs
         * deemed valid by the implementation.
         */
        /* show hints passed to file */
        if (rank == 0 && param->showHints) {
                fprintf(stdout, "\nhints passed to access property list {\n");
                ShowHints(&mpiHints);
                fprintf(stdout, "}\n");
        }
        HDF5_CHECK(H5Pset_fapl_mpio(accessPropList, comm, mpiHints),
                   "cannot set file access property list");

        /* set alignment */
        HDF5_CHECK(H5Pset_alignment(accessPropList, param->setAlignment,
                                    param->setAlignment),
                   "cannot set alignment");

        /* open file */
        if (param->open == WRITE) {     /* WRITE */
                *fd = H5Fcreate(testFileName, fd_mode,
                                createPropList, accessPropList);
                HDF5_CHECK(*fd, "cannot create file");
        } else {                /* READ or CHECK */
                *fd = H5Fopen(testFileName, fd_mode, accessPropList);
                HDF5_CHECK(*fd, "cannot open file");
        }

        /* show hints actually attached to file handle */
        if (param->showHints || (1) /* WEL - this needs fixing */ ) {
                if (rank == 0
                    && (param->showHints) /* WEL - this needs fixing */ ) {
                        WARN("showHints not working for HDF5");
                }
        } else {
                MPI_Info mpiHintsCheck = MPI_INFO_NULL;
                hid_t apl;
                apl = H5Fget_access_plist(*fd);
                HDF5_CHECK(H5Pget_fapl_mpio(apl, &comm, &mpiHintsCheck),
                           "cannot get info object through HDF5");
                if (rank == 0) {
                        fprintf(stdout,
                                "\nhints returned from opened file (HDF5) {\n");
                        ShowHints(&mpiHintsCheck);
                        fprintf(stdout, "}\n");
                        if (1 == 1) {   /* request the MPIIO file handle and its hints */
                                MPI_File *fd_mpiio;
                                HDF5_CHECK(H5Fget_vfd_handle
                                           (*fd, apl, (void **)&fd_mpiio),
                                           "cannot get MPIIO file handle");
                                MPI_CHECK(MPI_File_get_info
                                          (*fd_mpiio, &mpiHintsCheck),
                                          "cannot get info object through MPIIO");
                                fprintf(stdout,
                                        "\nhints returned from opened file (MPIIO) {\n");
                                ShowHints(&mpiHintsCheck);
                                fprintf(stdout, "}\n");
                        }
                }
                MPI_CHECK(MPI_Barrier(testComm), "barrier error");
        }

        /* this is necessary for resetting various parameters
           needed for reopening and checking the file */
        newlyOpenedFile = TRUE;

        HDF5_CHECK(H5Pclose(createPropList),
                   "cannot close creation property list");
        HDF5_CHECK(H5Pclose(accessPropList),
                   "cannot close access property list");

        /* create property list for serial/parallel access */
        xferPropList = H5Pcreate(H5P_DATASET_XFER);
        HDF5_CHECK(xferPropList, "cannot create transfer property list");

        /* set data transfer mode */
        if (param->collective) {
                HDF5_CHECK(H5Pset_dxpl_mpio(xferPropList, H5FD_MPIO_COLLECTIVE),
                           "cannot set collective data transfer mode");
        } else {
                HDF5_CHECK(H5Pset_dxpl_mpio
                           (xferPropList, H5FD_MPIO_INDEPENDENT),
                           "cannot set independent data transfer mode");
        }

        /* set up memory data space for transfer */
        memStart[0] = (hsize_t) 0;
        memCount[0] = (hsize_t) 1;
        memStride[0] = (hsize_t) (param->transferSize / sizeof(IOR_size_t));
        memBlock[0] = (hsize_t) (param->transferSize / sizeof(IOR_size_t));
        memDataSpaceDims[0] = (hsize_t) param->transferSize;
        memDataSpace = H5Screate_simple(NUM_DIMS, memDataSpaceDims, NULL);
        HDF5_CHECK(memDataSpace, "cannot create simple memory data space");

        /* define hyperslab for memory data space */
        HDF5_CHECK(H5Sselect_hyperslab(memDataSpace, H5S_SELECT_SET,
                                       memStart, memStride, memCount,
                                       memBlock), "cannot create hyperslab");

        /* set up parameters for fpp or different dataset count */
        if (param->filePerProc) {
                tasksPerDataSet = 1;
        } else {
                if (param->individualDataSets) {
                        /* each task in segment has single data set */
                        tasksPerDataSet = 1;
                } else {
                        /* share single data set across all tasks in segment */
                        tasksPerDataSet = param->numTasks;
                }
        }
        dataSetDims[0] = (hsize_t) ((param->blockSize / sizeof(IOR_size_t))
                                    * tasksPerDataSet);

        /* create a simple data space containing information on size
           and shape of data set, and open it for access */
        dataSpace = H5Screate_simple(NUM_DIMS, dataSetDims, NULL);
        HDF5_CHECK(dataSpace, "cannot create simple data space");

        return (fd);
}
Example #18
/*
     This should handle properly the cases where PetscInt is 32 or 64 bits and hsize_t is 32 or 64 bits. This means properly casting, with
   checks, back and forth between the two types of variables.
*/
PetscErrorCode VecLoad_HDF5(Vec xin, PetscViewer viewer)
{
  hid_t          file_id, group, dset_id, filespace, memspace, plist_id;
  hsize_t        rdim, dim;
  hsize_t        dims[4], count[4], offset[4];
  herr_t         status;
  PetscInt       n, N, bs = 1, bsInd, lenInd, low, timestep;
  PetscScalar    *x;
  const char     *vecname;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscViewerHDF5OpenGroup(viewer, &file_id, &group);CHKERRQ(ierr);
  ierr = PetscViewerHDF5GetTimestep(viewer, &timestep);CHKERRQ(ierr);
  ierr = VecGetBlockSize(xin,&bs);CHKERRQ(ierr);
  /* Create the dataset with default properties and close filespace */
  ierr = PetscObjectGetName((PetscObject)xin,&vecname);CHKERRQ(ierr);
#if (H5_VERS_MAJOR * 10000 + H5_VERS_MINOR * 100 + H5_VERS_RELEASE >= 10800)
  dset_id = H5Dopen2(group, vecname, H5P_DEFAULT);
#else
  dset_id = H5Dopen(group, vecname);
#endif
  if (dset_id == -1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not H5Dopen() with Vec named %s",vecname);
  /* Retrieve the dataspace for the dataset */
  filespace = H5Dget_space(dset_id);
  if (filespace == -1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not H5Dget_space()");
  dim = 0;
  if (timestep >= 0) ++dim;
  ++dim;
  if (bs >= 1) ++dim;
#if defined(PETSC_USE_COMPLEX)
  ++dim;
#endif
  rdim = H5Sget_simple_extent_dims(filespace, dims, NULL);
#if defined(PETSC_USE_COMPLEX)
  bsInd = rdim-2;
#else
  bsInd = rdim-1;
#endif
  lenInd = timestep >= 0 ? 1 : 0;
  if (rdim != dim) {
    if (rdim == dim+1 && bs == -1) bs = dims[bsInd];
    else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Dimension of array in file %d not %d as expected",rdim,dim);
  } else if (bs >= 1 && bs != (PetscInt) dims[bsInd]) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_FILE_UNEXPECTED, "Block size %d specified for vector does not match blocksize in file %d",bs,dims[bsInd]);

  /* Set Vec sizes,blocksize,and type if not already set */
  if ((xin)->map->n < 0 && (xin)->map->N < 0) {
    ierr = VecSetSizes(xin, PETSC_DECIDE, dims[lenInd]*bs);CHKERRQ(ierr);
  }
  /* If sizes and type already set,check if the vector global size is correct */
  ierr = VecGetSize(xin, &N);CHKERRQ(ierr);
  if (N/bs != (PetscInt) dims[lenInd]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Vector in file has different length (%d) than input vector (%d)", (PetscInt) dims[lenInd], N/bs);

  /* Each process defines a dataset and reads it from the hyperslab in the file */
  ierr = VecGetLocalSize(xin, &n);CHKERRQ(ierr);
  dim  = 0;
  if (timestep >= 0) {
    count[dim] = 1;
    ++dim;
  }
  ierr = PetscHDF5IntCast(n/bs,count + dim);CHKERRQ(ierr);
  ++dim;
  if (bs >= 1) {
    count[dim] = bs;
    ++dim;
  }
#if defined(PETSC_USE_COMPLEX)
  count[dim] = 2;
  ++dim;
#endif
  memspace = H5Screate_simple(dim, count, NULL);
  if (memspace == -1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not H5Screate_simple()");

  /* Select hyperslab in the file */
  ierr = VecGetOwnershipRange(xin, &low, NULL);CHKERRQ(ierr);
  dim  = 0;
  if (timestep >= 0) {
    offset[dim] = timestep;
    ++dim;
  }
  ierr = PetscHDF5IntCast(low/bs,offset + dim);CHKERRQ(ierr);
  ++dim;
  if (bs >= 1) {
    offset[dim] = 0;
    ++dim;
  }
#if defined(PETSC_USE_COMPLEX)
  offset[dim] = 0;
  ++dim;
#endif
  status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);CHKERRQ(status);

  /* Create property list for collective dataset read */
  plist_id = H5Pcreate(H5P_DATASET_XFER);
  if (plist_id == -1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not H5Pcreate()");
#if defined(PETSC_HAVE_H5PSET_FAPL_MPIO)
  status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);CHKERRQ(status);
#endif
  /* To write dataset independently use H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT) */

  ierr   = VecGetArray(xin, &x);CHKERRQ(ierr);
  status = H5Dread(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, x);CHKERRQ(status);
  ierr   = VecRestoreArray(xin, &x);CHKERRQ(ierr);

  /* Close/release resources */
  if (group != file_id) {
    status = H5Gclose(group);CHKERRQ(status);
  }
  status = H5Pclose(plist_id);CHKERRQ(status);
  status = H5Sclose(filespace);CHKERRQ(status);
  status = H5Sclose(memspace);CHKERRQ(status);
  status = H5Dclose(dset_id);CHKERRQ(status);

  ierr = VecAssemblyBegin(xin);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(xin);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
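The header comment is about casting carefully between PetscInt and hsize_t. A sketch of the kind of checked conversion that PetscHDF5IntCast performs; this is an illustration, not the PETSc implementation:

/* Sketch: checked PetscInt -> hsize_t conversion. */
static int IntToHsize(PetscInt n, hsize_t *out)
{
  if (n < 0) return -1;                 /* hsize_t is unsigned */
  *out = (hsize_t)n;
  if ((PetscInt)(*out) != n) return -1; /* round-trip check catches overflow */
  return 0;
}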
Example #19
int main(int argc, char **argv){
  int      mpi_size, mpi_rank;
  MPI_Comm comm = MPI_COMM_WORLD;
  MPI_Info info;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(comm, &mpi_size);
  MPI_Comm_rank(comm, &mpi_rank);

  int c;
  opterr = 0;
  strncpy(output, "/global/cscratch1/sd/jialin/hdf-data/climate/temp1.h5",NAME_MAX);
  strncpy(filename, "./fake_xyz_default.h5", NAME_MAX);
  strncpy(cb_buffer_size,"16777216", NAME_MAX);
  strncpy(cb_nodes, "16", NAME_MAX);


  int col=1;//collective read/write
  //input args: i: input filename, o: output filename, b: collective buffer size, n: collective buffer nodes, k: is collective, v: dataset name
  while ((c = getopt (argc, argv, "i:o:b:n:k:v:")) != -1)
    switch (c)
      {
      case 'i':
	strncpy(filename, optarg, NAME_MAX);
	break;
      case 'o':
        strncpy(output, optarg, NAME_MAX);
        break;
      case 'b':
	strncpy(cb_buffer_size,optarg, NAME_MAX);
	break;
      case 'n':
	strncpy(cb_nodes, optarg, NAME_MAX);
	break;
      case 'k':
	col = strtol(optarg, NULL, 10);
	break;
      case 'v':
	strncpy(DATASETNAME, optarg, NAME_MAX);
	break;
      default:
	break;
      }

  MPI_Info_create(&info); 
  MPI_Info_set(info, "cb_buffer_size", cb_buffer_size);
  MPI_Info_set(info, "cb_nodes", cb_nodes);
 

  //Open file/dataset
  hid_t fapl,file,dataset;
  fapl = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(fapl, comm, info);
  file= H5Fopen(filename, H5F_ACC_RDONLY, fapl);
  H5Pclose(fapl);
  dataset= H5Dopen(file, DATASETNAME,H5P_DEFAULT);

  hid_t datatype,dataspace;
  H5T_class_t class;                 /* data type class */
  H5T_order_t order;                 /* data order */
  size_t      size;                  /* size of data*/    
  int i, status_n,rank;
  datatype  = H5Dget_type(dataset);     /* datatype handle */
  class     = H5Tget_class(datatype);   /* needed by the (commented) type dispatch below */
  size      = H5Tget_size(datatype);    /* element size, used for the GB totals below */
  dataspace = H5Dget_space(dataset);    /* dataspace handle */
  rank      = H5Sget_simple_extent_ndims(dataspace);
  hsize_t     dims_out[rank];
  status_n  = H5Sget_simple_extent_dims(dataspace, dims_out, NULL);
  hsize_t offset[rank];
  hsize_t count[rank];


  //parallelize along x dims
  hsize_t rest;
  rest = dims_out[0] % mpi_size;
  if(mpi_rank != (mpi_size -1)){
    count[0] = dims_out[0]/mpi_size;
  }else{
    count[0] = dims_out[0]/mpi_size + rest;
  }
  offset[0] = dims_out[0]/mpi_size*mpi_rank;
  //select all for other dims
  for(i=1; i<rank; i++){
   offset[i] = 0;
   count[i] = dims_out[i];
  }
  
  //specify the selection in the dataspace for each rank
  H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL);
  
  hsize_t rankmemsize=1;
  hid_t memspace;
  for(i=0; i<rank; i++){  
   rankmemsize*=count[i];
  }
 
  memspace = H5Screate_simple(rank,count,NULL);
  float totalsizegb=mpi_size * rankmemsize *size / 1024.0 / 1024.0 / 1024.0;
  
  if(mpi_rank==0)
  printf("rankmemsize:%llu, %lluBytes\n",(unsigned long long)rankmemsize,(unsigned long long)(rankmemsize*sizeof(double)));
  double * data_t=(double *)malloc( rankmemsize * sizeof(double));
  if(data_t == NULL){
    printf("Memory allocation fails mpi_rank = %d",mpi_rank);
    for (i=0; i< rank; i++){
    printf("Dim %d: %d, ",i,count[i]);
    }
    return -1;
  }
  
  MPI_Barrier(comm);
  double tr0 = MPI_Wtime();  
  if(mpi_rank == 0){
    if(col==1)
    printf("IO: Collective Read\n");
    else 
    printf("IO: Independent Read\n");
  }
  hid_t plist=H5P_DEFAULT;
  if(col==1){
   plist = H5Pcreate(H5P_DATASET_XFER);
   H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
  }
  else {
   plist = H5Pcreate(H5P_DATASET_XFER);
   H5Pset_dxpl_mpio(plist,H5FD_MPIO_INDEPENDENT);
  }

  H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace,dataspace, plist, data_t);

  printf("rank %d,start0 %lld count0 %lld,start1 %lld count1 %lld\n",mpi_rank,offset[0],count[0],offset[1],count[1]);
 
  H5Pclose(plist); 
  MPI_Barrier(comm);
  double tr1 = MPI_Wtime()-tr0;
  if(mpi_rank==0){ 
   printf("\nRank %d, read time %.2fs\n",mpi_rank,tr1);
   for(i=0; i<rank; i++){
    printf("Start_%d:%d Count_%d:%d\n",i,offset[i],i,count[i]);
   }
   printf("\n");
   printf("Total Loading %f GB with %d mpi processes\n",totalsizegb,mpi_size);
  }

  //close object handle of file 1
  H5Tclose(datatype);
  H5Sclose(dataspace);
  H5Sclose(memspace);
  H5Dclose(dataset);
  
  //H5Fclose(file);
  MPI_Barrier(comm);
  if(mpi_rank==0) printf("Closing File %s ok,\nOpening File %s \n",filename,output);
  //start to write to new places
  //printf("%d:%d,%d\n",mpi_rank,offset[0],count[0]); 
  hid_t plist_id2, file_id2,dataspace_id2, dataset_id2;
  //new file access property
  plist_id2 = H5Pcreate(H5P_FILE_ACCESS);
  //mpiio
  H5Pset_fapl_mpio(plist_id2, comm, info);
  //create new file
  file_id2 = H5Fcreate(output, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id2);
  //file_id2=H5Fcreate(output, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
  H5Pclose(plist_id2);
  //in mem data space for new file, rank, dims_out, same
  dataspace_id2 = H5Screate_simple(rank, dims_out, NULL);
  /*if(class == H5T_INTEGER)
     dataset_id2 = H5Dcreate(file_id2,DATASETNAME, H5T_NATIVE_INT, dataspace_id2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);     
  else if(class == H5T_FLOAT){
     if(size==4) dataset_id2=H5Dcreate(file_id2,DATASETNAME, H5T_NATIVE_FLOAT, dataspace_id2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
     else if(size==8) dataset_id2=H5Dcreate(file_id2,DATASETNAME, H5T_NATIVE_DOUBLE, dataspace_id2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  }
  */
  //dataset_id2=H5Dcreate(file_id2,DATASETNAME, H5T_NATIVE_DOUBLE, dataspace_id2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  H5Sclose(dataspace_id2);
  //dataspace_id2=H5Dget_space(dataset_id2); 
  //herr_t eit=H5Sselect_hyperslab(dataspace_id2, H5S_SELECT_SET, offset, NULL, count, NULL);
  MPI_Barrier(comm);
  double tw0 = MPI_Wtime();
  if(mpi_rank == 0){
    //if(eit<0) printf("hyperslab error in file 2\n");
    if(col==1)
    printf("IO: Collective Write\n");
    else
    printf("IO: Independent Write\n"); 
  }
 
  hid_t memspace_id2 = H5Screate_simple(rank, count, NULL);
  hid_t plist_id3 = H5P_DEFAULT;
  if(col==1){
   plist_id3 = H5Pcreate(H5P_DATASET_XFER);
   H5Pset_dxpl_mpio(plist_id3, H5FD_MPIO_COLLECTIVE);
  }
/*
  if(class == H5T_INTEGER)
	H5Dwrite(dataset_id2, H5T_NATIVE_INT, memspace_id2, dataspace_id2, plist_id3, data_t);
  else if(class == H5T_FLOAT){
     if(size==4) H5Dwrite(dataset_id2, H5T_NATIVE_FLOAT, memspace_id2, dataspace_id2, plist_id3, data_t); 
     else if(size==8) H5Dwrite(dataset_id2, H5T_NATIVE_DOUBLE, memspace_id2, dataspace_id2, plist_id3, data_t); 
  }
  H5Pclose(plist_id3);
*/
 //H5Dwrite(dataset_id2, H5T_NATIVE_DOUBLE, memspace_id2, dataspace_id2, H5P_DEFAULT, data_t); 
  if(mpi_rank==0||mpi_rank==1){
    printf("rank %d:",mpi_rank);
    for(i=0;i<10;i++)
      printf("%.2lf ", *(data_t+i));
    printf("\n");
  }
  MPI_Barrier(comm);
  double tw1 = MPI_Wtime()-tw0;
  if(mpi_rank==0||mpi_rank==mpi_size-1)
  {
	printf("rank %d,write time %.2fs\n",mpi_rank,tw1);
	printf("Total Writing %f GB with %d mpi processes \n",totalsizegb,mpi_size);
  }
  /*
  if(data_t!=NULL){
   free(data_t);
   if(mpi_rank==0) printf("data free ok\n");
  }
  */
  //clean up object handle
  /*H5Tclose(datatype);
  H5Sclose(dataspace);
  H5Sclose(memspace);
  H5Dclose(dataset);
  H5Fclose(file);
  */
/*
  H5Sclose(dataspace_id2);
  H5Sclose(memspace_id2);
  H5Dclose(dataset_id2);
  H5Fclose(file_id2);
*/
  MPI_Finalize();
}
Example #20
File: h5mpi.c Project: darien0/Mara
void _io_write_prim_h5mpi(const char *fname, const char **pnames, const double *data)
// -----------------------------------------------------------------------------
// This function uses a collective MPI-IO procedure to write the contents of
// 'data' to the HDF5 file named 'fname', which is assumed to have been created
// already. The dataset with name 'dname', which is being written to, must not
// exist already. Chunking is enabled as per the module-wide ChunkSize variable,
// and is disabled by default. Recommended chunk size is local subdomain
// size. This will result in optimized read/write on the same decomposition
// layout, but poor performance for different access patterns, for example the
// slabs used by cluster-FFT functions.
//
//                                   WARNING!
//
// All processors must define the same chunk size, the behavior of this function
// is not defined otherwise. This implies that chunking should be disabled when
// running on a core count for which the subdomain sizes are non-uniform.
// -----------------------------------------------------------------------------
{
  hsize_t ndp1 = n_dims + 1;
  hsize_t *a_nint = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *l_ntot = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *l_strt = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *stride = (hsize_t*) malloc(ndp1*sizeof(hsize_t));

  int i;
  for (i=0; i<n_dims; ++i) {
    a_nint[i] = A_nint[i]; // Selection size, target and destination
    l_ntot[i] = L_ntot[i]; // Memory space total size
    l_strt[i] = L_strt[i]; // Memory space selection start
    stride[i] = 1;
  }
  a_nint[ndp1 - 1] = 1;
  l_ntot[ndp1 - 1] = n_prim;
  stride[ndp1 - 1] = n_prim;

  // Here we create the following property lists:
  //
  // file access property list   ........ for the call to H5Fopen
  // dset creation property list ........ for the call to H5Dcreate
  // dset transfer property list ........ for the call to H5Dwrite
  // ---------------------------------------------------------------------------
  hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
  hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
  hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

  // Here we define collective (MPI) access to the file with alignment
  // properties optimized for the local file system, according to DiskBlockSize.
  // ---------------------------------------------------------------------------
  if (EnableChunking) {
    H5Pset_chunk(dcpl, n_dims, ChunkSize);
  }
  if (EnableAlignment) {
    H5Pset_alignment(fapl, AlignThreshold, DiskBlockSize);
  }

  H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
  H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

  hid_t file = H5Fopen(fname, H5F_ACC_RDWR, fapl);
  const int overwrite = H5Lexists(file, "prim", H5P_DEFAULT);
  hid_t prim = overwrite ? H5Gopen(file, "prim", H5P_DEFAULT) :
    H5Gcreate(file, "prim", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  hid_t mspc = H5Screate_simple(ndp1  , l_ntot, NULL);
  hid_t fspc = H5Screate_simple(n_dims, G_ntot, NULL);
  // Call signature to H5Sselect_hyperslab is (start, stride, count, block)
  // ---------------------------------------------------------------------------
  const clock_t start_all = clock();

  for (i=0; i<n_prim; ++i) {

    hid_t dset = overwrite ? H5Dopen(prim, pnames[i], H5P_DEFAULT) : 
      H5Dcreate(prim, pnames[i], H5T_NATIVE_DOUBLE, fspc,
		H5P_DEFAULT, dcpl, H5P_DEFAULT);

    l_strt[ndp1 - 1] = i;
    H5Sselect_hyperslab(mspc, H5S_SELECT_SET, l_strt, stride, a_nint, NULL);
    H5Sselect_hyperslab(fspc, H5S_SELECT_SET, G_strt,   NULL, A_nint, NULL);
    H5Dwrite(dset, H5T_NATIVE_DOUBLE, mspc, fspc, dxpl, data);
    H5Dclose(dset);
  }
  if (iolog) {
    const double sec = (double)(clock() - start_all) / CLOCKS_PER_SEC;
    fprintf(iolog, "[h5mpi] write to %s took %f minutes\n", fname, sec/60.0);
    fflush(iolog);
  }

  free(a_nint);
  free(l_ntot);
  free(l_strt);
  free(stride);

  // Always close the hid_t handles in the reverse order they were opened in.
  // ---------------------------------------------------------------------------
  H5Sclose(fspc);
  H5Sclose(mspc);
  H5Gclose(prim);
  H5Fclose(file);
  H5Pclose(dxpl);
  H5Pclose(dcpl);
  H5Pclose(fapl);
}
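The warning in the header requires every rank to define the same chunk shape. A hedged sketch of how that invariant could be checked before enabling chunking; it layers plain MPI calls on the module-wide ChunkSize/EnableChunking variables named above and assumes n_dims <= 8 and <string.h>:

/* Sketch: compare each rank's ChunkSize against rank 0's and only
   enable chunking when all ranks agree. */
hsize_t ref[8];                       /* assumes n_dims <= 8 */
int i, same = 1, all_same = 0;
memcpy(ref, ChunkSize, n_dims * sizeof(hsize_t));
MPI_Bcast(ref, (int)(n_dims * sizeof(hsize_t)), MPI_BYTE, 0, MPI_COMM_WORLD);
for (i = 0; i < n_dims; ++i) same = same && (ref[i] == ChunkSize[i]);
MPI_Allreduce(&same, &all_same, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
EnableChunking = all_same;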
Example #21
File: h5mpi.c Project: darien0/Mara
void _io_read_prim_h5mpi(const char *fname, const char **pnames, double *data)
{
  hsize_t ndp1 = n_dims + 1;
  hsize_t *a_nint = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *l_ntot = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *l_strt = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
  hsize_t *stride = (hsize_t*) malloc(ndp1*sizeof(hsize_t));

  int i;
  for (i=0; i<n_dims; ++i) {
    a_nint[i] = A_nint[i]; // Selection size, target and destination
    l_ntot[i] = L_ntot[i]; // Memory space total size
    l_strt[i] = L_strt[i]; // Memory space selection start
    stride[i] = 1;
  }
  a_nint[ndp1 - 1] = 1;
  l_ntot[ndp1 - 1] = n_prim;
  stride[ndp1 - 1] = n_prim;

  // Here we create the following property lists:
  //
  // file access property list   ........ for the call to H5Fopen
  // dset transfer property list ........ for the call to H5Dread
  // ---------------------------------------------------------------------------
  hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
  hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

  // Here we define collective (MPI) access to the file with alignment
  // properties optimized for the local file system, according to DiskBlockSize.
  // ---------------------------------------------------------------------------
  H5Pset_alignment(fapl, AlignThreshold, DiskBlockSize);
  H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
  H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

  hid_t file = H5Fopen(fname, H5F_ACC_RDONLY, fapl);
  hid_t prim = H5Gopen(file, "prim", H5P_DEFAULT);
  hid_t mspc = H5Screate_simple(ndp1  , l_ntot, NULL);
  hid_t fspc = H5Screate_simple(n_dims, G_ntot, NULL);

  // Call signature to H5Sselect_hyperslab is (start, stride, count, block)
  // ---------------------------------------------------------------------------
  const clock_t start_all = clock();

  for (i=0; i<n_prim; ++i) {
    hid_t dset = H5Dopen(prim, pnames[i], H5P_DEFAULT);
    l_strt[ndp1 - 1] = i;
    H5Sselect_hyperslab(mspc, H5S_SELECT_SET, l_strt, stride, a_nint, NULL);
    H5Sselect_hyperslab(fspc, H5S_SELECT_SET, G_strt,   NULL, A_nint, NULL);
    H5Dread(dset, H5T_NATIVE_DOUBLE, mspc, fspc, dxpl, data);
    H5Dclose(dset);
  }
  if (iolog) {
    const double sec = (double)(clock() - start_all) / CLOCKS_PER_SEC;
    fprintf(iolog, "[h5mpi] read from %s took %f minutes\n", fname, sec/60.0);
    fflush(iolog);
  }

  free(a_nint);
  free(l_ntot);
  free(l_strt);
  free(stride);

  // Always close the hid_t handles in the reverse order they were opened in.
  // ---------------------------------------------------------------------------
  H5Sclose(fspc);
  H5Sclose(mspc);
  H5Gclose(prim);
  H5Fclose(file);
  H5Pclose(dxpl);
  H5Pclose(fapl);
}
Example #22
/*-------------------------------------------------------------------------
 * Function:	check_file
 *
 * Purpose:	Part 2 of a two-part H5Fflush() test.
 *
 * Return:	Success:	0
 *
 *		Failure:	1
 *
 * Programmer:	Leon Arber
 *              Sept. 26, 2006.
 *
 *-------------------------------------------------------------------------
 */
static int
check_file(char* name, hid_t fapl)
{
    hid_t	file, space, dset, groups, grp, plist;
    hsize_t	ds_size[2];
    double	error;
    hsize_t	i, j;

    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
    if((file = H5Fopen(name, H5F_ACC_RDONLY, fapl)) < 0) goto error;

    /* Open the dataset */
    if((dset = H5Dopen2(file, "dset", H5P_DEFAULT)) < 0) goto error;
    if((space = H5Dget_space(dset)) < 0) goto error;
    if(H5Sget_simple_extent_dims(space, ds_size, NULL) < 0) goto error;
    assert(100==ds_size[0] && 100==ds_size[1]);

    /* Read some data */
    if (H5Dread(dset, H5T_NATIVE_DOUBLE, space, space, plist,
		the_data) < 0) goto error;
    for (i=0; i<ds_size[0]; i++) {
	for (j=0; j<ds_size[1]; j++) {
	    /*
	     * The extra cast in the following statement is a bug workaround
	     * for the Win32 version 5.0 compiler.
	     * 1998-11-06 ptl
	     */
	    error = fabs(the_data[i][j]-(double)(hssize_t)i/((hssize_t)j+1));
	    if (error>0.0001) {
		H5_FAILED();
		printf("    dset[%lu][%lu] = %g\n",
			(unsigned long)i, (unsigned long)j, the_data[i][j]);
		printf("    should be %g\n",
			(double)(hssize_t)i/(hssize_t)(j+1));
		goto error;
	    }
	}
    }

    /* Open some groups */
    if((groups = H5Gopen2(file, "some_groups", H5P_DEFAULT)) < 0) goto error;
    for(i = 0; i < 100; i++) {
	sprintf(name, "grp%02u", (unsigned)i);
	if((grp = H5Gopen2(groups, name, H5P_DEFAULT)) < 0) goto error;
	if(H5Gclose(grp) < 0) goto error;
    }

    if(H5Gclose(groups) < 0) goto error;
    if(H5Dclose(dset) < 0) goto error;
    if(H5Fclose(file) < 0) goto error;
    if(H5Pclose(plist) < 0) goto error;
    if(H5Sclose(space) < 0) goto error;

    return 0;

error:
    H5E_BEGIN_TRY {
        H5Pclose(plist);
        H5Gclose(groups);
        H5Dclose(dset);
        H5Fclose(file);
        H5Sclose(space);
    } H5E_END_TRY;
    return 1;
}
Example #23
void BigArray<T>::init(std::string& fileName, unsigned long r, unsigned long c)
{
    dataFileName = fileName;

    connection.disconnect();
    connection = releaseSignal().connect(boost::bind(&gurls::BigArray<T>::close, this));

    std::string errorString = "Error creating file " + fileName + ":";

    // Set up file access property list with parallel I/O access
    plist_id = H5Pcreate(H5P_FILE_ACCESS);
    if(plist_id == -1)
        throw gException(errorString);

    herr_t status;

#ifdef USE_MPIIO
    status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
#else
    status = H5Pset_fapl_mpiposix(plist_id, MPI_COMM_WORLD, false);
#endif

    CHECK_HDF5_ERR(status, errorString)

    // Create a new file collectively and release property list identifier.
    file_id = H5Fcreate(fileName.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
    CHECK_HDF5_ERR(file_id, errorString)

    status = H5Pclose(plist_id);
    CHECK_HDF5_ERR(status, errorString)


    // Create the dataspace for the dataset.
    hsize_t dims[2];
    dims[0] = static_cast<hsize_t>(c);
    dims[1] = static_cast<hsize_t>(r);
    hid_t filespace = H5Screate_simple(2, dims, NULL);
    CHECK_HDF5_ERR(filespace, errorString)


    hid_t plist_dset_id = H5Pcreate(H5P_DATASET_CREATE);
    if(plist_dset_id == -1)
        throw gException(errorString);

    dset_id = H5Dcreate(file_id, "mat", getHdfType<T>(), filespace, H5P_DEFAULT, plist_dset_id, H5P_DEFAULT);
    CHECK_HDF5_ERR(dset_id, errorString)

    status = H5Pclose(plist_dset_id);
    CHECK_HDF5_ERR(status, errorString)

    status = H5Sclose(filespace);
    CHECK_HDF5_ERR(status, errorString)

    this->numrows = r;
    this->numcols = c;

    // Create property list for collective dataset write.
    plist_id = H5Pcreate(H5P_DATASET_XFER);
    if(plist_id == -1)
        throw gException(errorString);

    status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
    CHECK_HDF5_ERR(status, errorString)

    flush();
}
Example #24
 void setWriteIndependent()
 {
     H5Pset_dxpl_mpio(dsetWriteProperties, H5FD_MPIO_INDEPENDENT);
 }
Example #25
void
phdf5readAll(char *filename)
{
    hid_t fid1;			/* HDF5 file IDs */
    hid_t acc_tpl1;		/* File access templates */
    hid_t xfer_plist;		/* Dataset transfer properties list */
    hid_t file_dataspace;	/* File dataspace ID */
    hid_t mem_dataspace;	/* memory dataspace ID */
    hid_t dataset1, dataset2;	/* Dataset ID */
    DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2];	/* data buffer */
    DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2];	/* expected data buffer */

    hsize_t start[SPACE1_RANK];			/* for hyperslab setting */
    hsize_t count[SPACE1_RANK], stride[SPACE1_RANK];	/* for hyperslab setting */

    herr_t ret;         	/* Generic return value */

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    if (verbose)
        printf("Collective read test on file %s\n", filename);

    /* -------------------
     * OPEN AN HDF5 FILE
     * -------------------*/
    /* setup file access template with parallel IO access. */
    acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS);
    assert(acc_tpl1 != FAIL);
    MESG("H5Pcreate access succeed");
    /* set Parallel access with communicator */
    ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
    assert(ret != FAIL);
    MESG("H5Pset_fapl_mpio succeed");

    /* open the file collectively */
    fid1=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl1);
    assert(fid1 != FAIL);
    MESG("H5Fopen succeed");

    /* Release file-access template */
    ret=H5Pclose(acc_tpl1);
    assert(ret != FAIL);


    /* --------------------------
     * Open the datasets in it
     * ------------------------- */
    /* open the dataset1 collectively */
    dataset1 = H5Dopen2(fid1, DATASETNAME1, H5P_DEFAULT);
    assert(dataset1 != FAIL);
    MESG("H5Dopen2 succeed");

    /* open another dataset collectively */
    dataset2 = H5Dopen2(fid1, DATASETNAME2, H5P_DEFAULT);
    assert(dataset2 != FAIL);
    MESG("H5Dopen2 2 succeed");

    /*
     * Set up dimensions of the slab this process accesses.
     */

    /* Dataset1: each process takes a block of columns. */
    slab_set(start, count, stride, BYCOL);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
               (unsigned long)start[0], (unsigned long)start[1],
               (unsigned long)count[0], (unsigned long)count[1],
               (unsigned long)(count[0]*count[1]));

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill dataset with test data */
    dataset_fill(start, count, stride, &data_origin1[0][0]);
    MESG("data_array initialized");
    if (verbose){
	MESG("data_array created");
	dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* set up the collective transfer properties list */
    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
    assert(xfer_plist != FAIL);
    ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    assert(ret != FAIL);
    MESG("H5Pcreate xfer succeed");

    /* read data collectively */
    ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    xfer_plist, data_array1);
    assert(ret != FAIL);
    MESG("H5Dread succeed");

    /* verify the read data with original expected data */
    ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
    assert(ret != FAIL);

    /* release all temporary handles. */
    /* Could have used them for dataset2 but it is cleaner */
    /* to create them again.*/
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);
    H5Pclose(xfer_plist);

    /* Dataset2: each process takes a block of rows. */
    slab_set(start, count, stride, BYROW);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
               (unsigned long)start[0], (unsigned long)start[1],
               (unsigned long)count[0], (unsigned long)count[1],
               (unsigned long)(count[0]*count[1]));

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill dataset with test data */
    dataset_fill(start, count, stride, &data_origin1[0][0]);
    MESG("data_array initialized");
    if (verbose){
	MESG("data_array created");
	dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* set up the collective transfer properties list */
    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
    assert(xfer_plist != FAIL);
    ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    assert(ret != FAIL);
    MESG("H5Pcreate xfer succeed");

    /* read data collectively */
    ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    xfer_plist, data_array1);
    assert(ret != FAIL);
    MESG("H5Dread succeed");

    /* verify the read data with original expected data */
    ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
    assert(ret != FAIL);

    /* release all temporary handles. */
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);
    H5Pclose(xfer_plist);


    /*
     * All reads completed.  Close datasets collectively
     */
    ret=H5Dclose(dataset1);
    assert(ret != FAIL);
    MESG("H5Dclose1 succeed");
    ret=H5Dclose(dataset2);
    assert(ret != FAIL);
    MESG("H5Dclose2 succeed");

    /* close the file collectively */
    H5Fclose(fid1);
}
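
phdf5readAll depends on a slab_set() helper that does not appear in this excerpt. A plausible reconstruction is sketched below, assuming mpi_rank/mpi_size globals and dimensions that divide evenly among processes; the constants and globals stand in for definitions that live elsewhere in the original program.

#define SPACE1_DIM1 24
#define SPACE1_DIM2 24
#define BYROW 1
#define BYCOL 2
extern int mpi_rank, mpi_size;   /* assumed globals, set after MPI_Init */

/* Hypothetical slab_set(): each rank selects a contiguous block of rows
 * or columns of the SPACE1_DIM1 x SPACE1_DIM2 dataset. */
static void slab_set(hsize_t start[], hsize_t count[], hsize_t stride[],
                     int mode)
{
    stride[0] = 1;   /* contiguous blocks: unit stride in both dims */
    stride[1] = 1;
    if (mode == BYROW) {
        /* each process takes a block of rows */
        count[0] = SPACE1_DIM1 / mpi_size;
        count[1] = SPACE1_DIM2;
        start[0] = (hsize_t)mpi_rank * count[0];
        start[1] = 0;
    } else {
        /* BYCOL: each process takes a block of columns */
        count[0] = SPACE1_DIM1;
        count[1] = SPACE1_DIM2 / mpi_size;
        start[0] = 0;
        start[1] = (hsize_t)mpi_rank * count[1];
    }
}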
void writehdf5file(rundata_t rundata, double **dens, double ***vel) {
    /* identifiers */
    hid_t file_id, arr_group_id, dens_dataset_id, vel_dataset_id;
    hid_t dens_dataspace_id, vel_dataspace_id;
    hid_t loc_dens_dataspace_id, loc_vel_dataspace_id;
    hid_t globaldensspace,globalvelspace;
    hid_t dist_id;
    hid_t fap_id;

    /* sizes */
    hsize_t densdims[2], veldims[3];
    hsize_t locdensdims[2], locveldims[3];

    /* status */
    herr_t status;

    /* MPI-IO hints for performance */
    MPI_Info info;

    /* parameters of the hyperslab */
    hsize_t counts[3];
    hsize_t strides[3];
    hsize_t offsets[3];
    hsize_t blocks[3];

    /* set the MPI-IO hints for better performance on GPFS */
    MPI_Info_create(&info);
    MPI_Info_set(info,"IBM_largeblock_io","true");

    /* Set up the parallel environment for file access*/
    fap_id = H5Pcreate(H5P_FILE_ACCESS);
    /* Include the file access property with IBM hint */
    H5Pset_fapl_mpio(fap_id, MPI_COMM_WORLD, info);

    /* Set up the parallel environment */
    dist_id = H5Pcreate(H5P_DATASET_XFER);
    /* we'll be writing collectively */
    H5Pset_dxpl_mpio(dist_id, H5FD_MPIO_COLLECTIVE);

    /* Create a new file - truncate anything existing, use default properties */
    file_id = H5Fcreate(rundata.filename, H5F_ACC_TRUNC, H5P_DEFAULT, fap_id);

    /* HDF5 routines generally return a negative number on failure.  
     * Should check return values! */
    if (file_id < 0) {
        fprintf(stderr,"Could not open file %s\n", rundata.filename);
        return;
    }

    /* Create a new group within the new file */
    arr_group_id = H5Gcreate(file_id,"/ArrayData", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* Give this group an attribute listing the time of calculation */
    {
        hid_t attr_id,attr_sp_id;
        struct tm *t;
        time_t now;
        int yyyymm;
        now = time(NULL);
        t = localtime(&now);
        yyyymm = (1900+t->tm_year)*100 + (t->tm_mon+1);   /* tm_mon is zero-based */

        attr_sp_id = H5Screate(H5S_SCALAR);
        attr_id = H5Acreate(arr_group_id, "Calculated on (YYYYMM)", H5T_STD_U32LE, attr_sp_id, H5P_DEFAULT, H5P_DEFAULT);
        printf("yymm = %d\n",yyyymm);
        H5Awrite(attr_id, H5T_NATIVE_INT, &yyyymm);
        H5Aclose(attr_id);
        H5Sclose(attr_sp_id);
    }

    /* Create the data space for the two global datasets. */
    densdims[0] = rundata.globalnx; densdims[1] = rundata.globalny;
    veldims[0] = 2; veldims[1] = rundata.globalnx; veldims[2] = rundata.globalny;
    
    dens_dataspace_id = H5Screate_simple(2, densdims, NULL);
    vel_dataspace_id  = H5Screate_simple(3, veldims,  NULL);

    /* Create the datasets within the file. 
     * H5T_IEEE_F64LE is a standard (IEEE) double precision (64 bit) floating (F) data type
     * and will work on any machine.  H5T_NATIVE_DOUBLE would work too, but would give
     * different results on GPC and TCS */

    dens_dataset_id = H5Dcreate(file_id, "/ArrayData/dens", H5T_IEEE_F64LE, 
                                dens_dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    vel_dataset_id  = H5Dcreate(file_id, "/ArrayData/vel",  H5T_IEEE_F64LE, 
                                vel_dataspace_id,  H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* Now create the data space for our sub-regions.   These are the data spaces
     * of our actual local data in memory. */
    locdensdims[0] = rundata.localnx; locdensdims[1] = rundata.localny;
    locveldims[0] = 2; locveldims[1] = rundata.localnx; locveldims[2] = rundata.localny;
    
    loc_dens_dataspace_id = H5Screate_simple(2, locdensdims, NULL);
    loc_vel_dataspace_id  = H5Screate_simple(3, locveldims,  NULL);

    /*
     *
     * Now we have to figure out the `hyperslab' within the global
     * data that corresponds to our local data.
     *
     * Hyperslabs are described by an array of counts, strides, offsets,
     * and block sizes.
     *
     *       |-offx--|
     *       +-------|----|-------+   -+-
     *       |                    |    |
     *       |                    |   offy
     *       |                    |    |
     *       -       +----+       -   -+-
     *       |       |    |       |    |
     *       |       |    |       |  localny
     *       |       |    |       |    |
     *       -       +----+       -   -+-
     *       |                    |
     *       |                    |
     *       +-------|----|-------+
     *               localnx
     *
     *  In this case the blocksizes are (localnx,localny) and the offsets are
     *  (offx,offy) = ((globalnx/npx)*myx, (globalny/npy)*myy)
     */

    offsets[0] = (rundata.globalnx/rundata.npx)*rundata.myx;
    offsets[1] = (rundata.globalny/rundata.npy)*rundata.myy;
    blocks[0]  = rundata.localnx;
    blocks[1]  = rundata.localny;
    strides[0] = strides[1] = 1;
    counts[0] = counts[1] = 1;

    /* select this subset of the density variable's space in the file */
    globaldensspace = H5Dget_space(dens_dataset_id);
    H5Sselect_hyperslab(globaldensspace,H5S_SELECT_SET, offsets, strides, counts, blocks);

    /* For the velocities, it's the same thing but there's a count of two,
     * (one for each velocity component) */

    offsets[1] = (rundata.globalnx/rundata.npx)*rundata.myx;
    offsets[2] = (rundata.globalny/rundata.npy)*rundata.myy;
    blocks[1]  = rundata.localnx;
    blocks[2]  = rundata.localny;
    strides[0] = strides[1] = strides[2] = 1;
    counts[0] = 2; counts[1] = counts[2] = 1;
    offsets[0] = 0;
    blocks[0] = 1;

    globalvelspace = H5Dget_space(vel_dataset_id);
    H5Sselect_hyperslab(globalvelspace,H5S_SELECT_SET, offsets, strides, counts, blocks);

    /* Write the data.  We're writing it from memory, where it is saved 
     * in NATIVE_DOUBLE format */
    status = H5Dwrite(dens_dataset_id, H5T_NATIVE_DOUBLE, loc_dens_dataspace_id, globaldensspace, dist_id, &(dens[0][0]));
    status = H5Dwrite(vel_dataset_id,  H5T_NATIVE_DOUBLE, loc_vel_dataspace_id, globalvelspace, dist_id, &(vel[0][0][0]));

    /* We'll create another group for related info and put some things in there */

    {
        hid_t other_group_id;
        hid_t timestep_id, timestep_space;
        hid_t comptime_id, comptime_space;
        hid_t author_id, author_space, author_type;
        char *authorname="Jonathan Dursi";
        int timestep=13;
        float comptime=81.773f;

        /* create group */
        other_group_id = H5Gcreate(file_id,"/OtherStuff", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

        /* scalar space, data for integer timestep */
        timestep_space = H5Screate(H5S_SCALAR);
        timestep_id = H5Dcreate(other_group_id, "Timestep", H5T_STD_U32LE, 
                                timestep_space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        status = H5Dwrite(timestep_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &timestep);
        H5Dclose(timestep_id);
        H5Sclose(timestep_space);
   
        /* scalar space, data for floating compute time */
        comptime_space = H5Screate(H5S_SCALAR);
        comptime_id = H5Dcreate(other_group_id, "Compute Time", H5T_IEEE_F32LE, 
                                comptime_space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        status = H5Dwrite(comptime_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &comptime);
        H5Dclose(comptime_id);
        H5Sclose(comptime_space);
   
        /* scalar space, data for author name */
        author_space = H5Screate(H5S_SCALAR);
        author_type  = H5Tcopy(H5T_C_S1);   /* copy the character type.. */
        status = H5Tset_size (author_type, strlen(authorname));  /* and make it longer */
        author_id = H5Dcreate(other_group_id, "Simulator Name", author_type, author_space, 
                                 H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); 
        status = H5Dwrite(author_id, author_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, authorname);
        H5Dclose(author_id);
        H5Sclose(author_space);
        H5Tclose(author_type);

        H5Gclose(other_group_id);
    } 


    /* End access to groups & data sets and release resources used by them */
    status = H5Sclose(dens_dataspace_id);
    status = H5Dclose(dens_dataset_id);
    status = H5Sclose(vel_dataspace_id);
    status = H5Dclose(vel_dataset_id);
    status = H5Gclose(arr_group_id);
    status = H5Pclose(fap_id);
    status = H5Pclose(dist_id);
    
    /* Close the file */
    status = H5Fclose(file_id);
    return;
}
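
To make the offset arithmetic in the hyperslab diagram above concrete, here is a tiny standalone example, assuming a 2x2 process grid over an 8x8 global array; all values are illustrative.

#include <stdio.h>

int main(void)
{
    int globalnx = 8, globalny = 8;   /* global array size */
    int npx = 2, npy = 2;             /* process grid */
    int localnx = globalnx / npx;     /* = 4 */
    int localny = globalny / npy;     /* = 4 */
    int myx = 1, myy = 0;             /* this rank's grid coordinates */

    /* same formula as the offsets[] setup in writehdf5file */
    int offx = (globalnx / npx) * myx;   /* = 4 */
    int offy = (globalny / npy) * myy;   /* = 0 */

    printf("block %dx%d at offset (%d,%d)\n", localnx, localny, offx, offy);
    return 0;
}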
Example #27
int
main(int argc, char **argv)
{
   int p, my_rank;

#ifdef USE_MPE
   int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close;
#endif /* USE_MPE */

   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
   MPI_Comm_size(MPI_COMM_WORLD, &p);

#ifdef USE_MPE
   MPE_Init_log();
   s_init = MPE_Log_get_event_number(); 
   e_init = MPE_Log_get_event_number(); 
   s_define = MPE_Log_get_event_number(); 
   e_define = MPE_Log_get_event_number(); 
   s_write = MPE_Log_get_event_number(); 
   e_write = MPE_Log_get_event_number(); 
   s_close = MPE_Log_get_event_number(); 
   e_close = MPE_Log_get_event_number(); 
   MPE_Describe_state(s_init, e_init, "Init", "red");
   MPE_Describe_state(s_define, e_define, "Define", "yellow");
   MPE_Describe_state(s_write, e_write, "Write", "green");
   MPE_Describe_state(s_close, e_close, "Close", "purple");
   MPE_Start_log();
   MPE_Log_event(s_init, 0, "start init");
#endif /* USE_MPE */

   if (!my_rank)
      printf("*** Creating file for parallel I/O read, and rereading it...");
   {
      hid_t fapl_id, fileid, whole_spaceid, dsid, slice_spaceid, whole_spaceid1, xferid;
      hsize_t start[NDIMS], count[NDIMS];
      hsize_t dims[1];
      int data[SC1], data_in[SC1];
      int num_steps;
      double ftime;
      int write_us, read_us;
      int max_write_us, max_read_us;
      float write_rate, read_rate;
      int i, s;

      /* We will write the same slice of random data over and over to
       * fill the file. */
      for (i = 0; i < SC1; i++)
	 data[i] = rand();
      
#ifdef USE_MPE
      MPE_Log_event(e_init, 0, "end init");
      MPE_Log_event(s_define, 0, "start define file");
#endif /* USE_MPE */

      /* Create file. */
      if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) ERR;
      if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) ERR;
      if ((fileid = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, 
			      fapl_id)) < 0) ERR;

      /* Create a space to deal with one slice in memory. */
      dims[0] = SC1;
      if ((slice_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR;

      /* Create a space to write all slices. */
      dims[0] = DIM2_LEN;
      if ((whole_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR;

      /* Create dataset. */
      if ((dsid = H5Dcreate1(fileid, VAR_NAME, H5T_NATIVE_INT, 
      whole_spaceid, H5P_DEFAULT)) < 0) ERR;

      /* Use collective write operations. */
      if ((xferid = H5Pcreate(H5P_DATASET_XFER)) < 0) ERR;
      if (H5Pset_dxpl_mpio(xferid, H5FD_MPIO_COLLECTIVE) < 0) ERR;

#ifdef USE_MPE
      MPE_Log_event(e_define, 0, "end define file");
      if (my_rank)
	 sleep(my_rank);
#endif /* USE_MPE */

      /* Write the data in num_step steps. */
      ftime = MPI_Wtime();      
      num_steps = (DIM2_LEN/SC1) / p;
      for (s = 0; s < num_steps; s++)
      {
#ifdef USE_MPE
	 MPE_Log_event(s_write, 0, "start write slab");
#endif /* USE_MPE */

	 /* Select hyperslab for write of one slice. */
	 start[0] = s * SC1 * p + my_rank * SC1;
	 count[0] = SC1;
	 if (H5Sselect_hyperslab(whole_spaceid, H5S_SELECT_SET, 
	 start, NULL, count, NULL) < 0) ERR;
	 
	 if (H5Dwrite(dsid, H5T_NATIVE_INT, slice_spaceid, whole_spaceid, 
	 xferid, data) < 0) ERR;

#ifdef USE_MPE
	 MPE_Log_event(e_write, 0, "end write file");
#endif /* USE_MPE */
      }
      write_us = (MPI_Wtime() - ftime) * MILLION;
      MPI_Reduce(&write_us, &max_write_us, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
      if (!my_rank)
      {
	 write_rate = (float)(DIM2_LEN * sizeof(int))/(float)max_write_us;      
	 printf("\np=%d, write_rate=%g", p, write_rate);
      }
      
#ifdef USE_MPE
      MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

      /* Close. These collective operations will allow every process
       * to catch up. */
      if (H5Dclose(dsid) < 0 ||
      H5Sclose(whole_spaceid) < 0 ||
      H5Sclose(slice_spaceid) < 0 ||
      H5Pclose(fapl_id) < 0 ||
      H5Fclose(fileid) < 0)
	 ERR;

#ifdef USE_MPE
      MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

      /* Open the file. */
      if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) ERR;
      if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) ERR;


      if (H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) ERR;
      if ((fileid = H5Fopen(FILE_NAME, H5F_ACC_RDONLY, fapl_id)) < 0) ERR;

      /* Create a space to deal with one slice in memory. */
      dims[0] = SC1;
      if ((slice_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR;

      /* Open the dataset. */
      if ((dsid = H5Dopen(fileid, VAR_NAME)) < 0) ERR;
      if ((whole_spaceid1 = H5Dget_space(dsid)) < 0) ERR;

      ftime = MPI_Wtime();      
      
      /* Read the data, a slice at a time. */
      for (s = 0; s < num_steps; s++)
      {
	 /* Select hyperslab for read of one slice. */
	 start[0] = s * SC1 * p + my_rank * SC1;
	 count[0] = SC1;
	 if (H5Sselect_hyperslab(whole_spaceid1, H5S_SELECT_SET, 
	 start, NULL, count, NULL) < 0) 
	 {
	    ERR;
	    return 2;
	 }

	 if (H5Dread(dsid, H5T_NATIVE_INT, slice_spaceid, whole_spaceid1, 
	 H5P_DEFAULT, data_in) < 0) 
	 {
	    ERR;
	    return 2;
	 }

/* 	 /\* Check the slice of data. *\/ */
/* 	 for (i = 0; i < SC1; i++) */
/* 	    if (data[i] != data_in[i])  */
/* 	    { */
/* 	       ERR; */
/* 	       return 2; */
/* 	    } */
      }
      read_us = (MPI_Wtime() - ftime) * MILLION;
      MPI_Reduce(&read_us, &max_read_us, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
      if (!my_rank)
      {
	 read_rate = (float)(DIM2_LEN * sizeof(int))/(float)max_read_us;      
	 printf(", read_rate=%g\n", read_rate);
      }
      
      /* Close down. */
      if (H5Dclose(dsid) < 0 ||
      H5Sclose(slice_spaceid) < 0 ||
      H5Sclose(whole_spaceid1) < 0 ||
      H5Pclose(fapl_id) < 0 ||
      H5Fclose(fileid) < 0)
	 ERR;
   }
   if (!my_rank)
      SUMMARIZE_ERR;

   MPI_Finalize();

   if (!my_rank)
      FINAL_RESULTS;
   return 0;
}
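
The write_rate/read_rate figures above are bytes divided by elapsed microseconds, which is numerically the same as (decimal) megabytes per second. A small illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
    /* illustrative numbers: 64 MB written in 2 seconds */
    long long bytes = 64LL * 1000 * 1000;
    int elapsed_us = 2 * 1000000;
    float rate = (float)bytes / (float)elapsed_us;   /* bytes/us == MB/s */
    printf("rate = %g MB/s\n", rate);
    return 0;
}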
Example #28
int main(int argc, char* argv[])
{
    char c;
    int ix, iy, iz, i;
    MPI_Comm mpicomm;
    MPI_Info mpiinfo;
    int mpirank;
    int mpisize;
    double *data3d, *data2d, *x, *y, *z, t;
    int localx, localy, localwidth, localheight;
    int maxwidth, maxheight;
    const char* filename = "output.h5";
    hid_t fileid, plist, filespace, memspace, dimvar, varid;
    hsize_t size[NDIMS], maxsize[NDIMS], chunksize[NDIMS];
    hsize_t start[NDIMS], count[NDIMS];
    char varname[32];

    mpicomm = MPI_COMM_WORLD;
    mpiinfo = MPI_INFO_NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(mpicomm, &mpisize);
    MPI_Comm_rank(mpicomm, &mpirank);

    if(! mpirank) printf("Creating some data...\n");

    // Distribute our data values in a pism-y way
    GetLocalBounds(XSIZE, YSIZE, mpirank, mpisize,
                   &localx, &localy, &localwidth, &localheight);
    printf("Rank%02d: x=%d, y=%d, width=%d, height=%d\n",
           mpirank, localx, localy, localwidth, localheight);

    data2d = (double*)malloc(localwidth * localheight * sizeof(double));
    data3d = (double*)malloc(localwidth * localheight * ZSIZE * sizeof(double));
    x = (double*)malloc(localwidth * sizeof(double));
    y = (double*)malloc(localheight * sizeof(double));
    z = (double*)malloc(ZSIZE * sizeof(double));
    t = 0.0;
    for(ix = 0; ix < localwidth; ix++)
    {
        x[ix] = ix + localx;
        for(iy = 0; iy < localheight; iy++)
        {
            y[iy] = iy + localy;
            data2d[ix*localheight + iy] = (ix+localx)*localheight + iy+localy;
            for(iz = 0; iz < ZSIZE; iz++)
            {
                z[iz] = iz;
                data3d[ix*localheight*ZSIZE + iy*ZSIZE + iz] =
                    (ix+localx)*YSIZE*ZSIZE + (iy+localy)*ZSIZE + iz;
            }
        }
    }

    if(! mpirank) printf("Creating HDF5 file...\n");

    plist = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(plist, mpicomm, mpiinfo);

    // TODO: this seems like a good place to put optimizations, and indeed
    // PISM is adding several additional properties, like setting block sizes,
    // cache eviction policies, fs striping parameters, etc.

    fileid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
    H5Pclose(plist);

    if(! mpirank) printf("Setting up dimensions...\n");

    if(! mpirank) printf("Creating time dimension...\n");

    // Define the time dimension
    size[0] = 1;
    maxsize[0] = H5S_UNLIMITED;
    chunksize[0] = 1;

    filespace = H5Screate_simple(1, size, maxsize);
    plist = H5Pcreate(H5P_DATASET_CREATE);
    // It is strictly required to set a chunk size when using the
    // low-level API: contiguous datasets are not allowed to use
    // the unlimited dimension.
    H5Pset_chunk(plist, 1, chunksize);
    dimvar = H5Dcreate(fileid, TNAME, H5T_NATIVE_DOUBLE, filespace,
                       H5P_DEFAULT, plist, H5P_DEFAULT);
    H5Pclose(plist);
    H5DSset_scale(dimvar, TNAME);
    H5Dclose(dimvar);
    H5Sclose(filespace);

#ifdef OLD_WRITE_PATTERN
    if(! mpirank) printf("Writing time dimension...\n");
    dimvar = H5Dopen(fileid, TNAME, H5P_DEFAULT);
    filespace = H5Dget_space(dimvar);
    memspace = H5Screate_simple(1, size, 0);
    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE); // TODO: Pism does this, but comments suggest it is questionable
    start[0] = 0;
    count[0] = 1;
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, &t);
    H5Pclose(plist);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Dclose(dimvar);
#endif

    if(! mpirank) printf("Creating x dimension...\n");

    size[0] = XSIZE;

    filespace = H5Screate_simple(1, size, 0);
    dimvar = H5Dcreate(fileid, XNAME, H5T_NATIVE_DOUBLE, filespace,
                       H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5DSset_scale(dimvar, XNAME);
    H5Dclose(dimvar);
    H5Sclose(filespace);

#ifdef OLD_WRITE_PATTERN
    if(! mpirank) printf("Writing x dimension...\n");
    dimvar = H5Dopen(fileid, XNAME, H5P_DEFAULT);
    filespace = H5Dget_space(dimvar);
    memspace = H5Screate_simple(1, size, 0);
    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
    start[0] = 0;
    count[0] = XSIZE;
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, x);
    H5Pclose(plist);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Dclose(dimvar);
#endif

    if(! mpirank) printf("Creating y dimension...\n");

    size[0] = YSIZE;

    filespace = H5Screate_simple(1, size, 0);
    dimvar = H5Dcreate(fileid, YNAME, H5T_NATIVE_DOUBLE, filespace,
                       H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5DSset_scale(dimvar, YNAME);
    H5Dclose(dimvar);
    H5Sclose(filespace);

#ifdef OLD_WRITE_PATTERN
    if(! mpirank) printf("Writing y dimension...\n");
    dimvar = H5Dopen(fileid, YNAME, H5P_DEFAULT);
    filespace = H5Dget_space(dimvar);
    memspace = H5Screate_simple(1, size, 0);
    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
    start[0] = 0;
    count[0] = YSIZE;
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, y);
    H5Pclose(plist);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Dclose(dimvar);
#endif

    if(! mpirank) printf("Creating z dimension...\n");

    size[0] = ZSIZE;

    filespace = H5Screate_simple(1, size, 0);
    dimvar = H5Dcreate(fileid, ZNAME, H5T_NATIVE_DOUBLE, filespace,
                       H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5DSset_scale(dimvar, ZNAME);
    H5Dclose(dimvar);
    H5Sclose(filespace);

#ifdef OLD_WRITE_PATTERN
    if(! mpirank) printf("Writing z dimension...\n");
    dimvar = H5Dopen(fileid, ZNAME, H5P_DEFAULT);
    filespace = H5Dget_space(dimvar);
    memspace = H5Screate_simple(1, size, 0);
    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
    start[0] = 0;
    count[0] = ZSIZE;
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, z);
    H5Pclose(plist);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Dclose(dimvar);
#endif

    if(! mpirank) printf("Defining variables...\n");

    MPI_Allreduce(&localwidth, &maxwidth, 1, MPI_INT, MPI_MAX, mpicomm);
    MPI_Allreduce(&localheight, &maxheight, 1, MPI_INT, MPI_MAX, mpicomm);

    size[TDIM] = 1;
    size[XDIM] = XSIZE;
    size[YDIM] = YSIZE;
    size[ZDIM] = ZSIZE;

    maxsize[TDIM] = H5S_UNLIMITED;
    maxsize[XDIM] = XSIZE;
    maxsize[YDIM] = YSIZE;
    maxsize[ZDIM] = ZSIZE;

    chunksize[TDIM] = 1;
    chunksize[XDIM] = maxwidth;
    chunksize[YDIM] = maxheight;
    chunksize[ZDIM] = ZSIZE;  // Looks like pism might use 1 here...

    for(i = 0; i < NVARS; i++)
    {
        sprintf(varname, "var3d-%02d", i);
        plist = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(plist, NDIMS, chunksize);
        filespace = H5Screate_simple(NDIMS, size, maxsize);
        varid = H5Dcreate(fileid, varname, H5T_NATIVE_DOUBLE, filespace,
                          H5P_DEFAULT, plist, H5P_DEFAULT);
        H5Pclose(plist);
        H5Sclose(filespace);
        H5Dclose(varid);

        sprintf(varname, "var2d-%02d", i);
        plist = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(plist, NDIMS-1, chunksize);
        filespace = H5Screate_simple(NDIMS-1, size, maxsize);
        varid = H5Dcreate(fileid, varname, H5T_NATIVE_DOUBLE, filespace,
                          H5P_DEFAULT, plist, H5P_DEFAULT);
        H5Pclose(plist);
        H5Sclose(filespace);
        H5Dclose(varid);
    }

#ifndef OLD_WRITE_PATTERN
    if(! mpirank) printf("Writing time dimension...\n");
    start[0] = 0;
    count[0] = 1;
    dimvar = H5Dopen(fileid, TNAME, H5P_DEFAULT);
    filespace = H5Dget_space(dimvar);
    memspace = H5Screate_simple(1, count, 0);
    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE); // TODO: Pism does this, but comments suggest it is questionable
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, &t);
    H5Pclose(plist);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Dclose(dimvar);

    if(! mpirank) printf("Writing x dimension...\n");
    start[0] = 0;
    count[0] = XSIZE;
    dimvar = H5Dopen(fileid, XNAME, H5P_DEFAULT);
    filespace = H5Dget_space(dimvar);
    memspace = H5Screate_simple(1, count, 0);
    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, x);
    H5Pclose(plist);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Dclose(dimvar);

    if(! mpirank) printf("Writing y dimension...\n");
    start[0] = 0;
    count[0] = YSIZE;
    dimvar = H5Dopen(fileid, YNAME, H5P_DEFAULT);
    filespace = H5Dget_space(dimvar);
    memspace = H5Screate_simple(1, count, 0);
    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, y);
    H5Pclose(plist);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Dclose(dimvar);

    if(! mpirank) printf("Writing z dimension...\n");
    start[0] = 0;
    count[0] = ZSIZE;
    dimvar = H5Dopen(fileid, ZNAME, H5P_DEFAULT);
    filespace = H5Dget_space(dimvar);
    memspace = H5Screate_simple(1, count, 0);
    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, z);
    H5Pclose(plist);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Dclose(dimvar);
#endif

    if(! mpirank) printf("Writing variable data...\n");

    for(i = 0; i < NVARS; i++)
    {
        sprintf(varname, "var3d-%02d", i);
        if(! mpirank) printf("Writing %s...\n", varname);
        start[TDIM] = 0;
        start[XDIM] = localx;
        start[YDIM] = localy;
        start[ZDIM] = 0;
        count[TDIM] = 1;
        count[XDIM] = localwidth;
        count[YDIM] = localheight;
        count[ZDIM] = ZSIZE;
        varid = H5Dopen(fileid, varname, H5P_DEFAULT);
        filespace = H5Dget_space(varid);
        memspace = H5Screate_simple(NDIMS, count, 0);
        plist = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
        H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
        H5Dwrite(varid, H5T_NATIVE_DOUBLE, memspace, filespace, plist, data3d);
        H5Pclose(plist);
        H5Sclose(filespace);
        H5Sclose(memspace);
        H5Dclose(varid);

        sprintf(varname, "var2d-%02d", i);
        if(! mpirank) printf("Writing %s...\n", varname);
        start[TDIM] = 0;
        start[XDIM] = localx;
        start[YDIM] = localy;
        count[TDIM] = 1;
        count[XDIM] = localwidth;
        count[YDIM] = localheight;
        varid = H5Dopen(fileid, varname, H5P_DEFAULT);
        filespace = H5Dget_space(varid);
        memspace = H5Screate_simple(NDIMS-1, count, 0);
        plist = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
        H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
        H5Dwrite(varid, H5T_NATIVE_DOUBLE, memspace, filespace, plist, data2d);
        H5Pclose(plist);
        H5Sclose(filespace);
        H5Sclose(memspace);
        H5Dclose(varid);
    }

    if(! mpirank) printf("Closing file...\n");

    H5Fclose(fileid);

    if(! mpirank) printf("Done.\n");

    free(data2d);
    free(data3d);
    free(x);
    free(y);
    free(z);

    MPI_Finalize();

    return 0;
}
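
Example #28 marks t, x, y and z as dimension scales with H5DSset_scale but never attaches them to the var2d/var3d datasets. A minimal sketch of that follow-up step is below, reusing the example's file id and name macros; the helper attach_scales is an assumption, not part of the original.

#include <hdf5.h>
#include <hdf5_hl.h>

/* Hypothetical helper: attach the four coordinate scales to one of the
 * 4-D variables created above. */
static void attach_scales(hid_t fileid, const char *varname)
{
    const char *scales[] = { TNAME, XNAME, YNAME, ZNAME };
    hid_t var = H5Dopen(fileid, varname, H5P_DEFAULT);
    for (unsigned dim = 0; dim < 4; dim++) {
        hid_t scale = H5Dopen(fileid, scales[dim], H5P_DEFAULT);
        H5DSattach_scale(var, scale, dim);   /* link scale to dimension dim */
        H5Dclose(scale);
    }
    H5Dclose(var);
}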
Example #29
PetscErrorCode ISView_General_HDF5(IS is, PetscViewer viewer)
{
  hid_t           filespace;  /* file dataspace identifier */
  hid_t           chunkspace; /* chunk dataset property identifier */
  hid_t           plist_id;   /* property list identifier */
  hid_t           dset_id;    /* dataset identifier */
  hid_t           memspace;   /* memory dataspace identifier */
  hid_t           inttype;    /* int type (H5T_NATIVE_INT or H5T_NATIVE_LLONG) */
  hid_t           file_id, group;
  herr_t          status;
  hsize_t         dim, maxDims[3], dims[3], chunkDims[3], count[3],offset[3];
  PetscInt        bs, N, n, timestep, low;
  const PetscInt *ind;
  const char     *isname;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = ISGetBlockSize(is,&bs);CHKERRQ(ierr);
  ierr = PetscViewerHDF5OpenGroup(viewer, &file_id, &group);CHKERRQ(ierr);
  ierr = PetscViewerHDF5GetTimestep(viewer, &timestep);CHKERRQ(ierr);

  /* Create the dataspace for the dataset.
   *
   * dims - holds the current dimensions of the dataset
   *
   * maxDims - holds the maximum dimensions of the dataset (unlimited
   * for the number of time steps with the current dimensions for the
   * other dimensions; so only additional time steps can be added).
   *
   * chunkDims - holds the size of a single time step (required to
   * permit extending dataset).
   */
  dim = 0;
  if (timestep >= 0) {
    dims[dim]      = timestep+1;
    maxDims[dim]   = H5S_UNLIMITED;
    chunkDims[dim] = 1;
    ++dim;
  }
  ierr = ISGetSize(is, &N);CHKERRQ(ierr);
  ierr = ISGetLocalSize(is, &n);CHKERRQ(ierr);
  ierr = PetscHDF5IntCast(N/bs,dims + dim);CHKERRQ(ierr);

  maxDims[dim]   = dims[dim];
  chunkDims[dim] = dims[dim];
  ++dim;
  if (bs >= 1) {
    dims[dim]      = bs;
    maxDims[dim]   = dims[dim];
    chunkDims[dim] = dims[dim];
    ++dim;
  }
  filespace = H5Screate_simple(dim, dims, maxDims);
  if (filespace == -1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Cannot H5Screate_simple()");

#if defined(PETSC_USE_64BIT_INDICES)
  inttype = H5T_NATIVE_LLONG;
#else
  inttype = H5T_NATIVE_INT;
#endif

  /* Create the dataset with default properties and close filespace */
  ierr = PetscObjectGetName((PetscObject) is, &isname);CHKERRQ(ierr);
  if (!H5Lexists(group, isname, H5P_DEFAULT)) {
    /* Create chunk */
    chunkspace = H5Pcreate(H5P_DATASET_CREATE);
    if (chunkspace == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Pcreate()");
    status = H5Pset_chunk(chunkspace, dim, chunkDims);CHKERRQ(status);

#if (H5_VERS_MAJOR * 10000 + H5_VERS_MINOR * 100 + H5_VERS_RELEASE >= 10800)
    dset_id = H5Dcreate2(group, isname, inttype, filespace, H5P_DEFAULT, chunkspace, H5P_DEFAULT);
#else
    dset_id = H5Dcreate(group, isname, inttype, filespace, H5P_DEFAULT);
#endif
    if (dset_id == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Dcreate2()");
    status = H5Pclose(chunkspace);CHKERRQ(status);
  } else {
    dset_id = H5Dopen2(group, isname, H5P_DEFAULT);
    status  = H5Dset_extent(dset_id, dims);CHKERRQ(status);
  }
  status = H5Sclose(filespace);CHKERRQ(status);

  /* Each process defines a dataset and writes it to the hyperslab in the file */
  dim = 0;
  if (timestep >= 0) {
    count[dim] = 1;
    ++dim;
  }
  ierr = PetscHDF5IntCast(n/bs,count + dim);CHKERRQ(ierr);
  ++dim;
  if (bs >= 1) {
    count[dim] = bs;
    ++dim;
  }
  if (n > 0) {
    memspace = H5Screate_simple(dim, count, NULL);
    if (memspace == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Screate_simple()");
  } else {
    /* Can't create dataspace with zero for any dimension, so create null dataspace. */
    memspace = H5Screate(H5S_NULL);
    if (memspace == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Screate()");
  }

  /* Select hyperslab in the file */
  ierr = PetscLayoutGetRange(is->map, &low, NULL);CHKERRQ(ierr);
  dim  = 0;
  if (timestep >= 0) {
    offset[dim] = timestep;
    ++dim;
  }
  ierr = PetscHDF5IntCast(low/bs,offset + dim);CHKERRQ(ierr);
  ++dim;
  if (bs >= 1) {
    offset[dim] = 0;
    ++dim;
  }
  if (n > 0) {
    filespace = H5Dget_space(dset_id);
    if (filespace == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Dget_space()");
    status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);CHKERRQ(status);
  } else {
    /* Create null filespace to match null memspace. */
    filespace = H5Screate(H5S_NULL);
    if (filespace == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Screate(H5S_NULL)");
  }

  /* Create property list for collective dataset write */
  plist_id = H5Pcreate(H5P_DATASET_XFER);
  if (plist_id == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Pcreate()");
#if defined(PETSC_HAVE_H5PSET_FAPL_MPIO)
  status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);CHKERRQ(status);
#endif
  /* To write dataset independently use H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT) */

  ierr   = ISGetIndices(is, &ind);CHKERRQ(ierr);
  status = H5Dwrite(dset_id, inttype, memspace, filespace, plist_id, ind);CHKERRQ(status);
  status = H5Fflush(file_id, H5F_SCOPE_GLOBAL);CHKERRQ(status);
  ierr   = ISRestoreIndices(is, &ind);CHKERRQ(ierr);

  /* Close/release resources */
  if (group != file_id) {status = H5Gclose(group);CHKERRQ(status);}
  status = H5Pclose(plist_id);CHKERRQ(status);
  status = H5Sclose(filespace);CHKERRQ(status);
  status = H5Sclose(memspace);CHKERRQ(status);
  status = H5Dclose(dset_id);CHKERRQ(status);
  ierr = PetscInfo1(is, "Wrote IS object with name %s\n", isname);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
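
The dims/maxDims/chunkDims comment in ISView_General_HDF5 describes a dataset that can grow only along its unlimited time dimension. A minimal, PETSc-free sketch of the matching extend-and-append step for a chunked rank-2 integer dataset follows; the helper name is an assumption.

#include <hdf5.h>

/* Hypothetical helper: grow a chunked dataset of shape (t, n) by one
 * time step along its unlimited first dimension and write the new row. */
static herr_t append_timestep(hid_t dset, hsize_t n, const int *row)
{
    hid_t space = H5Dget_space(dset);
    hsize_t dims[2];
    H5Sget_simple_extent_dims(space, dims, NULL);
    H5Sclose(space);

    hsize_t newdims[2] = { dims[0] + 1, n };
    if (H5Dset_extent(dset, newdims) < 0) return -1;   /* grow by one step */

    hsize_t offset[2] = { dims[0], 0 };   /* the newly added row */
    hsize_t count[2]  = { 1, n };
    hid_t filespace = H5Dget_space(dset);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);
    hid_t memspace = H5Screate_simple(2, count, NULL);
    herr_t status = H5Dwrite(dset, H5T_NATIVE_INT, memspace, filespace,
                             H5P_DEFAULT, row);
    H5Sclose(memspace);
    H5Sclose(filespace);
    return status;
}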
Example #30
void
phdf5writeAll(char *filename)
{
    hid_t fid1;			/* HDF5 file IDs */
    hid_t acc_tpl1;		/* File access templates */
    hid_t xfer_plist;		/* Dataset transfer properties list */
    hid_t sid1;   		/* Dataspace ID */
    hid_t file_dataspace;	/* File dataspace ID */
    hid_t mem_dataspace;	/* memory dataspace ID */
    hid_t dataset1, dataset2;	/* Dataset ID */
    hsize_t dims1[SPACE1_RANK] =
	{SPACE1_DIM1,SPACE1_DIM2};	/* dataspace dim sizes */
    DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2];	/* data buffer */

    hsize_t start[SPACE1_RANK];			/* for hyperslab setting */
    hsize_t count[SPACE1_RANK], stride[SPACE1_RANK];	/* for hyperslab setting */

    herr_t ret;         	/* Generic return value */

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;


    /* in support of H5Tuner Test */
    MPI_Comm comm_test = MPI_COMM_WORLD;
    MPI_Info info_test;
    int i_test, nkeys_test, flag_test;
    char key[MPI_MAX_INFO_KEY], value[MPI_MAX_INFO_VAL+1];
    char *libtuner_file = getenv("LD_PRELOAD");
    /* in support of H5Tuner Test */

    if (verbose)
        printf("Collective write test on file %s\n", filename);

    /* -------------------
     * START AN HDF5 FILE
     * -------------------*/
    /* setup file access template with parallel IO access. */
    acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS);
    assert(acc_tpl1 != FAIL);
    MESG("H5Pcreate access succeed");
    /* set Parallel access with communicator */
    ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
    assert(ret != FAIL);
    MESG("H5Pset_fapl_mpio succeed");

    /* create the file collectively */
    fid1=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl1);
    assert(fid1 != FAIL);
    MESG("H5Fcreate succeed");

    // ------------------------------------------------
    // H5Tuner tests
    // ------------------------------------------------

    // Retrieve MPI parameters set via the H5Tuner
    printf("\n\n--------------------------------------------------\n");
    if ((libtuner_file != NULL) && (strlen(libtuner_file) > 1)) {
        printf("Version of the H5Tuner loaded: \n%s\n", libtuner_file);
    }
    else {
        printf("No H5Tuner currently loaded.\n");
    }
    printf("--------------------------------------------------\n");

    // Retrieve HDF5 threshold and alignment
    hsize_t alignment[2];
    size_t sieve_buf_size;
    alignment[0] = 0; // threshold value
    alignment[1] = 0; // alignment value
    int ierr = H5Pget_alignment(acc_tpl1, &alignment[0], &alignment[1]);
    printf("\n\n--------------------------------------------------\n");
    printf("Testing values for Threshold and Alignment\n");
    printf("--------------------------------------------------\n");
    printf("Test value set to:88 \nRetrieved Threshold=%lu\n", alignment[0]);
    printf("Test value set to:44 \nRetrieved Alignment=%lu\n", alignment[1]);
    // Check threshold
    if (alignment[0] == 88) {
        printf("PASSED: Threshold Test\n");
    }
    else {
        printf("FAILED: Threshold Test\n");
    }
    // Check alignment
    if (alignment[1] == 44) {
        printf("PASSED: Alignment Test\n");
    }
    else {
        printf("FAILED: Alignment Test\n");
    }
    printf("--------------------------------------------------\n\n");

    // Retrieve HDF5 sieve buffer size
    ierr = H5Pget_sieve_buf_size(acc_tpl1, &sieve_buf_size);
    printf("\n\n--------------------------------------------------\n");
    printf("Testing values for Sieve Buffer Size\n");
    printf("--------------------------------------------------\n");
    printf("Test value set to:77 \nRetrieved Sieve Buffer Size=%lu\n", sieve_buf_size);
    // Check sieve buffer size
    if ((int) sieve_buf_size == 77) {
        printf("PASSED: Sieve Buffer Size Test\n");
    }
    else {
        printf("FAILED: Sieve Buffer Size Test\n");
    }
    printf("--------------------------------------------------\n\n");

    // Retrieve MPI parameters set via the H5Tuner
    MPI_Info_create(&info_test);

    ret = H5Pget_fapl_mpio(acc_tpl1, &comm_test, &info_test);
    assert(ret != FAIL);
    MESG("H5Pget_fapl_mpio succeed");

    printf("-------------------------------------------------\n");
    printf("Testing parameters values via MPI_Info\n");
    printf("-------------------------------------------------\n");
    if (info_test == MPI_INFO_NULL) {
        printf("MPI info object is null. No keys are available.\n");
    }
    else {
        MPI_Info_get_nkeys(info_test, &nkeys_test);
        //printf("MPI info has %d keys\n", nkeys_test);
        if (nkeys_test <= 0) {
            printf("MPI info has no keys\n");
        }
        else {
            printf("MPI info has %d keys\n", nkeys_test);
            for (i_test = 0; i_test < nkeys_test; i_test++) {
                MPI_Info_get_nthkey(info_test, i_test, key);
                MPI_Info_get(info_test, key, MPI_MAX_INFO_VAL, value, &flag_test);
                printf("Retrieved value for key %s is %s\n", key, value);
                //fflush(stdout);
            }
        }
        printf("-------------------------------------------------\n");
        MPI_Info_free(&info_test);
    }
    // end of H5Tuner tests
    // ---------------------------------------


    /* Release file-access template */
    ret=H5Pclose(acc_tpl1);
    assert(ret != FAIL);


    /* --------------------------
     * Define the dimensions of the overall datasets
     * and create the dataset
     * ------------------------- */
    /* setup dimensionality object */
    sid1 = H5Screate_simple (SPACE1_RANK, dims1, NULL);
    assert (sid1 != FAIL);
    MESG("H5Screate_simple succeed");


    /* create a dataset collectively */
    dataset1 = H5Dcreate2(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    assert(dataset1 != FAIL);
    MESG("H5Dcreate2 succeed");

    /* create another dataset collectively */
    dataset2 = H5Dcreate2(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    assert(dataset2 != FAIL);
    MESG("H5Dcreate2 2 succeed");

    /*
     * Set up dimensions of the slab this process accesses.
     */

    /* Dataset1: each process takes a block of rows. */
    slab_set(start, count, stride, BYROW);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
               (unsigned long)start[0], (unsigned long)start[1],
               (unsigned long)count[0], (unsigned long)count[1],
               (unsigned long)(count[0]*count[1]));

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill the local slab with some trivial data */
    dataset_fill(start, count, stride, &data_array1[0][0]);
    MESG("data_array initialized");
    if (verbose){
        MESG("data_array created");
        dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* set up the collective transfer properties list */
    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
    assert(xfer_plist != FAIL);
    ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    assert(ret != FAIL);
    MESG("H5Pcreate xfer succeed");

    /* write data collectively */
    ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    xfer_plist, data_array1);
    assert(ret != FAIL);
    MESG("H5Dwrite succeed");

    /* release all temporary handles. */
    /* Could have used them for dataset2 but it is cleaner */
    /* to create them again.*/
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);
    H5Pclose(xfer_plist);

    /* Dataset2: each process takes a block of columns. */
    slab_set(start, count, stride, BYCOL);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
               (unsigned long)start[0], (unsigned long)start[1],
               (unsigned long)count[0], (unsigned long)count[1],
               (unsigned long)(count[0]*count[1]));

    /* put some trivial data in the data_array */
    dataset_fill(start, count, stride, &data_array1[0][0]);
    MESG("data_array initialized");
    if (verbose){
        MESG("data_array created");
        dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill the local slab with some trivial data */
    dataset_fill(start, count, stride, &data_array1[0][0]);
    MESG("data_array initialized");
    if (verbose){
        MESG("data_array created");
        dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* set up the collective transfer properties list */
    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
    assert(xfer_plist != FAIL);
    ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    assert(ret != FAIL);
    MESG("H5Pcreate xfer succeed");

    /* write data collectively */
    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    xfer_plist, data_array1);
    assert(ret != FAIL);
    MESG("H5Dwrite succeed");

    /* release all temporary handles. */
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);
    H5Pclose(xfer_plist);


    /*
     * All writes completed.  Close datasets collectively
     */
    ret=H5Dclose(dataset1);
    assert(ret != FAIL);
    MESG("H5Dclose1 succeed");
    ret=H5Dclose(dataset2);
    assert(ret != FAIL);
    MESG("H5Dclose2 succeed");

    /* release all IDs created */
    H5Sclose(sid1);

    /* close the file collectively */
    H5Fclose(fid1);
}
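
The H5Tuner block in phdf5writeAll only reads tuning parameters back from the file-access property list. For comparison, a minimal sketch of setting the same properties directly; the values 88, 44 and 77 are the test values checked above, and the helper name is an assumption.

#include <hdf5.h>

/* Hypothetical counterpart to the H5Tuner checks: set the properties
 * directly on a file-access property list. */
static hid_t make_tuned_fapl(void)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_alignment(fapl, 88, 44);      /* threshold, alignment (bytes) */
    H5Pset_sieve_buf_size(fapl, 77);     /* sieve buffer size (bytes) */
    return fapl;
}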