template<typename T>
void BufferedHDF2DArray<T>::Create(H5::CommonFG *_container, std::string _datasetName, unsigned int _rowLength) {
    container   = _container;
    datasetName = _datasetName;
    rowLength   = _rowLength;
    //
    // Make life easy if the buffer is too small to fit a row --
    // resize it so that rows may be copied and written out in an
    // atomic unit.
    //
    if (this->bufferSize < rowLength) {
        // When the buffer size is greater than 0, the write buffer
        // should exist.
        if (this->bufferSize > 0) {
            assert(this->writeBuffer != NULL);
            delete[] this->writeBuffer;
        }
        this->writeBuffer = new T[rowLength];
        this->bufferSize = rowLength;
    }

    hsize_t dataSize[2]    = {0, hsize_t(rowLength)};
    hsize_t maxDataSize[2] = {H5S_UNLIMITED, hsize_t(rowLength)};
    H5::DataSpace fileSpace(2, dataSize, maxDataSize);
    H5::DSetCreatPropList cparms;

    /*
     * For some reason, chunking must be enabled when creating a dataset
     * that has an unlimited dimension.  Of course, this is not
     * mentioned in the HDF5 C++ documentation, because that
     * documentation was written for people who enjoy learning how to
     * use an API by reading comments in source code.
     */
    hsize_t chunkDims[2] = {16384, hsize_t(rowLength)};
    cparms.setChunk( 2, chunkDims );
    TypedCreate(fileSpace, cparms);
    fileSpace.close();

    //
    // Set some flags that indicate this dataset is ready for writing.
    //
    fileDataSpaceInitialized = true;
    isInitialized = true;
}
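
A minimal usage sketch (not from the original source): it assumes the surrounding BufferedHDF2DArray<T> class exposes this Create() plus a row-appending WriteRow() helper, and that H5::H5File derives from H5::CommonFG in the HDF5 C++ API version in use.

void ExampleCreate2DArray(H5::H5File &file) {
    // Create a dataset of ints with 4 values per row and an unlimited
    // number of rows, then append one row.  WriteRow() is an assumed name
    // for the class's row-appending helper.
    BufferedHDF2DArray<int> scores;
    scores.Create(&file, "alignmentScores", 4);
    int row[4] = {1, 2, 3, 4};
    scores.WriteRow(row, 4);
}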
Example #2
//
// This handles creation of all non-std::string types.  A specialization
// for std::strings is provided below.
//
void Create(H5::H5Location &object, const std::string &atomName) {
    hsize_t defaultDims[] = {1};
    H5::DataSpace defaultDataSpace(1, defaultDims);
    TypedCreate(object, atomName, defaultDataSpace);
}
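
A minimal usage sketch (not from the original source): it assumes this Create() belongs to an HDFAtom<T>-style wrapper that also provides a Write() for storing the value; both the wrapper name and Write() are assumptions.

void ExampleCreateAtom(H5::Group &group) {
    // Create a one-element attribute on a group and store a value in it.
    // HDFAtom and Write() are assumed names for the surrounding wrapper.
    HDFAtom<unsigned int> version;
    version.Create(group, "schemaVersion");
    version.Write(2u);
}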