Example #1
    void DCAttribute::writeAttribute(const char* name, const hid_t type, hid_t parent,
                                     uint32_t ndims, const Dimensions dims, const void* src)
    throw (DCException)
    {
        hid_t attr = -1;
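        // reuse the attribute if it already exists, otherwise create it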
        if (H5Aexists(parent, name))
            attr = H5Aopen(parent, name, H5P_DEFAULT);
        else
        {
            hid_t dsp;
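            // a 1D, single-element attribute is stored as a true HDF5 scalar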
            if( ndims == 1 && dims.getScalarSize() == 1 )
                dsp = H5Screate(H5S_SCALAR);
            else
                dsp = H5Screate_simple( ndims, dims.getPointer(), dims.getPointer() );

            attr = H5Acreate(parent, name, type, dsp, H5P_DEFAULT, H5P_DEFAULT);
            H5Sclose(dsp);
        }

        if (attr < 0)
            throw DCException(getExceptionString(name, "Attribute could not be opened or created"));

        if (H5Awrite(attr, type, src) < 0)
        {
            H5Aclose(attr);
            throw DCException(getExceptionString(name, "Attribute could not be written"));
        }

        H5Aclose(attr);
    }
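
A minimal caller sketch, assuming an already open HDF5 group handle (here called "group") and libSplash's Dimensions type; the names are illustrative, not from the library:

    int level = 42;
    Dimensions dims(1, 1, 1);
    // write a scalar integer attribute named "level" to the group
    DCAttribute::writeAttribute("level", H5T_NATIVE_INT, group, 1u, dims, &level);
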
Example #2
    void DCDataSet::write(Dimensions srcBuffer, Dimensions srcStride,
            Dimensions srcOffset, Dimensions srcData,
            Dimensions dstOffset, const void* data)
    throw (DCException)
    {
        log_msg(2, "DCDataSet::write (%s)", name.c_str());

        if (!opened)
            throw DCException(getExceptionString("write: Dataset has not been opened/created"));

        log_msg(3,
                " ndims = %llu\n"
                " logical_size = %s\n"
                " physical_size = %s\n"
                " src_buffer = %s\n"
                " src_stride = %s\n"
                " src_data = %s\n"
                " src_offset = %s\n"
                " dst_offset = %s\n",
                (long long unsigned) ndims,
                getLogicalSize().toString().c_str(),
                getPhysicalSize().toString().c_str(),
                srcBuffer.toString().c_str(),
                srcStride.toString().c_str(),
                srcData.toString().c_str(),
                srcOffset.toString().c_str(),
                dstOffset.toString().c_str());

        // swap dimensions if necessary
        srcBuffer.swapDims(ndims);
        srcStride.swapDims(ndims);
        srcData.swapDims(ndims);
        srcOffset.swapDims(ndims);
        dstOffset.swapDims(ndims);

        // dataspace to read from
        hid_t dsp_src;

        if (getLogicalSize().getScalarSize() != 0)
        {
            dsp_src = H5Screate_simple(ndims, srcBuffer.getPointer(), NULL);
            if (dsp_src < 0)
                throw DCException(getExceptionString("write: Failed to create source dataspace"));

            if (H5Sselect_hyperslab(dsp_src, H5S_SELECT_SET, srcOffset.getPointer(),
                    srcStride.getPointer(), srcData.getPointer(), NULL) < 0 ||
                    H5Sselect_valid(dsp_src) <= 0)
                throw DCException(getExceptionString("write: Invalid source hyperslap selection"));

            if (srcData.getScalarSize() == 0)
                H5Sselect_none(dsp_src);

            // dataspace to write to
            if (H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, dstOffset.getPointer(),
                    NULL, srcData.getPointer(), NULL) < 0 ||
                    H5Sselect_valid(dataspace) <= 0)
                throw DCException(getExceptionString("write: Invalid target hyperslap selection"));

            if (!data || (srcData.getScalarSize() == 0))
            {
                H5Sselect_none(dataspace);
                data = NULL;
            }

            // write data to the dataset

            if (H5Dwrite(dataset, this->datatype, dsp_src, dataspace, dsetWriteProperties, data) < 0)
                throw DCException(getExceptionString("write: Failed to write dataset"));

            H5Sclose(dsp_src);
        }
    }
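
The hyperslab logic above selects the real data region inside a (possibly padded) source buffer and a matching region in the target dataset. A standalone sketch of the same pattern in the plain HDF5 C API, with made-up sizes and an assumed open dataset handle dset:

    /* 4x4 host buffer whose inner 2x2 block holds the real data */
    int buffer[16] = {0};
    hsize_t buf_dims[2] = {4, 4};
    hsize_t src_off[2]  = {1, 1};   /* offset of the data inside the buffer */
    hsize_t count[2]    = {2, 2};   /* extent of the real data */
    hsize_t dst_off[2]  = {0, 0};   /* target position in the dataset */

    hid_t mem_space = H5Screate_simple(2, buf_dims, NULL);
    H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, src_off, NULL, count, NULL);

    hid_t file_space = H5Dget_space(dset);
    H5Sselect_hyperslab(file_space, H5S_SELECT_SET, dst_off, NULL, count, NULL);

    H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, H5P_DEFAULT, buffer);
    H5Sclose(file_space);
    H5Sclose(mem_space);
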
Example #3
    void DCDataSet::read(Dimensions dstBuffer,
            Dimensions dstOffset,
            Dimensions srcSize,
            Dimensions srcOffset,
            Dimensions& sizeRead,
            uint32_t& srcNDims,
            void* dst)
    throw (DCException)
    {
        log_msg(2, "DCDataSet::read (%s)", name.c_str());

        if (!opened)
            throw DCException(getExceptionString("read: Dataset has not been opened/created"));

        if (dstBuffer.getScalarSize() == 0)
            dstBuffer.set(srcSize);

        // dst buffer is allowed to be NULL
        // in this case, only the size of the dataset is returned
        // if the dataset is empty, return just its size as there is nothing to read
        if ((dst != NULL) && (getNDims() > 0))
        {
            log_msg(3,
                    " ndims = %llu\n"
                    " logical_size = %s\n"
                    " physical_size = %s\n"
                    " dstBuffer = %s\n"
                    " dstOffset = %s\n"
                    " srcSize = %s\n"
                    " srcOffset = %s\n",
                    (long long unsigned) ndims,
                    getLogicalSize().toString().c_str(),
                    getPhysicalSize().toString().c_str(),
                    dstBuffer.toString().c_str(),
                    dstOffset.toString().c_str(),
                    srcSize.toString().c_str(),
                    srcOffset.toString().c_str());

            dstBuffer.swapDims(ndims);
            dstOffset.swapDims(ndims);
            srcSize.swapDims(ndims);
            srcOffset.swapDims(ndims);

            hid_t dst_dataspace = H5Screate_simple(ndims, dstBuffer.getPointer(), NULL);
            if (dst_dataspace < 0)
                throw DCException(getExceptionString("read: Failed to create target dataspace"));

            if (H5Sselect_hyperslab(dst_dataspace, H5S_SELECT_SET, dstOffset.getPointer(), NULL,
                    srcSize.getPointer(), NULL) < 0 ||
                    H5Sselect_valid(dst_dataspace) <= 0)
                throw DCException(getExceptionString("read: Target dataspace hyperslab selection is not valid!"));

            if (H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, srcOffset.getPointer(), NULL,
                    srcSize.getPointer(), NULL) < 0 ||
                    H5Sselect_valid(dataspace) <= 0)
                throw DCException(getExceptionString("read: Source dataspace hyperslab selection is not valid!"));

            if (srcSize.getScalarSize() == 0)
                H5Sselect_none(dataspace);
            
            if (H5Dread(dataset, this->datatype, dst_dataspace, dataspace, dsetReadProperties, dst) < 0)
                throw DCException(getExceptionString("read: Failed to read dataset"));

            H5Sclose(dst_dataspace);

            srcSize.swapDims(ndims);
        }

        // return the size that was read and the dataset's dimensionality
        sizeRead.set(srcSize);
        srcNDims = this->ndims;

        log_msg(3, " returns ndims = %llu", (long long unsigned) ndims);
        log_msg(3, " returns sizeRead = %s", sizeRead.toString().c_str());
    }
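
The NULL destination convention enables a two-phase read: query the size first, then allocate and read. A hypothetical caller sketch, assuming the NULL-query behavior carries through to the public DataCollector::read used in the tests below:

    Dimensions size_read;
    // first call: dst == NULL, only the size is returned
    dataCollector->read(iteration, "deep/folder/data", size_read, NULL);
    int *buffer = new int[size_read.getScalarSize()];
    // second call: actually read the data
    dataCollector->read(iteration, "deep/folder/data", size_read, buffer);
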
Example #4
bool Parallel_SimpleDataTest::subtestFill(int32_t iteration,
        int currentMpiRank,
        const Dimensions mpiSize, const Dimensions mpiPos,
        uint32_t elements, MPI_Comm mpiComm)
{
    bool results_correct = true;
    DataCollector::FileCreationAttr fileCAttr;

#if defined TESTS_DEBUG
    if (currentMpiRank == 0)
        std::cout << "iteration: " << iteration << std::endl;
#endif

    // write data to file
    DataCollector::initFileCreationAttr(fileCAttr);
    fileCAttr.fileAccType = DataCollector::FAT_CREATE;
    fileCAttr.enableCompression = false;
    parallelDataCollector->open(HDF5_FILE, fileCAttr);

    int dataWrite = currentMpiRank + 1;
    uint32_t num_elements = (currentMpiRank + 1) * elements;
    Dimensions grid_size(num_elements, 1, 1);

#if defined TESTS_DEBUG
    std::cout << "[" << currentMpiRank << "] " << num_elements << " elements" << std::endl;
#endif

    Dimensions globalOffset, globalSize;
    parallelDataCollector->reserve(iteration, grid_size,
            &globalSize, &globalOffset, 1, ctInt, "reserved/reserved_data");

    int attrVal = currentMpiRank;
    parallelDataCollector->writeAttribute(iteration, ctInt, "reserved/reserved_data",
            "reserved_attr", &attrVal);

    uint32_t elements_written = 0;
    uint32_t global_max_elements = mpiSize.getScalarSize() * elements;
    for (size_t i = 0; i < global_max_elements; ++i)
    {
        Dimensions write_size(1, 1, 1);
        if (i >= num_elements)
            write_size.set(0, 0, 0);

        Dimensions write_offset(globalOffset + Dimensions(elements_written, 0, 0));

        parallelDataCollector->append(iteration, write_size, 1,
                write_offset, "reserved/reserved_data", &dataWrite);

        if (i < num_elements)
            elements_written++;
    }

    MPI_CHECK(MPI_Barrier(mpiComm));

    attrVal = -1;
    parallelDataCollector->readAttribute(iteration, "reserved/reserved_data",
            "reserved_attr", &attrVal, NULL);

    CPPUNIT_ASSERT(attrVal == currentMpiRank);

    parallelDataCollector->close();

    MPI_CHECK(MPI_Barrier(mpiComm));

    // test written data using various mechanisms
    fileCAttr.fileAccType = DataCollector::FAT_READ;
    // need a complete filename here
    std::stringstream filename_stream;
    filename_stream << HDF5_FILE << "_" << iteration << ".h5";

    Dimensions size_read;
    Dimensions full_grid_size = globalSize;


    // test using SerialDataCollector
    if (currentMpiRank == 0)
    {
        int *data_read = new int[full_grid_size.getScalarSize()];
        memset(data_read, 0, sizeof (int) * full_grid_size.getScalarSize());

        DataCollector *dataCollector = new SerialDataCollector(1);
        dataCollector->open(filename_stream.str().c_str(), fileCAttr);

        dataCollector->read(iteration, "reserved/reserved_data",
                size_read, data_read);
        dataCollector->close();
        delete dataCollector;

        CPPUNIT_ASSERT(size_read == full_grid_size);
        CPPUNIT_ASSERT(size_read[1] == 1 && size_read[2] == 1);

        int this_rank = 0;
        uint32_t elements_this_rank = num_elements;
        for (uint32_t i = 0; i < size_read.getScalarSize(); ++i)
        {
            if (i == elements_this_rank)
            {
                this_rank++;
                elements_this_rank += num_elements * (this_rank + 1);
            }

            CPPUNIT_ASSERT(data_read[i] == this_rank + 1);
        }

        delete[] data_read;
    }

    MPI_CHECK(MPI_Barrier(mpiComm));

    return results_correct;
}
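
The loop in subtestFill relies on append being collective: every rank iterates up to the global maximum and passes a zero size once its own elements are exhausted. The core pattern, distilled with made-up names (pdc is an open ParallelDataCollector; localCount, maxCountOverAllRanks, and value are assumed):

    Dimensions globalSize, globalOffset;
    pdc->reserve(iteration, Dimensions(localCount, 1, 1),
            &globalSize, &globalOffset, 1, ctInt, "data");

    size_t written = 0;
    for (size_t i = 0; i < maxCountOverAllRanks; ++i)
    {
        // finished ranks still take part in the collective call
        Dimensions size = (i < localCount) ?
                Dimensions(1, 1, 1) : Dimensions(0, 0, 0);
        pdc->append(iteration, size, 1,
                globalOffset + Dimensions(written, 0, 0), "data", &value);
        if (i < localCount)
            written++;
    }
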
Example #5
bool Parallel_SimpleDataTest::subtestWriteRead(int32_t iteration,
        int currentMpiRank,
        const Dimensions mpiSize, const Dimensions mpiPos,
        const Dimensions gridSize, uint32_t dimensions, MPI_Comm mpiComm)
{
    bool results_correct = true;
    DataCollector::FileCreationAttr fileCAttr;
    std::set<std::string> datasetNames;

#if defined TESTS_DEBUG
    if (currentMpiRank == 0)
        std::cout << "iteration: " << iteration << std::endl;
#endif

    size_t bufferSize = gridSize[0] * gridSize[1] * gridSize[2];

    // write data to file
    DataCollector::initFileCreationAttr(fileCAttr);
    fileCAttr.fileAccType = DataCollector::FAT_CREATE;
    fileCAttr.enableCompression = false;
    parallelDataCollector->open(HDF5_FILE, fileCAttr);

    int *dataWrite = new int[bufferSize];

    for (uint32_t i = 0; i < bufferSize; i++)
        dataWrite[i] = currentMpiRank + 1;

    parallelDataCollector->write(iteration, ctInt, dimensions, gridSize,
            "deep/folder/data", dataWrite);
    datasetNames.insert("deep/folder/data");
    parallelDataCollector->write(iteration, ctInt, dimensions, gridSize,
            "deep/folder/data2", dataWrite);
    datasetNames.insert("deep/folder/data2");
    parallelDataCollector->write(iteration, ctInt, dimensions, gridSize,
            "another_dataset", dataWrite);
    datasetNames.insert("another_dataset");
    parallelDataCollector->close();

    delete[] dataWrite;
    dataWrite = NULL;

    MPI_CHECK(MPI_Barrier(mpiComm));

    // test written data using various mechanisms
    fileCAttr.fileAccType = DataCollector::FAT_READ;
    // need a complete filename here
    std::stringstream filename_stream;
    filename_stream << HDF5_FILE << "_" << iteration << ".h5";

    Dimensions size_read;
    Dimensions full_grid_size = gridSize * mpiSize;
    int *data_read = new int[full_grid_size.getScalarSize()];
    memset(data_read, 0, sizeof (int) * full_grid_size.getScalarSize());

    // test using SerialDataCollector
    if (currentMpiRank == 0)
    {
        DataCollector *dataCollector = new SerialDataCollector(1);
        dataCollector->open(filename_stream.str().c_str(), fileCAttr);

        dataCollector->read(iteration, "deep/folder/data", size_read, data_read);
        dataCollector->close();
        delete dataCollector;

        CPPUNIT_ASSERT(size_read == full_grid_size);
        CPPUNIT_ASSERT(testData(mpiSize, gridSize, data_read));
    }

    MPI_CHECK(MPI_Barrier(mpiComm));

    // test using full read per process
    memset(data_read, 0, sizeof (int) * full_grid_size.getScalarSize());
    ParallelDataCollector *readCollector = new ParallelDataCollector(mpiComm,
            MPI_INFO_NULL, mpiSize, 1);

    readCollector->open(HDF5_FILE, fileCAttr);
    
    /* test entries listing */
    {
        DataCollector::DCEntry *entries = NULL;
        size_t numEntries = 0;

        int32_t *ids = NULL;
        size_t numIDs = 0;
        readCollector->getEntryIDs(NULL, &numIDs);
        /* there might be old files, but we are at least at the current iteration */
        CPPUNIT_ASSERT(numIDs >= iteration + 1);
        ids = new int32_t[numIDs];
        readCollector->getEntryIDs(ids, NULL);

        readCollector->getEntriesForID(iteration, NULL, &numEntries);
        CPPUNIT_ASSERT(numEntries == 3);
        entries = new DataCollector::DCEntry[numEntries];
        readCollector->getEntriesForID(iteration, entries, NULL);

        CPPUNIT_ASSERT(numEntries == datasetNames.size());
        for (uint32_t i = 0; i < numEntries; ++i)
        {
            /* test that listed datasets match expected dataset names*/
            CPPUNIT_ASSERT(datasetNames.find(entries[i].name) != datasetNames.end());
        }

        delete[] entries;
        delete[] ids;
    }
    
    readCollector->read(iteration, "deep/folder/data", size_read, data_read);
    readCollector->close();

    CPPUNIT_ASSERT(size_read == full_grid_size);
    CPPUNIT_ASSERT(testData(mpiSize, gridSize, data_read));
    delete[] data_read;

    MPI_CHECK(MPI_Barrier(mpiComm));

    // test using parallel read
    data_read = new int[gridSize.getScalarSize()];
    memset(data_read, 0, sizeof (int) * gridSize.getScalarSize());

    const Dimensions globalOffset = gridSize * mpiPos;
    readCollector->open(HDF5_FILE, fileCAttr);
    readCollector->read(iteration, gridSize, globalOffset, "deep/folder/data",
            size_read, data_read);
    readCollector->close();
    delete readCollector;

    CPPUNIT_ASSERT(size_read == gridSize);

    for (size_t k = 0; k < gridSize[2]; ++k)
    {
        for (size_t j = 0; j < gridSize[1]; ++j)
        {
            for (size_t i = 0; i < gridSize[0]; ++i)
            {
                size_t index = k * gridSize[1] * gridSize[0] +
                        j * gridSize[0] + i;

                if (data_read[index] != currentMpiRank + 1)
                {
#if defined TESTS_DEBUG
                    std::cout << index << ": " << data_read[index] <<
                            " != expected " << currentMpiRank + 1 << std::endl;
#endif
                    results_correct = false;
                    break;
                }
            }
            if (!results_correct)
                break;
        }
        if (!results_correct)
            break;
    }

    delete[] data_read;

    MPI_CHECK(MPI_Barrier(mpiComm));

    return results_correct;
}
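
The verification loop in subtestWriteRead flattens 3D coordinates with the usual row-major rule index = (k * ny + j) * nx + i, where x (index i) varies fastest. For example, with nx = gridSize[0] = 2 and ny = gridSize[1] = 3, the point (i, j, k) = (1, 2, 0) maps to index (0 * 3 + 2) * 2 + 1 = 5.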