Example #1
0
/**
 * Detect the MPI size stored in an existing libSplash file.
 *
 * Opens the file named in \p options read-only (using a parallel collector
 * when parallel file support is compiled in and requested, a serial one
 * otherwise) and queries its recorded MPI size.
 *
 * @param options        run options; filename, parallelFile and mpiSize are read
 * @param fileMPISizeDim out: MPI size read from the file, (0,0,0) on failure
 * @return RESULT_OK on success, RESULT_ERROR if the file could not be opened/read
 */
int detectFileMPISize(Options& options, Dimensions &fileMPISizeDim)
{
    int result = RESULT_OK;

    DataCollector *dc = NULL;
#if (SPLASH_SUPPORTED_PARALLEL==1)
    if (options.parallelFile)
        dc = new ParallelDataCollector(MPI_COMM_WORLD, MPI_INFO_NULL,
            Dimensions(options.mpiSize, 1, 1), 1);
    else
#endif
        dc = new SerialDataCollector(1);

    DataCollector::FileCreationAttr fileCAttr;
    DataCollector::initFileCreationAttr(fileCAttr);
    fileCAttr.fileAccType = DataCollector::FAT_READ;

    try
    {
        dc->open(options.filename.c_str(), fileCAttr);
        dc->getMPISize(fileMPISizeDim);
        dc->close();
    } catch (const DCException& e)
    {
        // catch by const reference: avoids slicing and a needless copy
        std::cerr << "[0] Detecting file MPI size failed!" << std::endl <<
                e.what() << std::endl;
        fileMPISizeDim.set(0, 0, 0);
        result = RESULT_ERROR;
    }

    // collector is deleted on both the success and the error path
    delete dc;
    dc = NULL;

    return result;
}
/**
 * Test reserve/append-style writing: each rank reserves (rank+1)*elements
 * 1D elements, appends its rank+1 value element-by-element (padding with
 * zero-size appends so all ranks issue the same number of collective calls),
 * round-trips a per-rank attribute, then rank 0 re-reads the whole dataset
 * serially and checks the per-rank value runs.
 *
 * @param iteration      iteration (id) to write/read
 * @param currentMpiRank rank of this process in mpiComm
 * @param mpiSize        dimensions of the MPI grid
 * @param mpiPos         position of this rank in the grid (unused here)
 * @param elements       base element count; rank r writes (r+1)*elements
 * @param mpiComm        communicator used for barriers
 * @return true if all checks passed (CppUnit asserts throw on failure)
 */
bool Parallel_SimpleDataTest::subtestFill(int32_t iteration,
        int currentMpiRank,
        const Dimensions mpiSize, const Dimensions mpiPos,
        uint32_t elements, MPI_Comm mpiComm)
{
    bool results_correct = true;
    DataCollector::FileCreationAttr fileCAttr;

#if defined TESTS_DEBUG
    if (currentMpiRank == 0)
        std::cout << "iteration: " << iteration << std::endl;
#endif

    // write data to file
    DataCollector::initFileCreationAttr(fileCAttr);
    fileCAttr.fileAccType = DataCollector::FAT_CREATE;
    fileCAttr.enableCompression = false;
    parallelDataCollector->open(HDF5_FILE, fileCAttr);

    int dataWrite = currentMpiRank + 1;
    uint32_t num_elements = (currentMpiRank + 1) * elements;
    Dimensions grid_size(num_elements, 1, 1);

#if defined TESTS_DEBUG
    std::cout << "[" << currentMpiRank << "] " << num_elements << " elements" << std::endl;
#endif

    Dimensions globalOffset, globalSize;
    parallelDataCollector->reserve(iteration, grid_size,
            &globalSize, &globalOffset, 1, ctInt, "reserved/reserved_data");

    int attrVal = currentMpiRank;
    parallelDataCollector->writeAttribute(iteration, ctInt, "reserved/reserved_data",
            "reserved_attr", &attrVal);

    // Append must be called collectively: ranks that are already done use a
    // zero-sized write so every rank performs global_max_elements appends.
    uint32_t elements_written = 0;
    uint32_t global_max_elements = mpiSize.getScalarSize() * elements;
    for (size_t i = 0; i < global_max_elements; ++i)
    {
        Dimensions write_size(1, 1, 1);
        if (i >= num_elements)
            write_size.set(0, 0, 0);

        Dimensions write_offset(globalOffset + Dimensions(elements_written, 0, 0));

        parallelDataCollector->append(iteration, write_size, 1,
                write_offset, "reserved/reserved_data", &dataWrite);

        if (i < num_elements)
            elements_written++;
    }

    MPI_CHECK(MPI_Barrier(mpiComm));

    // read back the per-rank attribute and verify the round-trip
    attrVal = -1;
    parallelDataCollector->readAttribute(iteration, "reserved/reserved_data",
            "reserved_attr", &attrVal, NULL);

    CPPUNIT_ASSERT(attrVal == currentMpiRank);

    parallelDataCollector->close();

    MPI_CHECK(MPI_Barrier(mpiComm));

    // test written data using various mechanisms
    fileCAttr.fileAccType = DataCollector::FAT_READ;
    // need a complete filename here
    std::stringstream filename_stream;
    filename_stream << HDF5_FILE << "_" << iteration << ".h5";

    Dimensions size_read;
    Dimensions full_grid_size = globalSize;


    // test using SerialDataCollector
    if (currentMpiRank == 0)
    {
        int *data_read = new int[full_grid_size.getScalarSize()];
        memset(data_read, 0, sizeof (int) * full_grid_size.getScalarSize());

        DataCollector *dataCollector = new SerialDataCollector(1);
        dataCollector->open(filename_stream.str().c_str(), fileCAttr);

        dataCollector->read(iteration, "reserved/reserved_data",
                size_read, data_read);
        dataCollector->close();
        delete dataCollector;

        CPPUNIT_ASSERT(size_read == full_grid_size);
        // BUGFIX: the old chained comparison `a == b == 1` parsed as
        // `(a == b) == 1` and passed whenever both dims were merely equal;
        // assert each dimension is exactly 1 instead.
        CPPUNIT_ASSERT(size_read[1] == 1 && size_read[2] == 1);

        // the dataset is a concatenation of runs: rank r contributed
        // (r+1)*elements entries, all with value r+1
        int this_rank = 0;
        uint32_t elements_this_rank = num_elements;
        for (uint32_t i = 0; i < size_read.getScalarSize(); ++i)
        {
            if (i == elements_this_rank)
            {
                this_rank++;
                elements_this_rank += num_elements * (this_rank + 1);
            }

            CPPUNIT_ASSERT(data_read[i] == this_rank + 1);
        }

        delete[] data_read;
    }

    MPI_CHECK(MPI_Barrier(mpiComm));

    return results_correct;
}
/**
 * Test collective write and the three read paths: serial full read on rank 0,
 * parallel full read on every rank (plus entry-listing checks), and parallel
 * partial read of each rank's own sub-block.
 *
 * @param iteration      iteration (id) to write/read
 * @param currentMpiRank rank of this process in mpiComm
 * @param mpiSize        dimensions of the MPI grid
 * @param mpiPos         position of this rank in the grid
 * @param gridSize       local grid size written by each rank
 * @param dimensions     dimensionality passed to write()
 * @param mpiComm        communicator used for barriers and parallel reads
 * @return true if the partial-read data matched (CppUnit asserts throw on failure)
 */
bool Parallel_SimpleDataTest::subtestWriteRead(int32_t iteration,
        int currentMpiRank,
        const Dimensions mpiSize, const Dimensions mpiPos,
        const Dimensions gridSize, uint32_t dimensions, MPI_Comm mpiComm)
{
    bool results_correct = true;
    DataCollector::FileCreationAttr fileCAttr;
    std::set<std::string> datasetNames;

#if defined TESTS_DEBUG
    if (currentMpiRank == 0)
        std::cout << "iteration: " << iteration << std::endl;
#endif

    size_t bufferSize = gridSize[0] * gridSize[1] * gridSize[2];

    // write data to file
    DataCollector::initFileCreationAttr(fileCAttr);
    fileCAttr.fileAccType = DataCollector::FAT_CREATE;
    fileCAttr.enableCompression = false;
    parallelDataCollector->open(HDF5_FILE, fileCAttr);

    // every cell of this rank's block holds rank+1
    int *dataWrite = new int[bufferSize];

    for (uint32_t i = 0; i < bufferSize; i++)
        dataWrite[i] = currentMpiRank + 1;

    // three datasets so the entry-listing test below has something to find
    parallelDataCollector->write(iteration, ctInt, dimensions, gridSize,
            "deep/folder/data", dataWrite);
    datasetNames.insert("deep/folder/data");
    parallelDataCollector->write(iteration, ctInt, dimensions, gridSize,
            "deep/folder/data2", dataWrite);
    datasetNames.insert("deep/folder/data2");
    parallelDataCollector->write(iteration, ctInt, dimensions, gridSize,
            "another_dataset", dataWrite);
    datasetNames.insert("another_dataset");
    parallelDataCollector->close();

    delete[] dataWrite;
    dataWrite = NULL;

    MPI_CHECK(MPI_Barrier(mpiComm));

    // test written data using various mechanisms
    fileCAttr.fileAccType = DataCollector::FAT_READ;
    // need a complete filename here
    std::stringstream filename_stream;
    filename_stream << HDF5_FILE << "_" << iteration << ".h5";

    Dimensions size_read;
    Dimensions full_grid_size = gridSize * mpiSize;
    int *data_read = new int[full_grid_size.getScalarSize()];
    memset(data_read, 0, sizeof (int) * full_grid_size.getScalarSize());

    // test using SerialDataCollector
    if (currentMpiRank == 0)
    {
        DataCollector *dataCollector = new SerialDataCollector(1);
        dataCollector->open(filename_stream.str().c_str(), fileCAttr);

        dataCollector->read(iteration, "deep/folder/data", size_read, data_read);
        dataCollector->close();
        delete dataCollector;

        CPPUNIT_ASSERT(size_read == full_grid_size);
        CPPUNIT_ASSERT(testData(mpiSize, gridSize, data_read));
    }

    MPI_CHECK(MPI_Barrier(mpiComm));

    // test using full read per process
    memset(data_read, 0, sizeof (int) * full_grid_size.getScalarSize());
    ParallelDataCollector *readCollector = new ParallelDataCollector(mpiComm,
            MPI_INFO_NULL, mpiSize, 1);

    readCollector->open(HDF5_FILE, fileCAttr);
    
    /* test entries listing */
    {
        DataCollector::DCEntry *entries = NULL;
        size_t numEntries = 0;

        int32_t *ids = NULL;
        size_t numIDs = 0;
        // first call with NULL buffer only queries the count
        readCollector->getEntryIDs(NULL, &numIDs);
        /* there might be old files, but we are at least at the current iteration */
        // BUGFIX: cast the signed iteration explicitly to avoid a
        // signed/unsigned comparison against size_t numIDs
        CPPUNIT_ASSERT(numIDs >= static_cast<size_t>(iteration) + 1);
        ids = new int32_t[numIDs];
        readCollector->getEntryIDs(ids, NULL);

        readCollector->getEntriesForID(iteration, NULL, &numEntries);
        CPPUNIT_ASSERT(numEntries == 3);
        entries = new DataCollector::DCEntry[numEntries];
        readCollector->getEntriesForID(iteration, entries, NULL);

        CPPUNIT_ASSERT(numEntries == datasetNames.size());
        for (uint32_t i = 0; i < numEntries; ++i)
        {
            /* test that listed datasets match expected dataset names*/
            CPPUNIT_ASSERT(datasetNames.find(entries[i].name) != datasetNames.end());
        }

        delete[] entries;
        delete[] ids;
    }
    
    readCollector->read(iteration, "deep/folder/data", size_read, data_read);
    readCollector->close();

    CPPUNIT_ASSERT(size_read == full_grid_size);
    CPPUNIT_ASSERT(testData(mpiSize, gridSize, data_read));
    delete[] data_read;

    MPI_CHECK(MPI_Barrier(mpiComm));

    // test using parallel read: each rank reads back only its own sub-block
    data_read = new int[gridSize.getScalarSize()];
    memset(data_read, 0, sizeof (int) * gridSize.getScalarSize());

    const Dimensions globalOffset = gridSize * mpiPos;
    readCollector->open(HDF5_FILE, fileCAttr);
    readCollector->read(iteration, gridSize, globalOffset, "deep/folder/data",
            size_read, data_read);
    readCollector->close();
    delete readCollector;

    CPPUNIT_ASSERT(size_read == gridSize);

    // every cell of this rank's block must hold rank+1 (row-major layout)
    for (size_t k = 0; k < gridSize[2]; ++k)
    {
        for (size_t j = 0; j < gridSize[1]; ++j)
        {
            for (size_t i = 0; i < gridSize[0]; ++i)
            {
                size_t index = k * gridSize[1] * gridSize[0] +
                        j * gridSize[0] + i;

                if (data_read[index] != currentMpiRank + 1)
                {
#if defined TESTS_DEBUG
                    std::cout << index << ": " << data_read[index] <<
                            " != expected " << currentMpiRank + 1 << std::endl;
#endif
                    results_correct = false;
                    break;
                }
            }
            if (!results_correct)
                break;
        }
        if (!results_correct)
            break;
    }

    delete[] data_read;

    MPI_CHECK(MPI_Barrier(mpiComm));

    return results_correct;
}
void FilenameTest::runTest(const char* filename, const char* fullFilename)
{
    CPPUNIT_ASSERT(!fileExists(fullFilename));

    DataCollector::FileCreationAttr attr;
    DataCollector::initFileCreationAttr(attr);
    attr.fileAccType = DataCollector::FAT_WRITE;

    // write first dataset to file (create file)
    dataCollector->open(filename, attr);
    int data1 = rand();

    dataCollector->write(1, ctInt, 1, Selection(Dimensions(1, 1, 1)), "data", &data1);
    dataCollector->close();
    // Now file must exist
    CPPUNIT_ASSERT(fileExists(fullFilename));

    // write second dataset to file (write to existing file of same name
    dataCollector->open(filename, attr);
    int data2 = rand();

    dataCollector->write(2, ctInt, 1, Selection(Dimensions(1, 1, 1)), "data", &data2);
    dataCollector->close();


    // read data from file
    attr.fileAccType = DataCollector::FAT_READ;
    Dimensions data_size;

    int data = -1;
    dataCollector->open(filename, attr);

    CPPUNIT_ASSERT(dataCollector->getMaxID() == 2);

    dataCollector->read(1, "data", data_size, &data);

    CPPUNIT_ASSERT(data_size.getScalarSize() == 1);
    CPPUNIT_ASSERT(data == data1);

    dataCollector->read(2, "data", data_size, &data);

    CPPUNIT_ASSERT(data_size.getScalarSize() == 1);
    CPPUNIT_ASSERT(data == data2);

    dataCollector->close();

    // erase file
    attr.fileAccType = DataCollector::FAT_CREATE;
    dataCollector->open(filename, attr);

    CPPUNIT_ASSERT_THROW(dataCollector->read(1, "data", data_size, &data), DCException);
    int data3 = rand();
    dataCollector->write(2, ctInt, 1, Selection(Dimensions(1, 1, 1)), "data", &data3);
    dataCollector->close();

    // Read from created file
    attr.fileAccType = DataCollector::FAT_READ;

    data = -1;
    dataCollector->open(filename, attr);

    CPPUNIT_ASSERT(dataCollector->getMaxID() == 2);

    dataCollector->read(2, "data", data_size, &data);

    CPPUNIT_ASSERT(data_size.getScalarSize() == 1);
    CPPUNIT_ASSERT(data == data3);
    dataCollector->close();
}