int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    if (argc < 2)
    {
        std::cout << "Usage: " << argv[0] << " <libsplash-file-base>" << std::endl;
        MPI_Finalize();
        return -1;
    }

    int mpi_rank, mpi_size;
    int files_start = 0, files_end = 0;
    Dimensions file_mpi_size;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    // libSplash filename
    std::string filename;
    filename.assign(argv[1]);

    // create DomainCollector
    // read single file, not merged
    DomainCollector dc(100);
    DataCollector::FileCreationAttr fAttr;
    DataCollector::initFileCreationAttr(fAttr);
    fAttr.fileAccType = DataCollector::FAT_READ;

    // broadcast file MPI size from root to all processes
    uint64_t f_mpi_size[3];
    if (mpi_rank == 0)
    {
        fAttr.mpiPosition.set(0, 0, 0);
        dc.open(filename.c_str(), fAttr);
        dc.getMPISize(file_mpi_size);
        std::cout << mpi_rank << ": total file MPI size = "
                  << file_mpi_size.toString() << std::endl;

        for (int i = 0; i < 3; ++i)
            f_mpi_size[i] = file_mpi_size[i];

        dc.close();
    }

    MPI_Bcast(f_mpi_size, 3, MPI_UNSIGNED_LONG_LONG, 0, MPI_COMM_WORLD);
    file_mpi_size.set(f_mpi_size[0], f_mpi_size[1], f_mpi_size[2]);

    // get number of files for this MPI process
    filesToProcesses(mpi_size, mpi_rank, file_mpi_size.getScalarSize(),
            files_start, files_end);

    for (int f = files_start; f <= files_end; ++f)
    {
        // get file MPI pos for this file index
        indexToPos(f, file_mpi_size, fAttr.mpiPosition);
        std::cout << mpi_rank << ": opening position "
                  << fAttr.mpiPosition.toString() << std::endl;

        dc.open(filename.c_str(), fAttr);

        // get number of entries
        int32_t *ids = NULL;
        size_t num_ids = 0;
        dc.getEntryIDs(NULL, &num_ids);

        if (num_ids == 0)
        {
            dc.close();
            continue;
        } else
        {
            ids = new int32_t[num_ids];
            dc.getEntryIDs(ids, &num_ids);
        }

        // get entries for 1. id (iteration)
        DataCollector::DCEntry *entries = NULL;
        size_t num_entries = 0;
        dc.getEntriesForID(ids[0], NULL, &num_entries);

        if (num_entries == 0)
        {
            delete[] ids;
            dc.close();
            continue;
        } else
        {
            entries = new DataCollector::DCEntry[num_entries];
            dc.getEntriesForID(ids[0], entries, &num_entries);
        }

        // read 1. entry
        DataCollector::DCEntry first_entry = entries[0];
        std::cout << " " << mpi_rank << ": reading entry " << first_entry.name << std::endl;

        // read complete domain
        Domain domain = dc.getGlobalDomain(ids[0], first_entry.name.c_str());
        DomainCollector::DomDataClass dataClass = DomainCollector::UndefinedType;
        DataContainer* container = dc.readDomain(ids[0], first_entry.name.c_str(),
                domain, &dataClass, false);

        // access all elements, no matter how many subdomains
        for (size_t i = 0; i < container->getNumElements(); ++i)
        {
            void *element = container->getElement(i);
            // do anything with this element
            // ...
        }

        // POLY data might be distributed over multiple subdomains
        for (size_t d = 0; d < container->getNumSubdomains(); ++d)
        {
            DomainData* subdomain = container->getIndex(d);
            Dimensions size = subdomain->getSize();
            std::cout << " " << mpi_rank << ": subdomain " << d << " has size "
                      << size.toString() << std::endl;

            // access the underlying buffer of a subdomain
            void *elements = subdomain->getData();
        }

        // don't forget to delete the container allocated by DomainCollector
        delete container;
        delete[] entries;
        delete[] ids;

        dc.close();
    }

    MPI_Finalize();
    return 0;
}
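The parallel reader above calls two small helpers, filesToProcesses and indexToPos, whose definitions are not part of this listing. The sketch below shows one plausible way to implement them, derived only from the call sites above: an even block distribution of file indices over MPI ranks, and an x-fastest mapping from a linear file index to a 3D MPI position. The helpers shipped with the original example may differ in detail.

// Sketch only: distribute fileMPISizeTotal files evenly over mpiSize processes
// and return the inclusive file-index range [fileIndexStart, fileIndexEnd]
// handled by mpiRank. An empty range (start > end) means "no file for this rank".
void filesToProcesses(int mpiSize, int mpiRank, int fileMPISizeTotal,
        int &fileIndexStart, int &fileIndexEnd)
{
    // default: empty range, this rank processes no file
    fileIndexStart = 0;
    fileIndexEnd = -1;

    if (mpiSize >= fileMPISizeTotal)
    {
        // at most one file per process
        if (mpiRank < fileMPISizeTotal)
        {
            fileIndexStart = mpiRank;
            fileIndexEnd = mpiRank;
        }
    } else
    {
        // several files per process; spread the remainder over the first ranks
        const int filesPerProcess = fileMPISizeTotal / mpiSize;
        const int rest = fileMPISizeTotal % mpiSize;

        if (mpiRank < rest)
        {
            fileIndexStart = mpiRank * (filesPerProcess + 1);
            fileIndexEnd = fileIndexStart + filesPerProcess;
        } else
        {
            fileIndexStart = rest * (filesPerProcess + 1) +
                    (mpiRank - rest) * filesPerProcess;
            fileIndexEnd = fileIndexStart + filesPerProcess - 1;
        }
    }
}

// Sketch only: convert a linear file index into a 3D MPI position inside the
// file's MPI grid, assuming x is the fastest-running dimension (the same
// convention the tests below use to compute mpi_position from the rank).
void indexToPos(int index, Dimensions mpiSize, Dimensions &mpiPos)
{
    mpiPos.set(index % mpiSize[0],
            (index / mpiSize[0]) % mpiSize[1],
            index / (mpiSize[0] * mpiSize[1]));
}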
void DomainsTest::testAppendDomains()
{
    int mpi_rank = totalMpiRank;

    if (mpi_rank == 0)
    {
        Dimensions mpi_size(1, 1, 1);
        Dimensions mpi_position(0, 0, 0);
        Dimensions grid_size(12, 40, 7);
        uint32_t elements = 100;

        DataCollector::FileCreationAttr fattr;
        fattr.fileAccType = DataCollector::FAT_CREATE;
        fattr.mpiSize.set(mpi_size);
        fattr.mpiPosition.set(mpi_position);
        dataCollector->open(hdf5_file_append, fattr);

#if defined TESTS_DEBUG
        std::cout << "writing..." << std::endl;
        std::cout << "mpi_position = " << mpi_position.toString() << std::endl;
#endif

        float *data_write = new float[elements];
        for (size_t i = 0; i < elements; ++i)
            data_write[i] = (float) i;

        dataCollector->appendDomain(0, ctFloat, 10, 0, 1, "append_data",
                Dimensions(0, 0, 0), grid_size, data_write);
        dataCollector->appendDomain(0, ctFloat, elements - 10, 10, 1, "append_data",
                Dimensions(0, 0, 0), grid_size, data_write);

        dataCollector->close();
        delete[] data_write;
        data_write = NULL;

        // now read and test domain subdomain
        fattr.fileAccType = DataCollector::FAT_READ_MERGED;
        fattr.mpiSize.set(mpi_size);
        fattr.mpiPosition.set(mpi_position);
        dataCollector->open(hdf5_file_append, fattr);

        // read data container
        IDomainCollector::DomDataClass data_class = IDomainCollector::UndefinedType;
        DataContainer *container = dataCollector->readDomain(0, "append_data",
                Dimensions(0, 0, 0), grid_size, &data_class);

#if defined TESTS_DEBUG
        std::cout << "container->getNumSubdomains() = "
                  << container->getNumSubdomains() << std::endl;
#endif

        // check the container
        CPPUNIT_ASSERT(data_class == IDomainCollector::PolyType);
        CPPUNIT_ASSERT(container->getNumSubdomains() == 1);

        DomainData *subdomain = container->getIndex(0);
        CPPUNIT_ASSERT(subdomain != NULL);
        CPPUNIT_ASSERT(subdomain->getData() != NULL);

        Dimensions subdomain_elements = subdomain->getElements();

#if defined TESTS_DEBUG
        std::cout << "subdomain->getElements() = "
                  << subdomain->getElements().toString() << std::endl;
        std::cout << "subdomain->getSize() = "
                  << subdomain->getSize().toString() << std::endl;
#endif

        float *subdomain_data = (float*) (subdomain->getData());
        CPPUNIT_ASSERT(subdomain_elements.getDimSize() == elements &&
                subdomain_elements[0] == elements);

        for (int j = 0; j < elements; ++j)
        {
            CPPUNIT_ASSERT(subdomain_data[j] == (float) j);
        }

        delete container;
        container = NULL;

        dataCollector->close();
    }

    MPI_Barrier(MPI_COMM_WORLD);
}
int main(int argc, char **argv)
{
    if (argc < 2)
    {
        std::cout << "Usage: " << argv[0] << " <libsplash-file-base>" << std::endl;
        return -1;
    }

    // libSplash filename
    std::string filename;
    filename.assign(argv[1]);

    // create DomainCollector
    DomainCollector dc(100);
    DataCollector::FileCreationAttr fAttr;
    DataCollector::initFileCreationAttr(fAttr);
    fAttr.fileAccType = DataCollector::FAT_READ_MERGED;
    dc.open(filename.c_str(), fAttr);

    // get number of entries
    int32_t *ids = NULL;
    size_t num_ids = 0;
    dc.getEntryIDs(NULL, &num_ids);

    if (num_ids == 0)
    {
        dc.close();
        return 1;
    } else
    {
        ids = new int32_t[num_ids];
        dc.getEntryIDs(ids, &num_ids);
    }

    // get entries for 1. id (iteration)
    std::cout << "reading from iteration " << ids[0] << std::endl;

    DataCollector::DCEntry *entries = NULL;
    size_t num_entries = 0;
    dc.getEntriesForID(ids[0], NULL, &num_entries);

    if (num_entries == 0)
    {
        delete[] ids;
        dc.close();
        return 1;
    } else
    {
        entries = new DataCollector::DCEntry[num_entries];
        dc.getEntriesForID(ids[0], entries, &num_entries);
    }

    // read 1. entry from this iteration
    DataCollector::DCEntry first_entry = entries[0];
    std::cout << "reading entry " << first_entry.name << std::endl;

    // read complete domain
    Domain domain = dc.getGlobalDomain(ids[0], first_entry.name.c_str());
    DomainCollector::DomDataClass dataClass = DomainCollector::UndefinedType;
    DataContainer* container = dc.readDomain(ids[0], first_entry.name.c_str(),
            domain, &dataClass, false);

    // access all elements, no matter how many subdomains
    for (size_t i = 0; i < container->getNumElements(); ++i)
    {
        void *element = container->getElement(i);
        // do anything with this element
        // ...
    }

    // POLY data might be distributed over multiple subdomains
    for (size_t d = 0; d < container->getNumSubdomains(); ++d)
    {
        DomainData* subdomain = container->getIndex(d);
        Dimensions size = subdomain->getSize();
        std::cout << "subdomain " << d << " has size " << size.toString() << std::endl;

        // access the underlying buffer of a subdomain
        void *elements = subdomain->getData();
    }

    // don't forget to delete the container allocated by DomainCollector
    delete container;
    delete[] entries;
    delete[] ids;

    dc.close();

    return 0;
}
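Both readers leave the per-element work open ("do anything with this element"). As a purely illustrative continuation, if the dataset is known to hold float values, as in the tests below, the element loop could consume the data like this; the checksum is only a placeholder for real post-processing:

// Hypothetical variant of the element loop above: interpret each element as a
// float (valid for datasets written with ctFloat) and accumulate a checksum.
double checksum = 0.0;
for (size_t i = 0; i < container->getNumElements(); ++i)
{
    float *element = (float*) container->getElement(i);
    checksum += *element;
}
std::cout << "checksum over " << container->getNumElements()
          << " elements = " << checksum << std::endl;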
void DomainsTest::subTestPolyDomains(const Dimensions mpiSize, uint32_t numElements,
        uint32_t iteration)
{
    Dimensions grid_size(20, 10, 5);
    Dimensions global_grid_size = grid_size * mpiSize;

    int max_rank = 1;
    for (int i = 0; i < 3; i++)
        max_rank *= mpiSize[i];

    int mpi_rank = totalMpiRank;
    size_t mpi_elements = numElements * (mpi_rank + 1);

    if (mpi_rank < max_rank)
    {
        // write data
        float *data_write = new float[mpi_elements];

        // initialize data for writing with mpi index (mpi rank)
        Dimensions mpi_position(mpi_rank % mpiSize[0],
                (mpi_rank / mpiSize[0]) % mpiSize[1],
                (mpi_rank / mpiSize[0]) / mpiSize[1]);

        for (size_t i = 0; i < mpi_elements; ++i)
            data_write[i] = (float) mpi_rank;

        DataCollector::FileCreationAttr fattr;
        fattr.fileAccType = DataCollector::FAT_CREATE;
        fattr.mpiSize.set(mpiSize);
        fattr.mpiPosition.set(mpi_position);
        dataCollector->open(hdf5_file_poly, fattr);

        Dimensions domain_offset = mpi_position * grid_size;

#if defined TESTS_DEBUG
        std::cout << "writing..." << std::endl;
        std::cout << "mpi_position = " << mpi_position.toString() << std::endl;
        std::cout << "domain_offset = " << domain_offset.toString() << std::endl;
#endif

        dataCollector->writeDomain(iteration, ctFloat, 1, Dimensions(mpi_elements, 1, 1),
                "poly_data", domain_offset, grid_size, DomainCollector::PolyType,
                data_write);

        dataCollector->close();

        delete[] data_write;
        data_write = NULL;
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (mpi_rank == 0)
    {
        // now read and test domain subdomains
        DataCollector::FileCreationAttr fattr;
        fattr.fileAccType = DataCollector::FAT_READ_MERGED;
        fattr.mpiSize = mpiSize;
        dataCollector->open(hdf5_file_poly, fattr);

        size_t global_domain_elements = dataCollector->getTotalElements(iteration,
                "poly_data");

        size_t global_num_elements = 0;
        for (size_t i = 0; i < mpiSize.getDimSize(); ++i)
            global_num_elements += numElements * (i + 1);

#if defined TESTS_DEBUG
        std::cout << "global_domain_elements = " << global_domain_elements << std::endl;
        std::cout << "global_num_elements = " << global_num_elements << std::endl;
#endif

        CPPUNIT_ASSERT(global_domain_elements == global_num_elements);

        // test different domain offsets
        for (uint32_t i = 0; i < 5; ++i)
        {
            Dimensions offset(rand() % global_grid_size[0],
                    rand() % global_grid_size[1],
                    rand() % global_grid_size[2]);
            Dimensions partition_size = global_grid_size - offset;

#if defined TESTS_DEBUG
            std::cout << "offset = " << offset.toString() << std::endl;
            std::cout << "partition_size = " << partition_size.toString() << std::endl;
#endif

            IDomainCollector::DomDataClass data_class = IDomainCollector::UndefinedType;

            // read data container
            DataContainer *container = dataCollector->readDomain(iteration, "poly_data",
                    offset, partition_size, &data_class);

#if defined TESTS_DEBUG
            std::cout << "container->getNumSubdomains() = "
                      << container->getNumSubdomains() << std::endl;
#endif

            // check the container
            CPPUNIT_ASSERT(data_class == IDomainCollector::PolyType);
            CPPUNIT_ASSERT(container->getNumSubdomains() >= 1);

            // check all DomainData entries in the container
            for (size_t n = 0; n < container->getNumSubdomains(); ++n)
            {
                DomainData *subdomain = container->getIndex(n);
                CPPUNIT_ASSERT(subdomain != NULL);
                CPPUNIT_ASSERT(subdomain->getData() != NULL);

                Dimensions subdomain_elements = subdomain->getElements();
                Dimensions subdomain_start = subdomain->getStart();

#if defined TESTS_DEBUG
                std::cout << "subdomain->getElements() = "
                          << subdomain->getElements().toString() << std::endl;
                std::cout << "subdomain->getSize() = "
                          << subdomain->getSize().toString() << std::endl;
#endif

                float *subdomain_data = (float*) (subdomain->getData());
                CPPUNIT_ASSERT(subdomain_elements.getDimSize() != 0);

                // Find out the expected value (original mpi rank)
                // for this subdomain.
                Dimensions subdomain_mpi_pos = subdomain_start / grid_size;
                int subdomain_mpi_rank = subdomain_mpi_pos[2] * mpiSize[1] * mpiSize[0] +
                        subdomain_mpi_pos[1] * mpiSize[0] +
                        subdomain_mpi_pos[0];

                CPPUNIT_ASSERT(subdomain_elements.getDimSize() ==
                        numElements * (subdomain_mpi_rank + 1));

                for (size_t j = 0; j < subdomain_elements.getDimSize(); ++j)
                {
#if defined TESTS_DEBUG
                    std::cout << "j = " << j
                              << ", subdomain_data[j] = " << subdomain_data[j]
                              << ", subdomain_mpi_rank = " << subdomain_mpi_rank << std::endl;
#endif
                    CPPUNIT_ASSERT(subdomain_data[j] == (float) subdomain_mpi_rank);
                }
            }

            delete container;
            container = NULL;
        }

        dataCollector->close();
    }

    MPI_Barrier(MPI_COMM_WORLD);
}