// Test a full buffer TEST(FTDCFileManagerTest, TestFull) { Client* client = &cc(); FTDCConfig c; c.maxFileSizeBytes = 300; c.maxDirectorySizeBytes = 1000; c.maxSamplesPerInterimMetricChunk = 1; unittest::TempDir tempdir("metrics_testpath"); boost::filesystem::path dir(tempdir.path()); createDirectoryClean(dir); FTDCCollectorCollection rotate; auto swMgr = FTDCFileManager::create(&c, dir, &rotate, client); ASSERT_OK(swMgr.getStatus()); auto mgr = std::move(swMgr.getValue()); // Test a large numbers of zeros, and incremental numbers in a full buffer for (int j = 0; j < 10; j++) { ASSERT_OK( mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 3230792343LL << "key2" << 235135))); for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) { ASSERT_OK(mgr->writeSampleAndRotateIfNeeded( client, BSON("name" << "joe" << "key1" << static_cast<long long int>(i * j * 37) << "key2" << static_cast<long long int>(i * (645 << j))))); } ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 34 << "key2" << 45))); // Add Value ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 34 << "key2" << 45))); } mgr->close(); auto files = scanDirectory(dir); int sum = 0; for (auto& file : files) { int fs = boost::filesystem::file_size(file); ASSERT_TRUE(fs < c.maxFileSizeBytes * 1.10); if (file.generic_string().find("interim") == std::string::npos) { sum += fs; } } ASSERT_TRUE(sum < c.maxDirectorySizeBytes * 1.10 && sum > c.maxDirectorySizeBytes * 0.90); }
// Test a normal restart TEST(FTDCFileManagerTest, TestNormalRestart) { Client* client = &cc(); FTDCConfig c; c.maxFileSizeBytes = 1000; c.maxDirectorySizeBytes = 3000; unittest::TempDir tempdir("metrics_testpath"); boost::filesystem::path dir(tempdir.path()); createDirectoryClean(dir); for (int i = 0; i < 3; i++) { // Do a few cases of stop and start to ensure it works as expected FTDCCollectorCollection rotate; auto swMgr = FTDCFileManager::create(&c, dir, &rotate, client); ASSERT_OK(swMgr.getStatus()); auto mgr = std::move(swMgr.getValue()); // Test a large numbers of zeros, and incremental numbers in a full buffer for (int j = 0; j < 4; j++) { ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 3230792343LL << "key2" << 235135), Date_t())); for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) { ASSERT_OK( mgr->writeSampleAndRotateIfNeeded( client, BSON("name" << "joe" << "key1" << static_cast<long long int>(i * j * 37) << "key2" << static_cast<long long int>(i * (645 << j))), Date_t())); } ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 34 << "key2" << 45), Date_t())); // Add Value ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 34 << "key2" << 45), Date_t())); } mgr->close(); // Validate the interim file does not have data ValidateInterimFileHasData(dir, false); } }
// Test a run of the controller and the data it logs to log file TEST(FTDCControllerTest, TestFull) { unittest::TempDir tempdir("metrics_testpath"); boost::filesystem::path dir(tempdir.path()); createDirectoryClean(dir); FTDCConfig config; config.enabled = true; config.period = Milliseconds(1); config.maxFileSizeBytes = FTDCConfig::kMaxFileSizeBytesDefault; config.maxDirectorySizeBytes = FTDCConfig::kMaxDirectorySizeBytesDefault; FTDCController c(dir, config); auto c1 = std::unique_ptr<FTDCMetricsCollectorMock2>(new FTDCMetricsCollectorMock2()); auto c2 = std::unique_ptr<FTDCMetricsCollectorMockRotate>(new FTDCMetricsCollectorMockRotate()); auto c1Ptr = c1.get(); auto c2Ptr = c2.get(); c1Ptr->setSignalOnCount(100); c.addPeriodicCollector(std::move(c1)); c.addOnRotateCollector(std::move(c2)); c.start(); // Wait for 100 samples to have occured c1Ptr->wait(); c.stop(); auto docsPeriodic = c1Ptr->getDocs(); ASSERT_GREATER_THAN_OR_EQUALS(docsPeriodic.size(), 100UL); auto docsRotate = c2Ptr->getDocs(); ASSERT_EQUALS(docsRotate.size(), 1UL); std::vector<BSONObj> allDocs; allDocs.insert(allDocs.end(), docsRotate.begin(), docsRotate.end()); allDocs.insert(allDocs.end(), docsPeriodic.begin(), docsPeriodic.end()); auto files = scanDirectory(dir); ASSERT_EQUALS(files.size(), 2UL); auto alog = files[0]; ValidateDocumentList(alog, allDocs); }
// Test a restart with a good interim file, and validate we have all the data.
// Simulates a crash by closing a writer without flushing, then checks that the
// recovered interim samples are rolled into a new archive file on restart.
TEST(FTDCFileManagerTest, TestNormalCrashInterim) {
    Client* client = &cc();

    FTDCConfig config;
    config.maxSamplesPerInterimMetricChunk = 3;
    config.maxFileSizeBytes = 10 * 1024 * 1024;
    config.maxDirectorySizeBytes = 10 * 1024 * 1024;

    unittest::TempDir tempdir("metrics_testpath");
    boost::filesystem::path dir(tempdir.path());

    BSONObj mdoc1 = BSON("name" << "some_metadata" << "key1" << 34 << "something" << 98);
    BSONObj sdoc1 = BSON("name" << "joe" << "key1" << 34 << "key2" << 45);
    BSONObj sdoc2 = BSON("name" << "joe" << "key3" << 34 << "key5" << 45);

    // Obtain a stable archive file name from a short-lived manager.
    boost::filesystem::path archivePath;
    {
        FTDCCollectorCollection rotate;
        auto swMgr = FTDCFileManager::create(&config, dir, &rotate, client);
        ASSERT_OK(swMgr.getStatus());

        auto swFile = swMgr.getValue()->generateArchiveFileName(dir, "0test-crash");
        ASSERT_OK(swFile);
        archivePath = swFile.getValue();

        ASSERT_OK(swMgr.getValue()->close());
    }

    createDirectoryClean(dir);

    // Write metadata plus samples, then "crash" by skipping the final flush so
    // the last chunk of samples is stranded in the interim file.
    {
        FTDCFileWriter writer(&config);
        ASSERT_OK(writer.open(archivePath));
        ASSERT_OK(writer.writeMetadata(mdoc1, Date_t()));

        for (int n = 0; n < 2; ++n) {
            ASSERT_OK(writer.writeSample(sdoc1, Date_t()));
        }
        for (int n = 0; n < 4; ++n) {
            ASSERT_OK(writer.writeSample(sdoc2, Date_t()));
        }

        // leave some data in the interim file
        writer.closeWithoutFlushForTest();
    }

    // Validate the interim file has data
    ValidateInterimFileHasData(dir, true);

    // Let the manager run the recovery over the interim file
    {
        FTDCCollectorCollection rotate;
        auto swMgr = FTDCFileManager::create(&config, dir, &rotate, client);
        ASSERT_OK(swMgr.getStatus());
        auto mgr = std::move(swMgr.getValue());
        ASSERT_OK(mgr->close());
    }

    // Validate the file manager rolled over the changes to the current archive file
    // and did not start a new archive file.
    auto files = scanDirectory(dir);
    std::sort(files.begin(), files.end());

    // Validate old file
    std::vector<BSONObj> expectedOld = {mdoc1, sdoc1, sdoc1};
    ValidateDocumentList(files[0], expectedOld);

    // Validate new file
    std::vector<BSONObj> expectedNew = {sdoc2, sdoc2, sdoc2, sdoc2};
    ValidateDocumentList(files[1], expectedNew);
}
// Test a restart after a crash with a corrupt archive file TEST(FTDCFileManagerTest, TestCorruptCrashRestart) { Client* client = &cc(); FTDCConfig c; c.maxFileSizeBytes = 1000; c.maxDirectorySizeBytes = 3000; unittest::TempDir tempdir("metrics_testpath"); boost::filesystem::path dir(tempdir.path()); createDirectoryClean(dir); for (int i = 0; i < 2; i++) { // Do a few cases of stop and start to ensure it works as expected FTDCCollectorCollection rotate; auto swMgr = FTDCFileManager::create(&c, dir, &rotate, client); ASSERT_OK(swMgr.getStatus()); auto mgr = std::move(swMgr.getValue()); // Test a large numbers of zeros, and incremental numbers in a full buffer for (int j = 0; j < 4; j++) { ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 3230792343LL << "key2" << 235135), Date_t())); for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) { ASSERT_OK( mgr->writeSampleAndRotateIfNeeded( client, BSON("name" << "joe" << "key1" << static_cast<long long int>(i * j * 37) << "key2" << static_cast<long long int>(i * (645 << j))), Date_t())); } ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 34 << "key2" << 45), Date_t())); // Add Value ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 34 << "key2" << 45), Date_t())); } mgr->close(); auto swFile = mgr->generateArchiveFileName(dir, "0test-crash"); ASSERT_OK(swFile); std::ofstream stream(swFile.getValue().c_str()); // This test case caused us to allocate more memory then the size of the file the first time // I tried it stream << "Hello World"; stream.close(); } }