// Snapshot restoration must also work on a clone: roll the clone back
// to its own snapshot and verify the pre-snapshot data reappears.
TEST_P(SnapshotRestoreTest, snapshot_restoration_on_a_clone)
{
    // Parent volume with one cluster of data captured in a snapshot.
    const auto parent_ns(make_random_namespace());
    const std::string parent_id(parent_ns->ns().str());
    SharedVolumePtr parent = newVolume(VolumeId(parent_id),
                                       be::Namespace(parent_id));

    const std::string parent_pattern("before-parent-snapshot");
    writeToVolume(*parent, 0, parent->getClusterSize(), parent_pattern);

    const SnapshotName parent_snap("parent-snapshot");
    parent->createSnapshot(parent_snap);
    waitForThisBackendWrite(*parent);

    // Clone off the parent snapshot and snapshot the clone as well.
    const auto clone_ns(make_random_namespace());
    const std::string clone_id(clone_ns->ns().str());
    SharedVolumePtr clone = createClone(clone_id,
                                        be::Namespace(clone_id),
                                        be::Namespace(parent_id),
                                        parent_snap);

    const std::string clone_pattern("before-clone-snapshot");
    writeToVolume(*clone, 0, clone->getClusterSize(), clone_pattern);

    const SnapshotName clone_snap("clone-snapshot");
    clone->createSnapshot(clone_snap);
    waitForThisBackendWrite(*clone);

    // Overwrite after the clone's snapshot, then roll back to it.
    const std::string post_snap_pattern("after-clone-snapshot");
    writeToVolume(*clone, 0, clone->getClusterSize(), post_snap_pattern);
    checkVolume(*clone, 0, clone->getClusterSize(), post_snap_pattern);

    restoreSnapshot(*clone, clone_snap);
    checkVolume(*clone, 0, clone->getClusterSize(), clone_pattern);
}
// Set up a volume so that scrubbing has real work to do:
// - write the first cluster with fst_cluster_pattern;
// - repeatedly overwrite the second cluster with throwaway data;
// - snapshot (snap1), overwrite the second cluster with
//   snd_cluster_pattern, snapshot again (snap2), delete snap1.
// Scrubs the single resulting work unit and returns the reply;
// the scrub result is expected to contain relocations.
const scrubbing::ScrubReply
prepare_scrub_test(Volume& v,
                   const std::string& fst_cluster_pattern = "first cluster",
                   const std::string& snd_cluster_pattern = "second cluster")
{
    const size_t wsize(v.getClusterSize());
    writeToVolume(v, 0, wsize, fst_cluster_pattern);

    const std::string tmp_pattern(fst_cluster_pattern +
                                  " but this will be overwritten and scrubbed away");

    // Overwrite the same cluster once per cluster in a SCO.
    const size_t nclusters = v.getSCOMultiplier();
    for (size_t i = 0; i < nclusters; ++i)
    {
        writeToVolume(v, v.getClusterMultiplier(), wsize, tmp_pattern);
    }

    const SnapshotName snap1("snap1-"s + yt::UUID().str());
    v.createSnapshot(snap1);
    waitForThisBackendWrite(v);

    writeToVolume(v, v.getClusterMultiplier(), wsize, snd_cluster_pattern);

    const SnapshotName snap2("snap2-"s + yt::UUID().str());
    v.createSnapshot(snap2);
    waitForThisBackendWrite(v);

    // Dropping snap1 leaves garbage for the scrubber to collect.
    v.deleteSnapshot(snap1);
    waitForThisBackendWrite(v);

    const std::vector<scrubbing::ScrubWork>
        scrub_work(v.getScrubbingWork(boost::none, snap2));
    EXPECT_EQ(1U, scrub_work.size());

    auto scrub_reply(scrub(scrub_work[0]));
    const auto scrub_res(get_scrub_result(v, scrub_reply));
    EXPECT_FALSE(scrub_res.relocs.empty());

    return scrub_reply;
}
// Restore a snapshot, write through the failover cache, destroy the
// volume keeping its backend data, and verify that a restart brings
// back the post-restore state (offset 0 rolled back, offset 8 intact).
//
// Fix: the lookup result used to be written as `SharedVolumePtr v1 = 0;`
// followed by an immediate reassignment - a smart pointer initialized
// from the literal 0. Initialize it directly from getVolume() instead.
TEST_P(SnapshotRestoreTest, TestFailOver)
{
    auto foc_ctx(start_one_foc());
    auto ns_ptr = make_random_namespace();
    SharedVolumePtr v = newVolume(VolumeId("volume1"),
                                  ns_ptr->ns(),
                                  VolumeSize((1 << 18) * 512),
                                  SCOMultiplier(1));

    v->setFailOverCacheConfig(foc_ctx->config(GetParam().foc_mode()));

    VolumeConfig cfg = v->get_config();

    v->createSnapshot(SnapshotName("snap0"));

    for (int i = 0; i < 5; ++i)
    {
        writeToVolume(*v, 0, 4096, "a");
    }

    waitForThisBackendWrite(*v);
    // Roll back past the "a" writes.
    v->restoreSnapshot(SnapshotName("snap0"));

    for (int i = 0; i < 7; ++i)
    {
        writeToVolume(*v, 8, 4096, "d");
    }

    flushFailOverCache(*v);
    destroyVolume(v, DeleteLocalData::T, RemoveVolumeCompletely::F);

    // The volume must be unregistered after destruction.
    SharedVolumePtr v1 = getVolume(VolumeId("volume1"));
    ASSERT_FALSE(v1);

    restartVolume(cfg);
    v1 = getVolume(VolumeId("volume1"));
    ASSERT_TRUE(v1 != nullptr);

    // Offset 0 was rolled back by the restore; offset 8 kept its data.
    checkVolume(*v1, 0, 4096, "\0");
    checkVolume(*v1, 8, 4096, "d");
    checkCurrentBackendSize(*v1);
}
// Fill many SCOs worth of clusters while the backend is blocked, then
// repeatedly read one SCO-sized range back.
TEST_P(BigReadWriteTest, bigReadsOnFull)
{
    auto ns_ptr = make_random_namespace();
    SharedVolumePtr vol = newVolume(VolumeId("volume1"),
                                    ns_ptr->ns());
    SCOPED_BLOCK_BACKEND(*vol);

    const size_t cluster_size = vol->getClusterSize();
    const size_t lba_size = vol->getLBASize();
    const std::string pattern(cluster_size, 'a');
    const size_t sco_mult = vol->getSCOMultiplier();

    for (size_t i = 0; i < 50 * sco_mult; ++i)
    {
        writeToVolume(*vol,
                      i * cluster_size / lba_size,
                      cluster_size,
                      pattern);
    }

    // Stop here to manually delete sco's to check error handling
    for (size_t i = 0; i < sco_mult; ++i)
    {
        checkVolume(*vol, 0, cluster_size * sco_mult, pattern);
    }
}
// Restore a snapshot and write again afterwards; the backend size
// bookkeeping must stay consistent.
TEST_P(SnapshotRestoreTest, RestoreAndWriteAgain1)
{
    auto ns_ptr = make_random_namespace();
    SharedVolumePtr vol = newVolume(VolumeId("volume1"),
                                    ns_ptr->ns(),
                                    VolumeSize(1 << 26),
                                    SCOMultiplier(1));

    const std::string pattern("e-manual");

    vol->createSnapshot(SnapshotName("snap1"));
    waitForThisBackendWrite(*vol);

    writeToVolume(*vol, 0, 5 * 4096, pattern);
    waitForThisBackendWrite(*vol);

    restoreSnapshot(*vol, "snap1");

    writeToVolume(*vol, 0, 4 * 4096, pattern);
    waitForThisBackendWrite(*vol);

    checkCurrentBackendSize(*vol);
}
// Exercise a volume from a worker thread until `stop` is raised: each
// iteration writes a per-iteration/per-cluster pattern to every cluster,
// schedules a backend sync, and verifies all patterns read back.
//
// Fix: the cluster count was computed as `v.getSize() < csize`, which
// yields 0 or 1 (a bool) and degenerates both loops to (at most) one
// cluster - the intended expression is the division.
void
worker(Volume& v,
       std::atomic<bool>& stop)
{
    size_t worker_iterations = 0;

    // Distinct, reproducible pattern per (iteration, cluster) pair.
    auto make_pattern([](size_t i, size_t c) -> std::string
                      {
                          return "iteration-"s +
                              boost::lexical_cast<std::string>(i) +
                              "-cluster-"s +
                              boost::lexical_cast<std::string>(c);
                      });

    const size_t csize = v.getClusterSize();
    const size_t clusters = v.getSize() / csize;

    while (not stop)
    {
        for (size_t i = 0; i < clusters; ++i)
        {
            writeToVolume(v,
                          i * v.getClusterMultiplier(),
                          csize,
                          make_pattern(worker_iterations, i));
        }

        v.scheduleBackendSync();

        for (size_t i = 0; i < clusters; ++i)
        {
            checkVolume(v,
                        i * v.getClusterMultiplier(),
                        csize,
                        make_pattern(worker_iterations, i));
        }

        ++worker_iterations;
    }

    LOG_INFO("worker exiting after " << worker_iterations << " iterations");
}
// One SCO-sized write followed by one SCO-sized read, with the backend
// blocked so everything stays local.
TEST_P(BigReadWriteTest, OneBigWriteOneBigRead)
{
    auto ns_ptr = make_random_namespace();
    SharedVolumePtr vol = newVolume(VolumeId("volume1"),
                                    ns_ptr->ns());
    SCOPED_BLOCK_BACKEND(*vol);

    const size_t cluster_size = vol->getClusterSize();
    const std::string pattern(cluster_size, 'a');
    const size_t sco_mult = vol->getSCOMultiplier();

    writeToVolume(*vol, 0, cluster_size * sco_mult, pattern);
    // Stop here to manually delete sco's to check error handling
    checkVolume(*vol, 0, cluster_size * sco_mult, pattern);
}
// Restoring an unknown snapshot must fail without halting the volume;
// restoring a snapshot whose TLog was removed from the backend must
// fail AND halt the volume.
TEST_P(SnapshotRestoreTest, HaltOnError)
{
    auto ns_ptr = make_random_namespace();
    SharedVolumePtr vol = newVolume(VolumeId("volume1"),
                                    ns_ptr->ns());

    const std::string pattern1("blah");
    const TLogId tlog_id(vol->getSnapshotManagement().getCurrentTLogId());

    writeToVolume(*vol, 0, 4096, pattern1);
    vol->createSnapshot(SnapshotName("snap1"));
    waitForThisBackendWrite(*vol);

    // Nonexistent snapshot: error, but the volume stays operational.
    EXPECT_THROW(restoreSnapshot(*vol, "snap42"),
                 std::exception);
    EXPECT_FALSE(vol->is_halted());

    // Remove the TLog from the backend so the next restore hits a
    // backend error, which is expected to halt the volume.
    vol->getBackendInterface()->remove(boost::lexical_cast<std::string>(tlog_id));
    EXPECT_THROW(restoreSnapshot(*vol, "snap1"),
                 std::exception);
    EXPECT_TRUE(vol->is_halted());
}
// Smoke test: write the same cluster repeatedly, sync to the backend
// and destroy the volume while keeping the backend data around.
TEST_P(PrefetchThreadTest, test_one)
{
    auto ns_ptr = make_random_namespace();
    const backend::Namespace& ns = ns_ptr->ns();
    Volume* vol = newVolume("v1", ns);

    const size_t numwrites = 20;

    for (size_t i = 0; i < numwrites; ++i)
    {
        writeToVolume(vol, 0, 4096, "Superflous");
    }

    syncToBackend(vol);
    destroyVolume(vol, DeleteLocalData::T, RemoveVolumeCompletely::F);
}
void test_after_tlog(bool failover) { const auto wrns(make_random_namespace()); SharedVolumePtr v = make_volume(*wrns); const std::string pattern1("Hairdresser On Fire"); { SCOPED_BLOCK_BACKEND(*v); writeToVolume(*v, v->getClusterMultiplier() * CachePage::capacity(), v->getClusterSize(), pattern1); } v->scheduleBackendSync(); waitForThisBackendWrite(*v); const std::string pattern2("Such A Little Thing Makes Such A Big Difference"); writeToVolume(*v, 2 * v->getClusterMultiplier() * CachePage::capacity(), v->getClusterSize(), pattern2); const auto ncfgs(node_configs()); if (failover) { mds_manager_->stop_one(ncfgs[0]); checkVolume(*v, 0, v->getClusterSize(), ""); } else { const std::vector<MDSNodeConfig> ncfgs2{ ncfgs[1], ncfgs[0] }; v->updateMetaDataBackendConfig(MDSMetaDataBackendConfig(ncfgs2, ApplyRelocationsToSlaves::T)); } check_config(*v, ncfgs, true); checkVolume(*v, v->getClusterMultiplier() * CachePage::capacity(), v->getClusterSize(), pattern1); checkVolume(*v, 2 * v->getClusterMultiplier() * CachePage::capacity(), v->getClusterSize(), pattern2); destroyVolume(v, DeleteLocalData::F, RemoveVolumeCompletely::F); v = localRestart(wrns->ns()); check_config(*v, ncfgs, true); checkVolume(*v, v->getClusterMultiplier() * CachePage::capacity(), v->getClusterSize(), pattern1); checkVolume(*v, 2 * v->getClusterMultiplier() * CachePage::capacity(), v->getClusterSize(), pattern2); }
void test_before_tlog(bool failover) { const auto wrns(make_random_namespace()); SharedVolumePtr v = make_volume(*wrns); const auto ncfgs(node_configs()); const std::string pattern("King Leer"); { SCOPED_BLOCK_BACKEND(*v); writeToVolume(*v, v->getClusterMultiplier() * CachePage::capacity(), v->getClusterSize(), pattern); if (failover) { mds_manager_->stop_one(ncfgs[0]); checkVolume(*v, 0, v->getClusterSize(), ""); } else { const std::vector<MDSNodeConfig> ncfgs2{ ncfgs[1], ncfgs[0] }; v->updateMetaDataBackendConfig(MDSMetaDataBackendConfig(ncfgs2, ApplyRelocationsToSlaves::T)); } check_config(*v, ncfgs, true); checkVolume(*v, v->getClusterMultiplier() * CachePage::capacity(), v->getClusterSize(), pattern); } destroyVolume(v, DeleteLocalData::F, RemoveVolumeCompletely::F); v = localRestart(wrns->ns()); check_config(*v, ncfgs, true); checkVolume(*v, v->getClusterMultiplier() * CachePage::capacity(), v->getClusterSize(), pattern); }
// Fill the SCO cache, evict every SCO, then feed the recorded SCO names
// back to the prefetcher and poll until it has re-fetched all of them.
TEST_P(PrefetchThreadTest, test_two)
{
    // backend::Namespace n1;
    auto ns_ptr = make_random_namespace();
    const backend::Namespace& nspace = ns_ptr->ns();
    Volume* vol = newVolume("v1", nspace);

    // Y42 put in VolManagerTestSetup
    const VolumeConfig cfg = vol->get_config();
    const uint64_t sco_size = cfg.getSCOSize();
    const size_t numwrites = 20;

    for (size_t i = 0; i < numwrites; ++i)
    {
        writeToVolume(vol, 0, sco_size, "Superflous");
    }

    syncToBackend(vol);

    // Record the SCO names, then drop all SCOs from the cache.
    SCONameList names;
    VolManager::get()->getSCOCache()->getSCONameList(nspace, names, true);
    ASSERT_EQ(numwrites, names.size());

    for (SCONameList::const_iterator it = names.begin();
         it != names.end();
         ++it)
    {
        VolManager::get()->getSCOCache()->removeSCO(nspace, *it, false);
    }

    {
        SCONameList check;
        VolManager::get()->getSCOCache()->getSCONameList(nspace, check, true);
        ASSERT_TRUE(check.empty());
    }

    //fill the cache with 1 SCO
    SCONameList::const_iterator it = names.begin();
    vol->getPrefetchData().addSCO(*it, 1);
    ++it;

    //make sure cachedXValMin_ is set
    VolManager::get()->getSCOCache()->cleanup();

    //make sure next SCO's with low sap are also prefetched as cache is not full
    float sap = 0.01;
    for ( ; it != names.end(); ++it)
    {
        vol->getPrefetchData().addSCO(*it, sap);
        sap *= 0.9;
    }

    // Poll (up to a minute) until the prefetcher restored all SCOs.
    SCONameList refetched;
    size_t counter = 0;

    while (refetched.size() != numwrites and counter < 60)
    {
        boost::this_thread::sleep(boost::posix_time::seconds(1));
        refetched.clear();
        ++counter;
        VolManager::get()->getSCOCache()->getSCONameList(nspace, refetched, true);
    }

    ASSERT_EQ(numwrites, refetched.size());

    destroyVolume(vol, DeleteLocalData::T, RemoveVolumeCompletely::F);
}
// Create four snapshots with distinct data patterns, then roll back
// through them one by one, verifying the restored content each time.
//
// Fix: pattern2 used to duplicate pattern1 ("Frederik"), so the check
// after restoring snap2 could not distinguish snap2's content from
// snap1's. pattern2 is now distinct, making every restore check
// discriminating. The repeated restore/check/write/snapshot sequence
// is factored into a lambda.
TEST_P(SnapshotRestoreTest, SimpleRestore)
{
    auto ns_ptr = make_random_namespace();
    SharedVolumePtr v = newVolume(VolumeId("volume1"),
                                  ns_ptr->ns());

    const std::string pattern1("Frederik");
    writeToVolume(*v, 0, 4096, pattern1);
    waitForThisBackendWrite(*v);
    v->createSnapshot(SnapshotName("snap1"));

    const std::string pattern2("Immanuel");
    writeToVolume(*v, 0, 4096, pattern2);
    waitForThisBackendWrite(*v);
    v->createSnapshot(SnapshotName("snap2"));

    const std::string pattern3("Arne");
    writeToVolume(*v, 0, 4096, pattern3);
    waitForThisBackendWrite(*v);
    v->createSnapshot(SnapshotName("snap3"));

    const std::string pattern4("Bart");
    writeToVolume(*v, 0, 4096, pattern4);
    waitForThisBackendWrite(*v);
    v->createSnapshot(SnapshotName("snap4"));

    const std::string pattern5("Wouter");
    writeToVolume(*v, 0, 4096, pattern5);
    checkVolume(*v, 0, 4096, pattern5);
    waitForThisBackendWrite(*v);

    // Restore `snap`, verify its pattern, then scribble over it and
    // snapshot again. The name "snapper" can be reused each round -
    // presumably each restore rolls back past the previous "snapper"
    // (the original test relied on the same behavior).
    auto restore_and_check([&](const std::string& snap,
                               const std::string& pattern)
                           {
                               EXPECT_NO_THROW(restoreSnapshot(*v, snap));
                               checkVolume(*v, 0, 4096, pattern);
                               writeToVolume(*v, 0, 4096, "Bollocks");
                               waitForThisBackendWrite(*v);
                               v->createSnapshot(SnapshotName("snapper"));
                               waitForThisBackendWrite(*v);
                           });

    restore_and_check("snap4", pattern4);
    restore_and_check("snap3", pattern3);
    restore_and_check("snap2", pattern2);
    restore_and_check("snap1", pattern1);

    checkCurrentBackendSize(*v);
}