TEST(QoSPipelineTest, NoCorrections) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 10;
  double_t FRACTIONAL_THRESHOLD = 0.5;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE1);

  SerenityConfig conf;
  conf["Detector"] = createAssuranceDetectorCfg(
      WINDOWS_SIZE, CONTENTION_COOLDOWN, FRACTIONAL_THRESHOLD);
  conf.set(ENABLED_VISUALISATION, false);
  conf.set(VALVE_OPENED, true);

  QoSControllerPipeline* pipeline = new CpuQoSPipeline(conf);

  // First iteration.
  Result<QoSCorrections> corrections =
    pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  // Second iteration.
  corrections = pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_SOME(corrections);
  EXPECT_TRUE(corrections.get().empty());

  delete pipeline;
}
TEST(QoSPipelineTest, FiltersNotProperlyFed) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 10;
  double_t FRACTIONAL_THRESHOLD = 0.5;

  Try<mesos::FixtureResourceUsage> usages =
    JsonUsage::ReadJson("tests/fixtures/pipeline/insufficient_metrics.json");
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  SerenityConfig conf;
  conf["Detector"] = createAssuranceDetectorCfg(
      WINDOWS_SIZE, CONTENTION_COOLDOWN, FRACTIONAL_THRESHOLD);
  conf.set(ENABLED_VISUALISATION, false);
  conf.set(VALVE_OPENED, true);

  QoSControllerPipeline* pipeline = new CpuQoSPipeline(conf);

  Result<QoSCorrections> corrections = pipeline->run(usage);
  EXPECT_NONE(corrections);

  delete pipeline;
}
// The NoCorrections scenario driven through the templated
// RollingChangePointDetector pipeline configuration.
TEST(QoSPipelineTest, NoCorrectionsRollingDetector) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 10;
  double_t RELATIVE_THRESHOLD = 0.5;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE1);

  QoSControllerPipeline* pipeline =
    new CpuQoSPipeline<RollingChangePointDetector>(
        QoSPipelineConf(
            ChangePointDetectionState::createForRollingDetector(
                WINDOWS_SIZE,
                CONTENTION_COOLDOWN,
                RELATIVE_THRESHOLD),
            ema::DEFAULT_ALPHA,
            false,  // Visualisation disabled.
            true));  // Valve opened.

  // First iteration.
  Result<QoSCorrections> corrections =
    pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  // Second iteration.
  corrections = pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_SOME(corrections);
  EXPECT_TRUE(corrections.get().empty());

  delete pipeline;
}
// The FiltersNotProperlyFed scenario driven through the templated
// RollingChangePointDetector pipeline configuration.
TEST(QoSPipelineTest, FiltersNotProperlyFedRollingDetector) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 10;
  double_t RELATIVE_THRESHOLD = 0.5;

  Try<mesos::FixtureResourceUsage> usages =
    JsonUsage::ReadJson("tests/fixtures/pipeline/insufficient_metrics.json");
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  QoSControllerPipeline* pipeline =
    new CpuQoSPipeline<RollingChangePointDetector>(
        QoSPipelineConf(
            ChangePointDetectionState::createForRollingDetector(
                WINDOWS_SIZE,
                CONTENTION_COOLDOWN,
                RELATIVE_THRESHOLD),
            ema::DEFAULT_ALPHA,
            false,  // Visualisation disabled.
            true));  // Valve opened.

  Result<QoSCorrections> corrections = pipeline->run(usage);
  EXPECT_NONE(corrections);

  delete pipeline;
}
TEST(QoSIpcPipelineTest, AssuranceDetectorTwoDropCorrectionsWithEma) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 4;
  double_t FRACTIONAL_THRESHOLD = 0.3;
  double_t SEVERITY_LEVEL = 1;
  double_t NEAR_LEVEL = 0.1;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE2);

  SerenityConfig conf;
  conf["Detector"] = createAssuranceDetectorCfg(
      WINDOWS_SIZE,
      CONTENTION_COOLDOWN,
      FRACTIONAL_THRESHOLD,
      SEVERITY_LEVEL,
      NEAR_LEVEL);
  conf.set(ema::ALPHA, 0.9);
  conf.set(ENABLED_VISUALISATION, false);
  conf.set(VALVE_OPENED, true);

  QoSControllerPipeline* pipeline = new CpuQoSPipeline(conf);

  // First iteration.
  Result<QoSCorrections> corrections =
    pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  ResourceUsage usage = mockSlaveUsage.usage().get();

  const int32_t LOAD_ITERATIONS = 17;
  LoadGenerator loadGen(
      [](double_t iter) { return 1; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  for (; loadGen.end(); loadGen++) {
    // Test scenario: after 10 iterations create a drop in IPC
    // for executor num 3.
    double_t ipcFor3Executor = (*loadGen)();
    if (loadGen.iteration >= 11) {
      ipcFor3Executor /= 2.0;
    }

    usage.mutable_executors(PR_4CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_4CPUS),
                    ipcFor3Executor,
                    (*loadGen).timestamp));
    usage.mutable_executors(PR_2CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_2CPUS),
                    (*loadGen)(),
                    (*loadGen).timestamp));

    // Subsequent iterations.
    corrections = pipeline->run(usage);

    // The assurance detector waits for the signal to return to the
    // established state before raising another contention.
    if (loadGen.iteration == 11 || loadGen.iteration == 16) {
      EXPECT_SOME(corrections);
      ASSERT_EQ(slave::QoSCorrection_Type_KILL,
                corrections.get().front().type());
      // Make sure that we do not kill PR tasks!
      EXPECT_NE("serenityPR",
                corrections.get().front().kill().executor_id().value());
      EXPECT_NE("serenityPR2",
                corrections.get().front().kill().executor_id().value());
    } else {
      EXPECT_SOME(corrections);
      EXPECT_TRUE(corrections.get().empty());
    }
  }

  delete pipeline;
}
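// For reference: the EMA filter configured above via ema::ALPHA smooths each
// signal sample-by-sample, roughly s_t = alpha * x_t + (1 - alpha) * s_{t-1},
// so alpha = 1 means no smoothing and small alphas smooth heavily. A minimal
// sketch of one smoothing step is shown below; the name emaStepSketch is
// illustrative only and not part of the serenity API.
static double_t emaStepSketch(
    double_t alpha, double_t prevEma, double_t sample) {
  // Blend the new sample with the previous smoothed value.
  return alpha * sample + (1.0 - alpha) * prevEma;
}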
TEST(QoSIpsPipelineTest, RollingFractionalDetectorOneDropCorrectionsWithEma) {
  QoSPipelineConf conf;
  ChangePointDetectionState cpdState;
  // Detector configuration:
  // How far back we look, in samples.
  cpdState.windowSize = 10;
  // How many iterations the detector waits before raising another
  // contention.
  cpdState.contentionCooldown = 10;
  // How much (relative to the base point) the value must drop to trigger
  // a contention. Most detectors use this.
  cpdState.fractionalThreshold = 0.5;
  // How many instructions one CPU can execute per second. This helps the
  // RollingFractionalDetector estimate the severity of a drop.
  cpdState.severityLevel = 1000000000;  // 1 billion.

  conf.cpdState = cpdState;
  conf.emaAlpha = 0.4;
  conf.visualisation = false;
  // Open the valve so the QoS pipeline is enabled from the start.
  conf.valveOpened = true;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE3);

  QoSControllerPipeline* pipeline =
    new IpsQoSPipeline<RollingFractionalDetector>(conf);

  // First iteration.
  Result<QoSCorrections> corrections =
    pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  // Subsequent iterations use a manually configured load.
  ResourceUsage usage = mockSlaveUsage.usage().get();

  const int32_t LOAD_ITERATIONS = 14;
  LoadGenerator loadGen(
      [](double_t iter) { return 3000000000.0; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  for (; loadGen.end(); loadGen++) {
    // Test scenario: after 10 iterations create a drop in IPS
    // for executor num 3.
    double_t ipsFor3Executor = (*loadGen)();
    if (loadGen.iteration >= 11) {
      ipsFor3Executor /= 3.0;
    }

    usage.mutable_executors(PR_4CPUS)->CopyFrom(
        generateIPS(usage.executors(PR_4CPUS),
                    ipsFor3Executor,
                    (*loadGen).timestamp));
    usage.mutable_executors(PR_2CPUS)->CopyFrom(
        generateIPS(usage.executors(PR_2CPUS),
                    (*loadGen)(),
                    (*loadGen).timestamp));

    // Subsequent iterations.
    corrections = pipeline->run(usage);

    if (loadGen.iteration >= 13) {
      EXPECT_SOME(corrections);
      ASSERT_EQ(slave::QoSCorrection_Type_KILL,
                corrections.get().front().type());
      // Make sure that we do not kill PR tasks!
      EXPECT_NE("serenityPR",
                corrections.get().front().kill().executor_id().value());
      EXPECT_NE("serenityPR2",
                corrections.get().front().kill().executor_id().value());
    } else {
      EXPECT_SOME(corrections);
      EXPECT_TRUE(corrections.get().empty());
    }
  }

  delete pipeline;
}
TEST(QoSIpcPipelineTest, AssuranceFractionalDetectorTwoDropCorrectionsWithEma) {
  QoSPipelineConf conf;
  ChangePointDetectionState cpdState;
  // Detector configuration:
  // How far back we look, in samples.
  cpdState.windowSize = 10;
  // How many iterations the detector waits before raising another
  // contention.
  cpdState.contentionCooldown = 4;
  // How much (relative to the base point) the value must drop to trigger
  // a contention. Most detectors use this.
  cpdState.fractionalThreshold = 0.3;
  // How to convert a difference in values into CPUs. This helps the
  // fractional detector estimate the severity of a drop.
  cpdState.severityLevel = 1;
  cpdState.nearFraction = 0.1;

  conf.cpdState = cpdState;
  conf.emaAlpha = 0.9;
  conf.visualisation = false;
  // Open the valve so the QoS pipeline is enabled from the start.
  conf.valveOpened = true;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE2);

  QoSControllerPipeline* pipeline =
    new CpuQoSPipeline<AssuranceFractionalDetector>(conf);

  // First iteration.
  Result<QoSCorrections> corrections =
    pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  ResourceUsage usage = mockSlaveUsage.usage().get();

  const int32_t LOAD_ITERATIONS = 17;
  LoadGenerator loadGen(
      [](double_t iter) { return 1; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  for (; loadGen.end(); loadGen++) {
    // Test scenario: after 10 iterations create a drop in IPC
    // for executor num 3.
    double_t ipcFor3Executor = (*loadGen)();
    if (loadGen.iteration >= 11) {
      ipcFor3Executor /= 2.0;
    }

    usage.mutable_executors(PR_4CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_4CPUS),
                    ipcFor3Executor,
                    (*loadGen).timestamp));
    usage.mutable_executors(PR_2CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_2CPUS),
                    (*loadGen)(),
                    (*loadGen).timestamp));

    // Subsequent iterations.
    corrections = pipeline->run(usage);

    // The assurance detector waits for the signal to return to the
    // established state before raising another contention.
    if (loadGen.iteration == 11 || loadGen.iteration == 16) {
      EXPECT_SOME(corrections);
      ASSERT_EQ(slave::QoSCorrection_Type_KILL,
                corrections.get().front().type());
      // Make sure that we do not kill PR tasks!
      EXPECT_NE("serenityPR",
                corrections.get().front().kill().executor_id().value());
      EXPECT_NE("serenityPR2",
                corrections.get().front().kill().executor_id().value());
    } else {
      EXPECT_SOME(corrections);
      EXPECT_TRUE(corrections.get().empty());
    }
  }

  delete pipeline;
}
TEST(QoSIpcPipelineTest, RollingDetectorOneDropCorrectionsWithEma) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 10;
  double_t RELATIVE_THRESHOLD = 0.3;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE2);

  QoSControllerPipeline* pipeline =
    new CpuQoSPipeline<RollingChangePointDetector>(
        QoSPipelineConf(
            ChangePointDetectionState::createForRollingDetector(
                WINDOWS_SIZE,
                CONTENTION_COOLDOWN,
                RELATIVE_THRESHOLD),
            0.2,  // Alpha = 1 means no smoothing. 0.2 means high smoothing.
            false,
            true));

  // First iteration.
  Result<QoSCorrections> corrections =
    pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  ResourceUsage usage = mockSlaveUsage.usage().get();

  const int32_t LOAD_ITERATIONS = 16;
  LoadGenerator loadGen(
      [](double_t iter) { return 1; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  for (; loadGen.end(); loadGen++) {
    // Test scenario: after 10 iterations create a drop in
    // IPC for executor num 3.
    double_t ipcFor3Executor = (*loadGen)();
    if (loadGen.iteration >= 11) {
      ipcFor3Executor /= 2.0;
    }

    usage.mutable_executors(PR_4CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_4CPUS),
                    ipcFor3Executor,
                    (*loadGen).timestamp));
    usage.mutable_executors(PR_2CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_2CPUS),
                    (*loadGen)(),
                    (*loadGen).timestamp));

    // Subsequent iterations.
    corrections = pipeline->run(usage);

    if (loadGen.iteration >= 15) {
      EXPECT_SOME(corrections);
      ASSERT_EQ(slave::QoSCorrection_Type_KILL,
                corrections.get().front().type());
      // Make sure that we do not kill PR tasks!
      EXPECT_NE("serenityPR",
                corrections.get().front().kill().executor_id().value());
      EXPECT_NE("serenityPR2",
                corrections.get().front().kill().executor_id().value());
    } else {
      EXPECT_SOME(corrections);
      EXPECT_TRUE(corrections.get().empty());
    }
  }

  delete pipeline;
}
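// The tests above rely on test helpers such as generateIPC() / generateIPS(),
// which rewrite an executor's perf statistics so the pipeline observes a
// chosen IPC (instructions per cycle) value. A minimal sketch of such a
// helper is shown below, assuming IPC is encoded through the standard mesos
// PerfStatistics `instructions` and `cycles` counters; the name
// generateIPCSketch and the fixed CYCLES constant are illustrative
// assumptions, not the actual serenity implementation.
static mesos::ResourceUsage_Executor generateIPCSketch(
    const mesos::ResourceUsage_Executor& base,
    double_t targetIpc,
    double_t timestamp) {
  mesos::ResourceUsage_Executor executor;
  executor.CopyFrom(base);

  // Pin the cycle count and derive the instruction count so that
  // instructions / cycles == targetIpc.
  const uint64_t CYCLES = 1000000000;
  mesos::PerfStatistics* perf =
      executor.mutable_statistics()->mutable_perf();
  perf->set_timestamp(timestamp);
  perf->set_cycles(CYCLES);
  perf->set_instructions(static_cast<uint64_t>(targetIpc * CYCLES));

  return executor;
}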