Example #1
File: test.cpp Project: lelezi/serenity
/**
 * In this test we generate a stable load with a drop and
 * test the RollingChangePointDetector. We expect one
 * contention.
 */
TEST(DropFilterRollingDetectorTest, StableLoadWithDrop) {
  const uint64_t WINDOWS_SIZE = 10;
  const uint64_t CONTENTION_COOLDOWN = 10;
  const double_t RELATIVE_THRESHOLD = 5;
  const uint64_t LOAD_ITERATIONS = 200;
  // End of pipeline.
  MockSink<Contentions> mockSink;
  EXPECT_CALL(mockSink, consume(_))
      .Times(LOAD_ITERATIONS);

  DropFilter<RollingChangePointDetector> dropFilter(
      &mockSink, usage::getIpc,
      ChangePointDetectionState::createForRollingDetector(
          WINDOWS_SIZE, CONTENTION_COOLDOWN, RELATIVE_THRESHOLD));

  // Fake slave ResourceUsage source.
  MockSource<ResourceUsage> usageSource(&dropFilter);

  Try<mesos::FixtureResourceUsage> usages =
      JsonUsage::ReadJson("tests/fixtures/start_json_test.json");
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  const double_t DROP_PROGRESS = 1;
  LoadGenerator loadGen(
      [](double_t iter) { return 10; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  bool dropped = false;
  for (; loadGen.end(); loadGen++) {
    usage.mutable_executors(0)->CopyFrom(
        generateIPC(usage.executors(0),
                    (*loadGen)(),
                    (*loadGen).timestamp));

    // Run pipeline iteration.
    usageSource.produce(usage);

    if (dropped) {
      dropped = false;
      mockSink.expectContentionWithVictim("serenity2");
    } else {
      mockSink.expectContentions(0);
    }

    if (loadGen.iteration >= 100 &&
        loadGen.iteration < 110) {
      // After 6 iterations of a drop of 1 per iteration, the value (4)
      // should be below the threshold.
      if (loadGen.iteration == 105)
        dropped = true;
      loadGen.modifier -= DROP_PROGRESS;
    }
  }
}
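The generateIPC fixture helper used above is not shown on this page. Below is a minimal sketch of what such a helper might do, assuming the IPC signal is encoded through the perf cycles/instructions counters of ResourceStatistics; this is a hypothetical reconstruction for illustration, not the serenity project's actual helper.

// Hypothetical sketch; the real fixture helper may differ.
static ResourceUsage_Executor generateIPC(
    const ResourceUsage_Executor& base,
    double_t ipc,
    double_t timestamp) {
  ResourceUsage_Executor result;
  result.CopyFrom(base);

  // Assumed encoding: fix the cycle count and derive instructions so that
  // instructions / cycles == ipc.
  const uint64_t CYCLES = 1000000000;
  PerfStatistics* perf = result.mutable_statistics()->mutable_perf();
  perf->set_cycles(CYCLES);
  perf->set_instructions(static_cast<uint64_t>(ipc * CYCLES));
  perf->set_timestamp(timestamp);

  return result;
}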
Example #2
TEST(QoSPipelineTest, FiltersNotProperlyFed) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 10;
  double_t FRACTIONAL_THRESHOLD = 0.5;

  Try<mesos::FixtureResourceUsage> usages =
      JsonUsage::ReadJson("tests/fixtures/pipeline/insufficient_metrics.json");
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  SerenityConfig conf;
  conf["Detector"] = createAssuranceDetectorCfg(
    WINDOWS_SIZE, CONTENTION_COOLDOWN, FRACTIONAL_THRESHOLD);

  conf.set(ENABLED_VISUALISATION, false);
  conf.set(VALVE_OPENED, true);

  QoSControllerPipeline* pipeline = new CpuQoSPipeline(conf);

  Result<QoSCorrections> corrections = pipeline->run(usage);
  EXPECT_NONE(corrections);

  delete pipeline;
}
Example #3
Try<Nothing> PrExecutorPassFilter::consume(const ResourceUsage& in) {
  ResourceUsage product;
  product.mutable_total()->CopyFrom(in.total());
  for (ResourceUsage_Executor inExec : in.executors()) {
    if (!inExec.has_executor_info()) {
      LOG(ERROR) << name << "Executor <unknown>"
                 << " does not include executor_info";
      // Filter out these executors.
      continue;
    }
    if (inExec.allocated().size() == 0) {
      LOG(ERROR) << name << "Executor "
                 << inExec.executor_info().executor_id().value()
                 << " does not include allocated resources.";
      // Filter out these executors.
      continue;
    }

    Resources allocated(inExec.allocated());
    // Check if task uses revocable resources.
    if (!allocated.revocable().empty()) {
      continue;
    }

    // Add a PR executor.
    ResourceUsage_Executor* outExec = product.mutable_executors()->Add();
    outExec->CopyFrom(inExec);
  }

  produce(product);

  return Nothing();
}
Example #4
TEST(QoSPipelineTest, FiltersNotProperlyFed) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 10;
  double_t RELATIVE_THRESHOLD = 0.5;

  Try<mesos::FixtureResourceUsage> usages =
      JsonUsage::ReadJson("tests/fixtures/pipeline/insufficient_metrics.json");
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  QoSControllerPipeline* pipeline =
      new CpuQoSPipeline<RollingChangePointDetector>(
          QoSPipelineConf(
              ChangePointDetectionState::createForRollingDetector(
                  WINDOWS_SIZE,
                  CONTENTION_COOLDOWN,
                  RELATIVE_THRESHOLD),
              ema::DEFAULT_ALPHA,
              false,
              true));

  Result<QoSCorrections> corrections = pipeline->run(usage);
  EXPECT_NONE(corrections);

  delete pipeline;
}
Example #5
        virtual int overflow(int c) {
            if (!enabled) return c;

            if (lastUser != this) {
                if (column != 0) {
                    os.put('\n');
                    ++lineno;
                    column = 0;
                    mh.stepCount = 0;
                }
                lastUser = this;
            }

            if (c == EOF) return EOF;

            if (column == 0) {
                if (isspace(c)) return c;
                for (int i = mh.indent; i > 0; --i) {
                    os.put(' ');
                    ++column;
                }
            }

            os.put(c);

            if (c == '\n') {
                ++lineno;
                column = 0;
                mh.stepCount = 0;
            }
            else {
                ++column;
            }

            if (c == '.' && ++mh.stepCount >= 50) {
                ResourceUsage usage;
                ResourceUsage diff = usage - mh.prevUsage;
                os << " " << diff.elapsedTime() << ", " << diff.memory()
                        << "\n";
                //        auto backup = os.flags(std::ios::fixed);
                //        os << " " << std::setprecision(2) << diff.utime << "s, ";
                //        os << std::setprecision(0) << diff.maxrss / 1024.0 << "MB\n";
                //        os.flags(backup);
                ++lineno;
                column = 0;
                mh.prevUsage = usage;
                mh.stepCount = 0;
            }

            return c;
        }
Example #6
/**
 * In this test we generate load with noise and
 * test the CpuUsageEMAfilter output in every iteration.
 */
TEST(EMATest, CpuUsageEMATestNoisyConstSample) {
  // End of pipeline.
  MockSink<ResourceUsage> mockSink;

  // Third component in pipeline.
  EMAFilter cpuUsageEMAFilter(
    &mockSink, usage::getCpuUsage, usage::setEmaCpuUsage, 0.2);

  // Second component in pipeline.
  // We need that for cumulative metrics.
  CumulativeFilter cumulativeFilter(
    &cpuUsageEMAFilter);

  // First component in pipeline.
  MockSource<ResourceUsage> source(&cumulativeFilter);

  Try<mesos::FixtureResourceUsage> usages =
      JsonUsage::ReadJson("tests/fixtures/start_json_test.json");
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  const double_t CPU_USAGE_VALUE = 10;
  const double_t THRESHOLD = 1.2;
  const double_t MAX_NOISE = 5;
  const int32_t ITERATIONS = 100;

  SignalScenario signalGen =
    SignalScenario(ITERATIONS)
      .use(math::const10Function)
      .use(new SymetricNoiseGenerator(MAX_NOISE));

  ITERATE_SIGNAL(signalGen) {
    usage.mutable_executors(0)->CopyFrom(
      generateCpuUsage(usage.executors(0),
                       (uint64_t)(*signalGen).cumulative(),
                       signalGen->timestamp));

    // Run pipeline iteration
    source.produce(usage);

    if (signalGen.iteration > 0)
      mockSink.expectCpuUsage(0, CPU_USAGE_VALUE, THRESHOLD);
  }

  EXPECT_EQ(99, mockSink.numberOfMessagesConsumed);
}
Example #7
File: test.cpp Project: lelezi/serenity
/**
 * In this test we generate stable load and
 * test the RollingChangePointDetector. We don't expect
 * any contention.
 */
TEST(DropFilterRollingDetectorTest, StableLoad) {
  const uint64_t WINDOWS_SIZE = 10;
  const uint64_t CONTENTION_COOLDOWN = 10;
  const double_t RELATIVE_THRESHOLD = 0.5;
  const uint64_t LOAD_ITERATIONS = 100;
  // End of pipeline.
  MockSink<Contentions> mockSink;
  EXPECT_CALL(mockSink, consume(_))
    .Times(LOAD_ITERATIONS);

  DropFilter<RollingChangePointDetector> dropFilter(
      &mockSink, usage::getIpc,
      ChangePointDetectionState::createForRollingDetector(
          WINDOWS_SIZE, CONTENTION_COOLDOWN, RELATIVE_THRESHOLD));

  // Fake slave ResourceUsage source.
  MockSource<ResourceUsage> usageSource(&dropFilter);

  Try<mesos::FixtureResourceUsage> usages =
      JsonUsage::ReadJson("tests/fixtures/start_json_test.json");
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  LoadGenerator loadGen(
      [](double_t iter) { return 10; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  for (; loadGen.end(); loadGen++) {
    usage.mutable_executors(0)->CopyFrom(
        generateIPC(usage.executors(0),
                    (*loadGen)(),
                    (*loadGen).timestamp));

    // Run pipeline iteration.
    usageSource.produce(usage);

    if (loadGen.iteration > 0)
      mockSink.expectContentions(0);
  }
}
Example #8
TEST(EstimatorPipelineTest, FiltersNotProperlyFed) {
  Try<mesos::FixtureResourceUsage> usages =
      JsonUsage::ReadJson("tests/fixtures/pipeline/insufficient_metrics.json");
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
    ASSERT_FALSE(usages.isError());  // test failure.
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  ResourceEstimatorPipeline* pipeline = new CpuEstimatorPipeline();

  Result<Resources> slack = pipeline->run(usage);
  EXPECT_NONE(slack);

  delete pipeline;
}
Example #9
static void addExecutor(
    ResourceUsage& usage,
    const ExecutorInfo& executorInfo,
    const Resources& allocated,
    const ResourceStatistics& statistics) {
  ResourceUsage::Executor* executor = usage.add_executors();
  executor->mutable_executor_info()->CopyFrom(executorInfo);
  executor->mutable_allocated()->CopyFrom(allocated);
  executor->mutable_statistics()->CopyFrom(statistics);
}
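A brief usage sketch for the helper above, using only protobuf setters that already appear in the other examples on this page; the concrete values are illustrative.

ResourceUsage usage;

ExecutorInfo executorInfo;
executorInfo.mutable_executor_id()->set_value("executor");
executorInfo.mutable_framework_id()->set_value("framework");
executorInfo.set_name("name");
executorInfo.set_source("source");

Resources allocated = Resources::parse("cpus:1;mem:2").get();

ResourceStatistics statistics;
statistics.set_cpus_limit(1.0);
statistics.set_mem_rss_bytes(1024);
statistics.set_timestamp(0);

// Appends a single executor entry to the ResourceUsage message.
addExecutor(usage, executorInfo, allocated, statistics);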
Example #10
/**
 * Check if the getRevocableExecutors function properly filters out PR executors.
 *
 * TODO(skonefal): Does it really work?
 */
TEST(HelperFunctionsTest, getRevocableExecutors) {
  Try<mesos::FixtureResourceUsage> usages = JsonUsage::ReadJson(QOS_FIXTURE);
  if (usages.isError()) {
    LOG(ERROR) << "JsonSource failed: " << usages.error() << std::endl;
  }

  ResourceUsage usage;
  usage.CopyFrom(usages.get().resource_usage(0));

  std::list<ResourceUsage_Executor> ret =
    ResourceUsageHelper::getRevocableExecutors(usage);

  ASSERT_EQ(3u, ret.size());

  // Expected only BE executors.
  for (auto executor : ret) {
    Resources allocated(executor.allocated());
    EXPECT_FALSE(allocated.revocable().empty());
  }
}
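The helper under test is not shown on this page. A minimal sketch of what a getRevocableExecutors-style helper could look like, mirroring the revocable-resources check from PrExecutorPassFilter in example #3; this is an illustrative reconstruction, not the project's actual code.

// Hypothetical sketch of the helper under test.
static std::list<ResourceUsage_Executor> getRevocableExecutors(
    const ResourceUsage& usage) {
  std::list<ResourceUsage_Executor> result;
  for (const ResourceUsage_Executor& executor : usage.executors()) {
    if (!executor.has_executor_info()) {
      continue;
    }

    // Keep only executors that were allocated revocable (BE) resources.
    Resources allocated(executor.allocated());
    if (!allocated.revocable().empty()) {
      result.push_back(executor);
    }
  }

  return result;
}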
Example #11
 MessageHandler_& begin(std::string const& s) {
     if (!name.empty()) end("aborted");
     name = s.empty() ? "level-" + std::to_string(indentLevel) : s;
     indent = indentLevel * INDENT_SIZE;
     *this << capitalize(name);
     indent = ++indentLevel * INDENT_SIZE;
     beginLine = lineno;
     initialUsage.update();
     prevUsage = initialUsage;
     stepCount = 0;
     return *this;
 }
Example #12
    MessageHandler_& step(char dot = '-') {
        if (!enabled) return *this;

        if (!stepping && dotTime + 4 < std::time(0)) {
            *this << '\n';
            stepping = true;
        }

        if (stepping) {
            if (stepCount % 50 != column - indent) {
                *this << '\n';
                for (int i = stepCount % 50; i > 0; --i) {
                    *this << '-';
                }
            }
            *this << dot;
            ++stepCount;
            if (column - indent >= 50) {
                ResourceUsage usage;
                ResourceUsage diff = usage - prevUsage;
                *this << std::setw(3) << std::right
                        << (stepCount * 100 / totalSteps);
                *this << "% (" << diff.elapsedTime() << ", " << diff.memory()
                        << ")\n";
                prevUsage = usage;
            }
        }
        else {
            ++stepCount;
            while (dotCount * totalSteps < stepCount * 10) {
                if (dotCount == 0) *this << ' ';
                *this << '.';
                ++dotCount;
                dotTime = std::time(0);
            }
        }

        return *this;
    }
Example #13
// This test verifies the correct handling of the statistics
// endpoint when statistics is missing in ResourceUsage.
TEST(MonitorTest, MissingStatistics)
{
  ResourceMonitor monitor([]() -> Future<ResourceUsage> {
    FrameworkID frameworkId;
    frameworkId.set_value("framework");

    ExecutorID executorId;
    executorId.set_value("executor");

    ExecutorInfo executorInfo;
    executorInfo.mutable_executor_id()->CopyFrom(executorId);
    executorInfo.mutable_framework_id()->CopyFrom(frameworkId);
    executorInfo.set_name("name");
    executorInfo.set_source("source");

    Resources resources = Resources::parse("cpus:1;mem:2").get();

    ResourceUsage usage;
    ResourceUsage::Executor* executor = usage.add_executors();
    executor->mutable_executor_info()->CopyFrom(executorInfo);
    executor->mutable_allocated()->CopyFrom(resources);

    return usage;
  });

  UPID upid("monitor", process::address());

  Future<http::Response> response = http::get(upid, "statistics");
  AWAIT_READY(response);

  AWAIT_EXPECT_RESPONSE_STATUS_EQ(http::OK().status, response);
  AWAIT_EXPECT_RESPONSE_HEADER_EQ(
      "application/json",
      "Content-Type",
      response);
  AWAIT_EXPECT_RESPONSE_BODY_EQ("[]", response);
}
Example #14
Try<Nothing> ExecutorAgeFilter::consume(const ResourceUsage& in) {
  double_t now = time(NULL);

  for (ResourceUsage_Executor executor : in.executors()) {
    auto startedTime = this->started->find(executor.executor_info());
    if (startedTime == this->started->end()) {
      // If the executor has not been seen before, record its start time.
      this->started->insert(pair<ExecutorInfo, double_t>(
          executor.executor_info(), now));
      this->age(executor.executor_info());  // For test!
    }
  }
  // TODO(nnielsen): Clean up finished frameworks and executors.

  this->produce(in);
  return Nothing();
}
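The age() accessor invoked above ("For test!") is not included in this example. A sketch of what it could look like, assuming started maps ExecutorInfo to the start timestamp recorded in consume(); the signature and body are assumptions, not the project's actual code.

// Hypothetical sketch of the accessor used in the test above.
Try<double_t> ExecutorAgeFilter::age(const ExecutorInfo& executorInfo) {
  auto startedTime = this->started->find(executorInfo);
  if (startedTime == this->started->end()) {
    return Error("No started time recorded for executor.");
  }

  // Age in seconds since the executor was first observed.
  return time(NULL) - startedTime->second;
}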
Example #15
TEST(QoSIpcPipelineTest, AssuranceDetectorTwoDropCorrectionsWithEma) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 4;
  double_t FRACTIONAL_THRESHOLD = 0.3;
  double_t SEVERITY_LEVEL = 1;
  double_t NEAR_LEVEL = 0.1;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE2);

  SerenityConfig conf;
  conf["Detector"] = createAssuranceDetectorCfg(
    WINDOWS_SIZE,
    CONTENTION_COOLDOWN,
    FRACTIONAL_THRESHOLD,
    SEVERITY_LEVEL,
    NEAR_LEVEL);
  conf.set(ema::ALPHA, 0.9);
  conf.set(ENABLED_VISUALISATION, false);
  conf.set(VALVE_OPENED, true);

  QoSControllerPipeline* pipeline = new CpuQoSPipeline(conf);

  // First iteration.
  Result<QoSCorrections> corrections =
      pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  ResourceUsage usage = mockSlaveUsage.usage().get();
  const int32_t LOAD_ITERATIONS = 17;
  LoadGenerator loadGen(
      [](double_t iter) { return 1; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  for (; loadGen.end(); loadGen++) {
    // Test scenario: After 10 iterations create drop in IPC
    // for executor num 3.
    double_t ipcFor3Executor = (*loadGen)();
    if (loadGen.iteration >= 11) {
      ipcFor3Executor /= 2.0;
    }

    usage.mutable_executors(PR_4CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_4CPUS),
                    ipcFor3Executor,
                    (*loadGen).timestamp));

    usage.mutable_executors(PR_2CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_2CPUS),
                    (*loadGen)(),
                    (*loadGen).timestamp));
    // Third iteration (repeated).
    corrections = pipeline->run(usage);

    // The Assurance Detector waits for the signal to return to the
    // established state.
    if (loadGen.iteration == 11 || loadGen.iteration == 16) {
      EXPECT_SOME(corrections);
      ASSERT_EQ(slave::QoSCorrection_Type_KILL,
                corrections.get().front().type());
      // Make sure that we do not kill PR tasks!
      EXPECT_NE("serenityPR",
                corrections.get().front().kill().executor_id().value());
      EXPECT_NE("serenityPR2",
                corrections.get().front().kill().executor_id().value());
    } else {
      EXPECT_SOME(corrections);
      EXPECT_TRUE(corrections.get().empty());
    }
  }

  delete pipeline;
}
Example #16
TEST(MonitorTest, Statistics)
{
  FrameworkID frameworkId;
  frameworkId.set_value("framework");

  ExecutorID executorId;
  executorId.set_value("executor");

  ExecutorInfo executorInfo;
  executorInfo.mutable_executor_id()->CopyFrom(executorId);
  executorInfo.mutable_framework_id()->CopyFrom(frameworkId);
  executorInfo.set_name("name");
  executorInfo.set_source("source");

  ResourceStatistics statistics;
  statistics.set_cpus_nr_periods(100);
  statistics.set_cpus_nr_throttled(2);
  statistics.set_cpus_user_time_secs(4);
  statistics.set_cpus_system_time_secs(1);
  statistics.set_cpus_throttled_time_secs(0.5);
  statistics.set_cpus_limit(1.0);
  statistics.set_mem_file_bytes(0);
  statistics.set_mem_anon_bytes(0);
  statistics.set_mem_mapped_file_bytes(0);
  statistics.set_mem_rss_bytes(1024);
  statistics.set_mem_limit_bytes(2048);
  statistics.set_timestamp(0);

  ResourceMonitor monitor([=]() -> Future<ResourceUsage> {
    Resources resources = Resources::parse("cpus:1;mem:2").get();

    ResourceUsage usage;
    ResourceUsage::Executor* executor = usage.add_executors();
    executor->mutable_executor_info()->CopyFrom(executorInfo);
    executor->mutable_allocated()->CopyFrom(resources);
    executor->mutable_statistics()->CopyFrom(statistics);

    return usage;
  });

  UPID upid("monitor", process::address());

  Future<http::Response> response = http::get(upid, "statistics");
  AWAIT_READY(response);

  AWAIT_EXPECT_RESPONSE_STATUS_EQ(http::OK().status, response);
  AWAIT_EXPECT_RESPONSE_HEADER_EQ(
      "application/json",
      "Content-Type",
      response);

  JSON::Array expected;
  JSON::Object usage;
  usage.values["executor_id"] = "executor";
  usage.values["executor_name"] = "name";
  usage.values["framework_id"] = "framework";
  usage.values["source"] = "source";
  usage.values["statistics"] = JSON::Protobuf(statistics);
  expected.values.push_back(usage);

  Try<JSON::Array> result = JSON::parse<JSON::Array>(response.get().body);
  ASSERT_SOME(result);
  ASSERT_EQ(expected, result.get());
}
Example #17
File: ema.cpp Project: Bplotka/serenity
Try<Nothing> EMAFilter::consume(const ResourceUsage& in) {
  ResourceUsage product;

  for (ResourceUsage_Executor inExec : in.executors()) {
    if (!inExec.has_executor_info()) {
      SERENITY_LOG(ERROR) << "Executor <unknown>"
                 << " does not include executor_info";
      // Filter out these executors.
      continue;
    }
    if (!inExec.has_statistics()) {
      SERENITY_LOG(ERROR) << "Executor "
                 << inExec.executor_info().executor_id().value()
                 << " does not include statistics.";
      // Filter out these executors.
      continue;
    }

    // Check if EMA for given executor exists.
    auto emaSample = this->emaSamples->find(inExec.executor_info());
    if (emaSample == this->emaSamples->end()) {
      SERENITY_LOG(ERROR) << "First EMA iteration for: "
                          << WID(inExec.executor_info()).toString();
      // If not - insert new one.
      ExponentialMovingAverage ema(EMA_REGULAR_SERIES, this->alpha);
      emaSamples->insert(std::pair<ExecutorInfo, ExponentialMovingAverage>(
          inExec.executor_info(), ema));

    } else {
      // Get proper value.
      Try<double_t> value = this->valueGetFunction(inExec);
      if (value.isError()) {
        SERENITY_LOG(ERROR) << value.error();
        continue;
      }

      // Perform EMA filtering.
      double_t emaValue =
        (emaSample->second).calculateEMA(
            value.get(),
            inExec.statistics().perf().timestamp());

      // Store EMA value.
      ResourceUsage_Executor* outExec = new ResourceUsage_Executor(inExec);
      Try<Nothing> result = this->valueSetFunction(emaValue, outExec);
      if (result.isError()) {
        SERENITY_LOG(ERROR) << result.error();
        delete outExec;
        continue;
      }

      // Add an executor only when there was no error.
      product.mutable_executors()->AddAllocated(outExec);
    }
  }

  if (0 != product.executors_size()) {
    SERENITY_LOG(INFO) << "Continuing with "
                       << product.executors_size() << " executor(s).";
    // Continue pipeline.
    // Copy total agent's capacity.
    product.mutable_total()->CopyFrom(in.total());
    produce(product);
  }

  return Nothing();
}
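calculateEMA itself is not shown in these examples. It presumably implements the standard exponential-moving-average recurrence, with alpha as configured in the filter (e.g. 0.2 for heavy smoothing, 1.0 for none). A self-contained sketch of that recurrence follows; it ignores the sample timestamp that the real class also receives.

// Sketch of the standard EMA recurrence, not the project's actual class.
class SimpleEMA {
 public:
  explicit SimpleEMA(double_t alpha) : alpha(alpha) {}

  double_t calculate(double_t sample) {
    if (!initialized) {
      // Seed the average with the first sample.
      prev = sample;
      initialized = true;
    } else {
      // EMA(t) = alpha * sample + (1 - alpha) * EMA(t-1).
      prev = alpha * sample + (1.0 - alpha) * prev;
    }
    return prev;
  }

 private:
  double_t alpha;
  double_t prev = 0.0;
  bool initialized = false;
};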
Example #18
TEST(QoSIpsPipelineTest, RollingFractionalDetectorOneDropCorrectionsWithEma) {
  QoSPipelineConf conf;
  ChangePointDetectionState cpdState;
  // Detector configuration:
  // How far back we look in samples.
  cpdState.windowSize = 10;
  // How many iterations the detector waits before creating another
  // contention.
  cpdState.contentionCooldown = 10;
  // Defines how much (relative to the base point) the value must drop to
  // trigger a contention. Most detectors use this.
  cpdState.fractionalThreshold = 0.5;
  // Defines how many instructions one CPU can execute per second.
  // This option helps the RollingFractionalDetector estimate the severity
  // of a drop.
  cpdState.severityLevel = 1000000000;  // 1 billion.

  conf.cpdState = cpdState;
  conf.emaAlpha = 0.4;
  conf.visualisation = false;
  // Start with the QoS pipeline enabled (valve opened).
  conf.valveOpened = true;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE3);

  QoSControllerPipeline* pipeline =
    new IpsQoSPipeline<RollingFractionalDetector>(conf);

  // First iteration.
  Result<QoSCorrections> corrections =
      pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  // Second iteration is used for manually configured load.
  ResourceUsage usage = mockSlaveUsage.usage().get();
  const int32_t LOAD_ITERATIONS = 14;
  LoadGenerator loadGen(
      [](double_t iter) { return 3000000000; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  for (; loadGen.end(); loadGen++) {
    // Test scenario: After 10 iterations create drop in IPS for executor num 3.
    double ipsFor3Executor = (*loadGen)();
    if (loadGen.iteration >= 11) {
      ipsFor3Executor /= 3.0;
    }

    usage.mutable_executors(PR_4CPUS)->CopyFrom(
        generateIPS(usage.executors(PR_4CPUS),
                    ipsFor3Executor,
                    (*loadGen).timestamp));

    usage.mutable_executors(PR_2CPUS)->CopyFrom(
        generateIPS(usage.executors(PR_2CPUS),
                    (*loadGen)(),
                    (*loadGen).timestamp));
    // Third iteration (repeated).
    corrections = pipeline->run(usage);
    if (loadGen.iteration >= 13) {
      EXPECT_SOME(corrections);
      ASSERT_EQ(slave::QoSCorrection_Type_KILL,
                corrections.get().front().type());
      // Make sure that we do not kill PR tasks!
      EXPECT_NE("serenityPR",
                corrections.get().front().kill().executor_id().value());
      EXPECT_NE("serenityPR2",
                corrections.get().front().kill().executor_id().value());
    } else {
      EXPECT_SOME(corrections);
      EXPECT_TRUE(corrections.get().empty());
    }
  }

  delete pipeline;
}
Example #19
TEST(QoSIpcPipelineTest,
     AssuranceFractionalDetectorTwoDropCorrectionsWithEma) {
  QoSPipelineConf conf;
  ChangePointDetectionState cpdState;
  // Detector configuration:
  // How far back we look in samples.
  cpdState.windowSize = 10;
  // How many iterations the detector waits before creating another
  // contention.
  cpdState.contentionCooldown = 4;
  // Defines how much (relative to the base point) the value must drop to
  // trigger a contention. Most detectors use this.
  cpdState.fractionalThreshold = 0.3;
  // Defines how to convert a difference in values into CPUs.
  // This option helps the RollingFractionalDetector estimate the severity
  // of a drop.
  cpdState.severityLevel = 1;
  cpdState.nearFraction = 0.1;

  conf.cpdState = cpdState;
  conf.emaAlpha = 0.9;
  conf.visualisation = false;
  // Start with the QoS pipeline enabled (valve opened).
  conf.valveOpened = true;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE2);

  QoSControllerPipeline* pipeline =
      new CpuQoSPipeline<AssuranceFractionalDetector>(conf);

  // First iteration.
  Result<QoSCorrections> corrections =
      pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  ResourceUsage usage = mockSlaveUsage.usage().get();
  const int32_t LOAD_ITERATIONS = 17;
  LoadGenerator loadGen(
      [](double_t iter) { return 1; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  for (; loadGen.end(); loadGen++) {
    // Test scenario: After 10 iterations create drop in IPC
    // for executor num 3.
    double_t ipcFor3Executor = (*loadGen)();
    if (loadGen.iteration >= 11) {
      ipcFor3Executor /= 2.0;
    }

    usage.mutable_executors(PR_4CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_4CPUS),
                    ipcFor3Executor,
                    (*loadGen).timestamp));

    usage.mutable_executors(PR_2CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_2CPUS),
                    (*loadGen)(),
                    (*loadGen).timestamp));
    // Third iteration (repeated).
    corrections = pipeline->run(usage);

    // The Assurance Detector waits for the signal to return to the
    // established state.
    if (loadGen.iteration == 11 || loadGen.iteration == 16) {
      EXPECT_SOME(corrections);
      ASSERT_EQ(slave::QoSCorrection_Type_KILL,
                corrections.get().front().type());
      // Make sure that we do not kill PR tasks!
      EXPECT_NE("serenityPR",
                corrections.get().front().kill().executor_id().value());
      EXPECT_NE("serenityPR2",
                corrections.get().front().kill().executor_id().value());
    } else {
      EXPECT_SOME(corrections);
      EXPECT_TRUE(corrections.get().empty());
    }
  }

  delete pipeline;
}
Example #20
TEST(QoSIpcPipelineTest, RollingDetectorOneDropCorrectionsWithEma) {
  uint64_t WINDOWS_SIZE = 10;
  uint64_t CONTENTION_COOLDOWN = 10;
  double_t RELATIVE_THRESHOLD = 0.3;

  MockSlaveUsage mockSlaveUsage(QOS_PIPELINE_FIXTURE2);

  QoSControllerPipeline* pipeline =
      new CpuQoSPipeline<RollingChangePointDetector>(
          QoSPipelineConf(
              ChangePointDetectionState::createForRollingDetector(
                  WINDOWS_SIZE,
                  CONTENTION_COOLDOWN,
                  RELATIVE_THRESHOLD),
              0.2,  // Alpha = 1 means no smoothing. 0.2 means high smoothing.
              false,
              true));

  // First iteration.
  Result<QoSCorrections> corrections =
      pipeline->run(mockSlaveUsage.usage().get());
  EXPECT_NONE(corrections);

  ResourceUsage usage = mockSlaveUsage.usage().get();
  const int32_t LOAD_ITERATIONS = 16;
  LoadGenerator loadGen(
      [](double_t iter) { return 1; },
      new ZeroNoise(),
      LOAD_ITERATIONS);

  for (; loadGen.end(); loadGen++) {
    // Test scenario: After 10 iterations create drop in
    // IPC for executor num 3.
    double_t ipcFor3Executor = (*loadGen)();
    if (loadGen.iteration >= 11) {
      ipcFor3Executor /= 2.0;
    }

    usage.mutable_executors(PR_4CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_4CPUS),
                    ipcFor3Executor,
                    (*loadGen).timestamp));

    usage.mutable_executors(PR_2CPUS)->CopyFrom(
        generateIPC(usage.executors(PR_2CPUS),
                    (*loadGen)(),
                    (*loadGen).timestamp));
    // Third iteration (repeated).
    corrections = pipeline->run(usage);
    if (loadGen.iteration >= 15) {
      EXPECT_SOME(corrections);
      ASSERT_EQ(slave::QoSCorrection_Type_KILL,
                corrections.get().front().type());
      // Make sure that we do not kill PR tasks!
      EXPECT_NE("serenityPR",
                corrections.get().front().kill().executor_id().value());
      EXPECT_NE("serenityPR2",
                corrections.get().front().kill().executor_id().value());
    } else {
      EXPECT_SOME(corrections);
      EXPECT_TRUE(corrections.get().empty());
    }
  }

  delete pipeline;
}
Example #21
Try<Nothing> TooHighCpuUsageDetector::consume(const ResourceUsage& in) {
  Contentions product;

  if (in.total_size() == 0) {
    return Error(std::string(NAME) + " No total in ResourceUsage");
  }

  Resources totalAgentResources(in.total());
  Option<double_t> totalAgentCpus = totalAgentResources.cpus();

  if (totalAgentCpus.isNone()) {
    return Error(std::string(NAME) + " No total cpus in ResourceUsage");
  }

  double_t agentSumValue = 0;
  uint64_t beExecutors = 0;

  for (const ResourceUsage_Executor& inExec : in.executors()) {
    if (!inExec.has_executor_info()) {
      SERENITY_LOG(ERROR) << "Executor <unknown>"
      << " does not include executor_info";
      // Filter out these executors.
      continue;
    }
    if (!inExec.has_statistics()) {
      SERENITY_LOG(ERROR) << "Executor "
      << inExec.executor_info().executor_id().value()
      << " does not include statistics.";
      // Filter out these executors.
      continue;
    }

    Try<double_t> value = this->cpuUsageGetFunction(inExec);
    if (value.isError()) {
      SERENITY_LOG(ERROR) << value.error();
      continue;
    }

    agentSumValue += value.get();

    if (!Resources(inExec.allocated()).revocable().empty()) {
      beExecutors++;
    }
  }

  // Debug only
  SERENITY_LOG(INFO) << "Sum = " << agentSumValue << " vs total = "
                     << totalAgentCpus.get();
  double_t lvl = agentSumValue / totalAgentCpus.get();

  if (lvl > this->cfgUtilizationThreshold) {
    if (beExecutors == 0) {
      SERENITY_LOG(INFO) << "No BE tasks - only high host utilization";
    } else {
      SERENITY_LOG(INFO) << "Creating CPU contention, because of the value"
                         << " above the threshold. " << agentSumValue << "/"
                         << totalAgentCpus.get();
      product.push_back(createContention(totalAgentCpus.get() - agentSumValue,
                                         Contention_Type_CPU));
    }
  }

  // Continue pipeline.
  this->produce(product);

  return Nothing();
}