TYPED_TEST(MemIsolatorTest, MemUsage)
{
  Flags flags;

  Try<Isolator*> isolator = TypeParam::create(flags);
  CHECK_SOME(isolator);

  // A PosixLauncher is sufficient even when testing a cgroups isolator.
  Try<Launcher*> launcher = PosixLauncher::create(flags);

  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("mem:1024").get());

  ContainerID containerId;
  containerId.set_value("memory_usage");

  AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));

  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));

  lambda::function<int()> inChild = lambda::bind(
      &consumeMemory, Megabytes(256), Seconds(10), pipes);

  Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
  ASSERT_SOME(pid);

  // Set up the reaper to wait on the forked child.
  Future<Option<int> > status = process::reap(pid.get());

  // Continue in the parent.
  ::close(pipes[0]);

  // Isolate the forked child.
  AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

  // Now signal the child to continue.
  int buf = 0;
  ASSERT_LT(0, ::write(pipes[1], &buf, sizeof(buf)));
  ::close(pipes[1]);

  // Wait up to 5 seconds for the child process to consume 256 MB of memory.
  ResourceStatistics statistics;
  Bytes threshold = Megabytes(256);
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    if (statistics.mem_rss_bytes() >= threshold.bytes()) {
      break;
    }

    os::sleep(Seconds(1));
    waited += Seconds(1);
  } while (waited < Seconds(5));

  EXPECT_LE(threshold.bytes(), statistics.mem_rss_bytes());

  // Ensure all processes are killed.
  AWAIT_READY(launcher.get()->destroy(containerId));

  // Make sure the child was reaped.
  AWAIT_READY(status);

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
  delete launcher.get();
}

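
// The memory tests above and below bind a 'consumeMemory' helper that
// is not shown in this excerpt. A minimal sketch of what it plausibly
// does, assuming the child blocks on the pipe until the parent has
// isolated it, then locks and touches the requested bytes so they are
// resident (and hence show up in mem_rss_bytes) for 'duration'. It
// assumes the usual POSIX headers (<stdlib.h>, <string.h>,
// <sys/mman.h>, <unistd.h>) and stout utilities used elsewhere in
// these tests.
static int consumeMemory(
    const Bytes& _size,
    const Duration& duration,
    int pipes[2])
{
  // In the child: close the write end and block until the parent
  // writes, signaling that isolation is in place.
  ::close(pipes[1]);

  char dummy;
  while (::read(pipes[0], &dummy, sizeof(dummy)) == -1 && errno == EINTR);

  ::close(pipes[0]);

  size_t size = static_cast<size_t>(_size.bytes());
  void* buffer = NULL;

  if (posix_memalign(&buffer, getpagesize(), size) != 0) {
    perror("Failed to allocate page-aligned memory");
    abort();
  }

  // Lock and touch every page so the memory is actually paged in and
  // accounted for as resident set size.
  if (mlock(buffer, size) != 0) {
    perror("Failed to lock memory");
    abort();
  }

  memset(buffer, 1, size);

  os::sleep(duration);

  return 0;
}
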
// This test has been temporarily disabled due to MESOS-1257.
TEST_F(ExternalContainerizerTest, DISABLED_Launch)
{
  Try<PID<Master> > master = this->StartMaster();
  ASSERT_SOME(master);

  Flags testFlags;

  slave::Flags flags = this->CreateSlaveFlags();
  flags.isolation = "external";
  flags.containerizer_path =
    testFlags.build_dir + "/src/examples/python/test-containerizer";

  MockExternalContainerizer containerizer(flags);

  Try<PID<Slave> > slave = this->StartSlave(&containerizer, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  TaskInfo task;
  task.set_name("isolator_test");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id());
  task.mutable_resources()->CopyFrom(offers.get()[0].resources());

  Resources resources(offers.get()[0].resources());
  Option<Bytes> mem = resources.mem();
  ASSERT_SOME(mem);
  Option<double> cpus = resources.cpus();
  ASSERT_SOME(cpus);

  const std::string& file = path::join(flags.work_dir, "ready");

  // This task induces user/system load by running 'top' in a child
  // process for ten seconds.
  task.mutable_command()->set_value(
#ifdef __APPLE__
      // Use logging mode with 30,000 samples with no interval.
      "top -l 30000 -s 0 > /dev/null 2>&1 & "
#else
      // Batch mode, with 30,000 samples with no interval.
      "top -b -d 0 -n 30000 > /dev/null 2>&1 & "
#endif
      "touch " + file + "; " // Signals that the top command is running.
      "sleep 60");

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status))
    .WillRepeatedly(Return()); // Ignore rest for now.

  Future<ContainerID> containerId;
  EXPECT_CALL(containerizer, launch(_, _, _, _, _, _, _, _))
    .WillOnce(DoAll(FutureArg<0>(&containerId),
                    Invoke(&containerizer,
                           &MockExternalContainerizer::_launch)));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(containerId);
  AWAIT_READY(status);
  EXPECT_EQ(TASK_RUNNING, status.get().state());

  // Wait for the task to begin inducing cpu time.
  while (!os::exists(file));

  ExecutorID executorId;
  executorId.set_value(task.task_id().value());

  // We'll wait up to 10 seconds for the child process to induce
  // 1/8 of a second of user and system cpu time in total.
  // TODO(bmahler): Also induce rss memory consumption, by re-using
  // the balloon framework.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = containerizer.usage(containerId.get());
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    // NOTE: We are currently getting dummy data from the
    // test-containerizer python script matching these expectations.
    // TODO(tillt): Consider working with real data.
    if (statistics.cpus_user_time_secs() >= 0.120 &&
        statistics.cpus_system_time_secs() >= 0.05 &&
        statistics.mem_rss_bytes() >= 1024u) {
      break;
    }

    os::sleep(Milliseconds(100));
    waited += Milliseconds(100);
  } while (waited < Seconds(10));

  EXPECT_GE(statistics.cpus_user_time_secs(), 0.120);
  EXPECT_GE(statistics.cpus_system_time_secs(), 0.05);
  EXPECT_EQ(statistics.cpus_limit(), cpus.get());
  EXPECT_GE(statistics.mem_rss_bytes(), 1024u);
  EXPECT_EQ(statistics.mem_limit_bytes(), mem.get().bytes());

  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.killTask(task.task_id());

  AWAIT_READY(status);
  EXPECT_EQ(TASK_KILLED, status.get().state());

  driver.stop();
  driver.join();

  this->Shutdown();
}

TYPED_TEST(CpuIsolatorTest, SystemCpuUsage)
{
  Flags flags;

  Try<Isolator*> isolator = TypeParam::create(flags);
  CHECK_SOME(isolator);

  // A PosixLauncher is sufficient even when testing a cgroups isolator.
  Try<Launcher*> launcher = PosixLauncher::create(flags);

  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("cpus:1.0").get());

  ContainerID containerId;
  containerId.set_value("system_cpu_usage");

  AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));

  Try<string> dir = os::mkdtemp();
  ASSERT_SOME(dir);

  const string& file = path::join(dir.get(), "mesos_isolator_test_ready");

  // Generating random numbers is done by the kernel, so this will max
  // out a single core and run almost exclusively in the kernel, i.e.,
  // system time.
  string command = "cat /dev/urandom > /dev/null & "
    "touch " + file + "; " // Signals the command is running.
    "sleep 60";

  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));

  lambda::function<int()> inChild = lambda::bind(&execute, command, pipes);

  Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
  ASSERT_SOME(pid);

  // Reap the forked child.
  Future<Option<int> > status = process::reap(pid.get());

  // Continue in the parent.
  ::close(pipes[0]);

  // Isolate the forked child.
  AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

  // Now signal the child to continue.
  int buf = 0;
  ASSERT_LT(0, ::write(pipes[1], &buf, sizeof(buf)));
  ::close(pipes[1]);

  // Wait for the command to start.
  while (!os::exists(file));

  // Wait up to 1 second for the child process to induce 1/8 of a second
  // of system cpu time.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    if (statistics.cpus_system_time_secs() >= 0.125) {
      break;
    }

    os::sleep(Milliseconds(200));
    waited += Milliseconds(200);
  } while (waited < Seconds(1));

  EXPECT_LE(0.125, statistics.cpus_system_time_secs());

  // Ensure all processes are killed.
  AWAIT_READY(launcher.get()->destroy(containerId));

  // Make sure the child was reaped.
  AWAIT_READY(status);

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
  delete launcher.get();

  CHECK_SOME(os::rmdir(dir.get()));
}

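
// The 'execute' helper bound above (and the 'childSetup' helper used
// by a later test) are not shown in this excerpt. A plausible sketch,
// assuming the child blocks on the pipe until the parent has isolated
// it, then execs the shell command:
static int childSetup(int pipes[2])
{
  // In the child: close the write end and block until the parent
  // signals us to continue.
  ::close(pipes[1]);

  char dummy;
  while (::read(pipes[0], &dummy, sizeof(dummy)) == -1 && errno == EINTR);

  ::close(pipes[0]);

  return 0;
}


static int execute(const string& command, int pipes[2])
{
  childSetup(pipes);

  execlp("sh", "sh", "-c", command.c_str(), (char*) NULL);

  perror("Failed to exec");
  abort();
}
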
TYPED_TEST(MemIsolatorTest, MemUsage)
{
  slave::Flags flags;

  Try<Isolator*> isolator = TypeParam::create(flags);
  CHECK_SOME(isolator);

  // A PosixLauncher is sufficient even when testing a cgroups isolator.
  Try<Launcher*> launcher = PosixLauncher::create(flags);

  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("mem:1024").get());

  ContainerID containerId;
  containerId.set_value("memory_usage");

  // Use a relative temporary directory so it gets cleaned up
  // automatically with the test.
  Try<string> dir = os::mkdtemp(path::join(os::getcwd(), "XXXXXX"));
  ASSERT_SOME(dir);

  AWAIT_READY(
      isolator.get()->prepare(containerId, executorInfo, dir.get(), None()));

  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));

  Try<pid_t> pid = launcher.get()->fork(
      containerId,
      "/bin/sh",
      vector<string>(),
      Subprocess::FD(STDIN_FILENO),
      Subprocess::FD(STDOUT_FILENO),
      Subprocess::FD(STDERR_FILENO),
      None(),
      None(),
      lambda::bind(&consumeMemory, Megabytes(256), Seconds(10), pipes));

  ASSERT_SOME(pid);

  // Set up the reaper to wait on the forked child.
  Future<Option<int> > status = process::reap(pid.get());

  // Continue in the parent.
  ASSERT_SOME(os::close(pipes[0]));

  // Isolate the forked child.
  AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

  // Now signal the child to continue.
  char dummy;
  ASSERT_LT(0, ::write(pipes[1], &dummy, sizeof(dummy)));

  ASSERT_SOME(os::close(pipes[1]));

  // Wait up to 5 seconds for the child process to consume 256 MB of memory.
  ResourceStatistics statistics;
  Bytes threshold = Megabytes(256);
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    if (statistics.mem_rss_bytes() >= threshold.bytes()) {
      break;
    }

    os::sleep(Seconds(1));
    waited += Seconds(1);
  } while (waited < Seconds(5));

  EXPECT_LE(threshold.bytes(), statistics.mem_rss_bytes());

  // Ensure all processes are killed.
  AWAIT_READY(launcher.get()->destroy(containerId));

  // Make sure the child was reaped.
  AWAIT_READY(status);

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
  delete launcher.get();
}

// NOTE: Comparing serialized bytes is sufficient for equality here;
// this relies on serialization being deterministic for these messages
// (same fields set, in field-number order), which holds in practice
// but is not guaranteed by protobuf in general.
bool operator==(
    const ResourceStatistics& left,
    const ResourceStatistics& right)
{
  return left.SerializeAsString() == right.SerializeAsString();
}

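
// With the operator above in scope, gtest can compare statistics
// messages directly. A minimal sketch (hypothetical test, using the
// createResourceStatistics() helper defined below):
TEST(ResourceStatisticsTest, Equality)
{
  ResourceStatistics a = createResourceStatistics();
  ResourceStatistics b = createResourceStatistics();

  EXPECT_EQ(a, b); // Uses operator== via SerializeAsString().

  b.set_mem_rss_bytes(2048);

  EXPECT_FALSE(a == b);
}
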
TEST(MonitorTest, Statistics)
{
  FrameworkID frameworkId;
  frameworkId.set_value("framework");

  ExecutorID executorId;
  executorId.set_value("executor");

  ExecutorInfo executorInfo;
  executorInfo.mutable_executor_id()->CopyFrom(executorId);
  executorInfo.mutable_framework_id()->CopyFrom(frameworkId);
  executorInfo.set_name("name");
  executorInfo.set_source("source");

  ResourceStatistics statistics;
  statistics.set_cpus_nr_periods(100);
  statistics.set_cpus_nr_throttled(2);
  statistics.set_cpus_user_time_secs(4);
  statistics.set_cpus_system_time_secs(1);
  statistics.set_cpus_throttled_time_secs(0.5);
  statistics.set_cpus_limit(1.0);
  statistics.set_mem_file_bytes(0);
  statistics.set_mem_anon_bytes(0);
  statistics.set_mem_mapped_file_bytes(0);
  statistics.set_mem_rss_bytes(1024);
  statistics.set_mem_limit_bytes(2048);
  statistics.set_timestamp(0);

  ResourceMonitor monitor([=]() -> Future<ResourceUsage> {
    Resources resources = Resources::parse("cpus:1;mem:2").get();

    ResourceUsage usage;
    ResourceUsage::Executor* executor = usage.add_executors();
    executor->mutable_executor_info()->CopyFrom(executorInfo);
    executor->mutable_allocated()->CopyFrom(resources);
    executor->mutable_statistics()->CopyFrom(statistics);

    return usage;
  });

  UPID upid("monitor", process::address());

  Future<http::Response> response = http::get(upid, "statistics");
  AWAIT_READY(response);

  AWAIT_EXPECT_RESPONSE_STATUS_EQ(http::OK().status, response);
  AWAIT_EXPECT_RESPONSE_HEADER_EQ(
      "application/json", "Content-Type", response);

  JSON::Array expected;
  JSON::Object usage;
  usage.values["executor_id"] = "executor";
  usage.values["executor_name"] = "name";
  usage.values["framework_id"] = "framework";
  usage.values["source"] = "source";
  usage.values["statistics"] = JSON::Protobuf(statistics);
  expected.values.push_back(usage);

  Try<JSON::Array> result = JSON::parse<JSON::Array>(response.get().body);
  ASSERT_SOME(result);
  ASSERT_EQ(expected, result.get());
}

TYPED_TEST(CpuIsolatorTest, UserCpuUsage)
{
  slave::Flags flags;

  Try<Isolator*> isolator = TypeParam::create(flags);
  CHECK_SOME(isolator);

  // A PosixLauncher is sufficient even when testing a cgroups isolator.
  Try<Launcher*> launcher = PosixLauncher::create(flags);

  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("cpus:1.0").get());

  ContainerID containerId;
  containerId.set_value("user_cpu_usage");

  // Use a relative temporary directory so it gets cleaned up
  // automatically with the test.
  Try<string> dir = os::mkdtemp(path::join(os::getcwd(), "XXXXXX"));
  ASSERT_SOME(dir);

  AWAIT_READY(
      isolator.get()->prepare(containerId, executorInfo, dir.get(), None()));

  const string& file = path::join(dir.get(), "mesos_isolator_test_ready");

  // Max out a single core in userspace. This will run for at most one
  // second.
  string command = "while true ; do true ; done & "
    "touch " + file + "; " // Signals the command is running.
    "sleep 60";

  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));

  vector<string> argv(3);
  argv[0] = "sh";
  argv[1] = "-c";
  argv[2] = command;

  Try<pid_t> pid = launcher.get()->fork(
      containerId,
      "/bin/sh",
      argv,
      Subprocess::FD(STDIN_FILENO),
      Subprocess::FD(STDOUT_FILENO),
      Subprocess::FD(STDERR_FILENO),
      None(),
      None(),
      lambda::bind(&childSetup, pipes));

  ASSERT_SOME(pid);

  // Reap the forked child.
  Future<Option<int> > status = process::reap(pid.get());

  // Continue in the parent.
  ASSERT_SOME(os::close(pipes[0]));

  // Isolate the forked child.
  AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

  // Now signal the child to continue.
  char dummy;
  ASSERT_LT(0, ::write(pipes[1], &dummy, sizeof(dummy)));

  ASSERT_SOME(os::close(pipes[1]));

  // Wait for the command to start.
  while (!os::exists(file));

  // Wait up to 1 second for the child process to induce 1/8 of a second
  // of user cpu time.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    if (statistics.cpus_user_time_secs() >= 0.125) {
      break;
    }

    os::sleep(Milliseconds(200));
    waited += Milliseconds(200);
  } while (waited < Seconds(1));

  EXPECT_LE(0.125, statistics.cpus_user_time_secs());

  // Ensure all processes are killed.
  AWAIT_READY(launcher.get()->destroy(containerId));

  // Make sure the child was reaped.
  AWAIT_READY(status);

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
  delete launcher.get();
}

Future<ResourceStatistics> CpuacctSubsystem::usage(
    const ContainerID& containerId)
{
  ResourceStatistics result;

  // TODO(chzhcn): The number of processes and threads is available as
  // long as any cgroup subsystem is in use, so this is best not tied
  // to a specific cgroup subsystem. A better place is probably the
  // Linux launcher, which uses the cgroup freezer subsystem. That
  // requires some change for it to adopt the new semantics of
  // reporting subsystem-independent cgroup usage.
  //
  // NOTE: The complexity of this operation is linear in the number of
  // processes and threads in a container: the kernel has to allocate
  // memory to contain the list of pids or tids, and userspace has to
  // parse the cgroup files to get the count. If this proves to be a
  // performance bottleneck, some kind of rate limiting mechanism
  // needs to be employed.
  if (flags.cgroups_cpu_enable_pids_and_tids_count) {
    Try<set<pid_t>> pids = cgroups::processes(
        hierarchy,
        path::join(flags.cgroups_root, containerId.value()));

    if (pids.isError()) {
      return Failure("Failed to get number of processes: " + pids.error());
    }

    result.set_processes(pids.get().size());

    Try<set<pid_t>> tids = cgroups::threads(
        hierarchy,
        path::join(flags.cgroups_root, containerId.value()));

    if (tids.isError()) {
      return Failure("Failed to get number of threads: " + tids.error());
    }

    result.set_threads(tids.get().size());
  }

  // Get the number of clock ticks per second, used for cpu accounting.
  static long ticks = sysconf(_SC_CLK_TCK);

  PCHECK(ticks > 0) << "Failed to get sysconf(_SC_CLK_TCK)";

  // Add the cpuacct.stat information.
  Try<hashmap<string, uint64_t>> stat = cgroups::stat(
      hierarchy,
      path::join(flags.cgroups_root, containerId.value()),
      "cpuacct.stat");

  if (stat.isError()) {
    return Failure("Failed to read 'cpuacct.stat': " + stat.error());
  }

  // TODO(bmahler): Add namespacing to cgroups to enforce the expected
  // structure, e.g., cgroups::cpuacct::stat.
  Option<uint64_t> user = stat.get().get("user");
  Option<uint64_t> system = stat.get().get("system");

  if (user.isSome() && system.isSome()) {
    result.set_cpus_user_time_secs((double) user.get() / (double) ticks);
    result.set_cpus_system_time_secs((double) system.get() / (double) ticks);
  }

  return result;
}

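
// For reference, 'cpuacct.stat' exposes cumulative cpu time in clock
// ticks (USER_HZ, typically 100 on Linux), one "<key> <value>" pair
// per line, e.g.:
//
//   user 28165
//   system 4192
//
// With ticks == 100, the conversion above would report 281.65 seconds
// of user time and 41.92 seconds of system time. (The values here are
// illustrative, not taken from a real container.)
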
ResourceStatistics createResourceStatistics()
{
  ResourceStatistics statistics;
  statistics.set_cpus_nr_periods(100);
  statistics.set_cpus_nr_throttled(2);
  statistics.set_cpus_user_time_secs(4);
  statistics.set_cpus_system_time_secs(1);
  statistics.set_cpus_throttled_time_secs(0.5);
  statistics.set_cpus_limit(1.0);
  statistics.set_mem_file_bytes(0);
  statistics.set_mem_anon_bytes(0);
  statistics.set_mem_mapped_file_bytes(0);
  statistics.set_mem_rss_bytes(1024);
  statistics.set_mem_limit_bytes(2048);
  statistics.set_timestamp(0);

  return statistics;
}

TEST_P(CpuIsolatorTest, ROOT_UserCpuUsage)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = GetParam();

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> _containerizer =
    MesosContainerizer::create(flags, true, &fetcher);

  ASSERT_SOME(_containerizer);

  Owned<MesosContainerizer> containerizer(_containerizer.get());

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(
      detector.get(),
      containerizer.get());

  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched,
      DEFAULT_FRAMEWORK_INFO,
      master.get()->pid,
      DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  // Max out a single core in userspace. This will run for at most one
  // second.
  TaskInfo task = createTask(
      offers.get()[0],
      "while true ; do true ; done & sleep 60");

  Future<TaskStatus> statusRunning;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(statusRunning);
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  Future<hashset<ContainerID>> containers = containerizer->containers();
  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());

  ContainerID containerId = *(containers->begin());

  // Wait up to 1 second for the child process to induce 1/8 of a
  // second of user cpu time.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    if (statistics.cpus_user_time_secs() >= 0.125) {
      break;
    }

    os::sleep(Milliseconds(200));
    waited += Milliseconds(200);
  } while (waited < Seconds(1));

  EXPECT_LE(0.125, statistics.cpus_user_time_secs());

  driver.stop();
  driver.join();
}

TYPED_TEST(IsolatorTest, Usage)
{
  Try<PID<Master> > master = this->StartMaster();
  ASSERT_SOME(master);

  TypeParam isolator;

  slave::Flags flags = this->CreateSlaveFlags();

  Try<PID<Slave> > slave = this->StartSlave(&isolator, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  TaskInfo task;
  task.set_name("isolator_test");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
  task.mutable_resources()->MergeFrom(offers.get()[0].resources());

  Resources resources(offers.get()[0].resources());
  Option<Bytes> mem = resources.mem();
  ASSERT_SOME(mem);
  Option<double> cpus = resources.cpus();
  ASSERT_SOME(cpus);

  const std::string& file = path::join(flags.work_dir, "ready");

  // This task induces user/system load by running 'top' in a child
  // process for ten seconds.
  task.mutable_command()->set_value(
#ifdef __APPLE__
      // Use logging mode with 30,000 samples with no interval.
      "top -l 30000 -s 0 > /dev/null 2>&1 & "
#else
      // Batch mode, with 30,000 samples with no interval.
      "top -b -d 0 -n 30000 > /dev/null 2>&1 & "
#endif
      "touch " + file + "; " // Signals that the top command is running.
      "sleep 60");

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(status);
  EXPECT_EQ(TASK_RUNNING, status.get().state());

  // Wait for the task to begin inducing cpu time.
  while (!os::exists(file));

  ExecutorID executorId;
  executorId.set_value(task.task_id().value());

  // We'll wait up to 10 seconds for the child process to induce
  // 1/8 of a second of user and system cpu time in total.
  // TODO(bmahler): Also induce rss memory consumption, by re-using
  // the balloon framework.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = process::dispatch(
        (Isolator*) &isolator, // TODO(benh): Fix after reaper changes.
        &Isolator::usage,
        frameworkId.get(),
        executorId);

    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    if (statistics.cpus_user_time_secs() >= 0.125 &&
        statistics.cpus_system_time_secs() >= 0.125 &&
        statistics.mem_rss_bytes() >= 1024u) {
      break;
    }

    os::sleep(Milliseconds(100));
    waited += Milliseconds(100);
  } while (waited < Seconds(10));

  EXPECT_GE(statistics.cpus_user_time_secs(), 0.125);
  EXPECT_GE(statistics.cpus_system_time_secs(), 0.125);
  EXPECT_EQ(statistics.cpus_limit(), cpus.get());
  EXPECT_GE(statistics.mem_rss_bytes(), 1024u);
  EXPECT_EQ(statistics.mem_limit_bytes(), mem.get().bytes());

  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.killTask(task.task_id());

  AWAIT_READY(status);
  EXPECT_EQ(TASK_KILLED, status.get().state());

  driver.stop();
  driver.join();

  this->Shutdown(); // Must shutdown before 'isolator' gets deallocated.
}

// TODO(bmahler): Add additional tests:
// 1. Check that the data has been published to statistics.
// 2. Check that metering is occurring on subsequent resource data.
TEST(MonitorTest, WatchUnwatch)
{
  FrameworkID frameworkId;
  frameworkId.set_value("framework");

  ExecutorID executorId;
  executorId.set_value("executor");

  ExecutorInfo executorInfo;
  executorInfo.mutable_executor_id()->CopyFrom(executorId);
  executorInfo.mutable_framework_id()->CopyFrom(frameworkId);
  executorInfo.set_name("name");
  executorInfo.set_source("source");

  ResourceStatistics initialStatistics;
  initialStatistics.set_cpus_user_time_secs(0);
  initialStatistics.set_cpus_system_time_secs(0);
  initialStatistics.set_cpus_limit(2.5);
  initialStatistics.set_mem_rss_bytes(0);
  initialStatistics.set_mem_limit_bytes(2048);
  initialStatistics.set_timestamp(Clock::now().secs());

  ResourceStatistics statistics;
  statistics.set_cpus_nr_periods(100);
  statistics.set_cpus_nr_throttled(2);
  statistics.set_cpus_user_time_secs(4);
  statistics.set_cpus_system_time_secs(1);
  statistics.set_cpus_throttled_time_secs(0.5);
  statistics.set_cpus_limit(2.5);
  statistics.set_mem_rss_bytes(1024);
  statistics.set_mem_limit_bytes(2048);
  statistics.set_timestamp(
      initialStatistics.timestamp() +
      slave::RESOURCE_MONITORING_INTERVAL.secs());

  TestingIsolator isolator;

  process::spawn(isolator);

  Future<Nothing> usage1, usage2;
  EXPECT_CALL(isolator, usage(frameworkId, executorId))
    .WillOnce(DoAll(FutureSatisfy(&usage1),
                    Return(initialStatistics)))
    .WillOnce(DoAll(FutureSatisfy(&usage2),
                    Return(statistics)));

  slave::ResourceMonitor monitor(&isolator);

  // We pause the clock first in order to make sure that we can
  // advance time below to force the 'delay' in
  // ResourceMonitorProcess::watch to execute.
  process::Clock::pause();

  monitor.watch(
      frameworkId,
      executorId,
      executorInfo,
      slave::RESOURCE_MONITORING_INTERVAL);

  // Now wait for ResourceMonitorProcess::watch to finish so we can
  // advance time to cause collection to begin.
  process::Clock::settle();

  process::Clock::advance(slave::RESOURCE_MONITORING_INTERVAL);
  process::Clock::settle();

  AWAIT_READY(usage1);

  // Wait until the isolator has finished returning the statistics.
  process::Clock::settle();

  // The second collection will populate the cpus_usage.
  process::Clock::advance(slave::RESOURCE_MONITORING_INTERVAL);
  process::Clock::settle();

  AWAIT_READY(usage2);

  // Wait until the isolator has finished returning the statistics.
  process::Clock::settle();

  process::UPID upid("monitor", process::ip(), process::port());

  Future<Response> response = process::http::get(upid, "usage.json");

  AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
  AWAIT_EXPECT_RESPONSE_HEADER_EQ(
      "application/json", "Content-Type", response);

  // TODO(bmahler): Verify metering directly through statistics.
  AWAIT_EXPECT_RESPONSE_BODY_EQ(
      strings::format(
          "[{"
              "\"executor_id\":\"executor\","
              "\"executor_name\":\"name\","
              "\"framework_id\":\"framework\","
              "\"resource_usage\":{"
                  "\"cpu_time\":%g,"
                  "\"cpu_usage\":%g,"
                  "\"memory_rss\":%lu"
              "},"
              "\"source\":\"source\""
          "}]",
          statistics.cpus_system_time_secs() +
          statistics.cpus_user_time_secs(),
          (statistics.cpus_system_time_secs() +
           statistics.cpus_user_time_secs()) /
              slave::RESOURCE_MONITORING_INTERVAL.secs(),
          statistics.mem_rss_bytes()).get(),
      response);

  response = process::http::get(upid, "statistics.json");

  AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
  AWAIT_EXPECT_RESPONSE_HEADER_EQ(
      "application/json", "Content-Type", response);

  // TODO(bmahler): Verify metering directly through statistics.
  AWAIT_EXPECT_RESPONSE_BODY_EQ(
      strings::format(
          "[{"
              "\"executor_id\":\"executor\","
              "\"executor_name\":\"name\","
              "\"framework_id\":\"framework\","
              "\"source\":\"source\","
              "\"statistics\":{"
                  "\"cpus_limit\":%g,"
                  "\"cpus_nr_periods\":%d,"
                  "\"cpus_nr_throttled\":%d,"
                  "\"cpus_system_time_secs\":%g,"
                  "\"cpus_throttled_time_secs\":%g,"
                  "\"cpus_user_time_secs\":%g,"
                  "\"mem_limit_bytes\":%lu,"
                  "\"mem_rss_bytes\":%lu"
              "}"
          "}]",
          statistics.cpus_limit(),
          statistics.cpus_nr_periods(),
          statistics.cpus_nr_throttled(),
          statistics.cpus_system_time_secs(),
          statistics.cpus_throttled_time_secs(),
          statistics.cpus_user_time_secs(),
          statistics.mem_limit_bytes(),
          statistics.mem_rss_bytes()).get(),
      response);

  // Ensure the monitor stops polling the isolator.
  monitor.unwatch(frameworkId, executorId);

  // Wait until ResourceMonitorProcess::unwatch has completed.
  process::Clock::settle();

  // This time, Isolator::usage should not get called.
  EXPECT_CALL(isolator, usage(frameworkId, executorId))
    .Times(0);

  process::Clock::advance(slave::RESOURCE_MONITORING_INTERVAL);
  process::Clock::settle();

  response = process::http::get(upid, "usage.json");

  AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
  AWAIT_EXPECT_RESPONSE_HEADER_EQ(
      "application/json", "Content-Type", response);
  AWAIT_EXPECT_RESPONSE_BODY_EQ("[]", response);
}