foreach (const Future<TaskStatus>& taskStatus, taskStatuses) { AWAIT_READY(taskStatus); Option<TaskState> taskState = taskStates.get(taskStatus->task_id()); ASSERT_SOME(taskState); switch (taskState.get()) { case TASK_STAGING: { ASSERT_EQ(TASK_STARTING, taskStatus->state()) << taskStatus->DebugString(); taskStates[taskStatus->task_id()] = TASK_STARTING; break; } case TASK_STARTING: { ASSERT_EQ(TASK_RUNNING, taskStatus->state()) << taskStatus->DebugString(); taskStates[taskStatus->task_id()] = TASK_RUNNING; break; } default: { FAIL() << "Unexpected task update: " << taskStatus->DebugString(); break; } } }
// This test verifies that the disk quota isolator recovers properly after // the slave restarts. TEST_F(DiskQuotaTest, SlaveRecovery) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); flags.isolation = "posix/cpu,posix/mem,disk/du"; flags.container_disk_watch_interval = Milliseconds(1); Fetcher fetcher(flags); Try<MesosContainerizer*> _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); Owned<MesosContainerizer> containerizer(_containerizer.get()); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); MockScheduler sched; // Enable checkpointing for the framework. FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO; frameworkInfo.set_checkpoint(true); MesosSchedulerDriver driver( &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); const Offer& offer = offers.get()[0]; // Create a task that uses 2MB of its 3MB disk quota. TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128;disk:3").get(), "dd if=/dev/zero of=file bs=1048576 count=2 && sleep 1000"); Future<TaskStatus> status; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&status)) .WillRepeatedly(Return()); // Ignore subsequent updates. driver.launchTasks(offer.id(), {task}); AWAIT_READY(status); EXPECT_EQ(task.task_id(), status->task_id()); EXPECT_EQ(TASK_RUNNING, status->state()); Future<hashset<ContainerID>> containers = containerizer->containers(); AWAIT_READY(containers); ASSERT_EQ(1u, containers->size()); const ContainerID& containerId = *(containers->begin()); // Stop the slave. slave.get()->terminate(); Future<ReregisterExecutorMessage> reregisterExecutorMessage = FUTURE_PROTOBUF(ReregisterExecutorMessage(), _, _); Future<Nothing> _recover = FUTURE_DISPATCH(_, &Slave::_recover); _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); containerizer.reset(_containerizer.get()); detector = master.get()->createDetector(); slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); Clock::pause(); AWAIT_READY(_recover); // Wait for the slave to schedule the reregistration timeout. Clock::settle(); // Ensure the executor re-registers before completing recovery. AWAIT_READY(reregisterExecutorMessage); // Ensure the slave considers itself recovered. Clock::advance(flags.executor_reregistration_timeout); // NOTE: We resume the clock because we need the reaper to reap the // 'du' subprocess. Clock::resume(); // Wait until disk usage can be retrieved. Duration elapsed = Duration::zero(); while (true) { Future<ResourceStatistics> usage = containerizer->usage(containerId); AWAIT_READY(usage); ASSERT_TRUE(usage->has_disk_limit_bytes()); EXPECT_EQ(Megabytes(3), Bytes(usage->disk_limit_bytes())); if (usage->has_disk_used_bytes()) { EXPECT_LE(usage->disk_used_bytes(), usage->disk_limit_bytes()); // NOTE: This is to capture the regression in MESOS-2452. The data // stored in the executor meta directory should be less than 64K.
if (usage->disk_used_bytes() > Kilobytes(64).bytes()) { break; } } ASSERT_LT(elapsed, Seconds(15)); os::sleep(Milliseconds(1)); elapsed += Milliseconds(1); } driver.stop(); driver.join(); }
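// The "poll usage until the writes become visible" loop above recurs in
// several disk isolator tests in this file. A minimal sketch of a helper
// that could factor it out, assuming the surrounding Mesos test headers;
// the name `pollDiskUsage` is hypothetical, not an existing test utility.
// It returns the first sample that reports disk usage, or None() if the
// timeout elapses first.
static Option<ResourceStatistics> pollDiskUsage(
    const Owned<MesosContainerizer>& containerizer,
    const ContainerID& containerId,
    const Duration& timeout)
{
  Duration elapsed = Duration::zero();

  while (elapsed < timeout) {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);

    // Wait for this sample; give up if the containerizer fails it.
    if (!usage.await(timeout - elapsed) || !usage.isReady()) {
      return None();
    }

    if (usage->has_disk_used_bytes()) {
      return usage.get();
    }

    // Like the loops above, this does not count time spent awaiting,
    // so the effective timeout is approximate.
    os::sleep(Milliseconds(1));
    elapsed += Milliseconds(1);
  }

  return None();
}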
// This test verifies that the container will not be killed if the // `enforce_container_disk_quota` flag is false (even if the disk usage // exceeds its quota). TEST_F(DiskQuotaTest, NoQuotaEnforcement) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); flags.isolation = "posix/cpu,posix/mem,disk/du"; // NOTE: We can't pause the clock because we need the reaper to reap // the 'du' subprocess. flags.container_disk_watch_interval = Milliseconds(1); flags.enforce_container_disk_quota = false; Fetcher fetcher(flags); Try<MesosContainerizer*> _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); Owned<MesosContainerizer> containerizer(_containerizer.get()); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); const Offer& offer = offers.get()[0]; // Create a task that requests 1MB disk but writes 2MB. TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128;disk:1").get(), "dd if=/dev/zero of=file bs=1048576 count=2 && sleep 1000"); Future<TaskStatus> status; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&status)) .WillRepeatedly(Return()); // Ignore subsequent updates. driver.launchTasks(offer.id(), {task}); AWAIT_READY(status); EXPECT_EQ(task.task_id(), status->task_id()); EXPECT_EQ(TASK_RUNNING, status->state()); Future<hashset<ContainerID>> containers = containerizer->containers(); AWAIT_READY(containers); ASSERT_EQ(1u, containers->size()); const ContainerID& containerId = *(containers->begin()); // Wait until disk usage can be retrieved and the usage actually // exceeds the limit. If the container is killed due to quota // enforcement (which shouldn't happen), the 'usage' call will // return a failed future, leading to a failed test. Duration elapsed = Duration::zero(); while (true) { Future<ResourceStatistics> usage = containerizer->usage(containerId); AWAIT_READY(usage); ASSERT_TRUE(usage->has_disk_limit_bytes()); EXPECT_EQ(Megabytes(1), Bytes(usage->disk_limit_bytes())); if (usage->has_disk_used_bytes() && usage->disk_used_bytes() > usage->disk_limit_bytes()) { break; } ASSERT_LT(elapsed, Seconds(5)); os::sleep(Milliseconds(1)); elapsed += Milliseconds(1); } driver.stop(); driver.join(); }
// Test that memory pressure listening is restarted after recovery. TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); // We only care about memory cgroup for this test. flags.isolation = "cgroups/mem"; Fetcher fetcher(flags); Try<MesosContainerizer*> _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); Owned<MesosContainerizer> containerizer(_containerizer.get()); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); MockScheduler sched; // Enable checkpointing for the framework. FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO; frameworkInfo.set_checkpoint(true); MesosSchedulerDriver driver( &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); Offer offer = offers.get()[0]; // Run a task that triggers a memory pressure event. We request 1G // disk because we are going to write a 512 MB file repeatedly. TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:256;disk:1024").get(), "while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done"); Future<TaskStatus> starting; Future<TaskStatus> running; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&starting)) .WillOnce(FutureArg<1>(&running)) .WillRepeatedly(Return()); // Ignore subsequent updates. // NOTE: Identical FUTURE_DISPATCH expectations are matched newest-first, // so 'startingAck' (declared second) is satisfied by the first // (TASK_STARTING) acknowledgement and 'runningAck' by the second. Future<Nothing> runningAck = FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement); Future<Nothing> startingAck = FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement); driver.launchTasks(offers.get()[0].id(), {task}); AWAIT_READY(starting); EXPECT_EQ(task.task_id(), starting->task_id()); EXPECT_EQ(TASK_STARTING, starting->state()); AWAIT_READY(startingAck); AWAIT_READY(running); EXPECT_EQ(task.task_id(), running->task_id()); EXPECT_EQ(TASK_RUNNING, running->state()); // Wait for the ACK to be checkpointed. AWAIT_READY_FOR(runningAck, Seconds(120)); // We restart the slave to let it recover. slave.get()->terminate(); // Set up so we can wait until the new slave updates the container's // resources (this occurs after the executor has re-registered). Future<Nothing> update = FUTURE_DISPATCH(_, &MesosContainerizerProcess::update); // Use the same flags. _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); containerizer.reset(_containerizer.get()); Future<SlaveReregisteredMessage> reregistered = FUTURE_PROTOBUF(SlaveReregisteredMessage(), master.get()->pid, _); slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); AWAIT_READY(reregistered); // Wait until the containerizer is updated. AWAIT_READY(update); Future<hashset<ContainerID>> containers = containerizer->containers(); AWAIT_READY(containers); ASSERT_EQ(1u, containers->size()); ContainerID containerId = *(containers->begin()); // Wait a while for some memory pressure events to occur.
Duration waited = Duration::zero(); do { Future<ResourceStatistics> usage = containerizer->usage(containerId); AWAIT_READY(usage); if (usage->mem_low_pressure_counter() > 0) { // We will check the correctness of the memory pressure counters // later, because the memory-hammering task is still active // and potentially incrementing these counters. break; } os::sleep(Milliseconds(100)); waited += Milliseconds(100); } while (waited < Seconds(5)); EXPECT_LE(waited, Seconds(5)); // Pause the clock to ensure that the reaper doesn't reap the exited // command executor and inform the containerizer/slave. Clock::pause(); Clock::settle(); Future<TaskStatus> killed; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&killed)); // Stop the memory-hammering task. driver.killTask(task.task_id()); AWAIT_READY_FOR(killed, Seconds(120)); EXPECT_EQ(task.task_id(), killed->task_id()); EXPECT_EQ(TASK_KILLED, killed->state()); // Now check the correctness of the memory pressure counters. Future<ResourceStatistics> usage = containerizer->usage(containerId); AWAIT_READY(usage); EXPECT_GE(usage->mem_low_pressure_counter(), usage->mem_medium_pressure_counter()); EXPECT_GE(usage->mem_medium_pressure_counter(), usage->mem_critical_pressure_counter()); Clock::resume(); driver.stop(); driver.join(); }
// This test checks the behavior when invalid limits are passed. TEST_F(PosixRLimitsIsolatorTest, InvalidLimits) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); flags.isolation = "posix/rlimits"; Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_NE(0u, offers->size()); TaskInfo task = createTask( offers.get()[0].slave_id(), offers.get()[0].resources(), "true"); ContainerInfo* container = task.mutable_container(); container->set_type(ContainerInfo::MESOS); // Set an impossible limit: soft > hard. RLimitInfo rlimitInfo; RLimitInfo::RLimit* rlimit = rlimitInfo.add_rlimits(); rlimit->set_type(RLimitInfo::RLimit::RLMT_CPU); rlimit->set_soft(100); rlimit->set_hard(1); container->mutable_rlimit_info()->CopyFrom(rlimitInfo); Future<TaskStatus> taskStatus; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&taskStatus)); driver.launchTasks(offers.get()[0].id(), {task}); AWAIT_READY(taskStatus); EXPECT_EQ(task.task_id(), taskStatus->task_id()); EXPECT_EQ(TASK_FAILED, taskStatus->state()); EXPECT_EQ(TaskStatus::REASON_EXECUTOR_TERMINATED, taskStatus->reason()); driver.stop(); driver.join(); }
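// The invalidity asserted above is enforced by the kernel itself:
// setrlimit(2) rejects a soft limit greater than the hard limit with
// EINVAL, so the executor cannot apply the requested limits and
// terminates before the task starts. A minimal standalone illustration
// of that contract, independent of Mesos:
#include <sys/resource.h>

#include <cassert>
#include <cerrno>

int main()
{
  rlimit limit;
  limit.rlim_cur = 100; // Soft limit.
  limit.rlim_max = 1;   // Hard limit, smaller than soft: invalid.

  // The kernel rejects rlim_cur > rlim_max; see setrlimit(2).
  int result = setrlimit(RLIMIT_CPU, &limit);
  assert(result == -1 && errno == EINVAL);

  return 0;
}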
// Tests that the logrotate container logger only closes FDs when it // is supposed to and does not interfere with other FDs on the agent. TEST_F(ContainerLoggerTest, LOGROTATE_ModuleFDOwnership) { // Create a master, agent, and framework. Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); Future<SlaveRegisteredMessage> slaveRegisteredMessage = FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _); // We'll need access to these flags later. slave::Flags flags = CreateSlaveFlags(); // Use the non-default container logger that rotates logs. flags.container_logger = LOGROTATE_CONTAINER_LOGGER_NAME; Fetcher fetcher(flags); // We use an actual containerizer + executor since we want something to run. Try<MesosContainerizer*> _containerizer = MesosContainerizer::create(flags, false, &fetcher); ASSERT_SOME(_containerizer); Owned<MesosContainerizer> containerizer(_containerizer.get()); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); AWAIT_READY(slaveRegisteredMessage); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); Future<FrameworkID> frameworkId; EXPECT_CALL(sched, registered(&driver, _, _)) .WillOnce(FutureArg<1>(&frameworkId)); // Wait for an offer, and start a task. Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(frameworkId); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); // Start a task that will keep running until the end of the test. TaskInfo task = createTask(offers.get()[0], "sleep 100"); Future<TaskStatus> statusStarting; Future<TaskStatus> statusRunning; Future<TaskStatus> statusKilled; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&statusStarting)) .WillOnce(FutureArg<1>(&statusRunning)) .WillOnce(FutureArg<1>(&statusKilled)) .WillRepeatedly(Return()); // Ignore subsequent updates. driver.launchTasks(offers.get()[0].id(), {task}); AWAIT_READY(statusStarting); EXPECT_EQ(TASK_STARTING, statusStarting->state()); AWAIT_READY(statusRunning); EXPECT_EQ(TASK_RUNNING, statusRunning->state()); // Open multiple files, so that we're fairly certain we've opened // the same FDs (integers) opened by the container logger. vector<int> fds; for (int i = 0; i < 50; i++) { Try<int> fd = os::open(os::DEV_NULL, O_RDONLY); ASSERT_SOME(fd); fds.push_back(fd.get()); } // Kill the task, which also kills the executor. driver.killTask(statusRunning->task_id()); AWAIT_READY(statusKilled); EXPECT_EQ(TASK_KILLED, statusKilled->state()); Future<Nothing> executorTerminated = FUTURE_DISPATCH(_, &Slave::executorTerminated); AWAIT_READY(executorTerminated); // Close all the FDs we opened. Every `close` should succeed. foreach (int fd, fds) { ASSERT_SOME(os::close(fd)); } driver.stop(); driver.join(); }
// This test launches a container which has an image and joins host // network, and then verifies that the container can access the Internet. TEST_F(CniIsolatorTest, ROOT_INTERNET_CURL_LaunchContainerInHostNetwork) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); flags.isolation = "docker/runtime,filesystem/linux"; flags.image_providers = "docker"; flags.docker_store_dir = path::join(sandbox.get(), "store"); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_EQ(1u, offers->size()); const Offer& offer = offers.get()[0]; // NOTE: We use a non-shell command here because 'sh' might not be // in the PATH. The 'alpine' image does not specify a PATH env // variable. CommandInfo command; command.set_shell(false); command.set_value("/bin/ping"); command.add_arguments("/bin/ping"); command.add_arguments("-c1"); command.add_arguments("google.com"); TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128").get(), command); Image image; image.set_type(Image::DOCKER); image.mutable_docker()->set_name("alpine"); ContainerInfo* container = task.mutable_container(); container->set_type(ContainerInfo::MESOS); container->mutable_mesos()->mutable_image()->CopyFrom(image); Future<TaskStatus> statusRunning; Future<TaskStatus> statusFinished; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&statusRunning)) .WillOnce(FutureArg<1>(&statusFinished)); driver.launchTasks(offer.id(), {task}); AWAIT_READY_FOR(statusRunning, Seconds(60)); EXPECT_EQ(task.task_id(), statusRunning->task_id()); EXPECT_EQ(TASK_RUNNING, statusRunning->state()); AWAIT_READY(statusFinished); EXPECT_EQ(task.task_id(), statusFinished->task_id()); EXPECT_EQ(TASK_FINISHED, statusFinished->state()); driver.stop(); driver.join(); }
TEST_F(ResourceOffersTest, ResourcesGetReofferedAfterTaskInfoError) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get()); ASSERT_SOME(slave); MockScheduler sched1; MesosSchedulerDriver driver1( &sched1, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched1, registered(&driver1, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched1, resourceOffers(&driver1, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver1.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); TaskInfo task; task.set_name(""); task.mutable_task_id()->set_value("1"); task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id()); task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO); Resource* cpus = task.add_resources(); cpus->set_name("cpus"); cpus->set_type(Value::SCALAR); cpus->mutable_scalar()->set_value(-1); Resource* mem = task.add_resources(); mem->set_name("mem"); mem->set_type(Value::SCALAR); mem->mutable_scalar()->set_value(static_cast<double>(Gigabytes(1).bytes())); vector<TaskInfo> tasks; tasks.push_back(task); Future<TaskStatus> status; EXPECT_CALL(sched1, statusUpdate(&driver1, _)) .WillOnce(FutureArg<1>(&status)); driver1.launchTasks(offers.get()[0].id(), tasks); AWAIT_READY(status); EXPECT_EQ(task.task_id(), status->task_id()); EXPECT_EQ(TASK_ERROR, status->state()); EXPECT_EQ(TaskStatus::REASON_TASK_INVALID, status->reason()); EXPECT_TRUE(status->has_message()); EXPECT_TRUE(strings::contains(status->message(), "Invalid scalar resource")) << status->message(); MockScheduler sched2; MesosSchedulerDriver driver2( &sched2, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched2, registered(&driver2, _, _)); EXPECT_CALL(sched2, resourceOffers(&driver2, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver2.start(); AWAIT_READY(offers); driver1.stop(); driver1.join(); driver2.stop(); driver2.join(); }
// Verify that we can get accurate resource statistics from the XFS // disk isolator. TEST_F(ROOT_XFS_QuotaTest, ResourceStatistics) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); Fetcher fetcher(flags); Owned<MasterDetector> detector = master.get()->createDetector(); Try<MesosContainerizer*> _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); Owned<MesosContainerizer> containerizer(_containerizer.get()); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); Offer offer = offers.get()[0]; // Create a task that tries to write 4MB into its 3MB disk quota but // doesn't fail, because '|| sleep' keeps it running after dd is cut // short by the quota. We will verify that the allocated disk is filled. TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128;disk:3").get(), "dd if=/dev/zero of=file bs=1048576 count=4 || sleep 1000"); Future<TaskStatus> startingStatus; Future<TaskStatus> runningStatus; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&startingStatus)) .WillOnce(FutureArg<1>(&runningStatus)) .WillRepeatedly(Return()); // Ignore subsequent updates. driver.launchTasks(offers.get()[0].id(), {task}); AWAIT_READY(startingStatus); EXPECT_EQ(task.task_id(), startingStatus->task_id()); EXPECT_EQ(TASK_STARTING, startingStatus->state()); AWAIT_READY(runningStatus); EXPECT_EQ(task.task_id(), runningStatus->task_id()); EXPECT_EQ(TASK_RUNNING, runningStatus->state()); Future<hashset<ContainerID>> containers = containerizer.get()->containers(); AWAIT_READY(containers); ASSERT_EQ(1u, containers->size()); ContainerID containerId = *(containers->begin()); Timeout timeout = Timeout::in(Seconds(5)); while (true) { Future<ResourceStatistics> usage = containerizer.get()->usage(containerId); AWAIT_READY(usage); ASSERT_TRUE(usage->has_disk_limit_bytes()); EXPECT_EQ(Megabytes(3), Bytes(usage->disk_limit_bytes())); if (usage->has_disk_used_bytes()) { // Usage must always be <= the limit. EXPECT_LE(usage->disk_used_bytes(), usage->disk_limit_bytes()); // Usage may lag behind the limit, but it must eventually reach // it exactly and never exceed it. if (usage->disk_used_bytes() >= usage->disk_limit_bytes()) { EXPECT_EQ( usage->disk_used_bytes(), usage->disk_limit_bytes()); EXPECT_EQ(Megabytes(3), Bytes(usage->disk_used_bytes())); break; } } ASSERT_FALSE(timeout.expired()); os::sleep(Milliseconds(100)); } driver.stop(); driver.join(); }
// This is the same logic as DiskUsageExceedsQuota except we turn off disk quota // enforcement, so exceeding the quota should be allowed. TEST_F(ROOT_XFS_QuotaTest, DiskUsageExceedsQuotaNoEnforce) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); Owned<MasterDetector> detector = master.get()->createDetector(); slave::Flags flags = CreateSlaveFlags(); flags.enforce_container_disk_quota = false; Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); const Offer& offer = offers.get()[0]; // Create a task which requests 1MB disk but writes 2MB. TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128;disk:1").get(), "dd if=/dev/zero of=file bs=1048576 count=2"); Future<TaskStatus> startingStatus; Future<TaskStatus> runningStatus; Future<TaskStatus> finishedStatus; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&startingStatus)) .WillOnce(FutureArg<1>(&runningStatus)) .WillOnce(FutureArg<1>(&finishedStatus)); driver.launchTasks(offer.id(), {task}); AWAIT_READY(startingStatus); EXPECT_EQ(task.task_id(), startingStatus->task_id()); EXPECT_EQ(TASK_STARTING, startingStatus->state()); AWAIT_READY(runningStatus); EXPECT_EQ(task.task_id(), runningStatus->task_id()); EXPECT_EQ(TASK_RUNNING, runningStatus->state()); // We expect the task to succeed even though it exceeded // the disk quota. AWAIT_READY(finishedStatus); EXPECT_EQ(task.task_id(), finishedStatus->task_id()); EXPECT_EQ(TASK_FINISHED, finishedStatus->state()); driver.stop(); driver.join(); }
// Verify that a task that tries to consume more space than it has requested // is only allowed to consume exactly the assigned resources. We tell dd // to write 2MB but only give it 1MB of resources and (roughly) verify that // it exits with a failure (that should be a write error). TEST_F(ROOT_XFS_QuotaTest, DiskUsageExceedsQuota) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), CreateSlaveFlags()); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); const Offer& offer = offers.get()[0]; // Create a task which requests 1MB disk but writes 2MB. TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128;disk:1").get(), "dd if=/dev/zero of=file bs=1048576 count=2"); Future<TaskStatus> startingStatus; Future<TaskStatus> runningStatus; Future<TaskStatus> failedStatus; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&startingStatus)) .WillOnce(FutureArg<1>(&runningStatus)) .WillOnce(FutureArg<1>(&failedStatus)); driver.launchTasks(offer.id(), {task}); AWAIT_READY(startingStatus); EXPECT_EQ(task.task_id(), startingStatus->task_id()); EXPECT_EQ(TASK_STARTING, startingStatus->state()); AWAIT_READY(runningStatus); EXPECT_EQ(task.task_id(), runningStatus->task_id()); EXPECT_EQ(TASK_RUNNING, runningStatus->state()); AWAIT_READY(failedStatus); EXPECT_EQ(task.task_id(), failedStatus->task_id()); EXPECT_EQ(TASK_FAILED, failedStatus->state()); // Unlike the 'disk/du' isolator, the reason for task failure // should be that dd got an IO error. EXPECT_EQ(TaskStatus::SOURCE_EXECUTOR, failedStatus->source()); EXPECT_EQ("Command exited with status 1", failedStatus->message()); driver.stop(); driver.join(); }
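// A sketch of the failure the test relies on: once the container's XFS
// project quota is exhausted, write(2) fails with EDQUOT, dd reports the
// error and exits non-zero, and the command executor surfaces that as
// "Command exited with status 1". The program below mimics the dd
// invocation; the quota setup is assumed (on an unrestricted filesystem
// it simply succeeds), and short-write handling is omitted.
#include <fcntl.h>
#include <unistd.h>

#include <cerrno>
#include <cstdio>
#include <vector>

int main()
{
  // 1MB of zeros, matching `dd bs=1048576`.
  std::vector<char> block(1048576, 0);

  int fd = open("file", O_WRONLY | O_CREAT | O_TRUNC, 0644);
  if (fd < 0) {
    perror("open");
    return 1;
  }

  for (int i = 0; i < 2; i++) { // Matching `dd count=2`.
    if (write(fd, block.data(), block.size()) < 0) {
      perror("write"); // EDQUOT: "Disk quota exceeded".
      close(fd);
      return 1;        // The non-zero exit that fails the task.
    }
  }

  close(fd);
  return 0;
}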
// Tests that the task fails when it attempts to write to a persistent volume // mounted as read-only. Note that although we use a shared persistent volume, // the behavior is the same for non-shared persistent volumes. TEST_F(LinuxFilesystemIsolatorMesosTest, ROOT_WriteAccessSharedPersistentVolumeReadOnlyMode) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); string registry = path::join(sandbox.get(), "registry"); AWAIT_READY(DockerArchive::create(registry, "test_image")); slave::Flags flags = CreateSlaveFlags(); flags.resources = "cpus:2;mem:128;disk(role1):128"; flags.isolation = "filesystem/linux,docker/runtime"; flags.docker_registry = registry; flags.docker_store_dir = path::join(sandbox.get(), "store"); flags.image_providers = "docker"; Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO; frameworkInfo.set_roles(0, "role1"); frameworkInfo.add_capabilities()->set_type( FrameworkInfo::Capability::SHARED_RESOURCES); MesosSchedulerDriver driver( &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); // We create a shared volume that the task will attempt to write to. Resource volume = createPersistentVolume( Megabytes(4), "role1", "id1", "volume_path", None(), None(), frameworkInfo.principal(), true); // Shared volume. // The task uses the shared volume as read-only. Resource roVolume = volume; roVolume.mutable_disk()->mutable_volume()->set_mode(Volume::RO); Resources taskResources = Resources::parse("cpus:1;mem:64;disk(role1):1").get() + roVolume; TaskInfo task = createTask( offers.get()[0].slave_id(), taskResources, "echo hello > volume_path/file"); // The task fails to write to the volume since its resources declare // the volume read-only. Future<TaskStatus> statusStarting; Future<TaskStatus> statusRunning; Future<TaskStatus> statusFailed; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&statusStarting)) .WillOnce(FutureArg<1>(&statusRunning)) .WillOnce(FutureArg<1>(&statusFailed)); driver.acceptOffers( {offers.get()[0].id()}, {CREATE(volume), LAUNCH({task})}); AWAIT_READY(statusStarting); EXPECT_EQ(task.task_id(), statusStarting->task_id()); EXPECT_EQ(TASK_STARTING, statusStarting->state()); AWAIT_READY(statusRunning); EXPECT_EQ(task.task_id(), statusRunning->task_id()); EXPECT_EQ(TASK_RUNNING, statusRunning->state()); AWAIT_READY(statusFailed); EXPECT_EQ(task.task_id(), statusFailed->task_id()); EXPECT_EQ(TASK_FAILED, statusFailed->state()); driver.stop(); driver.join(); }
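// For context, a common way to enforce a read-only volume on Linux is a
// two-step bind mount; this is roughly what a Linux filesystem isolator
// has to do, though the exact Mesos implementation may differ. MS_RDONLY
// cannot be set in the initial MS_BIND call, so the mount is remounted
// read-only afterwards; writes into the mount then fail with EROFS,
// which is why the `echo` above fails the task. Paths are hypothetical
// and the program needs root:
#include <sys/mount.h>

#include <cstdio>

int main()
{
  const char* source = "/tmp/volume";      // Where the volume lives.
  const char* target = "/tmp/volume_path"; // Where the task sees it.

  // Step 1: create the bind mount (still read-write at this point).
  if (mount(source, target, nullptr, MS_BIND, nullptr) != 0) {
    perror("bind mount");
    return 1;
  }

  // Step 2: remount the bind mount read-only.
  if (mount(nullptr, target, nullptr,
            MS_BIND | MS_REMOUNT | MS_RDONLY, nullptr) != 0) {
    perror("remount read-only");
    return 1;
  }

  return 0;
}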
// This test verifies that the volume usage accounting for sandboxes // with bind-mounted volumes (while linux filesystem isolator is used) // works correctly by creating a file within the volume the size of // which exceeds the sandbox quota. TEST_F(LinuxFilesystemIsolatorMesosTest, ROOT_VolumeUsageExceedsSandboxQuota) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); string registry = path::join(sandbox.get(), "registry"); AWAIT_READY(DockerArchive::create(registry, "test_image")); slave::Flags flags = CreateSlaveFlags(); flags.resources = "cpus:2;mem:128;disk(role1):128"; flags.isolation = "disk/du,filesystem/linux,docker/runtime"; flags.docker_registry = registry; flags.docker_store_dir = path::join(sandbox.get(), "store"); flags.image_providers = "docker"; // NOTE: We can't pause the clock because we need the reaper to reap // the 'du' subprocess. flags.container_disk_watch_interval = Milliseconds(1); flags.enforce_container_disk_quota = true; Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO; frameworkInfo.set_roles(0, "role1"); MesosSchedulerDriver driver( &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); // We request a sandbox (1MB) that is smaller than the persistent // volume (4MB) and attempt to create a file in that volume that is // twice the size of the sandbox (2MB). Resources volume = createPersistentVolume( Megabytes(4), "role1", "id1", "volume_path", None(), None(), frameworkInfo.principal()); Resources taskResources = Resources::parse("cpus:1;mem:64;disk(role1):1").get() + volume; // We sleep to give quota enforcement (du) a chance to kick in. TaskInfo task = createTask( offers.get()[0].slave_id(), taskResources, "dd if=/dev/zero of=volume_path/file bs=1048576 count=2 && sleep 1"); Future<TaskStatus> statusStarting; Future<TaskStatus> statusRunning; Future<TaskStatus> statusFinished; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&statusStarting)) .WillOnce(FutureArg<1>(&statusRunning)) .WillOnce(FutureArg<1>(&statusFinished)); driver.acceptOffers( {offers.get()[0].id()}, {CREATE(volume), LAUNCH({task})}); AWAIT_READY(statusStarting); EXPECT_EQ(task.task_id(), statusStarting->task_id()); EXPECT_EQ(TASK_STARTING, statusStarting->state()); AWAIT_READY(statusRunning); EXPECT_EQ(task.task_id(), statusRunning->task_id()); EXPECT_EQ(TASK_RUNNING, statusRunning->state()); AWAIT_READY(statusFinished); EXPECT_EQ(task.task_id(), statusFinished->task_id()); EXPECT_EQ(TASK_FINISHED, statusFinished->state()); driver.stop(); driver.join(); }
// This test verifies that a task group is launched on the agent if the executor // provides a valid authentication token specifying its own ContainerID. TEST_F(ExecutorAuthorizationTest, RunTaskGroup) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); // Start an agent with permissive ACLs so that a task can be launched. ACLs acls; acls.set_permissive(true); slave::Flags flags = CreateSlaveFlags(); flags.acls = acls; Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO; MockScheduler sched; MesosSchedulerDriver driver( &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL); Future<FrameworkID> frameworkId; EXPECT_CALL(sched, registered(&driver, _, _)) .WillOnce(FutureArg<1>(&frameworkId)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(frameworkId); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); Offer offer = offers.get()[0]; TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:0.5;mem:32").get(), "sleep 1000"); Future<TaskStatus> status; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&status)); Resources executorResources = allocatedResources(Resources::parse("cpus:0.1;mem:32;disk:32").get(), "*"); ExecutorInfo executor; executor.mutable_executor_id()->set_value("default"); executor.set_type(ExecutorInfo::DEFAULT); executor.mutable_framework_id()->CopyFrom(frameworkId.get()); executor.mutable_resources()->CopyFrom(executorResources); TaskGroupInfo taskGroup; taskGroup.add_tasks()->CopyFrom(task); driver.acceptOffers({offer.id()}, {LAUNCH_GROUP(executor, taskGroup)}); AWAIT_READY(status); ASSERT_EQ(task.task_id(), status->task_id()); EXPECT_EQ(TASK_STARTING, status->state()); driver.stop(); driver.join(); }
// This test verifies that a docker image's default entrypoint is executed // correctly using the registry puller. This corresponds to the case in the // runtime isolator logic table: sh=0, value=0, argv=1, entrypoint=1, cmd=0. TEST_F(DockerRuntimeIsolatorTest, ROOT_CURL_INTERNET_DockerDefaultEntryptRegistryPuller) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); flags.isolation = "docker/runtime,filesystem/linux"; flags.image_providers = "docker"; flags.docker_store_dir = path::join(os::getcwd(), "store"); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_EQ(1u, offers->size()); const Offer& offer = offers.get()[0]; TaskInfo task; task.set_name("test-task"); task.mutable_task_id()->set_value(UUID::random().toString()); task.mutable_slave_id()->CopyFrom(offer.slave_id()); task.mutable_resources()->CopyFrom(Resources::parse("cpus:1;mem:128").get()); task.mutable_command()->set_shell(false); task.mutable_command()->add_arguments("hello world"); Image image; image.set_type(Image::DOCKER); // The 'mesosphere/inky' image is also used in the docker containerizer // tests; its entrypoint is 'echo' and its cmd is null. image.mutable_docker()->set_name("mesosphere/inky"); ContainerInfo* container = task.mutable_container(); container->set_type(ContainerInfo::MESOS); container->mutable_mesos()->mutable_image()->CopyFrom(image); Future<TaskStatus> statusRunning; Future<TaskStatus> statusFinished; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&statusRunning)) .WillOnce(FutureArg<1>(&statusFinished)); driver.launchTasks(offer.id(), {task}); AWAIT_READY_FOR(statusRunning, Seconds(60)); EXPECT_EQ(task.task_id(), statusRunning->task_id()); EXPECT_EQ(TASK_RUNNING, statusRunning->state()); AWAIT_READY(statusFinished); EXPECT_EQ(task.task_id(), statusFinished->task_id()); EXPECT_EQ(TASK_FINISHED, statusFinished->state()); driver.stop(); driver.join(); }
// This is the same logic as ResourceStatistics, except the task should // be allowed to exceed the disk quota, and usage statistics should report // that the quota was exceeded. TEST_F(ROOT_XFS_QuotaTest, ResourceStatisticsNoEnforce) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); flags.enforce_container_disk_quota = false; Fetcher fetcher(flags); Owned<MasterDetector> detector = master.get()->createDetector(); Try<MesosContainerizer*> _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); Owned<MesosContainerizer> containerizer(_containerizer.get()); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); Offer offer = offers.get()[0]; // Create a task that writes 4MB into its 3MB disk quota and fails // if it can't write the full amount. TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128;disk:3").get(), "dd if=/dev/zero of=file bs=1048576 count=4 && sleep 1000"); Future<TaskStatus> startingStatus; Future<TaskStatus> runningStatus; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&startingStatus)) .WillOnce(FutureArg<1>(&runningStatus)) .WillRepeatedly(Return()); // Ignore subsequent updates. driver.launchTasks(offers.get()[0].id(), {task}); AWAIT_READY(startingStatus); EXPECT_EQ(task.task_id(), startingStatus->task_id()); EXPECT_EQ(TASK_STARTING, startingStatus->state()); AWAIT_READY(runningStatus); EXPECT_EQ(task.task_id(), runningStatus->task_id()); EXPECT_EQ(TASK_RUNNING, runningStatus->state()); Future<hashset<ContainerID>> containers = containerizer.get()->containers(); AWAIT_READY(containers); ASSERT_EQ(1u, containers->size()); ContainerID containerId = *(containers->begin()); Duration diskTimeout = Seconds(5); Timeout timeout = Timeout::in(diskTimeout); while (true) { Future<ResourceStatistics> usage = containerizer.get()->usage(containerId); AWAIT_READY(usage); ASSERT_TRUE(usage->has_disk_limit_bytes()); EXPECT_EQ(Megabytes(3), Bytes(usage->disk_limit_bytes())); if (usage->has_disk_used_bytes()) { if (usage->disk_used_bytes() >= Megabytes(4).bytes()) { break; } } // The stopping condition for this test is that the isolator is // able to report that we wrote the full amount of data without // being constrained by the task disk limit. EXPECT_LE(usage->disk_used_bytes(), Megabytes(4).bytes()); ASSERT_FALSE(timeout.expired()) << "Used " << Bytes(usage->disk_used_bytes()) << " of expected " << Megabytes(4) << " within the " << diskTimeout << " timeout"; os::sleep(Milliseconds(100)); } driver.stop(); driver.join(); }
// This test verifies that a docker image's default cmd is executed correctly. // This corresponds to the case in the runtime isolator logic table: sh=0, // value=0, argv=1, entrypoint=0, cmd=1. TEST_F(DockerRuntimeIsolatorTest, ROOT_DockerDefaultCmdLocalPuller) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); const string directory = path::join(os::getcwd(), "archives"); Future<Nothing> testImage = DockerArchive::create(directory, "alpine", "null", "[\"sh\"]"); AWAIT_READY(testImage); ASSERT_TRUE(os::exists(path::join(directory, "alpine.tar"))); slave::Flags flags = CreateSlaveFlags(); flags.isolation = "docker/runtime,filesystem/linux"; flags.image_providers = "docker"; flags.docker_registry = directory; // Use a temporary directory as the docker store directory. Because the // manifest of the test image is changeable, an image cached by // previous tests should never be used. flags.docker_store_dir = path::join(os::getcwd(), "store"); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_EQ(1u, offers->size()); const Offer& offer = offers.get()[0]; TaskInfo task; task.set_name("test-task"); task.mutable_task_id()->set_value(UUID::random().toString()); task.mutable_slave_id()->CopyFrom(offer.slave_id()); task.mutable_resources()->CopyFrom(Resources::parse("cpus:1;mem:128").get()); task.mutable_command()->set_shell(false); task.mutable_command()->add_arguments("-c"); task.mutable_command()->add_arguments("echo 'hello world'"); Image image; image.set_type(Image::DOCKER); image.mutable_docker()->set_name("alpine"); ContainerInfo* container = task.mutable_container(); container->set_type(ContainerInfo::MESOS); container->mutable_mesos()->mutable_image()->CopyFrom(image); Future<TaskStatus> statusRunning; Future<TaskStatus> statusFinished; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&statusRunning)) .WillOnce(FutureArg<1>(&statusFinished)); driver.launchTasks(offer.id(), {task}); AWAIT_READY_FOR(statusRunning, Seconds(60)); EXPECT_EQ(task.task_id(), statusRunning->task_id()); EXPECT_EQ(TASK_RUNNING, statusRunning->state()); AWAIT_READY(statusFinished); EXPECT_EQ(task.task_id(), statusFinished->task_id()); EXPECT_EQ(TASK_FINISHED, statusFinished->state()); driver.stop(); driver.join(); }
// In this test, the framework is not checkpointed. This ensures that when we // stop the slave, the executor is killed and we will need to recover the // working directories without getting any checkpointed recovery state. TEST_F(ROOT_XFS_QuotaTest, NoCheckpointRecovery) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); Fetcher fetcher(flags); Try<MesosContainerizer*> _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); Owned<MesosContainerizer> containerizer(_containerizer.get()); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave( detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); Offer offer = offers.get()[0]; TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128;disk:1").get(), "dd if=/dev/zero of=file bs=1048576 count=1; sleep 1000"); Future<TaskStatus> runningStatus; Future<TaskStatus> startingStatus; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&startingStatus)) .WillOnce(FutureArg<1>(&runningStatus)) .WillOnce(Return()); driver.launchTasks(offer.id(), {task}); AWAIT_READY(startingStatus); EXPECT_EQ(task.task_id(), startingStatus->task_id()); EXPECT_EQ(TASK_STARTING, startingStatus->state()); AWAIT_READY(runningStatus); EXPECT_EQ(task.task_id(), runningStatus->task_id()); EXPECT_EQ(TASK_RUNNING, runningStatus->state()); Future<ResourceUsage> usage1 = process::dispatch(slave.get()->pid, &Slave::usage); AWAIT_READY(usage1); // We should have 1 executor using resources. ASSERT_EQ(1, usage1->executors().size()); Future<hashset<ContainerID>> containers = containerizer->containers(); AWAIT_READY(containers); ASSERT_EQ(1u, containers->size()); ContainerID containerId = *containers->begin(); // Restart the slave. slave.get()->terminate(); Future<SlaveReregisteredMessage> slaveReregisteredMessage = FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _); _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); containerizer.reset(_containerizer.get()); slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); // Wait until slave recovery is complete. Future<Nothing> _recover = FUTURE_DISPATCH(_, &Slave::_recover); AWAIT_READY_FOR(_recover, Seconds(60)); // Wait until the orphan containers are cleaned up. AWAIT_READY_FOR(containerizer.get()->wait(containerId), Seconds(60)); AWAIT_READY(slaveReregisteredMessage); Future<ResourceUsage> usage2 = process::dispatch(slave.get()->pid, &Slave::usage); AWAIT_READY(usage2); // We should have no executors left because we didn't checkpoint. ASSERT_TRUE(usage2->executors().empty()); Try<std::list<string>> sandboxes = os::glob(path::join( slave::paths::getSandboxRootDir(mountPoint.get()), "*", "frameworks", "*", "executors", "*", "runs", "*")); ASSERT_SOME(sandboxes); // One sandbox and one symlink. ASSERT_EQ(2u, sandboxes->size()); // Scan the remaining sandboxes and make sure that no projects are assigned. 
foreach (const string& sandbox, sandboxes.get()) { // Skip the "latest" symlink. if (os::stat::islink(sandbox)) { continue; } EXPECT_NONE(xfs::getProjectId(sandbox)); } driver.stop(); driver.join(); }
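// For reference, a helper like `xfs::getProjectId` can be implemented in
// terms of the generic FS_IOC_FSGETXATTR ioctl, which reports the project
// ID an inode is accounted against. A reduced sketch (error handling
// collapsed to a bool; the real helper returns a richer result type):
#include <fcntl.h>
#include <unistd.h>

#include <sys/ioctl.h>

#include <linux/fs.h>

// Returns true and stores the project ID on success.
static bool getProjectId(const char* path, unsigned int* projectId)
{
  int fd = open(path, O_RDONLY);
  if (fd < 0) {
    return false;
  }

  struct fsxattr attr;
  bool ok = ioctl(fd, FS_IOC_FSGETXATTR, &attr) == 0;
  if (ok) {
    *projectId = attr.fsx_projid;
  }

  close(fd);
  return ok;
}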
// This test launches a command task with checkpointing enabled, terminates // the agent while the task is running, then restarts the agent, kills the // task, and verifies that we receive TASK_KILLED for it. TEST_F(CniIsolatorTest, ROOT_SlaveRecovery) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); flags.isolation = "network/cni"; flags.network_cni_plugins_dir = cniPluginDir; flags.network_cni_config_dir = cniConfigDir; Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; // Enable checkpointing for the framework. FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO; frameworkInfo.set_checkpoint(true); MesosSchedulerDriver driver( &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_EQ(1u, offers->size()); const Offer& offer = offers.get()[0]; CommandInfo command; command.set_value("sleep 1000"); TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128").get(), command); ContainerInfo* container = task.mutable_container(); container->set_type(ContainerInfo::MESOS); // Make sure the container joins the mock CNI network. container->add_network_infos()->set_name("__MESOS_TEST__"); Future<TaskStatus> statusRunning; Future<TaskStatus> statusKilled; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&statusRunning)) .WillOnce(FutureArg<1>(&statusKilled)); EXPECT_CALL(sched, offerRescinded(&driver, _)) .Times(AtMost(1)); Future<Nothing> ack = FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement); driver.launchTasks(offer.id(), {task}); AWAIT_READY(statusRunning); EXPECT_EQ(task.task_id(), statusRunning->task_id()); EXPECT_EQ(TASK_RUNNING, statusRunning->state()); // Wait for the ACK to be checkpointed. AWAIT_READY(ack); // Stop the slave after TASK_RUNNING is received. slave.get()->terminate(); // Restart the slave. slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); // Kill the task. driver.killTask(task.task_id()); AWAIT_READY(statusKilled); EXPECT_EQ(task.task_id(), statusKilled->task_id()); EXPECT_EQ(TASK_KILLED, statusKilled->state()); driver.stop(); driver.join(); }
// In this test, the framework is checkpointed so we expect the executor to // persist across the slave restart and to have the same resource usage before // and after. TEST_F(ROOT_XFS_QuotaTest, CheckpointRecovery) { slave::Flags flags = CreateSlaveFlags(); Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO; frameworkInfo.set_checkpoint(true); MockScheduler sched; MesosSchedulerDriver driver( &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); Offer offer = offers.get()[0]; TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128;disk:1").get(), "dd if=/dev/zero of=file bs=1048576 count=1; sleep 1000"); Future<TaskStatus> startingStatus; Future<TaskStatus> runningStatus; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&startingStatus)) .WillOnce(FutureArg<1>(&runningStatus)); driver.launchTasks(offer.id(), {task}); AWAIT_READY(startingStatus); EXPECT_EQ(task.task_id(), startingStatus->task_id()); EXPECT_EQ(TASK_STARTING, startingStatus->state()); AWAIT_READY(runningStatus); EXPECT_EQ(task.task_id(), runningStatus->task_id()); EXPECT_EQ(TASK_RUNNING, runningStatus->state()); Future<ResourceUsage> usage1 = process::dispatch(slave.get()->pid, &Slave::usage); AWAIT_READY(usage1); // We should have 1 executor using resources. ASSERT_EQ(1, usage1->executors().size()); // Restart the slave. slave.get()->terminate(); Future<SlaveReregisteredMessage> slaveReregisteredMessage = FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _); slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); // Wait for the slave to re-register. AWAIT_READY(slaveReregisteredMessage); Future<ResourceUsage> usage2 = process::dispatch(slave.get()->pid, &Slave::usage); AWAIT_READY(usage2); // We should still have 1 executor using resources. ASSERT_EQ(1, usage2->executors().size()); Try<std::list<string>> sandboxes = os::glob(path::join( slave::paths::getSandboxRootDir(mountPoint.get()), "*", "frameworks", "*", "executors", "*", "runs", "*")); ASSERT_SOME(sandboxes); // One sandbox and one symlink. ASSERT_EQ(2u, sandboxes->size()); // Scan the remaining sandboxes. We ought to still have project IDs // assigned to them all. foreach (const string& sandbox, sandboxes.get()) { // Skip the "latest" symlink. if (os::stat::islink(sandbox)) { continue; } EXPECT_SOME(xfs::getProjectId(sandbox)); } driver.stop(); driver.join(); }
TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_Statistics) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); // We only care about memory cgroup for this test. flags.isolation = "cgroups/mem"; flags.agent_subsystems = None(); Fetcher fetcher(flags); Try<MesosContainerizer*> _containerizer = MesosContainerizer::create(flags, true, &fetcher); ASSERT_SOME(_containerizer); Owned<MesosContainerizer> containerizer(_containerizer.get()); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), containerizer.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); Offer offer = offers.get()[0]; // Run a task that triggers a memory pressure event. We request 1G // disk because we are going to write a 512 MB file repeatedly. TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:256;disk:1024").get(), "while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done"); Future<TaskStatus> running; Future<TaskStatus> killed; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&running)) .WillOnce(FutureArg<1>(&killed)) .WillRepeatedly(Return()); // Ignore subsequent updates. driver.launchTasks(offer.id(), {task}); AWAIT_READY(running); EXPECT_EQ(task.task_id(), running->task_id()); EXPECT_EQ(TASK_RUNNING, running->state()); Future<hashset<ContainerID>> containers = containerizer->containers(); AWAIT_READY(containers); ASSERT_EQ(1u, containers->size()); ContainerID containerId = *(containers->begin()); // Wait a while for some memory pressure events to occur. Duration waited = Duration::zero(); do { Future<ResourceStatistics> usage = containerizer->usage(containerId); AWAIT_READY(usage); if (usage->mem_low_pressure_counter() > 0) { // We will check the correctness of the memory pressure counters // later, because the memory-hammering task is still active // and potentially incrementing these counters. break; } os::sleep(Milliseconds(100)); waited += Milliseconds(100); } while (waited < Seconds(5)); EXPECT_LE(waited, Seconds(5)); // Pause the clock to ensure that the reaper doesn't reap the exited // command executor and inform the containerizer/slave. Clock::pause(); Clock::settle(); // Stop the memory-hammering task. driver.killTask(task.task_id()); AWAIT_READY_FOR(killed, Seconds(120)); EXPECT_EQ(task.task_id(), killed->task_id()); EXPECT_EQ(TASK_KILLED, killed->state()); // Now check the correctness of the memory pressure counters. Future<ResourceStatistics> usage = containerizer->usage(containerId); AWAIT_READY(usage); EXPECT_GE(usage->mem_low_pressure_counter(), usage->mem_medium_pressure_counter()); EXPECT_GE(usage->mem_medium_pressure_counter(), usage->mem_critical_pressure_counter()); Clock::resume(); driver.stop(); driver.join(); }
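// Where the pressure counters come from: with cgroups v1, a listener
// registers an eventfd against `memory.pressure_level` through
// `cgroup.event_control` and counts notifications. Since an event at a
// given level also notifies listeners registered at lower levels, the
// counters must satisfy low >= medium >= critical, which is what the
// test asserts. A reduced sketch of such a registration; the cgroup
// path is illustrative and error handling is trimmed:
#include <fcntl.h>
#include <unistd.h>

#include <sys/eventfd.h>

#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
  // Illustrative path; the agent derives the real one per container.
  const char* cgroup = "/sys/fs/cgroup/memory/mesos/test";

  char path[256];
  snprintf(path, sizeof(path), "%s/memory.pressure_level", cgroup);
  int pressure = open(path, O_RDONLY);

  int efd = eventfd(0, EFD_CLOEXEC);

  snprintf(path, sizeof(path), "%s/cgroup.event_control", cgroup);
  int control = open(path, O_WRONLY);

  if (pressure < 0 || efd < 0 || control < 0) {
    perror("open");
    return 1;
  }

  // Register for "low" events; "medium" and "critical" are analogous.
  char line[64];
  snprintf(line, sizeof(line), "%d %d low", efd, pressure);
  if (write(control, line, strlen(line)) < 0) {
    perror("cgroup.event_control");
    return 1;
  }

  // Each read drains the number of events since the previous read.
  uint64_t count = 0;
  if (read(efd, &count, sizeof(count)) == sizeof(count)) {
    printf("low pressure events: %llu\n", (unsigned long long)count);
  }

  return 0;
}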
// In this test, the agent initially doesn't enable disk isolation // but then restarts with XFS disk isolation enabled. We verify that // the old container launched before the agent restart is // successfully recovered. TEST_F(ROOT_XFS_QuotaTest, RecoverOldContainers) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); Owned<MasterDetector> detector = master.get()->createDetector(); slave::Flags flags = CreateSlaveFlags(); // `CreateSlaveFlags()` enables `disk/xfs` so here we reset // `isolation` to empty. flags.isolation.clear(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO; frameworkInfo.set_checkpoint(true); MockScheduler sched; MesosSchedulerDriver driver( &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); Offer offer = offers.get()[0]; TaskInfo task = createTask( offer.slave_id(), Resources::parse("cpus:1;mem:128;disk:1").get(), "dd if=/dev/zero of=file bs=1024 count=1; sleep 1000"); Future<TaskStatus> startingStatus; Future<TaskStatus> runningStatus; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&startingStatus)) .WillOnce(FutureArg<1>(&runningStatus)); driver.launchTasks(offer.id(), {task}); AWAIT_READY(startingStatus); EXPECT_EQ(task.task_id(), startingStatus->task_id()); EXPECT_EQ(TASK_STARTING, startingStatus->state()); AWAIT_READY(runningStatus); EXPECT_EQ(task.task_id(), runningStatus->task_id()); EXPECT_EQ(TASK_RUNNING, runningStatus->state()); { Future<ResourceUsage> usage = process::dispatch(slave.get()->pid, &Slave::usage); AWAIT_READY(usage); // We should have 1 executor using resources but it doesn't have // disk limit enabled. ASSERT_EQ(1, usage->executors().size()); const ResourceUsage_Executor& executor = usage->executors().Get(0); ASSERT_TRUE(executor.has_statistics()); ASSERT_FALSE(executor.statistics().has_disk_limit_bytes()); } // Restart the slave. slave.get()->terminate(); Future<SlaveReregisteredMessage> slaveReregisteredMessage = FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _); // This time use the agent flags that include XFS disk isolation. slave = StartSlave(detector.get(), CreateSlaveFlags()); ASSERT_SOME(slave); // Wait for the slave to re-register. AWAIT_READY(slaveReregisteredMessage); { Future<ResourceUsage> usage = process::dispatch(slave.get()->pid, &Slave::usage); AWAIT_READY(usage); // We should still have 1 executor using resources but it doesn't // have disk limit enabled. ASSERT_EQ(1, usage->executors().size()); const ResourceUsage_Executor& executor = usage->executors().Get(0); ASSERT_TRUE(executor.has_statistics()); ASSERT_FALSE(executor.statistics().has_disk_limit_bytes()); } driver.stop(); driver.join(); }
// This test confirms that if a task exceeds configured resource // limits it is forcibly terminated. TEST_F(PosixRLimitsIsolatorTest, TaskExceedingLimit) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); flags.isolation = "posix/rlimits"; Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_FALSE(offers->empty()); // The task attempts to use an infinite amount of CPU time. TaskInfo task = createTask( offers.get()[0].slave_id(), offers.get()[0].resources(), "while true; do true; done"); ContainerInfo* container = task.mutable_container(); container->set_type(ContainerInfo::MESOS); // Limit the process to at most 1 second of CPU time. RLimitInfo rlimitInfo; RLimitInfo::RLimit* cpuLimit = rlimitInfo.add_rlimits(); cpuLimit->set_type(RLimitInfo::RLimit::RLMT_CPU); cpuLimit->set_soft(1); cpuLimit->set_hard(1); container->mutable_rlimit_info()->CopyFrom(rlimitInfo); Future<TaskStatus> statusRunning; Future<TaskStatus> statusFailed; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&statusRunning)) .WillOnce(FutureArg<1>(&statusFailed)); driver.launchTasks(offers.get()[0].id(), {task}); AWAIT_READY(statusRunning); EXPECT_EQ(task.task_id(), statusRunning->task_id()); EXPECT_EQ(TASK_RUNNING, statusRunning->state()); AWAIT_READY(statusFailed); EXPECT_EQ(task.task_id(), statusFailed->task_id()); EXPECT_EQ(TASK_FAILED, statusFailed->state()); driver.stop(); driver.join(); }
// This test confirms that setting no values for the soft and hard // limits implies an unlimited resource. TEST_F(PosixRLimitsIsolatorTest, UnsetLimits) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); slave::Flags flags = CreateSlaveFlags(); flags.isolation = "posix/rlimits"; Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(_, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(_, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); ASSERT_NE(0u, offers->size()); TaskInfo task = createTask( offers.get()[0].slave_id(), offers.get()[0].resources(), "exit `ulimit -c | grep -q unlimited`"); // Force usage of C locale as we interpret a potentially translated // string in the task's command. mesos::Environment::Variable* locale = task.mutable_command()->mutable_environment()->add_variables(); locale->set_name("LC_ALL"); locale->set_value("C"); ContainerInfo* container = task.mutable_container(); container->set_type(ContainerInfo::MESOS); // Setting rlimit for core without soft or hard limit signifies // unlimited range. RLimitInfo rlimitInfo; RLimitInfo::RLimit* rlimit = rlimitInfo.add_rlimits(); rlimit->set_type(RLimitInfo::RLimit::RLMT_CORE); container->mutable_rlimit_info()->CopyFrom(rlimitInfo); Future<TaskStatus> statusRunning; Future<TaskStatus> statusFinal; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&statusRunning)) .WillOnce(FutureArg<1>(&statusFinal)); driver.launchTasks(offers.get()[0].id(), {task}); AWAIT_READY(statusRunning); EXPECT_EQ(task.task_id(), statusRunning->task_id()); EXPECT_EQ(TASK_RUNNING, statusRunning->state()); AWAIT_READY(statusFinal); EXPECT_EQ(task.task_id(), statusFinal->task_id()); EXPECT_EQ(TASK_FINISHED, statusFinal->state()); driver.stop(); driver.join(); }
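// The semantics the test exercises map directly onto setrlimit(2): an
// RLimit with neither `soft` nor `hard` set is applied as the unlimited
// range, i.e. both fields become RLIM_INFINITY. A minimal standalone
// sketch of the equivalent direct call for RLMT_CORE (note that raising
// the hard limit may require privileges unless it is already unlimited):
#include <sys/resource.h>

#include <cstdio>

int main()
{
  rlimit limit;
  limit.rlim_cur = RLIM_INFINITY; // Soft limit.
  limit.rlim_max = RLIM_INFINITY; // Hard limit.

  if (setrlimit(RLIMIT_CORE, &limit) != 0) {
    perror("setrlimit");
    return 1;
  }

  // `ulimit -c` in a child shell would now print "unlimited", which is
  // exactly what the task's grep checks for.
  return 0;
}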