// This test verifies that disk quota isolator recovers properly after
// the slave restarts.
TEST_F(DiskQuotaTest, SlaveRecovery)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/cpu,posix/mem,disk/du";

  // Sample disk usage very frequently so the polling loop below
  // converges quickly.
  flags.container_disk_watch_interval = Milliseconds(1);

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> _containerizer =
    MesosContainerizer::create(flags, true, &fetcher);

  ASSERT_SOME(_containerizer);

  Owned<MesosContainerizer> containerizer(_containerizer.get());

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;

  // Enable checkpointing for the framework so the executor survives
  // the slave restart below.
  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
  frameworkInfo.set_checkpoint(true);

  MesosSchedulerDriver driver(
      &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  const Offer& offer = offers.get()[0];

  // Create a task that uses 2MB disk (within its 3MB quota) and then
  // stays alive so we can observe its usage across the restart.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128;disk:3").get(),
      "dd if=/dev/zero of=file bs=1048576 count=2 && sleep 1000");

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status))
    .WillRepeatedly(Return()); // Ignore subsequent updates.

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY(status);
  EXPECT_EQ(task.task_id(), status->task_id());
  EXPECT_EQ(TASK_RUNNING, status->state());

  Future<hashset<ContainerID>> containers = containerizer->containers();

  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());

  const ContainerID& containerId = *(containers->begin());

  // Stop the slave.
  slave.get()->terminate();

  // Set up the futures BEFORE restarting so we don't race with the
  // recovery messages.
  Future<ReregisterExecutorMessage> reregisterExecutorMessage =
    FUTURE_PROTOBUF(ReregisterExecutorMessage(), _, _);

  Future<Nothing> _recover = FUTURE_DISPATCH(_, &Slave::_recover);

  // Restart the slave with a fresh containerizer (same flags, so it
  // recovers the checkpointed container).
  _containerizer = MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(_containerizer);
  containerizer.reset(_containerizer.get());

  detector = master.get()->createDetector();

  slave = StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);

  Clock::pause();

  AWAIT_READY(_recover);

  // Wait for slave to schedule reregister timeout.
  Clock::settle();

  // Ensure the executor re-registers before completing recovery.
  AWAIT_READY(reregisterExecutorMessage);

  // Ensure the slave considers itself recovered.
  Clock::advance(flags.executor_reregistration_timeout);

  // NOTE: We resume the clock because we need the reaper to reap the
  // 'du' subprocess.
  Clock::resume();

  // Wait until disk usage can be retrieved. A failed 'usage' call
  // would mean the recovered isolator lost track of the container.
  Duration elapsed = Duration::zero();
  while (true) {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);
    AWAIT_READY(usage);

    ASSERT_TRUE(usage->has_disk_limit_bytes());
    EXPECT_EQ(Megabytes(3), Bytes(usage->disk_limit_bytes()));

    if (usage->has_disk_used_bytes()) {
      EXPECT_LE(usage->disk_used_bytes(), usage->disk_limit_bytes());

      // NOTE: This is to capture the regression in MESOS-2452. The data
      // stored in the executor meta directory should be less than 64K.
      if (usage->disk_used_bytes() > Kilobytes(64).bytes()) {
        break;
      }
    }

    ASSERT_LT(elapsed, Seconds(15));
    os::sleep(Milliseconds(1));
    elapsed += Milliseconds(1);
  }

  driver.stop();
  driver.join();
}
// This test verifies that the disk/du isolator reports separate usage
// statistics for the sandbox quota and for a persistent volume.
TEST_F(DiskQuotaTest, ResourceStatistics)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/cpu,posix/mem,disk/du";

  // Advertise role-reserved disk so a persistent volume can be created.
  flags.resources = strings::format("disk(%s):10", DEFAULT_TEST_ROLE).get();

  // NOTE: We can't pause the clock because we need the reaper to reap
  // the 'du' subprocess.
  flags.container_disk_watch_interval = Milliseconds(1);

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> _containerizer =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(_containerizer);

  Owned<MesosContainerizer> containerizer(_containerizer.get());

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);

  // The framework must be in the test role to receive the reserved disk.
  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
  frameworkInfo.set_role(DEFAULT_TEST_ROLE);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  const Offer& offer = offers.get()[0];

  // A 4MB persistent volume mounted at 'path1' in the sandbox.
  Resource volume = createPersistentVolume(
      Megabytes(4),
      DEFAULT_TEST_ROLE,
      "id1",
      "path1",
      None(),
      None(),
      DEFAULT_CREDENTIAL.principal());

  // Task resources: 3MB sandbox disk quota plus the 4MB volume.
  Resources taskResources = Resources::parse("cpus:1;mem:128").get();

  taskResources += createDiskResource(
      "3", DEFAULT_TEST_ROLE, None(), None());

  taskResources += volume;

  // Create a task that uses 2MB disk in the sandbox and 2MB in the
  // volume, each within its respective limit.
  TaskInfo task = createTask(
      offer.slave_id(),
      taskResources,
      "dd if=/dev/zero of=file bs=1048576 count=2 && "
      "dd if=/dev/zero of=path1/file bs=1048576 count=2 && "
      "sleep 1000");

  Future<TaskStatus> status1;
  Future<TaskStatus> status2;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status1))
    .WillOnce(FutureArg<1>(&status2))
    .WillRepeatedly(Return()); // Ignore subsequent updates.

  // Create the volume and launch the task in a single ACCEPT call.
  driver.acceptOffers(
      {offer.id()},
      {CREATE(volume), LAUNCH({task})});

  AWAIT_READY(status1);
  EXPECT_EQ(task.task_id(), status1->task_id());
  EXPECT_EQ(TASK_RUNNING, status1->state());

  Future<hashset<ContainerID>> containers = containerizer->containers();
  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());

  const ContainerID& containerId = *(containers->begin());

  // Wait until disk usage can be retrieved for both the sandbox and
  // the persistent volume.
  Duration elapsed = Duration::zero();
  while (true) {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);
    AWAIT_READY(usage);

    ASSERT_TRUE(usage->has_disk_limit_bytes());
    EXPECT_EQ(Megabytes(3), Bytes(usage->disk_limit_bytes()));

    if (usage->has_disk_used_bytes()) {
      EXPECT_LE(usage->disk_used_bytes(), usage->disk_limit_bytes());
    }

    // One DiskStatistics entry for the sandbox and one for the volume.
    ASSERT_EQ(2u, usage->disk_statistics().size());

    bool done = true;
    foreach (const DiskStatistics& statistics, usage->disk_statistics()) {
      ASSERT_TRUE(statistics.has_limit_bytes());

      // The entry with 'persistence' set is the 4MB volume; the other
      // is the 3MB sandbox quota.
      EXPECT_EQ(
          statistics.has_persistence() ? Megabytes(4) : Megabytes(3),
          statistics.limit_bytes());

      if (!statistics.has_used_bytes()) {
        // Keep polling until usage is reported for both entries.
        done = false;
      } else {
        EXPECT_GT(
            statistics.has_persistence() ? Megabytes(4) : Megabytes(3),
            statistics.used_bytes());
      }
    }

    if (done) {
      break;
    }

    ASSERT_LT(elapsed, Seconds(5));
    os::sleep(Milliseconds(1));
    elapsed += Milliseconds(1);
  }

  driver.killTask(task.task_id());

  AWAIT_READY(status2);
  EXPECT_EQ(task.task_id(), status2->task_id());
  EXPECT_EQ(TASK_KILLED, status2->state());

  driver.stop();
  driver.join();
}
// This is the same logic as ResourceStatistics, except the task should
// be allowed to exceed the disk quota, and usage statistics should report
// that the quota was exceeded.
TEST_F(ROOT_XFS_QuotaTest, ResourceStatisticsNoEnforce)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  // Run the XFS isolator in accounting-only mode: usage is tracked but
  // writes past the quota are not blocked.
  slave::Flags flags = CreateSlaveFlags();
  flags.enforce_container_disk_quota = false;

  Fetcher fetcher(flags);
  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(create);
  Owned<MesosContainerizer> containerizer(create.get());

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  const Offer& offer = offers->front();

  // Create a task that uses 4MB of 3MB disk and fails if it can't
  // write the full amount.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128;disk:3").get(),
      "dd if=/dev/zero of=file bs=1048576 count=4 && sleep 1000");

  Future<TaskStatus> starting;
  Future<TaskStatus> running;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&starting))
    .WillOnce(FutureArg<1>(&running))
    .WillRepeatedly(Return()); // Ignore subsequent updates.

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY(starting);
  EXPECT_EQ(task.task_id(), starting->task_id());
  EXPECT_EQ(TASK_STARTING, starting->state());

  AWAIT_READY(running);
  EXPECT_EQ(task.task_id(), running->task_id());
  EXPECT_EQ(TASK_RUNNING, running->state());

  Future<hashset<ContainerID>> containers = containerizer->containers();
  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());

  const ContainerID containerId = *(containers->begin());

  const Duration diskTimeout = Seconds(5);
  Timeout deadline = Timeout::in(diskTimeout);

  // Poll the usage statistics until the task has written past its quota.
  for (;;) {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);
    AWAIT_READY(usage);

    ASSERT_TRUE(usage->has_disk_limit_bytes());
    EXPECT_EQ(Megabytes(3), Bytes(usage->disk_limit_bytes()));

    // The stopping condition for this test is that the isolator is
    // able to report that we wrote the full amount of data without
    // being constrained by the task disk limit.
    if (usage->has_disk_used_bytes() &&
        usage->disk_used_bytes() >= Megabytes(4).bytes()) {
      break;
    }

    EXPECT_LE(usage->disk_used_bytes(), Megabytes(4).bytes());

    ASSERT_FALSE(deadline.expired())
      << "Used " << Bytes(usage->disk_used_bytes())
      << " of expected " << Megabytes(4)
      << " within the " << diskTimeout << " timeout";

    os::sleep(Milliseconds(100));
  }

  driver.stop();
  driver.join();
}
// This test verifies that the container will not be killed if
// disk_enforce_quota flag is false (even if the disk usage exceeds
// its quota).
TEST_F(DiskQuotaTest, NoQuotaEnforcement)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/cpu,posix/mem,disk/du";

  // NOTE: We can't pause the clock because we need the reaper to reap
  // the 'du' subprocess.
  flags.container_disk_watch_interval = Milliseconds(1);
  flags.enforce_container_disk_quota = false;

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(create);

  Owned<MesosContainerizer> containerizer(create.get());

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  const Offer& offer = offers->front();

  // Launch a task that writes 2MB into a sandbox that only has a 1MB
  // disk quota.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128;disk:1").get(),
      "dd if=/dev/zero of=file bs=1048576 count=2 && sleep 1000");

  Future<TaskStatus> runningUpdate;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&runningUpdate))
    .WillRepeatedly(Return()); // Ignore subsequent updates.

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY(runningUpdate);
  EXPECT_EQ(task.task_id(), runningUpdate->task_id());
  EXPECT_EQ(TASK_RUNNING, runningUpdate->state());

  Future<hashset<ContainerID>> containers = containerizer->containers();
  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());

  const ContainerID containerId = *(containers->begin());

  // Wait until disk usage can be retrieved and the usage actually
  // exceeds the limit. If the container is killed due to quota
  // enforcement (which shouldn't happen), the 'usage' call will
  // return a failed future, leading to a failed test.
  Duration waited = Duration::zero();
  for (;;) {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);
    AWAIT_READY(usage);

    ASSERT_TRUE(usage->has_disk_limit_bytes());
    EXPECT_EQ(Megabytes(1), Bytes(usage->disk_limit_bytes()));

    if (usage->has_disk_used_bytes() &&
        usage->disk_used_bytes() > usage->disk_limit_bytes()) {
      break;
    }

    ASSERT_LT(waited, Seconds(5));
    os::sleep(Milliseconds(1));
    waited += Milliseconds(1);
  }

  driver.stop();
  driver.join();
}
// Verify that we can get accurate resource statistics from the XFS
// disk isolator.
TEST_F(ROOT_XFS_QuotaTest, ResourceStatistics)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  Fetcher fetcher(flags);
  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(create);
  Owned<MesosContainerizer> containerizer(create.get());

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  const Offer& offer = offers->front();

  // Create a task that uses 4 of 3MB disk but doesn't fail. We will verify
  // that the allocated disk is filled.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128;disk:3").get(),
      "dd if=/dev/zero of=file bs=1048576 count=4 || sleep 1000");

  Future<TaskStatus> starting;
  Future<TaskStatus> running;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&starting))
    .WillOnce(FutureArg<1>(&running))
    .WillRepeatedly(Return()); // Ignore subsequent updates.

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY(starting);
  EXPECT_EQ(task.task_id(), starting->task_id());
  EXPECT_EQ(TASK_STARTING, starting->state());

  AWAIT_READY(running);
  EXPECT_EQ(task.task_id(), running->task_id());
  EXPECT_EQ(TASK_RUNNING, running->state());

  Future<hashset<ContainerID>> containers = containerizer->containers();
  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());

  const ContainerID containerId = *(containers->begin());

  Timeout deadline = Timeout::in(Seconds(5));

  // Poll until the quota is completely consumed.
  for (;;) {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);
    AWAIT_READY(usage);

    ASSERT_TRUE(usage->has_disk_limit_bytes());
    EXPECT_EQ(Megabytes(3), Bytes(usage->disk_limit_bytes()));

    if (usage->has_disk_used_bytes()) {
      // Usage must always be <= the limit.
      EXPECT_LE(usage->disk_used_bytes(), usage->disk_limit_bytes());

      // Usage might not be equal to the limit, but it must hit
      // and not exceed the limit.
      if (usage->disk_used_bytes() >= usage->disk_limit_bytes()) {
        EXPECT_EQ(usage->disk_used_bytes(), usage->disk_limit_bytes());
        EXPECT_EQ(Megabytes(3), Bytes(usage->disk_used_bytes()));
        break;
      }
    }

    ASSERT_FALSE(deadline.expired());
    os::sleep(Milliseconds(100));
  }

  driver.stop();
  driver.join();
}