// This test ensures that the driver handles an empty slave id // in an acknowledgement message by dropping it. The driver will // log an error in this case (but we don't test for that). We // generate a status with no slave id by performing reconciliation. TEST_F(MesosSchedulerDriverTest, ExplicitAcknowledgementsUnsetSlaveID) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, false, DEFAULT_CREDENTIAL); Future<Nothing> registered; EXPECT_CALL(sched, registered(&driver, _, _)) .WillOnce(FutureSatisfy(®istered)); // Ensure no status update acknowledgements are sent to the master. EXPECT_NO_FUTURE_CALLS( mesos::scheduler::Call(), mesos::scheduler::Call::ACKNOWLEDGE, _ , master.get()->pid); driver.start(); AWAIT_READY(registered); Future<TaskStatus> update; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&update)); // Peform reconciliation without using a slave id. vector<TaskStatus> statuses; TaskStatus status; status.mutable_task_id()->set_value("foo"); status.set_state(TASK_RUNNING); statuses.push_back(status); driver.reconcileTasks(statuses); AWAIT_READY(update); ASSERT_EQ(TASK_LOST, update->state()); ASSERT_EQ(TaskStatus::SOURCE_MASTER, update->source()); ASSERT_EQ(TaskStatus::REASON_RECONCILIATION, update->reason()); ASSERT_FALSE(update->has_slave_id()); // Now send the acknowledgement. driver.acknowledgeStatusUpdate(update.get()); // Settle the clock to ensure driver processes the acknowledgement, // which should get dropped due to the missing slave id. Clock::pause(); Clock::settle(); driver.stop(); driver.join(); }
// Verify that a task that tries to consume more space than it has requested
// is only allowed to consume exactly the assigned resources. We tell dd
// to write 2MB but only give it 1MB of resources and (roughly) verify that
// it exits with a failure (that should be a write error).
TEST_F(ROOT_XFS_QuotaTest, DiskUsageExceedsQuota)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), CreateSlaveFlags());
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  const Offer& offer = offers.get()[0];

  // Create a task which requests 1MB disk, but actually uses more
  // than 2MB disk.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128;disk:1").get(),
      "dd if=/dev/zero of=file bs=1048576 count=2");

  // Expect the full STARTING -> RUNNING -> FAILED lifecycle.
  Future<TaskStatus> startingStatus;
  Future<TaskStatus> runningStatus;
  Future<TaskStatus> failedStatus;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&startingStatus))
    .WillOnce(FutureArg<1>(&runningStatus))
    .WillOnce(FutureArg<1>(&failedStatus));

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY(startingStatus);
  EXPECT_EQ(task.task_id(), startingStatus->task_id());
  EXPECT_EQ(TASK_STARTING, startingStatus->state());

  AWAIT_READY(runningStatus);
  EXPECT_EQ(task.task_id(), runningStatus->task_id());
  EXPECT_EQ(TASK_RUNNING, runningStatus->state());

  AWAIT_READY(failedStatus);
  EXPECT_EQ(task.task_id(), failedStatus->task_id());
  EXPECT_EQ(TASK_FAILED, failedStatus->state());

  // Unlike the 'disk/du' isolator, the reason for task failure
  // should be that dd got an IO error.
  EXPECT_EQ(TaskStatus::SOURCE_EXECUTOR, failedStatus->source());
  EXPECT_EQ("Command exited with status 1", failedStatus->message());

  driver.stop();
  driver.join();
}
// This test ensures that when explicit acknowledgements are enabled, // acknowledgements for master-generated updates are dropped by the // driver. We test this by creating an invalid task that uses no // resources. TEST_F(MesosSchedulerDriverTest, ExplicitAcknowledgementsMasterGeneratedUpdate) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); Owned<MasterDetector> detector = master.get()->createDetector(); Try<Owned<cluster::Slave>> slave = StartSlave(detector.get()); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, false, DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)); Future<vector<Offer>> offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. // Ensure no status update acknowledgements are sent to the master. EXPECT_NO_FUTURE_CALLS( mesos::scheduler::Call(), mesos::scheduler::Call::ACKNOWLEDGE, _ , master.get()->pid); driver.start(); AWAIT_READY(offers); EXPECT_NE(0u, offers->size()); // Launch a task using no resources. TaskInfo task; task.set_name(""); task.mutable_task_id()->set_value("1"); task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id()); task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO); vector<TaskInfo> tasks; tasks.push_back(task); Future<TaskStatus> status; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&status)); driver.launchTasks(offers.get()[0].id(), tasks); AWAIT_READY(status); ASSERT_EQ(TASK_ERROR, status->state()); ASSERT_EQ(TaskStatus::SOURCE_MASTER, status->source()); ASSERT_EQ(TaskStatus::REASON_TASK_INVALID, status->reason()); // Now send the acknowledgement. driver.acknowledgeStatusUpdate(status.get()); // Settle the clock to ensure driver processes the acknowledgement, // which should get dropped due to having come from the master. Clock::pause(); Clock::settle(); driver.stop(); driver.join(); }