void launchTask(ExecutorDriver* driver, const TaskInfo& task)
{
  if (run.isSome()) {
    // TODO(alexr): Use `protobuf::createTaskStatus()`
    // instead of manually setting fields.
    TaskStatus status;
    status.mutable_task_id()->CopyFrom(task.task_id());
    status.set_state(TASK_FAILED);
    status.set_message(
        "Attempted to run multiple tasks using a \"docker\" executor");

    driver->sendStatusUpdate(status);
    return;
  }

  // Capture the TaskID.
  taskId = task.task_id();

  // Capture the kill policy.
  if (task.has_kill_policy()) {
    killPolicy = task.kill_policy();
  }

  LOG(INFO) << "Starting task " << taskId.get();

  CHECK(task.has_container());
  CHECK(task.has_command());

  CHECK(task.container().type() == ContainerInfo::DOCKER);

  Try<Docker::RunOptions> runOptions = Docker::RunOptions::create(
      task.container(),
      task.command(),
      containerName,
      sandboxDirectory,
      mappedDirectory,
      task.resources() + task.executor().resources(),
      cgroupsEnableCfs,
      taskEnvironment,
      None(), // No extra devices.
      defaultContainerDNS);

  if (runOptions.isError()) {
    // TODO(alexr): Use `protobuf::createTaskStatus()`
    // instead of manually setting fields.
    TaskStatus status;
    status.mutable_task_id()->CopyFrom(task.task_id());
    status.set_state(TASK_FAILED);
    status.set_message(
        "Failed to create docker run options: " + runOptions.error());

    driver->sendStatusUpdate(status);

    _stop();
    return;
  }

  // We're adding task and executor resources to launch docker since
  // the DockerContainerizer updates the container cgroup limits
  // directly and it expects it to be the sum of both task and
  // executor resources. This does lead to a bit of unaccounted
  // resources for running this executor, but we are assuming
  // this is just a very small amount of overcommit.
  run = docker->run(
      runOptions.get(),
      Subprocess::FD(STDOUT_FILENO),
      Subprocess::FD(STDERR_FILENO));

  run->onAny(defer(self(), &Self::reaped, lambda::_1));

  // Delay sending TASK_RUNNING status update until we receive
  // inspect output. Note that we store a future that completes
  // after the sending of the running update. This allows us to
  // ensure that the terminal update is sent after the running
  // update (see `reaped()`).
  inspect =
    docker->inspect(containerName, DOCKER_INSPECT_DELAY)
      .then(defer(self(), [=](const Docker::Container& container) {
        if (!killed) {
          containerPid = container.pid;

          // TODO(alexr): Use `protobuf::createTaskStatus()`
          // instead of manually setting fields.
          TaskStatus status;
          status.mutable_task_id()->CopyFrom(taskId.get());
          status.set_state(TASK_RUNNING);
          status.set_data(container.output);

          if (container.ipAddress.isSome()) {
            // TODO(karya): Deprecated -- Remove after 0.25.0 has shipped.
            Label* label = status.mutable_labels()->add_labels();
            label->set_key("Docker.NetworkSettings.IPAddress");
            label->set_value(container.ipAddress.get());

            NetworkInfo* networkInfo =
              status.mutable_container_status()->add_network_infos();

            // Copy the NetworkInfo if it is specified in the
            // ContainerInfo. A Docker container has at most one
            // NetworkInfo, which is validated in containerizer.
            if (task.container().network_infos().size() > 0) {
              networkInfo->CopyFrom(task.container().network_infos(0));
              networkInfo->clear_ip_addresses();
            }

            NetworkInfo::IPAddress* ipAddress =
              networkInfo->add_ip_addresses();

            ipAddress->set_ip_address(container.ipAddress.get());

            containerNetworkInfo = *networkInfo;
          }

          driver->sendStatusUpdate(status);
        }

        return Nothing();
      }));

  inspect.onFailed(defer(self(), [=](const string& failure) {
    LOG(ERROR) << "Failed to inspect container '" << containerName << "'"
               << ": " << failure;

    // TODO(bmahler): This is fatal, try to shut down cleanly.
    // Since we don't have a container id, we can only discard
    // the run future.
  }));

  inspect.onReady(defer(self(), &Self::launchCheck, task));

  inspect.onReady(
      defer(self(), &Self::launchHealthCheck, containerName, task));
}
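// The three `protobuf::createTaskStatus()` TODOs above all ask for the
// same thing: replace the hand-rolled TaskStatus construction with a
// shared helper. A minimal sketch of such a helper follows; the name
// matches the TODOs, but this signature is an illustrative assumption,
// not necessarily the actual overload in `src/common/protobuf_utils.hpp`.
static TaskStatus createTaskStatus(
    const TaskID& taskId,
    const TaskState& state,
    const Option<std::string>& message = None())
{
  TaskStatus status;
  status.mutable_task_id()->CopyFrom(taskId);
  status.set_state(state);

  if (message.isSome()) {
    status.set_message(message.get());
  }

  return status;
}

// With a helper like this, the failure path above collapses to:
//
//   driver->sendStatusUpdate(createTaskStatus(
//       task.task_id(),
//       TASK_FAILED,
//       "Failed to create docker run options: " + runOptions.error()));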
// Test executor environment decorator hook and remove executor hook
// for slave. We expect the environment-decorator hook to create a
// temporary file and the remove-executor hook to delete that file.
TEST_F(HookTest, DISABLED_VerifySlaveLaunchExecutorHook)
{
  master::Flags masterFlags = CreateMasterFlags();

  Try<PID<Master>> master = StartMaster(masterFlags);
  ASSERT_SOME(master);

  slave::Flags slaveFlags = CreateSlaveFlags();

  MockExecutor exec(DEFAULT_EXECUTOR_ID);
  TestContainerizer containerizer(&exec);

  Try<PID<Slave>> slave = StartSlave(&containerizer);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  // Launch a task with the command executor.
  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id());
  task.mutable_resources()->CopyFrom(offers.get()[0].resources());
  task.mutable_executor()->CopyFrom(DEFAULT_EXECUTOR_INFO);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  EXPECT_CALL(exec, launchTask(_, _));

  Future<ExecutorInfo> executorInfo;
  EXPECT_CALL(exec, registered(_, _, _, _))
    .WillOnce(FutureArg<1>(&executorInfo));

  // On successful completion of the "slaveLaunchExecutorHook", the
  // test hook will send a HookExecuted message to itself. We wait
  // until that message is intercepted by the testing infrastructure.
  Future<HookExecuted> hookFuture = FUTURE_PROTOBUF(HookExecuted(), _, _);

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(executorInfo);

  driver.stop();
  driver.join();

  Shutdown(); // Must shutdown before 'containerizer' gets deallocated.

  // Now wait for the hook to finish execution.
  AWAIT_READY(hookFuture);
}
// This test verifies that the slave run task label decorator can add
// and remove labels from a task during the launch sequence. A task
// with two labels ("foo":"bar" and "bar":"baz") is launched and will
// get modified by the slave hook to strip the "foo":"bar" pair and
// add a new "baz":"qux" pair.
TEST_F(HookTest, VerifySlaveRunTaskHook)
{
  Try<PID<Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);
  TestContainerizer containerizer(&exec);

  Try<PID<Slave>> slave = StartSlave(&containerizer);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers.get().size());

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id());
  task.mutable_resources()->CopyFrom(offers.get()[0].resources());
  task.mutable_executor()->CopyFrom(DEFAULT_EXECUTOR_INFO);

  // Add two labels: (1) will be removed by the hook to ensure that
  // runTaskHook can remove labels (2) will be preserved to ensure
  // that the framework can add labels to the task and have those be
  // available by the end of the launch task sequence when hooks are
  // used (to protect against hooks removing labels completely).
  Labels* labels = task.mutable_labels();

  Label* label1 = labels->add_labels();
  label1->set_key("foo");
  label1->set_value("bar");

  Label* label2 = labels->add_labels();
  label2->set_key("bar");
  label2->set_value("baz");

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  EXPECT_CALL(exec, registered(_, _, _, _));

  Future<TaskInfo> taskInfo;
  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(DoAll(
        FutureArg<1>(&taskInfo),
        SendStatusUpdateFromTask(TASK_RUNNING)));

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(taskInfo);

  // The master hook will hang an extra label off.
  const Labels& labels_ = taskInfo.get().labels();

  ASSERT_EQ(3, labels_.labels_size());

  // The slave run task hook will prepend a new "baz":"qux" label.
  EXPECT_EQ(labels_.labels(0).key(), "baz");
  EXPECT_EQ(labels_.labels(0).value(), "qux");

  // Master launch task hook will still hang off test label.
  EXPECT_EQ(labels_.labels(1).key(), testLabelKey);
  EXPECT_EQ(labels_.labels(1).value(), testLabelValue);

  // And lastly, we only expect the "foo":"bar" pair to be stripped by
  // the module. The last pair should be the original "bar":"baz"
  // pair set by the test.
  EXPECT_EQ(labels_.labels(2).key(), "bar");
  EXPECT_EQ(labels_.labels(2).value(), "baz");

  driver.stop();
  driver.join();

  Shutdown(); // Must shutdown before 'containerizer' gets deallocated.
}
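// For reference, a minimal sketch of the kind of label-decorator hook
// this test exercises, assuming the `Hook::slaveRunTaskLabelDecorator()`
// interface. The real test module lives in
// `src/examples/test_hook_module.cpp` and may differ in detail.
class ExampleLabelHook : public Hook
{
public:
  Result<Labels> slaveRunTaskLabelDecorator(
      const TaskInfo& taskInfo,
      const ExecutorInfo& executorInfo,
      const FrameworkInfo& frameworkInfo,
      const SlaveInfo& slaveInfo) override
  {
    Labels labels;

    // Prepend the new "baz":"qux" pair.
    Label* label = labels.add_labels();
    label->set_key("baz");
    label->set_value("qux");

    // Keep every incoming label except the "foo":"bar" pair.
    foreach (const Label& l, taskInfo.labels().labels()) {
      if (l.key() != "foo") {
        labels.add_labels()->CopyFrom(l);
      }
    }

    return labels;
  }
};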
// Test that the label decorator hook hangs a new label off the
// taskinfo message during master launch task.
TEST_F(HookTest, VerifyMasterLaunchTaskHook)
{
  Try<PID<Master>> master = StartMaster(CreateMasterFlags());
  ASSERT_SOME(master);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);
  TestContainerizer containerizer(&exec);

  // Start a mock slave since we aren't testing the slave hooks yet.
  Try<PID<Slave>> slave = StartSlave(&containerizer);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id());
  task.mutable_resources()->CopyFrom(offers.get()[0].resources());
  task.mutable_executor()->CopyFrom(DEFAULT_EXECUTOR_INFO);

  // Add a label which will be removed by the hook.
  Labels* labels = task.mutable_labels();
  Label* label = labels->add_labels();
  label->set_key(testRemoveLabelKey);
  label->set_value(testRemoveLabelValue);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<RunTaskMessage> runTaskMessage =
    FUTURE_PROTOBUF(RunTaskMessage(), _, _);

  EXPECT_CALL(exec, registered(_, _, _, _));

  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status))
    .WillRepeatedly(Return());

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(runTaskMessage);

  AWAIT_READY(status);

  // At launchTasks, the label decorator hook inside should have been
  // executed and we should see the labels now. Also, verify that the
  // hook module has stripped the first 'testRemoveLabelKey' label.
  // We do this by ensuring that only one label is present and that it
  // is the new 'testLabelKey' label.
  const Labels& labels_ = runTaskMessage.get().task().labels();
  ASSERT_EQ(1, labels_.labels_size());

  EXPECT_EQ(labels_.labels().Get(0).key(), testLabelKey);
  EXPECT_EQ(labels_.labels().Get(0).value(), testLabelValue);

  driver.stop();
  driver.join();

  Shutdown(); // Must shutdown before 'containerizer' gets deallocated.
}