virtual void resourceOffers(
    SchedulerDriver* driver, const vector<Offer>& offers)
{
  static const Try<Resources> TASK_RESOURCES = Resources::parse(resources);

  if (TASK_RESOURCES.isError()) {
    cerr << "Failed to parse resources '" << resources
         << "': " << TASK_RESOURCES.error() << endl;
    driver->abort();
    return;
  }

  foreach (const Offer& offer, offers) {
    if (!launched &&
        Resources(offer.resources()).contains(TASK_RESOURCES.get())) {
      TaskInfo task;
      task.set_name(name);
      task.mutable_task_id()->set_value(name);
      task.mutable_slave_id()->MergeFrom(offer.slave_id());
      task.mutable_resources()->CopyFrom(TASK_RESOURCES.get());
      task.mutable_command()->set_value(command);

      if (uri.isSome()) {
        task.mutable_command()->add_uris()->set_value(uri.get());
      }

      if (dockerImage.isSome()) {
        ContainerInfo containerInfo;
        containerInfo.set_type(ContainerInfo::DOCKER);

        ContainerInfo::DockerInfo dockerInfo;
        dockerInfo.set_image(dockerImage.get());

        containerInfo.mutable_docker()->CopyFrom(dockerInfo);
        task.mutable_container()->CopyFrom(containerInfo);
      }

      vector<TaskInfo> tasks;
      tasks.push_back(task);

      driver->launchTasks(offer.id(), tasks);
      cout << "task " << name << " submitted to slave "
           << offer.slave_id() << endl;

      launched = true;
    } else {
      driver->declineOffer(offer.id());
    }
  }
}
TaskInfo buildTask(string hostname, string id, const SlaveID& slave)
{
  hostProfile profile = hostList[hostname];

  // Define the Docker container.
  /* Since there is no "executor" to manage the tasks, the container
   * will be built and attached directly into the task below */
  ContainerInfo container;
  container.set_type(ContainerInfo::DOCKER);

  ContainerInfo::DockerInfo docker;
  docker.set_image(DOCKER_IMAGE);
  container.mutable_docker()->MergeFrom(docker);

  // Mount local volume inside container.
  Volume* volume = container.add_volumes();
  volume->set_container_path("/mnt");
  volume->set_host_path("/local/mesos");
  volume->set_mode(Volume_Mode_RW);

  // Define the task.
  TaskInfo task;
  task.set_name("K3-" + k3binary);
  task.mutable_task_id()->set_value(id);
  task.mutable_slave_id()->MergeFrom(slave);
  task.mutable_container()->MergeFrom(container);
  //task.set_data(stringify(localTasks));

  // Define include files for the command.
  CommandInfo command;

  CommandInfo_URI* k3_bin = command.add_uris();
  k3_bin->set_value(fileServer + "/" + k3binary);
  k3_bin->set_executable(true);
  k3_bin->set_extract(false);

  // CommandInfo_URI* k3_args = command.add_uris();
  // k3_args->set_value(runpath + "/k3input.yaml");
  // command.set_value("$MESOS_SANDBOX/" + k3binary + " -l INFO -p " +
  //                   "$MESOS_SANDBOX/k3input.yaml");

  task.mutable_command()->MergeFrom(command);

  // Option A for doing resource management (see scheduler for option B).
  Resource* resource;

  resource = task.add_resources();
  resource->set_name("cpus");
  resource->set_type(Value::SCALAR);
  resource->mutable_scalar()->set_value(profile.cpu);

  resource = task.add_resources();
  resource->set_name("mem");
  resource->set_type(Value::SCALAR);
  resource->mutable_scalar()->set_value(profile.mem);

  return task;
}
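`buildTask` above pulls its per-host sizing out of a `hostProfile` record whose definition is not included in these excerpts. A minimal sketch of the shape the code assumes, with fields inferred purely from usage here and in the K3 scheduler's `resourceOffers` further below, might be:

#include <string>
#include <vector>

// Hypothetical reconstruction -- the real definition lives elsewhere in
// this framework's sources. Fields are inferred from usage only.
struct hostProfile
{
  double cpu;              // cpus to request for this host's task
  double mem;              // memory (in MB) to request
  size_t offer;            // index of the offer this host was taken from
  std::vector<int> peers;  // K3 peer ids assigned to this host

  void addPeer(int id) { peers.push_back(id); }
};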
void offers(const vector<Offer>& offers)
{
  CHECK_EQ(SUBSCRIBED, state);

  static const Try<Resources> TASK_RESOURCES = Resources::parse(resources);

  if (TASK_RESOURCES.isError()) {
    EXIT(EXIT_FAILURE)
      << "Failed to parse resources '" << resources
      << "': " << TASK_RESOURCES.error();
  }

  foreach (const Offer& offer, offers) {
    Resources offered = offer.resources();

    if (!launched && offered.flatten().contains(TASK_RESOURCES.get())) {
      TaskInfo task;
      task.set_name(name);
      task.mutable_task_id()->set_value(name);
      task.mutable_agent_id()->MergeFrom(offer.agent_id());

      // Takes resources first from the specified role, then from '*'.
      Option<Resources> resources =
        offered.find(TASK_RESOURCES.get().flatten(frameworkInfo.role()));

      CHECK_SOME(resources);

      task.mutable_resources()->CopyFrom(resources.get());

      CommandInfo* commandInfo = task.mutable_command();

      if (shell) {
        CHECK_SOME(command);

        commandInfo->set_shell(true);
        commandInfo->set_value(command.get());
      } else {
        // TODO(gilbert): Treat 'command' as executable value and arguments.
        commandInfo->set_shell(false);
      }

      if (environment.isSome()) {
        Environment* environment_ = commandInfo->mutable_environment();
        foreachpair (
            const string& name, const string& value, environment.get()) {
          Environment::Variable* environmentVariable =
            environment_->add_variables();

          environmentVariable->set_name(name);
          environmentVariable->set_value(value);
        }
      }
inline TaskInfo createTask(
    const Offer& offer,
    const std::string& command,
    const std::string& name = "test-task",
    const std::string& id = UUID::random().toString())
{
  TaskInfo task;
  task.set_name(name);
  task.mutable_task_id()->set_value(id);
  task.mutable_slave_id()->MergeFrom(offer.slave_id());
  task.mutable_resources()->MergeFrom(offer.resources());
  task.mutable_command()->set_value(command);

  return task;
}
virtual void resourceOffers(SchedulerDriver* driver,
                            const vector<Offer>& offers)
{
  foreach (const Offer& offer, offers) {
    cout << "Received offer " << offer.id() << " with "
         << offer.resources() << endl;

    static const Resources TASK_RESOURCES = Resources::parse(
        "cpus:" + stringify(CPUS_PER_TASK) +
        ";mem:" + stringify(MEM_PER_TASK)).get();

    Resources remaining = offer.resources();

    // Launch tasks.
    vector<TaskInfo> tasks;
    while (tasksLaunched < totalTasks &&
           remaining.flatten().contains(TASK_RESOURCES)) {
      int taskId = tasksLaunched++;

      cout << "Launching task " << taskId << " using offer "
           << offer.id() << endl;

      TaskInfo task;
      task.set_name("Task " + lexical_cast<string>(taskId));
      task.mutable_task_id()->set_value(lexical_cast<string>(taskId));
      task.mutable_slave_id()->MergeFrom(offer.slave_id());
      task.mutable_command()->set_value("echo hello");

      Option<Resources> resources = remaining.find(TASK_RESOURCES);
      CHECK_SOME(resources);
      task.mutable_resources()->MergeFrom(resources.get());

      remaining -= resources.get();

      tasks.push_back(task);
    }

    driver->launchTasks(offer.id(), tasks);
  }
inline TaskInfo createTask(
    const Offer& offer,
    const std::string& command,
    const Option<mesos::ExecutorID>& executorId = None(),
    const std::string& name = "test-task",
    const std::string& id = UUID::random().toString())
{
  TaskInfo task;
  task.set_name(name);
  task.mutable_task_id()->set_value(id);
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(offer.resources());

  if (executorId.isSome()) {
    ExecutorInfo executor;
    executor.mutable_executor_id()->CopyFrom(executorId.get());
    executor.mutable_command()->set_value(command);
    task.mutable_executor()->CopyFrom(executor);
  } else {
    task.mutable_command()->set_value(command);
  }

  return task;
}
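Both `createTask` overloads are meant to be called from a test's offer callback. A minimal usage sketch, assuming a received `offers` future and a running `driver` as in the tests that follow:

// Sketch: launch a trivial command task built from the first offer.
const Offer& offer = offers.get()[0];
TaskInfo task = createTask(offer, "echo hello");
driver.launchTasks(offer.id(), {task});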
vector<TaskInfo> populateTasks(
    const string& cmd,
    CommandInfo healthCommand,
    const Offer& offer,
    int gracePeriodSeconds = 0,
    const Option<int>& consecutiveFailures = None(),
    const Option<map<string, string> >& env = None())
{
  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(offer.resources());

  CommandInfo command;
  command.set_value(cmd);

  Environment::Variable* variable =
    command.mutable_environment()->add_variables();

  // We need to set the correct directory to launch the health check
  // process, instead of the default, for tests.
  variable->set_name("MESOS_LAUNCHER_DIR");
  variable->set_value(path::join(tests::flags.build_dir, "src"));

  task.mutable_command()->CopyFrom(command);

  HealthCheck healthCheck;

  if (env.isSome()) {
    foreachpair (const string& name, const string& value, env.get()) {
      Environment::Variable* variable =
        healthCommand.mutable_environment()->mutable_variables()->Add();
      variable->set_name(name);
      variable->set_value(value);
    }
  }
// This test has been temporarily disabled due to MESOS-1257.
TEST_F(ExternalContainerizerTest, DISABLED_Launch)
{
  Try<PID<Master> > master = this->StartMaster();
  ASSERT_SOME(master);

  Flags testFlags;

  slave::Flags flags = this->CreateSlaveFlags();

  flags.isolation = "external";
  flags.containerizer_path =
    testFlags.build_dir + "/src/examples/python/test-containerizer";

  MockExternalContainerizer containerizer(flags);

  Try<PID<Slave> > slave = this->StartSlave(&containerizer, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  TaskInfo task;
  task.set_name("isolator_test");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id());
  task.mutable_resources()->CopyFrom(offers.get()[0].resources());

  Resources resources(offers.get()[0].resources());
  Option<Bytes> mem = resources.mem();
  ASSERT_SOME(mem);
  Option<double> cpus = resources.cpus();
  ASSERT_SOME(cpus);

  const std::string& file = path::join(flags.work_dir, "ready");

  // This task induces user/system load by running top in a child process.
  task.mutable_command()->set_value(
#ifdef __APPLE__
      // Use logging mode with 30,000 samples with no interval.
      "top -l 30000 -s 0 2>&1 > /dev/null & "
#else
      // Batch mode, with 30,000 samples with no interval.
      "top -b -d 0 -n 30000 2>&1 > /dev/null & "
#endif
      "touch " + file + "; " // Signals that the top command is running.
      "sleep 60");

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status))
    .WillRepeatedly(Return()); // Ignore rest for now.

  Future<ContainerID> containerId;
  EXPECT_CALL(containerizer, launch(_, _, _, _, _, _, _, _))
    .WillOnce(DoAll(FutureArg<0>(&containerId),
                    Invoke(&containerizer,
                           &MockExternalContainerizer::_launch)));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(containerId);
  AWAIT_READY(status);
  EXPECT_EQ(TASK_RUNNING, status.get().state());

  // Wait for the task to begin inducing cpu time.
  while (!os::exists(file));

  ExecutorID executorId;
  executorId.set_value(task.task_id().value());

  // We'll wait up to 10 seconds for the child process to induce
  // 1/8 of a second of user and system cpu time in total.
  // TODO(bmahler): Also induce rss memory consumption, by re-using
  // the balloon framework.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = containerizer.usage(containerId.get());
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    // NOTE: We are currently getting dummy-data from the test-
    // containerizer python script matching these expectations.
    // TODO(tillt): Consider working with real data.
    if (statistics.cpus_user_time_secs() >= 0.120 &&
        statistics.cpus_system_time_secs() >= 0.05 &&
        statistics.mem_rss_bytes() >= 1024u) {
      break;
    }

    os::sleep(Milliseconds(100));
    waited += Milliseconds(100);
  } while (waited < Seconds(10));

  EXPECT_GE(statistics.cpus_user_time_secs(), 0.120);
  EXPECT_GE(statistics.cpus_system_time_secs(), 0.05);
  EXPECT_EQ(statistics.cpus_limit(), cpus.get());
  EXPECT_GE(statistics.mem_rss_bytes(), 1024u);
  EXPECT_EQ(statistics.mem_limit_bytes(), mem.get().bytes());

  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.killTask(task.task_id());

  AWAIT_READY(status);
  EXPECT_EQ(TASK_KILLED, status.get().state());

  driver.stop();
  driver.join();

  this->Shutdown();
}
virtual void resourceOffers(SchedulerDriver* driver,
                            const vector<Offer>& offers)
{
  foreach (const Offer& offer, offers) {
    LOG(INFO) << "Received offer " << offer.id() << " with "
              << offer.resources();

    // If the framework got this offer for the first time, the state is
    // `State::INIT`; framework will reserve it (sending RESERVE operation
    // to master) in this loop.
    if (!states.contains(offer.slave_id())) {
      // If all tasks were launched, do not reserve more resources; wait
      // for them to finish and unreserve resources.
      if (tasksLaunched == totalTasks) {
        continue;
      }

      states[offer.slave_id()] = State::INIT;
    }

    const State state = states[offer.slave_id()];

    Filters filters;
    filters.set_refuse_seconds(0);

    switch (state) {
      case State::INIT: {
        // Framework reserves resources from this offer for only one task;
        // the task will be dispatched when the reserved resources are
        // re-offered to this framework.
        Resources resources = offer.resources();
        Offer::Operation reserve = RESERVE(taskResources);

        Try<Resources> apply = resources.apply(reserve);
        if (apply.isError()) {
          LOG(INFO) << "Failed to reserve resources for task in offer "
                    << stringify(offer.id()) << ": " << apply.error();
          break;
        }

        driver->acceptOffers({offer.id()}, {reserve}, filters);
        states[offer.slave_id()] = State::RESERVING;
        break;
      }
      case State::RESERVING: {
        Resources resources = offer.resources();
        Resources reserved = resources.reserved(role);
        if (!reserved.contains(taskResources)) {
          break;
        }
        states[offer.slave_id()] = State::RESERVED;

        // We fallthrough here to save an offer cycle.
      }
      case State::RESERVED: {
        Resources resources = offer.resources();
        Resources reserved = resources.reserved(role);

        CHECK(reserved.contains(taskResources));

        // If all tasks were launched, unreserve those resources.
        if (tasksLaunched == totalTasks) {
          driver->acceptOffers(
              {offer.id()}, {UNRESERVE(taskResources)}, filters);
          states[offer.slave_id()] = State::UNRESERVING;
          break;
        }

        // Framework dispatches task on the reserved resources.
        CHECK(tasksLaunched < totalTasks);

        // Launch tasks on reserved resources.
        const string& taskId = stringify(tasksLaunched++);
        LOG(INFO) << "Launching task " << taskId << " using offer "
                  << offer.id();

        TaskInfo task;
        task.set_name("Task " + taskId + ": " + command);
        task.mutable_task_id()->set_value(taskId);
        task.mutable_slave_id()->MergeFrom(offer.slave_id());
        task.mutable_command()->set_shell(true);
        task.mutable_command()->set_value(command);
        task.mutable_resources()->MergeFrom(taskResources);
        driver->launchTasks(offer.id(), {task}, filters);
        states[offer.slave_id()] = State::TASK_RUNNING;
        break;
      }
      case State::TASK_RUNNING:
        LOG(INFO) << "The task on " << offer.slave_id()
                  << " is running, waiting for task done";
        break;
      case State::UNRESERVING: {
        Resources resources = offer.resources();
        Resources reserved = resources.reserved(role);
        if (!reserved.contains(taskResources)) {
          states[offer.slave_id()] = State::UNRESERVED;
        }
        break;
      }
      case State::UNRESERVED:
        // If the state of the slave is UNRESERVED, ignore it. The driver
        // is stopped when all tasks are done and all resources are
        // unreserved.
        break;
    }
  }
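The `RESERVE` and `UNRESERVE` helpers used by this state machine are not shown in the snippet. Judging from how `Resources::apply` and `acceptOffers` consume them, they wrap the task resources in an `Offer::Operation`, roughly as follows; this is a sketch built on the stock `mesos::Offer::Operation` protobufs, not necessarily this framework's exact helpers:

// Sketch of the RESERVE/UNRESERVE helpers referenced above.
Offer::Operation RESERVE(const Resources& resources)
{
  Offer::Operation operation;
  operation.set_type(Offer::Operation::RESERVE);
  operation.mutable_reserve()->mutable_resources()->CopyFrom(resources);
  return operation;
}

Offer::Operation UNRESERVE(const Resources& resources)
{
  Offer::Operation operation;
  operation.set_type(Offer::Operation::UNRESERVE);
  operation.mutable_unreserve()->mutable_resources()->CopyFrom(resources);
  return operation;
}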
// Test that the pre-launch docker hook executes before a docker
// container is launched. The test hook creates a file "foo" in the
// sandbox directory. When the docker container is launched, the sandbox
// directory is mounted into it, so we validate the hook by verifying
// that the "foo" file exists inside the container.
TEST_F(HookTest, ROOT_DOCKER_VerifySlavePreLaunchDockerHook)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockDocker* mockDocker =
    new MockDocker(tests::flags.docker, tests::flags.docker_socket);

  Shared<Docker> docker(mockDocker);

  slave::Flags flags = CreateSlaveFlags();

  Fetcher fetcher;

  Try<ContainerLogger*> logger =
    ContainerLogger::create(flags.container_logger);

  ASSERT_SOME(logger);

  MockDockerContainerizer containerizer(
      flags,
      &fetcher,
      Owned<ContainerLogger>(logger.get()),
      docker);

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), &containerizer, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  ASSERT_NE(0u, offers.get().size());

  const Offer& offer = offers.get()[0];
  SlaveID slaveId = offer.slave_id();

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(offer.resources());

  CommandInfo command;
  command.set_value("test -f " + path::join(flags.sandbox_directory, "foo"));

  ContainerInfo containerInfo;
  containerInfo.set_type(ContainerInfo::DOCKER);

  // TODO(tnachen): Use local image to test if possible.
  ContainerInfo::DockerInfo dockerInfo;
  dockerInfo.set_image("alpine");
  containerInfo.mutable_docker()->CopyFrom(dockerInfo);

  task.mutable_command()->CopyFrom(command);
  task.mutable_container()->CopyFrom(containerInfo);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<ContainerID> containerId;
  EXPECT_CALL(containerizer, launch(_, _, _, _, _, _, _, _))
    .WillOnce(DoAll(FutureArg<0>(&containerId),
                    Invoke(&containerizer,
                           &MockDockerContainerizer::_launch)));

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished))
    .WillRepeatedly(DoDefault());

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY_FOR(containerId, Seconds(60));
  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(TASK_RUNNING, statusRunning.get().state());
  AWAIT_READY_FOR(statusFinished, Seconds(60));
  EXPECT_EQ(TASK_FINISHED, statusFinished.get().state());

  Future<containerizer::Termination> termination =
    containerizer.wait(containerId.get());

  driver.stop();
  driver.join();

  AWAIT_READY(termination);

  Future<list<Docker::Container>> containers =
    docker.get()->ps(true, slave::DOCKER_NAME_PREFIX);

  AWAIT_READY(containers);

  // Cleanup all mesos launched containers.
  foreach (const Docker::Container& container, containers.get()) {
    AWAIT_READY_FOR(docker.get()->rm(container.id, true), Seconds(30));
  }
}
virtual void resourceOffers(SchedulerDriver* driver,
                            const vector<Offer>& offers)
{
  cout << "." << flush;
  for (size_t i = 0; i < offers.size(); i++) {
    const Offer& offer = offers[i];

    // Lookup resources we care about.
    // TODO(benh): It would be nice to ultimately have some helper
    // functions for looking up resources.
    double cpus = 0;
    double mem = 0;

    for (int i = 0; i < offer.resources_size(); i++) {
      const Resource& resource = offer.resources(i);
      if (resource.name() == "cpus" &&
          resource.type() == Value::SCALAR) {
        cpus = resource.scalar().value();
      } else if (resource.name() == "mem" &&
                 resource.type() == Value::SCALAR) {
        mem = resource.scalar().value();
      }
    }

    // Launch tasks.
    vector<TaskInfo> tasks;
    while (tasksLaunched < totalTasks &&
           cpus >= CPUS_PER_TASK &&
           mem >= MEM_PER_TASK) {
      int taskId = tasksLaunched++;

      cout << "Starting task " << taskId << " on "
           << offer.hostname() << endl;

      TaskInfo task;
      task.set_name("Task " + lexical_cast<string>(taskId));
      task.mutable_task_id()->set_value(lexical_cast<string>(taskId));
      task.mutable_slave_id()->MergeFrom(offer.slave_id());
      task.mutable_command()->set_value("echo hello");

      Resource* resource;

      resource = task.add_resources();
      resource->set_name("cpus");
      resource->set_type(Value::SCALAR);
      resource->mutable_scalar()->set_value(CPUS_PER_TASK);

      resource = task.add_resources();
      resource->set_name("mem");
      resource->set_type(Value::SCALAR);
      resource->mutable_scalar()->set_value(MEM_PER_TASK);

      tasks.push_back(task);

      cpus -= CPUS_PER_TASK;
      mem -= MEM_PER_TASK;
    }

    driver->launchTasks(offer.id(), tasks);
  }
}
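The TODO(benh) note above asks for resource-lookup helpers; the `Resources` wrapper used by the other schedulers in this section already provides them, so an equivalent lookup could read as follows (a sketch against the stock `mesos::Resources` API; the bytes-to-MB conversion is an assumption about the scheduler's units):

// Sketch: the same cpus/mem lookup via the Resources helper class.
Resources offered = offer.resources();

Option<double> cpusOption = offered.cpus(); // Sums all "cpus" scalars.
Option<Bytes> memOption = offered.mem();    // Sums all "mem" scalars.

double cpus = cpusOption.isSome() ? cpusOption.get() : 0.0;
double mem = memOption.isSome()
  ? static_cast<double>(memOption.get().bytes()) / (1024 * 1024)
  : 0.0;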
virtual void resourceOffers(
    SchedulerDriver* driver, const vector<Offer>& offers)
{
  static const Try<Resources> TASK_RESOURCES = Resources::parse(resources);

  if (TASK_RESOURCES.isError()) {
    cerr << "Failed to parse resources '" << resources
         << "': " << TASK_RESOURCES.error() << endl;
    driver->abort();
    return;
  }

  foreach (const Offer& offer, offers) {
    if (!launched &&
        Resources(offer.resources()).contains(TASK_RESOURCES.get())) {
      TaskInfo task;
      task.set_name(name);
      task.mutable_task_id()->set_value(name);
      task.mutable_slave_id()->MergeFrom(offer.slave_id());
      task.mutable_resources()->CopyFrom(TASK_RESOURCES.get());

      CommandInfo* commandInfo = task.mutable_command();
      commandInfo->set_value(command);

      if (environment.isSome()) {
        Environment* environment_ = commandInfo->mutable_environment();
        foreachpair (const std::string& name,
                     const std::string& value,
                     environment.get()) {
          Environment_Variable* environmentVariable =
            environment_->add_variables();

          environmentVariable->set_name(name);
          environmentVariable->set_value(value);
        }
      }

      if (uri.isSome()) {
        task.mutable_command()->add_uris()->set_value(uri.get());
      }

      if (dockerImage.isSome()) {
        ContainerInfo containerInfo;

        if (containerizer == "mesos") {
          containerInfo.set_type(ContainerInfo::MESOS);

          ContainerInfo::MesosInfo mesosInfo;

          Image mesosImage;
          mesosImage.set_type(Image::DOCKER);
          mesosImage.mutable_docker()->set_name(dockerImage.get());
          mesosInfo.mutable_image()->CopyFrom(mesosImage);

          containerInfo.mutable_mesos()->CopyFrom(mesosInfo);
        } else if (containerizer == "docker") {
          containerInfo.set_type(ContainerInfo::DOCKER);

          ContainerInfo::DockerInfo dockerInfo;
          dockerInfo.set_image(dockerImage.get());

          containerInfo.mutable_docker()->CopyFrom(dockerInfo);
        } else {
          cerr << "Unsupported containerizer: " << containerizer << endl;
          driver->abort();
          return;
        }

        task.mutable_container()->CopyFrom(containerInfo);
      }

      vector<TaskInfo> tasks;
      tasks.push_back(task);

      driver->launchTasks(offer.id(), tasks);
      cout << "task " << name << " submitted to slave "
           << offer.slave_id() << endl;

      launched = true;
    } else {
TEST_F(SlaveTest, ShutdownUnregisteredExecutor)
{
  Try<PID<Master> > master = StartMaster();
  ASSERT_SOME(master);

  // Need flags for 'executor_registration_timeout'.
  slave::Flags flags = CreateSlaveFlags();

  // Set the isolation flag so we know a MesosContainerizer will be created.
  flags.isolation = "posix/cpu,posix/mem";

  Try<MesosContainerizer*> containerizer =
    MesosContainerizer::create(flags, false);
  CHECK_SOME(containerizer);

  Try<PID<Slave> > slave = StartSlave(containerizer.get());
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _))
    .Times(1);

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  // Launch a task with the command executor.
  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
  task.mutable_resources()->MergeFrom(offers.get()[0].resources());

  CommandInfo command;
  command.set_value("sleep 10");

  task.mutable_command()->MergeFrom(command);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  // Drop the registration message from the executor to the slave.
  Future<process::Message> registerExecutor =
    DROP_MESSAGE(Eq(RegisterExecutorMessage().GetTypeName()), _, _);

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(registerExecutor);

  Clock::pause();

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  // Ensure that the slave times out and kills the executor.
  Future<Nothing> destroyExecutor =
    FUTURE_DISPATCH(_, &MesosContainerizerProcess::destroy);

  Clock::advance(flags.executor_registration_timeout);

  AWAIT_READY(destroyExecutor);

  Clock::settle(); // Wait for Containerizer::destroy to complete.

  // Now advance time until the reaper reaps the executor.
  while (status.isPending()) {
    Clock::advance(Seconds(1));
    Clock::settle();
  }

  AWAIT_READY(status);
  ASSERT_EQ(TASK_FAILED, status.get().state());

  Clock::resume();

  driver.stop();
  driver.join();

  Shutdown(); // Must shutdown before 'containerizer' gets deallocated.
}
// This test confirms that setting no values for the soft and hard
// limits implies an unlimited resource.
TEST_F(PosixRLimitsIsolatorTest, UnsetLimits)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/rlimits";

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_NE(0u, offers->size());

  TaskInfo task = createTask(
      offers.get()[0].slave_id(),
      offers.get()[0].resources(),
      "exit `ulimit -c | grep -q unlimited`");

  // Force usage of C locale as we interpret a potentially translated
  // string in the task's command.
  mesos::Environment::Variable* locale =
    task.mutable_command()->mutable_environment()->add_variables();
  locale->set_name("LC_ALL");
  locale->set_value("C");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);

  // Setting rlimit for core without soft or hard limit signifies
  // unlimited range.
  RLimitInfo rlimitInfo;
  RLimitInfo::RLimit* rlimit = rlimitInfo.add_rlimits();
  rlimit->set_type(RLimitInfo::RLimit::RLMT_CORE);

  container->mutable_rlimit_info()->CopyFrom(rlimitInfo);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinal;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinal));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(statusRunning);
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinal);
  EXPECT_EQ(task.task_id(), statusFinal->task_id());
  EXPECT_EQ(TASK_FINISHED, statusFinal->state());

  driver.stop();
  driver.join();
}
// This test runs a command without the command user field set. The
// command will verify the assumption that the command is run as the
// slave user (in this case, root).
TEST_F(SlaveTest, ROOT_RunTaskWithCommandInfoWithoutUser)
{
  Try<PID<Master> > master = StartMaster();
  ASSERT_SOME(master);

  // Need flags for 'executor_registration_timeout'.
  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/cpu,posix/mem";

  Try<MesosContainerizer*> containerizer =
    MesosContainerizer::create(flags, false);
  CHECK_SOME(containerizer);

  Try<PID<Slave> > slave = StartSlave(containerizer.get());
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _))
    .Times(1);

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  // Launch a task with the command executor.
  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
  task.mutable_resources()->MergeFrom(offers.get()[0].resources());

  Result<string> user = os::user();
  CHECK_SOME(user) << "Failed to get current user name"
                   << (user.isError() ? ": " + user.error() : "");

  // Command executor will run as user running test.
  CommandInfo command;
  command.set_value("test `whoami` = " + user.get());

  task.mutable_command()->MergeFrom(command);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(statusRunning);
  EXPECT_EQ(TASK_RUNNING, statusRunning.get().state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(TASK_FINISHED, statusFinished.get().state());

  driver.stop();
  driver.join();

  Shutdown(); // Must shutdown before 'containerizer' gets deallocated.
}
void ChapelScheduler::resourceOffers(SchedulerDriver* driver,
                                     const vector<Offer>& offers)
{
  // Offers only contain resources describing a single node; for more
  // details read include/mesos/mesos.proto.
  //
  cout << "***\tProcessing Offers!" << endl;
  const int remainingCpusReq = cpusReq - launchedTsks.size();
  if (remainingCpusReq == 0) {
    for (size_t k = 0; k < offers.size(); k++) {
      const Offer& offer = offers[k];
      driver->declineOffer(offer.id());
    }
    cout << "\t\tChapelScheduler declined offer because resource requirements satisfied" << endl;
    return; // Nothing left to launch; skip the allocation loop below.
  }

  // Cycle through all the offers and assign resources to tasks;
  // each offer corresponds to a single compute node.
  //
  const static Resources TASK_RESOURCES = Resources::parse(mesosReq).get();
  vector<TaskInfo> tsks;
  for (size_t i = 0; i < offers.size(); i++) {
    const Offer& offer = offers[i];
    if (tsks.size() == remainingCpusReq) {
      driver->declineOffer(offer.id());
      continue; // Need to cycle through the remaining offers and decline them.
    }
    Resources remaining = offer.resources();
    /* attempting to exercise multi-tenancy capabilities in mesos
     * given an offer from a node, try to maximize the number of jobs
     * that can be allocated to that node given the job's resource
     * requirements
     *
     * if the desired number of nodes and jobs are met, then launch
     * all the jobs on that node's offer
     *
     * this means some nodes will get multiple tasks assigned for
     * execution
     */
    vector<TaskInfo> tol;
    while (remaining.flatten().contains(TASK_RESOURCES) &&
           ((remainingCpusReq - tsks.size()) > 0)) {
      const string tid = stringify<size_t>(tsks.size());
      TaskInfo task;
      task.set_name("Chapel Remote Program Task\t" + tid);
      task.mutable_task_id()->set_value(tid);
      task.mutable_slave_id()->MergeFrom(offer.slave_id());
      task.mutable_command()->MergeFrom(chplCmdInfo);
      task.mutable_resources()->MergeFrom(TASK_RESOURCES);
      task.set_data(remoteCmd);
      tol.push_back(task);  // tol means "to launch"
      tsks.push_back(task); // tsks tracks tasks launched for framework
                            // termination purposes
      remaining -= TASK_RESOURCES;
      tasksLaunched += 1;
      cout << "\t\t+++\tLaunching # of Tasks!\t" << tol.size()
           << " of " << tasksLaunched << endl;
    }

    // After all the tasks for this offer have been "resourced",
    // launch the tasks using this offer.id.
    //
    driver->launchTasks(offer.id(), tol);
  }

  const size_t pendingTsksSize = tsks.size();
  cout << endl
       << "\tAcquired # tasks " << pendingTsksSize
       << " required # of tasks " << cpusReq
       << " remaining required # tasks " << remainingCpusReq
       << endl << endl;
  if (pendingTsksSize > 0) {
    for (vector<TaskInfo>::iterator i = tsks.begin(); i != tsks.end(); i++) {
      launchedTsks.insert(make_pair(i->task_id().value(), *i));
    }
  }
}
TYPED_TEST(IsolatorTest, Usage)
{
  Try<PID<Master> > master = this->StartMaster();
  ASSERT_SOME(master);

  TypeParam isolator;

  slave::Flags flags = this->CreateSlaveFlags();

  Try<PID<Slave> > slave = this->StartSlave(&isolator, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  TaskInfo task;
  task.set_name("isolator_test");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
  task.mutable_resources()->MergeFrom(offers.get()[0].resources());

  Resources resources(offers.get()[0].resources());
  Option<Bytes> mem = resources.mem();
  ASSERT_SOME(mem);
  Option<double> cpus = resources.cpus();
  ASSERT_SOME(cpus);

  const std::string& file = path::join(flags.work_dir, "ready");

  // This task induces user/system load by running top in a child process.
  task.mutable_command()->set_value(
#ifdef __APPLE__
      // Use logging mode with 30,000 samples with no interval.
      "top -l 30000 -s 0 2>&1 > /dev/null & "
#else
      // Batch mode, with 30,000 samples with no interval.
      "top -b -d 0 -n 30000 2>&1 > /dev/null & "
#endif
      "touch " + file + "; " // Signals that the top command is running.
      "sleep 60");

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(status);
  EXPECT_EQ(TASK_RUNNING, status.get().state());

  // Wait for the task to begin inducing cpu time.
  while (!os::exists(file));

  ExecutorID executorId;
  executorId.set_value(task.task_id().value());

  // We'll wait up to 10 seconds for the child process to induce
  // 1/8 of a second of user and system cpu time in total.
  // TODO(bmahler): Also induce rss memory consumption, by re-using
  // the balloon framework.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = process::dispatch(
        (Isolator*) &isolator, // TODO(benh): Fix after reaper changes.
        &Isolator::usage,
        frameworkId.get(),
        executorId);

    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    if (statistics.cpus_user_time_secs() >= 0.125 &&
        statistics.cpus_system_time_secs() >= 0.125 &&
        statistics.mem_rss_bytes() >= 1024u) {
      break;
    }

    os::sleep(Milliseconds(100));
    waited += Milliseconds(100);
  } while (waited < Seconds(10));

  EXPECT_GE(statistics.cpus_user_time_secs(), 0.125);
  EXPECT_GE(statistics.cpus_system_time_secs(), 0.125);
  EXPECT_EQ(statistics.cpus_limit(), cpus.get());
  EXPECT_GE(statistics.mem_rss_bytes(), 1024u);
  EXPECT_EQ(statistics.mem_limit_bytes(), mem.get().bytes());

  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.killTask(task.task_id());

  AWAIT_READY(status);
  EXPECT_EQ(TASK_KILLED, status.get().state());

  driver.stop();
  driver.join();

  this->Shutdown(); // Must shutdown before 'isolator' gets deallocated.
}
Future<ExecutorInfo> ExternalContainerizerProcess::launch(
    const ContainerID& containerId,
    const TaskInfo& taskInfo,
    const FrameworkID& frameworkId,
    const std::string& directory,
    const Option<std::string>& user,
    const SlaveID& slaveId,
    const PID<Slave>& slavePid,
    bool checkpoint)
{
  LOG(INFO) << "Launching container '" << containerId << "'";

  // Get the executor from our task. If no executor is associated with
  // the given task, this function renders an ExecutorInfo using the
  // mesos-executor as its command.
  ExecutorInfo executor = containerExecutorInfo(flags, taskInfo, frameworkId);
  executor.mutable_resources()->MergeFrom(taskInfo.resources());

  if (containers.contains(containerId)) {
    return Failure("Cannot start already running container '" +
                   containerId.value() + "'");
  }

  sandboxes.put(containerId, Owned<Sandbox>(new Sandbox(directory, user)));

  map<string, string> environment = executorEnvironment(
      executor,
      directory,
      slaveId,
      slavePid,
      checkpoint,
      flags.recovery_timeout);

  if (!flags.hadoop_home.empty()) {
    environment["HADOOP_HOME"] = flags.hadoop_home;
  }

  TaskInfo task;
  task.CopyFrom(taskInfo);
  CommandInfo* command = task.has_executor()
    ? task.mutable_executor()->mutable_command()
    : task.mutable_command();

  // When the selected command has no container attached, use the
  // default from the slave startup flags, if available.
  if (!command->has_container()) {
    if (flags.default_container_image.isSome()) {
      command->mutable_container()->set_image(
          flags.default_container_image.get());
    } else {
      LOG(INFO) << "No container specified in task and no default given. "
                << "The external containerizer will have to fill in "
                << "defaults.";
    }
  }

  ExternalTask external;
  external.mutable_task()->CopyFrom(task);
  external.set_mesos_executor_path(
      path::join(flags.launcher_dir, "mesos-executor"));

  stringstream output;
  external.SerializeToOstream(&output);

  Try<Subprocess> invoked = invoke(
      "launch",
      containerId,
      output.str(),
      environment);

  if (invoked.isError()) {
    return Failure("Launch of container '" + containerId.value() +
                   "' failed (error: " + invoked.error() + ")");
  }

  // Record the process.
  containers.put(
      containerId,
      Owned<Container>(new Container(invoked.get().pid())));

  VLOG(2) << "Now awaiting data from pipe...";

  // Read from the result-pipe and invoke callbacks when reaching EOF.
  return await(read(invoked.get().out()), invoked.get().status())
    .then(defer(
        PID<ExternalContainerizerProcess>(this),
        &ExternalContainerizerProcess::_launch,
        containerId,
        frameworkId,
        executor,
        slaveId,
        checkpoint,
        lambda::_1));
}
virtual void resourceOffers(SchedulerDriver* driver,
                            const vector<Offer>& offers)
{
  cout << "[RESOURCE OFFER] " << offers.size() << " Offer(s)" << endl;
  vector<ofstream> infile;
  YAML::Node master;
  int acceptedOffers = 0;

  // 1. Determine the set of offers to accept
  int unallocatedPeers = totalPeers - peersAssigned;
  size_t cur_offer = 0;
  while (cur_offer < offers.size() && unallocatedPeers > 0) {
    const Offer& offer = offers[cur_offer];
    double cpus = 0;
    double mem = 0;
    bool accept_offer = true;

    // ONE way of handling resources (see k3_scheduler for the other)
    for (int i = 0; i < offer.resources_size(); i++) {
      const Resource& resource = offer.resources(i);
      if (resource.name() == "cpus" && resource.type() == Value::SCALAR) {
        cpus = resource.scalar().value();
      } else if (resource.name() == "mem" &&
                 resource.type() == Value::SCALAR) {
        mem = resource.scalar().value();
      }
      // CHECK OTHER RESOURCES HERE
    }

    // TODO: Check Attributes as well (hd, hm, etc...)
    /*
    for (int i = 0; i < offer.attributes_size(); i++) {
      const Attribute& attribute = offer.attributes(i);
      if (attribute.name() == "cpus" && resource.type() == Value::SCALAR) {
        cpus = resource.scalar().value();
      } else if (resource.name() == "mem" &&
                 resource.type() == Value::SCALAR) {
        mem = resource.scalar().value();
      }
      // CHECK OTHER RESOURCES HERE
    }
    */

    // Accept the offer
    /* ALLOCATE ALL available cpus (or all required)
       TODO: Est. policies to allocate resources as desired */
    if (offer.hostname() == "qp-hd10") {
      accept_offer = false;
    }

    if (accept_offer) {
      cout << " Accepted offer on " << offer.hostname() << endl;
      acceptedOffers++;
      hostProfile profile;
      int localPeers = (unallocatedPeers > cpus) ? cpus : unallocatedPeers;

      // Create profile for Resource allocation & other info
      profile.cpu = localPeers;                // assume: 1 cpu per peer
      profile.mem = localPeers * MEM_PER_TASK; // assume available mem
      profile.offer = cur_offer;

      for (int p = 0; p < localPeers; p++) {
        int peerId = peersAssigned++;
        // TODO: PORT management
        string port = stringify(44440 + peerId);
        profile.addPeer(peerId);
        peerProfile peer(ip_addr[offer.hostname()], port);
        peerList.push_back(peer);
        unallocatedPeers--;
      }
      hostList[offer.hostname()] = profile;
      containersAssigned++;
    }
    cur_offer++;
  }

  if (acceptedOffers == 0 || unallocatedPeers > 0) {
    return;
  }

  // Build the Peers list
  vector<YAML::Node> peerNodeList;
  for (auto& peer : peerList) {
    YAML::Node peerNode;
    peerNode["addr"] = peer.getAddr();
    peerNodeList.push_back(peerNode);
  }

  // IMPLEMENTS: Multi-Threaded,
  //   1-process per container, 1 container per host
  for (auto& host : hostList) {
    string hostname = host.first;
    hostProfile profile = host.second;
    vector<YAML::Node> hostParams;

    // Build program input parameters for each peer on this host
    for (auto p : profile.peers) {
      YAML::Node peerParams;
      cout << " Building Peer # " << p << " on host " << hostname << endl;

      // TODO: unique param list for each peer -- aside from 'me'
      for (YAML::const_iterator var = k3vars.begin();
           var != k3vars.end();
           var++) {
        string key = var->first.as<string>();
        string value = var->second.as<string>();

        // Special case for a rendezvous/master point
        if (value == "auto") {
          peerParams[key] = peerList[0].getAddr();
        }
        // OTHER INTRA-PROGRAM PROCESSING CAN GO HERE
        else {
          peerParams[key] = value;
        }
      }
      peerParams["me"] = peerList[p].getAddr();
      peerParams["peers"] = peerNodeList;
      hostParams.push_back(peerParams);
    }

    // ofstream outdoc ("k3input.yaml");
    cout << " Loading " << stringify(hostParams.size())
         << " peers for " << hostname << endl;

    string k3_cmd = "$MESOS_SANDBOX/" + k3binary + " -l INFO ";
    for (auto& p : hostParams) {
      YAML::Emitter emit;
      emit << YAML::Flow << p;
      k3_cmd += " -p '" + stringify(emit.c_str()) + "'";
    }
    // outdoc << emit.c_str();
    // cout << emit.c_str();

    TaskInfo task = buildTask(
        hostname,
        stringify(containersLaunched++),
        offers[profile.offer].slave_id());
    task.mutable_command()->set_value(k3_cmd);
    cout << " CMD " << k3_cmd << endl;

    vector<TaskInfo> tasks; // Now running 1 task per slave
    tasks.push_back(task);
    cout << " Launching " << hostname << endl;
    driver->launchTasks(offers[profile.offer].id(), tasks);
  }
}
// Test that we can run the mesos-executor and specify an "override"
// command to use via the --override argument.
TEST_F(SlaveTest, MesosExecutorWithOverride)
{
  Try<PID<Master> > master = StartMaster();
  ASSERT_SOME(master);

  TestContainerizer containerizer;

  Try<PID<Slave> > slave = StartSlave(&containerizer);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _))
    .Times(1);

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  // Launch a task with the command executor.
  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
  task.mutable_resources()->MergeFrom(offers.get()[0].resources());

  CommandInfo command;
  command.set_value("sleep 10");

  task.mutable_command()->MergeFrom(command);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  // Expect the launch and just assume it was successful since we'll be
  // launching the executor ourselves manually below.
  Future<Nothing> launch;
  EXPECT_CALL(containerizer, launch(_, _, _, _, _, _, _))
    .WillOnce(DoAll(FutureSatisfy(&launch),
                    Return(true)));

  // Expect wait after launch is called but don't return anything
  // until after we've finished everything below.
  Future<Nothing> wait;
  process::Promise<containerizer::Termination> promise;
  EXPECT_CALL(containerizer, wait(_))
    .WillOnce(DoAll(FutureSatisfy(&wait),
                    Return(promise.future())));

  driver.launchTasks(offers.get()[0].id(), tasks);

  // Once we get the launch, run the mesos-executor with --override.
  AWAIT_READY(launch);

  // Set up fake environment for executor.
  map<string, string> environment;
  environment["MESOS_SLAVE_PID"] = stringify(slave.get());
  environment["MESOS_SLAVE_ID"] = stringify(offers.get()[0].slave_id());
  environment["MESOS_FRAMEWORK_ID"] =
    stringify(offers.get()[0].framework_id());
  environment["MESOS_EXECUTOR_ID"] = stringify(task.task_id());
  environment["MESOS_DIRECTORY"] = "";

  // Create temporary file to store validation string. If command is
  // successfully replaced, this file will end up containing the string
  // 'hello world\n'. Otherwise, the original task command i.e.
  // 'sleep' will be called and the test will fail.
  Try<std::string> file = os::mktemp();
  ASSERT_SOME(file);

  string executorCommand =
    path::join(tests::flags.build_dir, "src", "mesos-executor") +
    " --override -- /bin/sh -c 'echo hello world >" + file.get() + "'";

  // Expect two status updates, one for once the mesos-executor says
  // the task is running and one for after our overridden command
  // above finishes.
  Future<TaskStatus> status1, status2;
  EXPECT_CALL(sched, statusUpdate(_, _))
    .WillOnce(FutureArg<1>(&status1))
    .WillOnce(FutureArg<1>(&status2));

  Try<process::Subprocess> executor = process::subprocess(
      executorCommand,
      process::Subprocess::PIPE(),
      process::Subprocess::PIPE(),
      process::Subprocess::PIPE(),
      environment);

  ASSERT_SOME(executor);

  // Scheduler should receive the TASK_RUNNING update.
  AWAIT_READY(status1);
  ASSERT_EQ(TASK_RUNNING, status1.get().state());

  AWAIT_READY(status2);
  ASSERT_EQ(TASK_FINISHED, status2.get().state());

  AWAIT_READY(wait);

  containerizer::Termination termination;
  termination.set_killed(false);
  termination.set_message("Killed executor");
  termination.set_status(0);

  promise.set(termination);

  driver.stop();
  driver.join();

  AWAIT_READY(executor.get().status());

  // Verify file contents.
  Try<std::string> validate = os::read(file.get());
  ASSERT_SOME(validate);

  EXPECT_EQ(validate.get(), "hello world\n");

  os::rm(file.get());

  Shutdown();
}
// This test verifies that docker image default entrypoint is executed
// correctly using registry puller. This corresponds to the case in runtime
// isolator logic table: sh=0, value=0, argv=1, entrypoint=1, cmd=0.
TEST_F(DockerRuntimeIsolatorTest,
       ROOT_CURL_INTERNET_DockerDefaultEntryptRegistryPuller)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "docker/runtime,filesystem/linux";
  flags.image_providers = "docker";
  flags.docker_store_dir = path::join(os::getcwd(), "store");

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers->size());

  const Offer& offer = offers.get()[0];

  TaskInfo task;
  task.set_name("test-task");
  task.mutable_task_id()->set_value(UUID::random().toString());
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(Resources::parse("cpus:1;mem:128").get());
  task.mutable_command()->set_shell(false);
  task.mutable_command()->add_arguments("hello world");

  Image image;
  image.set_type(Image::DOCKER);

  // 'mesosphere/inky' image is used in docker containerizer test, which
  // contains entrypoint as 'echo' and cmd as null.
  image.mutable_docker()->set_name("mesosphere/inky");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);
  container->mutable_mesos()->mutable_image()->CopyFrom(image);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(task.task_id(), statusFinished->task_id());
  EXPECT_EQ(TASK_FINISHED, statusFinished->state());

  driver.stop();
  driver.join();
}
// This test runs a command _with_ the command user field set. The
// command will verify the assumption that the command is run as the
// specified user. We use (and assume the presence of) the
// unprivileged 'nobody' user, which should be available on both Linux
// and Mac OS X.
TEST_F(SlaveTest, DISABLED_ROOT_RunTaskWithCommandInfoWithUser)
{
  // TODO(nnielsen): Introduce STOUT abstraction for user verification
  // instead of flat getpwnam call.
  const string testUser = "******";
  if (::getpwnam(testUser.c_str()) == NULL) {
    LOG(WARNING) << "Cannot run ROOT_RunTaskWithCommandInfoWithUser test:"
                 << " user '" << testUser << "' is not present";
    return;
  }

  Try<PID<Master> > master = StartMaster();
  ASSERT_SOME(master);

  // Need flags for 'executor_registration_timeout'.
  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/cpu,posix/mem";

  Try<MesosContainerizer*> containerizer =
    MesosContainerizer::create(flags, false);
  CHECK_SOME(containerizer);

  Try<PID<Slave> > slave = StartSlave(containerizer.get());
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _))
    .Times(1);

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  // Launch a task with the command executor.
  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
  task.mutable_resources()->MergeFrom(offers.get()[0].resources());

  CommandInfo command;
  command.set_value("test `whoami` = " + testUser);
  command.set_user(testUser);

  task.mutable_command()->MergeFrom(command);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(statusRunning);
  EXPECT_EQ(TASK_RUNNING, statusRunning.get().state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(TASK_FINISHED, statusFinished.get().state());

  driver.stop();
  driver.join();

  Shutdown(); // Must shutdown before 'containerizer' gets deallocated.
}
// This test verifies that docker image default cmd is executed correctly.
// This corresponds to the case in runtime isolator logic table: sh=0,
// value=0, argv=1, entrypoint=0, cmd=1.
TEST_F(DockerRuntimeIsolatorTest, ROOT_DockerDefaultCmdLocalPuller)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  const string directory = path::join(os::getcwd(), "archives");

  Future<Nothing> testImage =
    DockerArchive::create(directory, "alpine", "null", "[\"sh\"]");

  AWAIT_READY(testImage);

  ASSERT_TRUE(os::exists(path::join(directory, "alpine.tar")));

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "docker/runtime,filesystem/linux";
  flags.image_providers = "docker";
  flags.docker_registry = directory;

  // Make the docker store directory a temporary directory. Because the
  // manifest of the test image is changeable, the image cached by
  // previous tests should never be used.
  flags.docker_store_dir = path::join(os::getcwd(), "store");

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers->size());

  const Offer& offer = offers.get()[0];

  TaskInfo task;
  task.set_name("test-task");
  task.mutable_task_id()->set_value(UUID::random().toString());
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(Resources::parse("cpus:1;mem:128").get());
  task.mutable_command()->set_shell(false);
  task.mutable_command()->add_arguments("-c");
  task.mutable_command()->add_arguments("echo 'hello world'");

  Image image;
  image.set_type(Image::DOCKER);
  image.mutable_docker()->set_name("alpine");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);
  container->mutable_mesos()->mutable_image()->CopyFrom(image);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(task.task_id(), statusFinished->task_id());
  EXPECT_EQ(TASK_FINISHED, statusFinished->state());

  driver.stop();
  driver.join();
}
// This test ensures we don't break the API when it comes to JSON
// representation of tasks. Also, we want to ensure that tasks are
// modeled the same way when using 'Task' vs. 'TaskInfo'.
TEST(HTTP, ModelTask)
{
  TaskID taskId;
  taskId.set_value("t");

  SlaveID slaveId;
  slaveId.set_value("s");

  ExecutorID executorId;
  executorId.set_value("t");

  FrameworkID frameworkId;
  frameworkId.set_value("f");

  TaskState state = TASK_RUNNING;

  vector<TaskStatus> statuses;

  TaskStatus status;
  status.mutable_task_id()->CopyFrom(taskId);
  status.set_state(state);
  status.mutable_slave_id()->CopyFrom(slaveId);
  status.mutable_executor_id()->CopyFrom(executorId);
  status.set_timestamp(0.0);

  statuses.push_back(status);

  TaskInfo task;
  task.set_name("task");
  task.mutable_task_id()->CopyFrom(taskId);
  task.mutable_slave_id()->CopyFrom(slaveId);
  task.mutable_command()->set_value("echo hello");

  Task task_ = protobuf::createTask(task, state, frameworkId);
  task_.add_statuses()->CopyFrom(statuses[0]);

  JSON::Value object = model(task, frameworkId, state, statuses);
  JSON::Value object_ = model(task_);

  Try<JSON::Value> expected = JSON::parse(
      "{"
      "  \"executor_id\":\"\","
      "  \"framework_id\":\"f\","
      "  \"id\":\"t\","
      "  \"name\":\"task\","
      "  \"resources\":"
      "  {"
      "    \"cpus\":0,"
      "    \"disk\":0,"
      "    \"mem\":0"
      "  },"
      "  \"slave_id\":\"s\","
      "  \"state\":\"TASK_RUNNING\","
      "  \"statuses\":"
      "  ["
      "    {"
      "      \"state\":\"TASK_RUNNING\","
      "      \"timestamp\":0"
      "    }"
      "  ]"
      "}");

  ASSERT_SOME(expected);

  EXPECT_EQ(expected.get(), object);
  EXPECT_EQ(expected.get(), object_);

  // Ensure both are modeled the same.
  EXPECT_EQ(object, object_);
}
// This test ensures we don't break the API when it comes to JSON
// representation of tasks.
TEST(HTTPTest, ModelTask)
{
  TaskID taskId;
  taskId.set_value("t");

  SlaveID slaveId;
  slaveId.set_value("s");

  ExecutorID executorId;
  executorId.set_value("t");

  FrameworkID frameworkId;
  frameworkId.set_value("f");

  TaskState state = TASK_RUNNING;

  vector<TaskStatus> statuses;

  TaskStatus status;
  status.mutable_task_id()->CopyFrom(taskId);
  status.set_state(state);
  status.mutable_slave_id()->CopyFrom(slaveId);
  status.mutable_executor_id()->CopyFrom(executorId);
  status.set_timestamp(0.0);

  statuses.push_back(status);

  Labels labels;
  labels.add_labels()->CopyFrom(createLabel("ACTION", "port:7987 DENY"));

  Ports ports;
  Port* port = ports.add_ports();
  port->set_number(80);
  port->mutable_labels()->CopyFrom(labels);

  DiscoveryInfo discovery;
  discovery.set_visibility(DiscoveryInfo::CLUSTER);
  discovery.set_name("discover");
  discovery.mutable_ports()->CopyFrom(ports);

  TaskInfo taskInfo;
  taskInfo.set_name("task");
  taskInfo.mutable_task_id()->CopyFrom(taskId);
  taskInfo.mutable_slave_id()->CopyFrom(slaveId);
  taskInfo.mutable_command()->set_value("echo hello");
  taskInfo.mutable_discovery()->CopyFrom(discovery);

  Task task = createTask(taskInfo, state, frameworkId);
  task.add_statuses()->CopyFrom(statuses[0]);

  JSON::Value object = model(task);

  Try<JSON::Value> expected = JSON::parse(
      "{"
      "  \"executor_id\":\"\","
      "  \"framework_id\":\"f\","
      "  \"id\":\"t\","
      "  \"name\":\"task\","
      "  \"resources\":"
      "  {"
      "    \"cpus\":0,"
      "    \"disk\":0,"
      "    \"gpus\":0,"
      "    \"mem\":0"
      "  },"
      "  \"slave_id\":\"s\","
      "  \"state\":\"TASK_RUNNING\","
      "  \"statuses\":"
      "  ["
      "    {"
      "      \"state\":\"TASK_RUNNING\","
      "      \"timestamp\":0"
      "    }"
      "  ],"
      "  \"discovery\":"
      "  {"
      "    \"name\":\"discover\","
      "    \"ports\":"
      "    {"
      "      \"ports\":"
      "      ["
      "        {"
      "          \"number\":80,"
      "          \"labels\":"
      "          {"
      "            \"labels\":"
      "            ["
      "              {"
      "                \"key\":\"ACTION\","
      "                \"value\":\"port:7987 DENY\""
      "              }"
      "            ]"
      "          }"
      "        }"
      "      ]"
      "    },"
      "    \"visibility\":\"CLUSTER\""
      "  }"
      "}");

  ASSERT_SOME(expected);

  EXPECT_EQ(expected.get(), object);
}