// TODO(bmahler): Expose the executor name / source. JSON::Object model( const TaskInfo& task, const FrameworkID& frameworkId, const TaskState& state, const vector<TaskStatus>& statuses) { JSON::Object object; object.values["id"] = task.task_id().value(); object.values["name"] = task.name(); object.values["framework_id"] = frameworkId.value(); if (task.has_executor()) { object.values["executor_id"] = task.executor().executor_id().value(); } else { object.values["executor_id"] = ""; } object.values["slave_id"] = task.slave_id().value(); object.values["state"] = TaskState_Name(state); object.values["resources"] = model(task.resources()); JSON::Array array; foreach (const TaskStatus& status, statuses) { array.values.push_back(model(status)); }
// Builds a `Task` record from a framework-supplied `TaskInfo`,
// stamping it with the given state and owning framework.
Task createTask(
    const TaskInfo& task,
    const TaskState& state,
    const FrameworkID& frameworkId)
{
  Task result;

  // Identity: which framework/agent the task belongs to.
  result.mutable_framework_id()->MergeFrom(frameworkId);
  result.mutable_task_id()->MergeFrom(task.task_id());
  result.mutable_slave_id()->MergeFrom(task.slave_id());

  result.set_state(state);
  result.set_name(task.name());
  result.mutable_resources()->MergeFrom(task.resources());

  // A command task has no executor, and thus no executor id to copy.
  if (task.has_executor()) {
    result.mutable_executor_id()->CopyFrom(task.executor().executor_id());
  }

  result.mutable_labels()->MergeFrom(task.labels());

  if (task.has_discovery()) {
    result.mutable_discovery()->MergeFrom(task.discovery());
  }

  return result;
}
void ExecuteTask (const TaskInfo& task) { const string& executorId = task.executor().executor_id().value(); const string& data = task.data(); vector<string> args = split(data, '\n'); Resources resources = task.resources(); LOG(INFO) << "executor " << executorId << " with '" << join(args, " ") << "'" << " and " << resources; Resources persistent = filterIsPersistentVolume(resources); for (const auto& volume : persistent) { const string& path = volume.disk().volume().container_path(); LOG(INFO) << "using persistent volume " << path; os::mkdir(path + "/data"); os::mkdir(path + "/logs"); os::mkdir(path + "/apps"); string cmd = "ls -l " + path; int r = system(cmd.c_str()); if (r) { LOG(WARNING) << "failed with " << r; } } size_t len = args.size(); char** a = static_cast<char**>(malloc(sizeof(char*) * (len + 1))); for (size_t i = 0; i < len; ++i) { a[i] = const_cast<char*>(args[i].c_str()); LOG(INFO) << i << ". argument: " << a[i]; } a[len] = nullptr; execv(args[0].c_str(), a); LOG(FATAL) << "PANIC execv failed"; // upps, exec failed _exit(1); }
// Convenience overload: builds a `Task` from a `TaskInfo`, attaching
// the supplied `executorId` only when the task is not a command task
// (command tasks have no separate executor id).
inline Task createTask(const TaskInfo& task, const TaskState& state, const ExecutorID& executorId, const FrameworkID& frameworkId)
{
  Task result;

  result.set_name(task.name());
  result.set_state(state);

  result.mutable_task_id()->MergeFrom(task.task_id());
  result.mutable_framework_id()->MergeFrom(frameworkId);
  result.mutable_slave_id()->MergeFrom(task.slave_id());
  result.mutable_resources()->MergeFrom(task.resources());

  if (!task.has_command()) {
    result.mutable_executor_id()->MergeFrom(executorId);
  }

  return result;
}
// Builds a `Task` from a `TaskInfo`, copying over every optional field
// that is present and resolving the effective `user` for the task.
Task createTask(
    const TaskInfo& task,
    const TaskState& state,
    const FrameworkID& frameworkId)
{
  Task result;

  result.set_state(state);
  result.set_name(task.name());

  result.mutable_framework_id()->CopyFrom(frameworkId);
  result.mutable_task_id()->CopyFrom(task.task_id());
  result.mutable_slave_id()->CopyFrom(task.slave_id());
  result.mutable_resources()->CopyFrom(task.resources());

  // Optional fields: only copy what the framework actually set.
  if (task.has_executor()) {
    result.mutable_executor_id()->CopyFrom(task.executor().executor_id());
  }

  if (task.has_container()) {
    result.mutable_container()->CopyFrom(task.container());
  }

  if (task.has_labels()) {
    result.mutable_labels()->CopyFrom(task.labels());
  }

  if (task.has_discovery()) {
    result.mutable_discovery()->CopyFrom(task.discovery());
  }

  // Copy `user` if set, preferring the task command's user over the
  // executor command's user.
  if (task.has_command() && task.command().has_user()) {
    result.set_user(task.command().user());
  } else if (task.has_executor() && task.executor().command().has_user()) {
    result.set_user(task.executor().command().user());
  }

  return result;
}
// Launches a container by serializing the task into an `ExternalTask`
// and handing it to the external containerizer program's "launch"
// command. Returns a future that completes once the external program's
// result pipe reaches EOF (continued in `_launch`).
//
//   containerId - id for the new container; must not already be running.
//   taskInfo    - the task to launch inside the container.
//   directory   - sandbox work directory for the executor.
//   user        - optional user to run the sandbox as.
//   slavePid    - PID of the owning slave, passed into the executor
//                 environment.
//   checkpoint  - whether framework checkpointing is enabled.
Future<ExecutorInfo> ExternalContainerizerProcess::launch(
    const ContainerID& containerId,
    const TaskInfo& taskInfo,
    const FrameworkID& frameworkId,
    const std::string& directory,
    const Option<std::string>& user,
    const SlaveID& slaveId,
    const PID<Slave>& slavePid,
    bool checkpoint)
{
  LOG(INFO) << "Launching container '" << containerId << "'";

  // Get the executor from our task. If no executor is associated with
  // the given task, this function renders an ExecutorInfo using the
  // mesos-executor as its command.
  ExecutorInfo executor = containerExecutorInfo(flags, taskInfo, frameworkId);
  executor.mutable_resources()->MergeFrom(taskInfo.resources());

  // Refuse duplicate launches for the same container id.
  if (containers.contains(containerId)) {
    return Failure("Cannot start already running container '" +
                   containerId.value() + "'");
  }

  // Register the sandbox before invoking the external program so later
  // continuations can find it.
  sandboxes.put(containerId, Owned<Sandbox>(new Sandbox(directory, user)));

  map<string, string> environment = executorEnvironment(
      executor,
      directory,
      slaveId,
      slavePid,
      checkpoint,
      flags.recovery_timeout);

  if (!flags.hadoop_home.empty()) {
    environment["HADOOP_HOME"] = flags.hadoop_home;
  }

  // Work on a private copy: we may mutate the command's container
  // below, and `taskInfo` is const.
  TaskInfo task;
  task.CopyFrom(taskInfo);
  CommandInfo* command = task.has_executor()
    ? task.mutable_executor()->mutable_command()
    : task.mutable_command();
  // When the selected command has no container attached, use the
  // default from the slave startup flags, if available.
  if (!command->has_container()) {
    if (flags.default_container_image.isSome()) {
      command->mutable_container()->set_image(
          flags.default_container_image.get());
    } else {
      LOG(INFO) << "No container specified in task and no default given. "
                << "The external containerizer will have to fill in "
                << "defaults.";
    }
  }

  // Wrap the task (plus the path to the stock mesos-executor) in the
  // protobuf handed to the external program on its stdin.
  ExternalTask external;
  external.mutable_task()->CopyFrom(task);
  external.set_mesos_executor_path(
      path::join(flags.launcher_dir, "mesos-executor"));

  stringstream output;
  external.SerializeToOstream(&output);

  Try<Subprocess> invoked = invoke(
      "launch",
      containerId,
      output.str(),
      environment);

  if (invoked.isError()) {
    return Failure("Launch of container '" + containerId.value() +
                   "' failed (error: " + invoked.error() + ")");
  }

  // Record the process.
  containers.put(
      containerId,
      Owned<Container>(new Container(invoked.get().pid())));

  VLOG(2) << "Now awaiting data from pipe...";

  // Read from the result-pipe and invoke callbacks when reaching EOF.
  return await(read(invoked.get().out()), invoked.get().status())
    .then(defer(
        PID<ExternalContainerizerProcess>(this),
        &ExternalContainerizerProcess::_launch,
        containerId,
        frameworkId,
        executor,
        slaveId,
        checkpoint,
        lambda::_1));
}
// Launches the (single) docker task this executor is responsible for:
// validates the task, starts `docker run`, and arranges for a
// TASK_RUNNING update to be sent once `docker inspect` returns.
// Rejects a second launch attempt with TASK_FAILED, since a docker
// executor runs exactly one task.
void launchTask(ExecutorDriver* driver, const TaskInfo& task)
{
  // `run` being set means a task was already launched on this executor.
  if (run.isSome()) {
    // TODO(alexr): Use `protobuf::createTaskStatus()`
    // instead of manually setting fields.
    TaskStatus status;
    status.mutable_task_id()->CopyFrom(task.task_id());
    status.set_state(TASK_FAILED);
    status.set_message(
        "Attempted to run multiple tasks using a \"docker\" executor");

    driver->sendStatusUpdate(status);
    return;
  }

  // Capture the TaskID.
  taskId = task.task_id();

  // Capture the kill policy.
  if (task.has_kill_policy()) {
    killPolicy = task.kill_policy();
  }

  LOG(INFO) << "Starting task " << taskId.get();

  // Invariants established by the containerizer before delegating to
  // this executor: a docker container and a command must be present.
  CHECK(task.has_container());
  CHECK(task.has_command());

  CHECK(task.container().type() == ContainerInfo::DOCKER);

  Try<Docker::RunOptions> runOptions = Docker::RunOptions::create(
      task.container(),
      task.command(),
      containerName,
      sandboxDirectory,
      mappedDirectory,
      task.resources() + task.executor().resources(),
      cgroupsEnableCfs,
      taskEnvironment,
      None(), // No extra devices.
      defaultContainerDNS
  );

  if (runOptions.isError()) {
    // TODO(alexr): Use `protobuf::createTaskStatus()`
    // instead of manually setting fields.
    TaskStatus status;
    status.mutable_task_id()->CopyFrom(task.task_id());
    status.set_state(TASK_FAILED);
    status.set_message(
        "Failed to create docker run options: " + runOptions.error());

    driver->sendStatusUpdate(status);
    _stop();
    return;
  }

  // We're adding task and executor resources to launch docker since
  // the DockerContainerizer updates the container cgroup limits
  // directly and it expects it to be the sum of both task and
  // executor resources. This does leave to a bit of unaccounted
  // resources for running this executor, but we are assuming
  // this is just a very small amount of overcommit.
  run = docker->run(
      runOptions.get(),
      Subprocess::FD(STDOUT_FILENO),
      Subprocess::FD(STDERR_FILENO));

  run->onAny(defer(self(), &Self::reaped, lambda::_1));

  // Delay sending TASK_RUNNING status update until we receive
  // inspect output. Note that we store a future that completes
  // after the sending of the running update. This allows us to
  // ensure that the terminal update is sent after the running
  // update (see `reaped()`).
  //
  // NOTE: the lambda captures by copy ([=]) on purpose -- it runs
  // asynchronously and must not reference locals of this frame.
  inspect = docker->inspect(containerName, DOCKER_INSPECT_DELAY)
    .then(defer(self(), [=](const Docker::Container& container) {
      if (!killed) {
        containerPid = container.pid;

        // TODO(alexr): Use `protobuf::createTaskStatus()`
        // instead of manually setting fields.
        TaskStatus status;
        status.mutable_task_id()->CopyFrom(taskId.get());
        status.set_state(TASK_RUNNING);
        status.set_data(container.output);
        if (container.ipAddress.isSome()) {
          // TODO(karya): Deprecated -- Remove after 0.25.0 has shipped.
          Label* label = status.mutable_labels()->add_labels();
          label->set_key("Docker.NetworkSettings.IPAddress");
          label->set_value(container.ipAddress.get());

          NetworkInfo* networkInfo =
            status.mutable_container_status()->add_network_infos();

          // Copy the NetworkInfo if it is specified in the
          // ContainerInfo. A Docker container has at most one
          // NetworkInfo, which is validated in containerizer.
          if (task.container().network_infos().size() > 0) {
            networkInfo->CopyFrom(task.container().network_infos(0));
            networkInfo->clear_ip_addresses();
          }

          NetworkInfo::IPAddress* ipAddress = networkInfo->add_ip_addresses();
          ipAddress->set_ip_address(container.ipAddress.get());

          containerNetworkInfo = *networkInfo;
        }
        driver->sendStatusUpdate(status);
      }

      return Nothing();
    }));

  inspect.onFailed(defer(self(), [=](const string& failure) {
    LOG(ERROR) << "Failed to inspect container '" << containerName << "'"
               << ": " << failure;

    // TODO(bmahler): This is fatal, try to shut down cleanly.
    // Since we don't have a container id, we can only discard
    // the run future.
  }));

  // Once inspect succeeds, perform the post-launch verification and
  // start health checking against the running container.
  inspect.onReady(defer(self(), &Self::launchCheck, task));

  inspect.onReady(
      defer(self(), &Self::launchHealthCheck, containerName, task));
}