void update(const TaskStatus& status) { CHECK_EQ(SUBSCRIBED, state); LOG(INFO) << "Task " << status.task_id().value() << " is in state " << TaskState_Name(status.state()) << (status.has_message() ? " with message: " + status.message() : ""); if (status.has_uuid()) { Call call; call.set_type(Call::ACKNOWLEDGE); CHECK(framework.has_id()); call.mutable_framework_id()->CopyFrom(framework.id()); Call::Acknowledge* acknowledge = call.mutable_acknowledge(); acknowledge->mutable_agent_id()->CopyFrom(status.agent_id()); acknowledge->mutable_task_id()->CopyFrom(status.task_id()); acknowledge->set_uuid(status.uuid()); mesos->send(call); } if (status.state() == TaskState::TASK_KILLED || status.state() == TaskState::TASK_LOST || status.state() == TaskState::TASK_FAILED || status.state() == TaskState::TASK_ERROR) { ++metrics.abnormal_terminations; } }
// Builds a StatusUpdate protobuf stamped with the current time and a fresh
// random UUID. The embedded TaskStatus carries the task id, state and
// (optionally empty) message; the executor id is set only when provided.
inline StatusUpdate createStatusUpdate(
    const FrameworkID& frameworkId,
    const SlaveID& slaveId,
    const TaskID& taskId,
    const TaskState& state,
    const std::string& message = "",
    const Option<ExecutorID>& executorId = None())
{
  StatusUpdate update;

  update.set_timestamp(process::Clock::now().secs());
  update.set_uuid(UUID::random().toBytes());
  update.mutable_framework_id()->MergeFrom(frameworkId);
  update.mutable_slave_id()->MergeFrom(slaveId);

  if (executorId.isSome()) {
    update.mutable_executor_id()->MergeFrom(executorId.get());
  }

  TaskStatus* taskStatus = update.mutable_status();
  taskStatus->mutable_task_id()->MergeFrom(taskId);
  taskStatus->set_state(state);
  taskStatus->set_message(message);

  return update;
}
// Runs the content-box selection stage for one page: reuses stored params
// when their dependencies still match the current transform, otherwise
// auto-detects the content box and persists the new params. Returns either
// the next task's result or a UI-update result for this filter.
//
// NOTE(review): std::auto_ptr is deprecated (removed in C++17) — consider
// migrating getPageParams()/this local to std::unique_ptr; confirm the
// return type of getPageParams before changing.
FilterResultPtr Task::process(TaskStatus const& status, FilterData const& data)
{
  status.throwIfCancelled();

  // Params are only valid for the crop area they were computed against.
  Dependencies const deps(data.xform().resultingCropArea());

  std::auto_ptr<Params> params(m_ptrSettings->getPageParams(m_pageId));
  if (params.get() && !params->dependencies().matches(deps)) {
    // Stored params are stale — discard and fall through to re-detection.
    params.reset();
  }

  OptionsWidget::UiData ui_data;
  ui_data.setSizeCalc(PhysSizeCalc(data.xform()));

  if (params.get()) {
    ui_data.setContentRect(params->contentRect());
    ui_data.setDependencies(params->dependencies());
    ui_data.setMode(params->mode());

    if (params->contentSizeMM().isEmpty() && !params->contentRect().isEmpty()) {
      // Backwards compatibility: put the missing data where it belongs.
      Params const new_params(
          ui_data.contentRect(), ui_data.contentSizeMM(),
          params->dependencies(), params->mode()
      );
      m_ptrSettings->setPageParams(m_pageId, new_params);
    }
  } else {
    // No usable stored params: auto-detect the content box and persist it.
    QRectF const content_rect(
        ContentBoxFinder::findContentBox(
            status, data, m_ptrDbg.get()
        )
    );
    ui_data.setContentRect(content_rect);
    ui_data.setDependencies(deps);
    ui_data.setMode(MODE_AUTO);

    Params const new_params(
        ui_data.contentRect(), ui_data.contentSizeMM(), deps, MODE_AUTO
    );
    m_ptrSettings->setPageParams(m_pageId, new_params);
  }

  status.throwIfCancelled();

  if (m_ptrNextTask) {
    // Hand off to the next pipeline stage with the detected content rect.
    return m_ptrNextTask->process(
        status, FilterData(data, data.xform()), ui_data.contentRect()
    );
  } else {
    // End of pipeline: produce a UI update for this filter.
    return FilterResultPtr(
        new UiUpdater(
            m_ptrFilter, m_pageId, m_ptrDbg, data.origImage(),
            data.xform(), ui_data, m_batchProcessing
        )
    );
  }
}
// This test ensures that the driver handles an empty slave id // in an acknowledgement message by dropping it. The driver will // log an error in this case (but we don't test for that). We // generate a status with no slave id by performing reconciliation. TEST_F(MesosSchedulerDriverTest, ExplicitAcknowledgementsUnsetSlaveID) { Try<Owned<cluster::Master>> master = StartMaster(); ASSERT_SOME(master); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, false, DEFAULT_CREDENTIAL); Future<Nothing> registered; EXPECT_CALL(sched, registered(&driver, _, _)) .WillOnce(FutureSatisfy(®istered)); // Ensure no status update acknowledgements are sent to the master. EXPECT_NO_FUTURE_CALLS( mesos::scheduler::Call(), mesos::scheduler::Call::ACKNOWLEDGE, _ , master.get()->pid); driver.start(); AWAIT_READY(registered); Future<TaskStatus> update; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&update)); // Peform reconciliation without using a slave id. vector<TaskStatus> statuses; TaskStatus status; status.mutable_task_id()->set_value("foo"); status.set_state(TASK_RUNNING); statuses.push_back(status); driver.reconcileTasks(statuses); AWAIT_READY(update); ASSERT_EQ(TASK_LOST, update.get().state()); ASSERT_EQ(TaskStatus::SOURCE_MASTER, update.get().source()); ASSERT_EQ(TaskStatus::REASON_RECONCILIATION, update.get().reason()); ASSERT_FALSE(update.get().has_slave_id()); // Now send the acknowledgement. driver.acknowledgeStatusUpdate(update.get()); // Settle the clock to ensure driver processes the acknowledgement, // which should get dropped due to the missing slave id. Clock::pause(); Clock::settle(); driver.stop(); driver.join(); }
// Returns a JSON object modeled on a TaskStatus. JSON::Object model(const TaskStatus& status) { JSON::Object object; object.values["state"] = TaskState_Name(status.state()); object.values["timestamp"] = status.timestamp(); return object; }
// Verifies that launching a task that requests no resources fails:
// the scheduler should get back TASK_LOST with the message
// "Task uses no resources".
TEST(ResourceOffersTest, TaskUsesNoResources)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  // Launch an in-process cluster.
  // NOTE(review): arguments presumably (masters/slaves, cpus, mem, quiet) —
  // confirm against local::launch's signature.
  PID<Master> master = local::launch(1, 2, 1 * Gigabyte, false);

  MockScheduler sched;
  MesosSchedulerDriver driver(&sched, "", DEFAULT_EXECUTOR_INFO, master);

  vector<Offer> offers;
  trigger resourceOffersCall;

  EXPECT_CALL(sched, registered(&driver, _))
    .Times(1);

  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(DoAll(SaveArg<1>(&offers), Trigger(&resourceOffersCall)))
    .WillRepeatedly(Return());

  driver.start();

  WAIT_UNTIL(resourceOffersCall);

  EXPECT_NE(0, offers.size());

  // Deliberately omit any resources on the task.
  TaskDescription task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers[0].slave_id());

  vector<TaskDescription> tasks;
  tasks.push_back(task);

  TaskStatus status;
  trigger statusUpdateCall;

  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(DoAll(SaveArg<1>(&status), Trigger(&statusUpdateCall)));

  driver.launchTasks(offers[0].id(), tasks);

  WAIT_UNTIL(statusUpdateCall);

  // The task must be rejected with an explanatory message.
  EXPECT_EQ(task.task_id(), status.task_id());
  EXPECT_EQ(TASK_LOST, status.state());
  EXPECT_TRUE(status.has_message());
  EXPECT_EQ("Task uses no resources", status.message());

  driver.stop();
  driver.join();

  local::shutdown();
}
// This test verifies that reconciliation of an unknown task that // belongs to a known slave results in TASK_LOST. TEST_F(ReconciliationTest, UnknownTask) { Try<PID<Master> > master = StartMaster(); ASSERT_SOME(master); Future<SlaveRegisteredMessage> slaveRegisteredMessage = FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _); Try<PID<Slave> > slave = StartSlave(); ASSERT_SOME(slave); // Wait for the slave to register and get the slave id. AWAIT_READY(slaveRegisteredMessage); const SlaveID slaveId = slaveRegisteredMessage.get().slave_id(); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL); Future<FrameworkID> frameworkId; EXPECT_CALL(sched, registered(&driver, _, _)) .WillOnce(FutureArg<1>(&frameworkId)); EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillRepeatedly(Return()); // Ignore offers. driver.start(); // Wait until the framework is registered. AWAIT_READY(frameworkId); Future<TaskStatus> update; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&update)); vector<TaskStatus> statuses; // Create a task status with a random task id. TaskStatus status; status.mutable_task_id()->set_value(UUID::random().toString()); status.mutable_slave_id()->CopyFrom(slaveId); status.set_state(TASK_RUNNING); statuses.push_back(status); driver.reconcileTasks(statuses); // Framework should receive TASK_LOST for unknown task. AWAIT_READY(update); EXPECT_EQ(TASK_LOST, update.get().state()); driver.stop(); driver.join(); Shutdown(); // Must shutdown before 'containerizer' gets deallocated. }
// Invoked once the command's child process has been reaped: translates the
// wait() status in 'status_' into a terminal TaskState plus a human-readable
// message, sends the final status update, and stops the driver.
void reaped(
    ExecutorDriver* driver,
    const TaskID& taskId,
    pid_t pid,
    const Future<Option<int> >& status_)
{
  TaskState state;
  string message;

  // The process is gone, so the escalation timer is no longer needed
  // (presumably a kill-escalation timeout — TODO confirm).
  Timer::cancel(escalationTimer);

  if (!status_.isReady()) {
    // The wait() future itself failed or was discarded.
    state = TASK_FAILED;
    message =
      "Failed to get exit status for Command: " +
      (status_.isFailed() ? status_.failure() : "future discarded");
  } else if (status_.get().isNone()) {
    // Ready but empty: no exit status available.
    state = TASK_FAILED;
    message = "Failed to get exit status for Command";
  } else {
    int status = status_.get().get();
    CHECK(WIFEXITED(status) || WIFSIGNALED(status)) << status;

    if (WIFEXITED(status) && WEXITSTATUS(status) == 0) {
      state = TASK_FINISHED;
    } else if (killed) {
      // Send TASK_KILLED if the task was killed as a result of
      // killTask() or shutdown().
      state = TASK_KILLED;
    } else {
      state = TASK_FAILED;
    }

    // e.g. "Command exited with status 1" / "Command terminated with signal ...".
    message =
      string("Command") +
      (WIFEXITED(status) ? " exited with status " : " terminated with signal ") +
      (WIFEXITED(status)
         ? stringify(WEXITSTATUS(status))
         : strsignal(WTERMSIG(status)));
  }

  cout << message << " (pid: " << pid << ")" << endl;

  TaskStatus taskStatus;
  taskStatus.mutable_task_id()->MergeFrom(taskId);
  taskStatus.set_state(state);
  taskStatus.set_message(message);

  driver->sendStatusUpdate(taskStatus);

  // A hack for now ... but we need to wait until the status update
  // is sent to the slave before we shut ourselves down.
  os::sleep(Seconds(1));

  driver->stop();
}
// Simulates work by sleeping a random 0-9 seconds, then reports the task
// as finished.
void run(ExecutorDriver* driver, const TaskInfo& task)
{
  os::sleep(Seconds(random() % 10));

  TaskStatus finished;
  finished.mutable_task_id()->MergeFrom(task.task_id());
  finished.set_state(TASK_FINISHED);

  driver->sendStatusUpdate(finished);
}
// Executor shutdown: force-removes the managed docker container and reports
// the task as KILLED.
void CephExecutor::shutdown(ExecutorDriver* driver)
{
  LOG(INFO) << "Killing this container process";
  LOG(INFO) << runShellCommand("docker rm -f " + containerName);

  TaskStatus killedStatus;
  killedStatus.mutable_task_id()->MergeFrom(myTaskId);
  killedStatus.set_state(TASK_KILLED);

  driver->sendStatusUpdate(killedStatus);
}
// Balloon-framework status handler: in short-running mode, aborts on the
// expected OOM failure (so the wrapping test script sees "PASS") or stops on
// any other terminal state; then updates task metrics for the latest task.
void statusUpdate(SchedulerDriver* driver, const TaskStatus& status)
{
  if (!flags.long_running) {
    if (status.state() == TASK_FAILED &&
        status.reason() == TaskStatus::REASON_CONTAINER_LIMITATION_MEMORY) {
      // NOTE: We expect TASK_FAILED when this scheduler is launched by the
      // balloon_framework_test.sh shell script. The abort here ensures the
      // script considers the test result as "PASS".
      driver->abort();
    } else if (status.state() == TASK_FAILED ||
        status.state() == TASK_FINISHED ||
        status.state() == TASK_KILLED ||
        status.state() == TASK_LOST ||
        status.state() == TASK_ERROR) {
      driver->stop();
    }
  }

  // Task ids are derived from the launch counter, so anything other than
  // the most recently launched task is stale.
  if (stringify(tasksLaunched - 1) != status.task_id().value()) {
    // We might receive messages from older tasks. Ignore them.
    LOG(INFO) << "Ignoring status update from older task "
              << status.task_id();
    return;
  }

  switch (status.state()) {
    case TASK_FINISHED:
      taskActive = false;
      ++metrics.tasks_finished;
      break;
    case TASK_FAILED:
      taskActive = false;
      if (status.reason() == TaskStatus::REASON_CONTAINER_LIMITATION_MEMORY) {
        ++metrics.tasks_oomed;
        break;
      }

      // NOTE: Fetching the executor (e.g. `--executor_uri`) may fail
      // occasionally if the URI is rate limited. This case is common
      // enough that it makes sense to track this failure metric separately.
      if (status.reason() == TaskStatus::REASON_CONTAINER_LAUNCH_FAILED) {
        ++metrics.launch_failures;
        break;
      }
      // Intentional fall-through: any other failure reason counts as an
      // abnormal termination below.
    case TASK_KILLED:
    case TASK_LOST:
    case TASK_ERROR:
      taskActive = false;
      ++metrics.abnormal_terminations;
      break;
    default:
      break;
  }
}
// Handles task state transitions for the Chapel framework: tallies finished
// tasks, tears everything down on FAILED/LOST/KILLED, and stops the driver
// once the termination criteria are met.
//
// Fixes in this revision:
//  * The "Execution halted!" string literal spanned a raw newline, which is
//    not valid C++; the newline is now escaped.
//  * The launch-shortfall warning compared 'tasksLaunched < tasksFinished',
//    which can never be true when tasks finish at most once each; the intent
//    (per its own message) is to warn when FEWER tasks finished than were
//    launched.
void ChapelScheduler::statusUpdate(SchedulerDriver* driver, const TaskStatus& status)
{
  const string taskId = status.task_id().value();

  if (status.state() == TASK_FINISHED) {
    tasksFinished += 1;
    cout << "ChapelScheduler::statusUpdate\tTask " << taskId
         << " finished of # tasksLaunched " << tasksLaunched
         << " # finished " << tasksFinished << endl;
  }

  if (status.state() == TASK_FAILED) {
    cout << "ChapelScheduler::statusUpdate\tTask " << taskId
         << " FAILED!" << endl;
    terminateAllTasks(schedulerDriver);
    taskExecError = true;
    driver->stop();
  }

  if (status.state() == TASK_LOST) {
    cout << "ChapelScheduler::statusUpdate\tTask " << taskId
         << " LOST!" << endl;
    terminateAllTasks(schedulerDriver);
    taskExecError = true;
    // Forget the lost task so it is not counted as still launched.
    map<string, TaskInfo>::iterator rm = launchedTsks.find(taskId);
    if (rm != launchedTsks.end()) { launchedTsks.erase(rm); }
  }

  if (status.state() == TASK_KILLED) {
    cout << "ChapelScheduler::statusUpdate\tTask " << taskId
         << " KILLED!" << endl;
    terminateAllTasks(schedulerDriver);
    taskExecError = true;
    map<string, TaskInfo>::iterator rm = launchedTsks.find(taskId);
    if (rm != launchedTsks.end()) { launchedTsks.erase(rm); }
  }

  cout << "ChapelScheduler::statusUpdate\tMet termination criteria?\t"
       << (tasksFinished == tasksLaunched) << " " << tasksFinished << " "
       << tasksLaunched << " " << taskExecError << endl;

  if (taskExecError ||
      ((tasksFinished == tasksLaunched) || (tasksFinished == cpusReq))) {
    // BUG FIX: was 'tasksLaunched < tasksFinished' (impossible here).
    if (tasksFinished < tasksLaunched) {
      cout << "ChapelScheduler::statusUpdate\tError getting nodes launched for the batch job! Try re-running the code!" << endl;
    }

    // Wait to receive any pending framework messages
    //
    // If some framework messages are lost, it may hang indefinitely
    // to solve the indefinite "hang", numAttempts caps out and then
    // terminates the while loop.
    //
    int attempts = 0;
    while (tasksFinished != tasksLaunched && attempts < numAttempts) {
      // BUG FIX: emit the line break with an escape instead of a raw
      // newline inside the literal.
      cout << "ChapelScheduler::statusUpdate\tExecution halted!\n"
              "Waiting for remote nodes to catch up! Attempts\t"
           << attempts << endl;
      sleep(1);
      attempts += 1;
    }

    cout << "All Chapel task for this framework instance are complete! Shutting down!" << endl;
    driver->stop();
  }
}
// Logs each status update for the single task this scheduler launched
// (whose id must equal 'name') and stops the driver on a terminal state.
virtual void statusUpdate(
    SchedulerDriver* driver,
    const TaskStatus& status)
{
  CHECK_EQ(name, status.task_id().value());

  cout << "Received status update " << status.state()
       << " for task " << status.task_id() << endl;

  if (protobuf::isTerminalState(status.state())) {
    driver->stop();
  }
}
// data format is: // <MessageToExecutor>.<OSDID> for OSD executor // or just <MessageToExecutor> for MON executor void CephExecutor::frameworkMessage(ExecutorDriver* driver, const string& data) { LOG(INFO) << "Got framework message: " << data; MessageToExecutor msg; vector<string> tokens = StringUtil::explode(data,'.'); msg = (MessageToExecutor)lexical_cast<int>(tokens[0]); switch (msg){ case MessageToExecutor::REGISTER_OSD: LOG(INFO) << "Will register an OSD, and return the OSD ID"; driver->sendFrameworkMessage(registerOSD()); break; case MessageToExecutor::LAUNCH_OSD: if (tokens.size() == 2){ LOG(INFO) << "Will launch OSD docker with OSD ID: " << tokens[1]; string dockerCommand = constructOSDCommand( localSharedConfigDirRoot + "/" + localConfigDirName, tokens[1], containerName); myPID = fork(); if (0 == myPID){ //child long running docker thread //TODO: we use fork here. Need to check why below line will hung the executor //thread(&CephExecutor::startLongRunning,*this,"docker", dockerCommand).detach(); startLongRunning("docker",dockerCommand); } else { bool started = block_until_started(containerName, "30"); TaskStatus status; status.mutable_task_id()->MergeFrom(myTaskId); if (started) { LOG(INFO) << "Starting OSD task " << myTaskId.value(); //send the OSD id back to let scheduler remove it //format: <MessageToScheduler::CONSUMED_OSD_ID>.OSDID string msg = lexical_cast<string>(static_cast<int>(MessageToScheduler::CONSUMED_OSD_ID)) + "." + tokens[1]; status.set_message(msg); status.set_state(TASK_RUNNING); } else { LOG(INFO) << "Failed to start OSD task " << myTaskId.value(); status.set_state(TASK_FAILED); } driver->sendStatusUpdate(status); }//end else "0==pid" } else { LOG(INFO) << "No OSD ID given!"; } break; default: LOG(INFO) << "unknown message from scheduler"; } }
// Logs the update, counts finished tasks, and stops the driver once every
// launched task has completed.
virtual void statusUpdate(SchedulerDriver* driver, const TaskStatus& status)
{
  const int taskId = lexical_cast<int>(status.task_id().value());

  cout << "Task " << taskId << " is in state " << status.state() << endl;

  if (status.state() == TASK_FINISHED) {
    tasksFinished++;
  }

  if (tasksFinished == totalTasks) {
    driver->stop();
  }
}
// Scheduler-side status handler: advances the per-task state machine and
// processes executor messages piggy-backed on the status update
// (CONSUMED_OSD_ID on RUNNING, DISK_READY on FINISHED).
void CephSchedulerAgent<T>::statusUpdate(
    T* driver,
    const TaskStatus& status)
{
  LOG(INFO) << "Got status update from " << status.source();
  string taskId = status.task_id().value();

  if (status.state() == TASK_RUNNING) {
    LOG(INFO) << taskId << " is Running!";
    stateMachine->updateTaskToRunning(taskId);
    if (status.has_message()) {
      vector<string> tokens = StringUtil::explode(status.message(), '.');
      if ((MessageToScheduler)lexical_cast<int>(tokens[0])
          == MessageToScheduler::CONSUMED_OSD_ID) {
        string consumedOSDId = tokens[1];
        LOG(INFO) << "Got message of \"consumed_OSD_ID\": " << consumedOSDId;
      }
    }
  } else if (status.state() == TASK_STARTING) {
    LOG(INFO) << taskId << " is Waiting OSDID, ready for assign osd id!";
    stateMachine->updateTaskToWaitingOSDID(taskId);
  } else if (status.state() == TASK_FAILED) {
    LOG(INFO) << taskId << " failed";
    stateMachine->updateTaskToFailed(taskId);
    //TODO: if has message , add the OSD ID back to StateMachine
  } else if (status.state() == TASK_FINISHED) {
    //only disk executor will have this finished status
    if (status.has_message()) {
      vector<string> tokens = StringUtil::explode(status.message(), '.');
      if ((MessageToScheduler)lexical_cast<int>(tokens[0])
          == MessageToScheduler::DISK_READY) {
        // Message payload: "<hostname>:<dev1,dev2,...>" ("-" = no devices).
        string failedDevsStr = tokens[1];
        LOG(INFO) << "Got message of \"DISK_READY\": " << failedDevsStr;
        vector<string> failedDevs = StringUtil::explode(failedDevsStr, ':');
        string hostname = failedDevs[0];

        vector<string> devs;
        if ("-" != failedDevs[1]) {
          // BUG FIX: the original declared a second 'vector<string> devs'
          // here, shadowing the outer one, so the parsed device list was
          // discarded and updateDiskPartition() always received an empty
          // vector. Assign to the outer variable instead.
          devs = StringUtil::explode(failedDevs[1], ',');
        }

        HostConfig* hostconfig = stateMachine->getConfig(hostname);
        //TODO: get this "4" from yml config
        hostconfig->updateDiskPartition(devs, lexical_cast<int>("4"));
        hostconfig->setDiskPreparationDone();
      }
    }
  }
}
// Builds a TaskStatus carrying the given task id, state, update UUID and
// timestamp.
TaskStatus createTaskStatus(
    const TaskID& taskId,
    const TaskState& state,
    const id::UUID& uuid,
    double timestamp)
{
  TaskStatus status;

  status.mutable_task_id()->CopyFrom(taskId);
  status.set_state(state);
  status.set_uuid(uuid.toBytes());
  status.set_timestamp(timestamp);

  return status;
}
// Extracts the health of a task from its most recent status, or None() when
// the task has no statuses or the latest one carries no health information.
Option<bool> getTaskHealth(const Task& task)
{
  Option<bool> healthy = None();

  if (task.statuses_size() > 0) {
    // The statuses list only keeps the most recent TaskStatus for
    // each state, and appends later states at the end. Thus the last
    // status is either a terminal state (where health is
    // irrelevant), or the latest RUNNING status.
    //
    // PERF FIX: bind a const reference — the original copied the whole
    // TaskStatus message just to read one field.
    const TaskStatus& lastStatus = task.statuses(task.statuses_size() - 1);

    if (lastStatus.has_healthy()) {
      healthy = lastStatus.healthy();
    }
  }

  return healthy;
}
// Verifies that TaskStatus::forgive() clears both a transient backoff and a
// permanent failure state.
TEST(TestTaskStatus, Forgiveness)
{
  TaskStatus s;

  // Forgive transient backoff
  updateStatus(&s, TaskStatus::errorBackoff("err"));
  EXPECT_TRUE(s.isInBackoff(s.timestamp()));
  s.forgive();
  EXPECT_FALSE(s.isInBackoff(s.timestamp()));

  // Forgive permanent failure
  updateStatus(&s, TaskStatus::failed());
  EXPECT_TRUE(s.isFailed());
  s.forgive();
  EXPECT_FALSE(s.isFailed());
}
// Logs each update (and its message, when present). On a terminal state,
// aborts the driver for TASK_FAILED and stops it for anything else.
virtual void statusUpdate(SchedulerDriver* driver, const TaskStatus& status)
{
  std::cout << "Task in state " << status.state() << std::endl;

  if (status.has_message()) {
    std::cout << "Reason: " << status.message() << std::endl;
  }

  if (!protobuf::isTerminalState(status.state())) {
    return;
  }

  // NOTE: We expect TASK_FAILED here. The abort here ensures the shell
  // script invoking this test, considers the test result as 'PASS'.
  if (status.state() == TASK_FAILED) {
    driver->abort();
  } else {
    driver->stop();
  }
}
// Tracks container completion: counts FINISHED containers, stops on a
// FAILED container, and stops once every assigned container has completed.
virtual void statusUpdate(SchedulerDriver* driver, const TaskStatus& status)
{
  const string containerId = status.task_id().value();

  cout << "Container " << containerId << " is in state "
       << taskState[status.state()] << endl;

  switch (status.state()) {
    case TASK_FINISHED:
      containersFinished++;
      break;
    case TASK_FAILED:
      driver->stop();
      break;
    default:
      break;
  }

  cout << "Total complete: " << stringify(containersFinished)
       << " out of " << stringify(containersAssigned) << endl;

  if (containersFinished == containersAssigned) {
    driver->stop();
  }
}
void update(const TaskInfo& task, const TaskState& state) { UUID uuid = UUID::random(); TaskStatus status; status.mutable_task_id()->CopyFrom(task.task_id()); status.mutable_executor_id()->CopyFrom(executorId); status.set_state(state); status.set_source(TaskStatus::SOURCE_EXECUTOR); status.set_timestamp(process::Clock::now().secs()); status.set_uuid(uuid.toBytes()); Call call; call.mutable_framework_id()->CopyFrom(frameworkId); call.mutable_executor_id()->CopyFrom(executorId); call.set_type(Call::UPDATE); call.mutable_update()->mutable_status()->CopyFrom(status); // Capture the status update. updates[uuid] = call.update(); mesos->send(call); }
// Reports the task RUNNING, "performs" it, and immediately reports FINISHED.
virtual void launchTask(ExecutorDriver* driver, const TaskInfo& task)
{
  cout << "Starting task " << task.task_id().value() << endl;

  TaskStatus running;
  running.mutable_task_id()->MergeFrom(task.task_id());
  running.set_state(TASK_RUNNING);
  driver->sendStatusUpdate(running);

  // This is where one would perform the requested task.

  cout << "Finishing task " << task.task_id().value() << endl;

  TaskStatus finished;
  finished.mutable_task_id()->MergeFrom(task.task_id());
  finished.set_state(TASK_FINISHED);
  driver->sendStatusUpdate(finished);
}
// Counts finished crawl/render tasks; once every launched task has finished
// and both work queues are drained, waits for outstanding framework
// messages and shuts down.
virtual void statusUpdate(SchedulerDriver* driver, const TaskStatus& status)
{
  if (status.state() == TASK_FINISHED) {
    cout << "Task " << status.task_id().value() << " finished" << endl;
    tasksFinished++;
  }

  if (tasksFinished == tasksLaunched &&
      crawlQueue.empty() &&
      renderQueue.empty()) {
    // Wait to receive any pending framework messages
    // If some framework messages are lost, it may hang indefinitely.
    // NOTE(review): this busy-wait blocks the driver callback thread and
    // has no timeout — consider capping the attempts as other schedulers
    // in this codebase do.
    while (frameworkMessagesReceived != tasksFinished) {
      sleep(1);
    }

    shutdown();
    driver->stop();
  }
}
// This test verifies that reconciliation of a task that belongs to an // unknown slave results in TASK_LOST. TEST_F(ReconciliationTest, UnknownSlave) { Try<PID<Master> > master = StartMaster(); ASSERT_SOME(master); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL); Future<FrameworkID> frameworkId; EXPECT_CALL(sched, registered(&driver, _, _)) .WillOnce(FutureArg<1>(&frameworkId)); driver.start(); // Wait until the framework is registered. AWAIT_READY(frameworkId); Future<TaskStatus> update; EXPECT_CALL(sched, statusUpdate(&driver, _)) .WillOnce(FutureArg<1>(&update)); vector<TaskStatus> statuses; // Create a task status with a random slave id (and task id). TaskStatus status; status.mutable_task_id()->set_value(UUID::random().toString()); status.mutable_slave_id()->set_value(UUID::random().toString()); status.set_state(TASK_RUNNING); statuses.push_back(status); driver.reconcileTasks(statuses); // Framework should receive TASK_LOST because the slave is unknown. AWAIT_READY(update); EXPECT_EQ(TASK_LOST, update.get().state()); driver.stop(); driver.join(); Shutdown(); }
// Hook: attaches the container's IP address as a label on the task status.
// Returns None() (leaving labels untouched) whenever any link of the
// executor -> container -> info chain is missing.
virtual Result<Labels> slaveTaskStatusLabelDecorator(
    const FrameworkID& frameworkId,
    const TaskStatus& status)
{
  LOG(INFO) << "CalicoHook::task status label decorator";

  if (!status.has_executor_id()) {
    LOG(WARNING) << "CalicoHook:: task status has no valid executor id";
    return None();
  }

  const ExecutorID executorId = status.executor_id();
  if (!executors->contains(executorId)) {
    LOG(WARNING) << "CalicoHook:: no valid container id for: " << executorId;
    return None();
  }

  const ContainerID containerId = executors->at(executorId);
  if (infos == NULL || !infos->contains(containerId)) {
    LOG(WARNING) << "CalicoHook:: no valid infos for: " << containerId;
    return None();
  }

  const Info* info = (*infos)[containerId];
  if (info->ipAddress.isNone()) {
    LOG(WARNING) << "CalicoHook:: no valid IP address";
    return None();
  }

  // Start from the existing labels (if any) and append the IP address.
  Labels labels;
  if (status.has_labels()) {
    labels.CopyFrom(status.labels());
  }

  Label* ipLabel = labels.add_labels();
  ipLabel->set_key(ipAddressLabelKey);
  ipLabel->set_value(info->ipAddress.get());

  LOG(INFO) << "CalicoHook:: added label "
            << ipLabel->key() << ":" << ipLabel->value();

  return labels;
}
// Reports the task RUNNING, simulates a second of work, then reports
// FINISHED.
virtual void launchTask(ExecutorDriver* driver, const TaskInfo& task)
{
  cout << "Starting task " << task.task_id().value() << endl;

  TaskStatus running;
  running.mutable_task_id()->MergeFrom(task.task_id());
  running.set_state(TASK_RUNNING);
  driver->sendStatusUpdate(running);

  sleep(1);

  cout << "Finishing task " << task.task_id().value() << endl;

  TaskStatus finished;
  finished.mutable_task_id()->MergeFrom(task.task_id());
  finished.set_state(TASK_FINISHED);
  driver->sendStatusUpdate(finished);
}
// TODO(vinod): Make SlaveID optional because 'StatusUpdate.SlaveID' // is optional. StatusUpdate createStatusUpdate( const FrameworkID& frameworkId, const Option<SlaveID>& slaveId, const TaskID& taskId, const TaskState& state, const TaskStatus::Source& source, const string& message = "", const Option<TaskStatus::Reason>& reason = None(), const Option<ExecutorID>& executorId = None(), const Option<bool>& healthy = None()) { StatusUpdate update; update.set_timestamp(process::Clock::now().secs()); update.set_uuid(UUID::random().toBytes()); update.mutable_framework_id()->MergeFrom(frameworkId); if (slaveId.isSome()) { update.mutable_slave_id()->MergeFrom(slaveId.get()); } if (executorId.isSome()) { update.mutable_executor_id()->MergeFrom(executorId.get()); } TaskStatus* status = update.mutable_status(); status->mutable_task_id()->MergeFrom(taskId); if (slaveId.isSome()) { status->mutable_slave_id()->MergeFrom(slaveId.get()); } status->set_state(state); status->set_source(source); status->set_message(message); status->set_timestamp(update.timestamp()); if (reason.isSome()) { status->set_reason(reason.get()); } if (healthy.isSome()) { status->set_healthy(healthy.get()); } return update; }
// Test hook: rewrites the status labels (dropping "foo", adding bar=qux)
// and attaches container network info that the
// 'HookTest.VerifySlaveTaskStatusDecorator' test later validates.
virtual Result<TaskStatus> slaveTaskStatusDecorator(
    const FrameworkID& frameworkId,
    const TaskStatus& status)
{
  LOG(INFO) << "Executing 'slaveTaskStatusDecorator' hook";

  Labels labels;

  // Set one known label.
  Label* knownLabel = labels.add_labels();
  knownLabel->set_key("bar");
  knownLabel->set_value("qux");

  // Remove label which was set by test.
  foreach (const Label& existing, status.labels().labels()) {
    if (existing.key() != "foo") {
      labels.add_labels()->CopyFrom(existing);
    }
  }

  TaskStatus result;
  result.mutable_labels()->CopyFrom(labels);

  // Set an IP address, a network isolation group, and a known label
  // in network info. This data is later validated by the
  // 'HookTest.VerifySlaveTaskStatusDecorator' test.
  NetworkInfo* networkInfo =
    result.mutable_container_status()->add_network_infos();

  // TODO(CD): Deprecated -- remove after 0.27.0.
  networkInfo->set_ip_address("4.3.2.1");

  NetworkInfo::IPAddress* ipAddress = networkInfo->add_ip_addresses();
  ipAddress->set_ip_address("4.3.2.1");

  networkInfo->add_groups("public");

  Label* netLabel = networkInfo->mutable_labels()->add_labels();
  netLabel->set_key("net_foo");
  netLabel->set_value("net_bar");

  return result;
}
// Serializes 'status' into '*serialized_str' using the Thrift binary
// protocol. Any previous contents of the output string are overwritten.
void SerializedTaskStatus(
    const TaskStatus &status, std::string *serialized_str)
{
  assert(serialized_str != NULL);

  boost::shared_ptr<TMemoryBuffer> str_buff(new TMemoryBuffer());
  boost::shared_ptr<TBinaryProtocol> binary_protocol(
      new TBinaryProtocol(str_buff));

  status.write(binary_protocol.get());

  // FIX: dropped the explicit clear() the original performed first —
  // assign() replaces the previous contents wholesale, so it was redundant.
  serialized_str->assign(str_buff->getBufferAsString());
}