// Deep equality for TaskStatus: two updates are equal only when every
// observable field matches. Comparisons are kept in the original order so
// short-circuit evaluation is unchanged.
bool operator == (const TaskStatus& left, const TaskStatus& right)
{
  return left.task_id() == right.task_id() &&
         left.state() == right.state() &&
         left.data() == right.data() &&
         left.message() == right.message() &&
         left.slave_id() == right.slave_id() &&
         left.timestamp() == right.timestamp() &&
         left.executor_id() == right.executor_id() &&
         left.healthy() == right.healthy() &&
         left.source() == right.source() &&
         left.reason() == right.reason() &&
         left.uuid() == right.uuid();
}
// Dispatches on the state carried by a status update from an executor and
// advances the scheduler's state machine accordingly. Executors smuggle
// structured payloads through the free-form message field as
// "<MessageToScheduler int>.<payload>" — see the explode('.') calls below.
void CephSchedulerAgent<T>::statusUpdate(
    T* driver,
    const TaskStatus& status)
{
  LOG(INFO) << "Got status update from " << status.source();
  string taskId = status.task_id().value();
  if (status.state() == TASK_RUNNING) {
    LOG(INFO) << taskId << " is Running!";
    stateMachine->updateTaskToRunning(taskId);
    if (status.has_message()) {
      vector<string> tokens = StringUtil::explode(status.message(), '.');
      if ((MessageToScheduler)lexical_cast<int>(tokens[0])
          == MessageToScheduler::CONSUMED_OSD_ID) {
        string consumedOSDId = tokens[1];
        LOG(INFO) << "Got message of \"consumed_OSD_ID\": " << consumedOSDId;
      }
    }
  } else if (status.state() == TASK_STARTING) {
    LOG(INFO) << taskId << " is Waiting OSDID, ready for assign osd id!";
    stateMachine->updateTaskToWaitingOSDID(taskId);
  } else if (status.state() == TASK_FAILED) {
    LOG(INFO) << taskId << " failed";
    stateMachine->updateTaskToFailed(taskId);
    //TODO: if has message , add the OSD ID back to StateMachine
  } else if (status.state() == TASK_FINISHED) {
    // Only the disk executor reports TASK_FINISHED; its message carries the
    // host's disk-preparation result as "<host>:<dev1,dev2,...|->".
    if (status.has_message()) {
      vector<string> tokens = StringUtil::explode(status.message(), '.');
      if ((MessageToScheduler)lexical_cast<int>(tokens[0])
          == MessageToScheduler::DISK_READY) {
        string failedDevsStr = tokens[1];
        LOG(INFO) << "Got message of \"DISK_READY\": " << failedDevsStr;
        vector<string> failedDevs = StringUtil::explode(failedDevsStr, ':');
        string hostname = failedDevs[0];
        vector<string> devs;
        if ("-" != failedDevs[1]) {
          // BUGFIX: this used to declare a second `vector<string> devs`
          // inside the if-block, shadowing the outer one, so the parsed
          // device list was discarded and an empty vector was always
          // handed to updateDiskPartition(). Assign to the outer vector.
          devs = StringUtil::explode(failedDevs[1], ',');
        }
        HostConfig* hostconfig = stateMachine->getConfig(hostname);
        //TODO: get this "4" from yml config
        // (was lexical_cast<int>("4") — a needless string round-trip)
        hostconfig->updateDiskPartition(devs, 4);
        hostconfig->setDiskPreparationDone();
      }
    }
  }
}
void update(const TaskStatus& status) { CHECK_EQ(SUBSCRIBED, state); LOG(INFO) << "Task " << status.task_id().value() << " is in state " << TaskState_Name(status.state()) << (status.has_message() ? " with message: " + status.message() : ""); if (status.has_uuid()) { Call call; call.set_type(Call::ACKNOWLEDGE); CHECK(framework.has_id()); call.mutable_framework_id()->CopyFrom(framework.id()); Call::Acknowledge* acknowledge = call.mutable_acknowledge(); acknowledge->mutable_agent_id()->CopyFrom(status.agent_id()); acknowledge->mutable_task_id()->CopyFrom(status.task_id()); acknowledge->set_uuid(status.uuid()); mesos->send(call); } if (status.state() == TaskState::TASK_KILLED || status.state() == TaskState::TASK_LOST || status.state() == TaskState::TASK_FAILED || status.state() == TaskState::TASK_ERROR) { ++metrics.abnormal_terminations; } }
// Launching a task that declares no resources must be rejected by the
// master with TASK_LOST and the message "Task uses no resources".
TEST(ResourceOffersTest, TaskUsesNoResources)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  PID<Master> master = local::launch(1, 2, 1 * Gigabyte, false);

  MockScheduler sched;
  MesosSchedulerDriver driver(&sched, "", DEFAULT_EXECUTOR_INFO, master);

  vector<Offer> receivedOffers;
  trigger offersTrigger;

  // Expectations are installed before the driver starts.
  EXPECT_CALL(sched, registered(&driver, _))
    .Times(1);

  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(DoAll(SaveArg<1>(&receivedOffers), Trigger(&offersTrigger)))
    .WillRepeatedly(Return());

  driver.start();

  WAIT_UNTIL(offersTrigger);

  EXPECT_NE(0, receivedOffers.size());

  // Build a task with an empty resource list on the offered slave.
  TaskDescription noResourceTask;
  noResourceTask.set_name("");
  noResourceTask.mutable_task_id()->set_value("1");
  noResourceTask.mutable_slave_id()->MergeFrom(receivedOffers[0].slave_id());

  vector<TaskDescription> launchList;
  launchList.push_back(noResourceTask);

  TaskStatus lastStatus;
  trigger statusTrigger;

  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(DoAll(SaveArg<1>(&lastStatus), Trigger(&statusTrigger)));

  driver.launchTasks(receivedOffers[0].id(), launchList);

  WAIT_UNTIL(statusTrigger);

  // The master should refuse the task and explain why.
  EXPECT_EQ(noResourceTask.task_id(), lastStatus.task_id());
  EXPECT_EQ(TASK_LOST, lastStatus.state());
  EXPECT_TRUE(lastStatus.has_message());
  EXPECT_EQ("Task uses no resources", lastStatus.message());

  driver.stop();
  driver.join();

  local::shutdown();
}
// Prints each status update; on a terminal state, aborts the driver for the
// expected TASK_FAILED (so the wrapping shell script records a PASS) and
// stops it for any other terminal state.
virtual void statusUpdate(SchedulerDriver* driver, const TaskStatus& status)
{
  std::cout << "Task in state " << status.state() << std::endl;
  if (status.has_message()) {
    std::cout << "Reason: " << status.message() << std::endl;
  }

  if (!protobuf::isTerminalState(status.state())) {
    return;  // Nothing more to do for intermediate states.
  }

  // NOTE: We expect TASK_FAILED here. The abort here ensures the shell
  // script invoking this test, considers the test result as 'PASS'.
  if (status.state() == TASK_FAILED) {
    driver->abort();
  } else {
    driver->stop();
  }
}
// Logs every update for the single tracked task (whose id must equal
// `name`), dumps the payload data when the task finishes, and stops the
// driver on any terminal state.
virtual void statusUpdate(
    SchedulerDriver* driver,
    const TaskStatus& status)
{
  CHECK_EQ(name, status.task_id().value());

  cout << "Received status update " << status.state()
       << " for task " << status.task_id() << endl;
  cout << "RECEIVED UPDATE:" << endl;
  cout << "Message: " << status.message() << endl;

  const bool finished = (status.state() == TASK_FINISHED);
  if (finished) {
    cout << "=== Dumping data ===" << endl;
    cout << status.data() << endl;
    cout << "====================" << endl;
  }

  if (mesos::internal::protobuf::isTerminalState(status.state())) {
    driver->stop();
  }
}
// Verifies that when a task is rejected for invalid resources (0 cpus), the
// declined resources are re-offered — here to a second framework.
TEST(ResourceOffersTest, ResourcesGetReofferedAfterTaskDescriptionError)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  // Local cluster: 1 slave, 2 cpus, 1GB mem, no quiet flag.
  PID<Master> master = local::launch(1, 2, 1 * Gigabyte, false);

  MockScheduler sched1;
  MesosSchedulerDriver driver1(&sched1, "", DEFAULT_EXECUTOR_INFO, master);

  vector<Offer> offers;
  trigger sched1ResourceOffersCall;

  // Expectations must be in place before the driver starts.
  EXPECT_CALL(sched1, registered(&driver1, _))
    .Times(1);

  EXPECT_CALL(sched1, resourceOffers(&driver1, _))
    .WillOnce(DoAll(SaveArg<1>(&offers),
                    Trigger(&sched1ResourceOffersCall)))
    .WillRepeatedly(Return());

  driver1.start();

  WAIT_UNTIL(sched1ResourceOffersCall);

  EXPECT_NE(0, offers.size());

  // Craft an invalid task: 0 cpus is not a usable resource request.
  TaskDescription task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers[0].slave_id());

  Resource* cpus = task.add_resources();
  cpus->set_name("cpus");
  cpus->set_type(Value::SCALAR);
  cpus->mutable_scalar()->set_value(0);

  Resource* mem = task.add_resources();
  mem->set_name("mem");
  mem->set_type(Value::SCALAR);
  mem->mutable_scalar()->set_value(1 * Gigabyte);

  vector<TaskDescription> tasks;
  tasks.push_back(task);

  TaskStatus status;
  trigger sched1StatusUpdateCall;

  EXPECT_CALL(sched1, statusUpdate(&driver1, _))
    .WillOnce(DoAll(SaveArg<1>(&status),
                    Trigger(&sched1StatusUpdateCall)));

  driver1.launchTasks(offers[0].id(), tasks);

  WAIT_UNTIL(sched1StatusUpdateCall);

  // The master must reject the invalid task with TASK_LOST and a reason.
  EXPECT_EQ(task.task_id(), status.task_id());
  EXPECT_EQ(TASK_LOST, status.state());
  EXPECT_TRUE(status.has_message());
  EXPECT_EQ("Task uses invalid resources", status.message());

  driver1.stop();
  driver1.join();

  // A second framework should now receive the freed-up resources.
  MockScheduler sched2;
  MesosSchedulerDriver driver2(&sched2, "", DEFAULT_EXECUTOR_INFO, master);

  trigger sched2ResourceOffersCall;

  EXPECT_CALL(sched2, registered(&driver2, _))
    .Times(1);

  EXPECT_CALL(sched2, resourceOffers(&driver2, _))
    .WillOnce(Trigger(&sched2ResourceOffersCall))
    .WillRepeatedly(Return());

  // The offer may be rescinded during shutdown; tolerate at most one.
  EXPECT_CALL(sched2, offerRescinded(&driver2, _))
    .Times(AtMost(1));

  driver2.start();

  WAIT_UNTIL(sched2ResourceOffersCall);

  driver2.stop();
  driver2.join();

  local::shutdown();
}