// We aggregate resources from multiple slaves into the sorter. Since
// non-scalar resources don't aggregate well across slaves, we need to
// keep track of the SlaveIDs of the resources. This tests that no
// resources vanish in the process of aggregation by inspecting the
// result of 'allocation'.
TEST(SorterTest, MultipleSlaves)
{
  DRFSorter sorter;

  SlaveID slaveA;
  slaveA.set_value("agentA");

  SlaveID slaveB;
  slaveB.set_value("agentB");

  sorter.add("framework");

  Resources slaveResources =
    Resources::parse("cpus:2;mem:512;ports:[31000-32000]").get();

  sorter.add(slaveA, slaveResources);
  sorter.add(slaveB, slaveResources);

  sorter.allocated("framework", slaveA, slaveResources);
  sorter.allocated("framework", slaveB, slaveResources);

  EXPECT_EQ(2u, sorter.allocation("framework").size());
  EXPECT_EQ(slaveResources, sorter.allocation("framework", slaveA));
  EXPECT_EQ(slaveResources, sorter.allocation("framework", slaveB));
}
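// A minimal sketch (under a hypothetical test name, separate from the
// test above) of why non-scalar resources need per-agent tracking:
// 'Resources' range arithmetic coalesces identical port ranges rather
// than accumulating them, so without SlaveIDs the second agent's ports
// would vanish into the first's.
TEST(SorterTest, NonScalarAggregationSketch)
{
  Resources ports = Resources::parse("ports:[31000-32000]").get();

  // Ranges coalesce; nothing accumulates.
  EXPECT_EQ(ports, ports + ports);
}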
// Some resources are split across multiple resource objects (e.g.
// persistent volumes). This test ensures that the shares for these
// are accounted correctly.
TEST(SorterTest, SplitResourceShares)
{
  DRFSorter sorter;

  SlaveID slaveId;
  slaveId.set_value("agentId");

  sorter.add("a");
  sorter.add("b");

  Resource disk1 = Resources::parse("disk", "5", "*").get();
  disk1.mutable_disk()->mutable_persistence()->set_id("ID1");
  disk1.mutable_disk()->mutable_volume()->set_container_path("data");

  Resource disk2 = Resources::parse("disk", "5", "*").get();
  disk2.mutable_disk()->mutable_persistence()->set_id("ID2");
  disk2.mutable_disk()->mutable_volume()->set_container_path("data");

  sorter.add(
      slaveId,
      Resources::parse("cpus:100;mem:100;disk:95").get() + disk1 + disk2);

  // Now, allocate resources to "a" and "b". Note that "b" will have
  // more disk if the shares are accounted correctly!
  sorter.allocated(
      "a", slaveId, Resources::parse("cpus:9;mem:9;disk:9").get());

  sorter.allocated(
      "b", slaveId, Resources::parse("cpus:9;mem:9").get() + disk1 + disk2);

  EXPECT_EQ(list<string>({"a", "b"}), sorter.sort());
}
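// A worked check of the expectation above (author's arithmetic, not
// taken from the test): total disk is 95 + 5 + 5 = 105. "a" has shares
// cpus 9/100 = .09, mem 9/100 = .09, disk 9/105 ~= .086, so its
// dominant share is .09. "b" has disk (5 + 5)/105 ~= .095, which only
// exceeds its .09 cpus/mem shares if both split disk objects are
// counted -- hence "a" must sort first.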
TEST(SorterTest, UpdateAllocation)
{
  DRFSorter sorter;

  SlaveID slaveId;
  slaveId.set_value("agentId");

  sorter.add("a");
  sorter.add("b");

  sorter.add(slaveId, Resources::parse("cpus:10;mem:10;disk:10").get());

  sorter.allocated(
      "a", slaveId, Resources::parse("cpus:10;mem:10;disk:10").get());

  // Construct an offer operation.
  Resource volume = Resources::parse("disk", "5", "*").get();
  volume.mutable_disk()->mutable_persistence()->set_id("ID");
  volume.mutable_disk()->mutable_volume()->set_container_path("data");

  // Compute the updated allocation.
  Resources oldAllocation = sorter.allocation("a", slaveId);
  Try<Resources> newAllocation = oldAllocation.apply(CREATE(volume));
  ASSERT_SOME(newAllocation);

  // Update the resources for the client.
  sorter.update("a", slaveId, oldAllocation, newAllocation.get());

  hashmap<SlaveID, Resources> allocation = sorter.allocation("a");
  EXPECT_EQ(1u, allocation.size());
  EXPECT_EQ(newAllocation.get(), allocation[slaveId]);
  EXPECT_EQ(newAllocation.get(), sorter.allocation("a", slaveId));
}
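// For reference (a sketch of the expected effect, assuming the usual
// CREATE semantics rather than anything sorter-specific): applying
// CREATE(volume) converts 5 of the 10 units of plain disk into the
// persistent volume, so the updated allocation is "cpus:10;mem:10;
// disk:5" plus the 5-unit volume -- the disk total is unchanged, only
// its shape differs.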
TEST(SorterTest, WDRFSorterUpdateWeight)
{
  DRFSorter sorter;

  SlaveID slaveId;
  slaveId.set_value("agentId");

  Resources totalResources = Resources::parse("cpus:100;mem:100").get();
  sorter.add(slaveId, totalResources);

  sorter.add("a");
  sorter.allocated("a", slaveId, Resources::parse("cpus:5;mem:5").get());

  sorter.add("b");
  sorter.allocated("b", slaveId, Resources::parse("cpus:6;mem:6").get());

  // shares: a = .05, b = .06
  EXPECT_EQ(list<string>({"a", "b"}), sorter.sort());

  // Increase b's weight to flip the sort order.
  sorter.update("b", 2);

  // shares: a = .05, b = .03
  EXPECT_EQ(list<string>({"b", "a"}), sorter.sort());
}
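// The share computation behind the flip above (standard weighted DRF,
// summarized here rather than quoted from the sorter):
//
//   share(client) = max_r(allocated_r / total_r) / weight(client)
//
// Doubling b's weight halves its dominant share: (6/100) / 2 = .03,
// which drops below a's .05.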
SlaveID devolve(const v1::AgentID& agentId)
{
  // NOTE: Not using 'devolve<v1::AgentID, SlaveID>(agentId)' since
  // this will be a common 'devolve' call and we wanted to speed up
  // performance.
  SlaveID id;
  id.set_value(agentId.value());
  return id;
}
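// A hypothetical usage sketch (the literal value is invented for
// illustration):
//
//   v1::AgentID agentId;
//   agentId.set_value("agent-1");
//
//   SlaveID slaveId = devolve(agentId);  // slaveId.value() == "agent-1"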
// Ensures that the driver can handle the FAILURE event.
TEST_F(SchedulerDriverEventTest, Failure)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<Message> frameworkRegisteredMessage =
    FUTURE_MESSAGE(Eq(FrameworkRegisteredMessage().GetTypeName()), _, _);

  driver.start();

  AWAIT_READY(frameworkRegisteredMessage);
  UPID frameworkPid = frameworkRegisteredMessage.get().to;

  // Send a failure for an executor, which should trigger the
  // 'executorLost' callback.
  SlaveID slaveId;
  slaveId.set_value("S");

  ExecutorID executorId = DEFAULT_EXECUTOR_ID;

  const int32_t status = 255;

  Event event;
  event.set_type(Event::FAILURE);
  event.mutable_failure()->mutable_slave_id()->CopyFrom(slaveId);
  event.mutable_failure()->mutable_executor_id()->CopyFrom(executorId);
  event.mutable_failure()->set_status(status);

  Future<Nothing> executorLost;
  EXPECT_CALL(sched, executorLost(&driver, executorId, slaveId, status))
    .WillOnce(FutureSatisfy(&executorLost));

  process::post(master.get()->pid, frameworkPid, event);

  AWAIT_READY(executorLost);

  // Now, post a failure for a slave and expect a 'slaveLost'.
  event.mutable_failure()->clear_executor_id();

  Future<Nothing> slaveLost;
  EXPECT_CALL(sched, slaveLost(&driver, slaveId))
    .WillOnce(FutureSatisfy(&slaveLost));

  process::post(master.get()->pid, frameworkPid, event);

  AWAIT_READY(slaveLost);

  driver.stop();
  driver.join();
}
// Similar to the above 'UpdateTotal' test, but tests the scenario
// when there are multiple slaves.
TEST(SorterTest, MultipleSlavesUpdateTotal)
{
  DRFSorter sorter;

  SlaveID slaveA;
  slaveA.set_value("agentA");

  SlaveID slaveB;
  slaveB.set_value("agentB");

  sorter.add("a");
  sorter.add("b");

  sorter.add(slaveA, Resources::parse("cpus:5;mem:50").get());
  sorter.add(slaveB, Resources::parse("cpus:5;mem:50").get());

  // Dominant share of "a" is 0.2 (cpus).
  sorter.allocated(
      "a", slaveA, Resources::parse("cpus:2;mem:1").get());

  // Dominant share of "b" is 0.1 (cpus).
  sorter.allocated(
      "b", slaveB, Resources::parse("cpus:1;mem:3").get());

  list<string> sorted = sorter.sort();
  ASSERT_EQ(2u, sorted.size());
  EXPECT_EQ("b", sorted.front());
  EXPECT_EQ("a", sorted.back());

  // Update the total resources of slaveA by removing the previous
  // total and adding the new total.
  sorter.remove(slaveA, Resources::parse("cpus:5;mem:50").get());
  sorter.add(slaveA, Resources::parse("cpus:95;mem:50").get());

  // Now the dominant share of "a" is 0.02 (cpus) and "b" is 0.03
  // (mem), which should change the sort order.
  sorted = sorter.sort();
  ASSERT_EQ(2u, sorted.size());
  EXPECT_EQ("a", sorted.front());
  EXPECT_EQ("b", sorted.back());
}
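// Note on the arithmetic above (author's summary, consistent with the
// inline comments): shares are computed against the aggregate pool
// across both agents. Before the update the pool is cpus:10;mem:100,
// so "a" = 2/10 = 0.2 and "b" = 1/10 = 0.1; afterwards it is
// cpus:100;mem:100, so "a" = 2/100 = 0.02 (cpus) and "b" = 3/100 =
// 0.03 (mem).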
TEST(TypeUtilsTest, TaskGroupEquality)
{
  SlaveID slaveId;
  slaveId.set_value("default-agent");

  Resources resources = Resources::parse("cpus:0.1;mem:32;disk:32").get();

  TaskInfo task1 = createTask(slaveId, resources, "default-command1");
  TaskInfo task2 = createTask(slaveId, resources, "default-command2");

  // Compare task groups with identical tasks.
  {
    TaskGroupInfo taskGroup1;
    taskGroup1.add_tasks()->CopyFrom(task1);
    taskGroup1.add_tasks()->CopyFrom(task2);

    TaskGroupInfo taskGroup2;
    taskGroup2.add_tasks()->CopyFrom(task1);
    taskGroup2.add_tasks()->CopyFrom(task2);

    EXPECT_EQ(taskGroup1, taskGroup2);
  }

  // Compare task groups with identical tasks but ordered differently.
  {
    TaskGroupInfo taskGroup1;
    taskGroup1.add_tasks()->CopyFrom(task1);
    taskGroup1.add_tasks()->CopyFrom(task2);

    TaskGroupInfo taskGroup2;
    taskGroup2.add_tasks()->CopyFrom(task2);
    taskGroup2.add_tasks()->CopyFrom(task1);

    EXPECT_EQ(taskGroup1, taskGroup2);
  }

  // Compare task groups with unequal tasks.
  {
    TaskGroupInfo taskGroup1;
    taskGroup1.add_tasks()->CopyFrom(task1);
    taskGroup1.add_tasks()->CopyFrom(task2);

    TaskGroupInfo taskGroup2;
    taskGroup2.add_tasks()->CopyFrom(task1);

    EXPECT_FALSE(taskGroup1 == taskGroup2);
  }
}
// We aggregate resources from multiple slaves into the sorter. Since
// non-scalar resources don't aggregate well across slaves, we need to
// keep track of the SlaveIDs of the resources. This tests that no
// resources vanish in the process of aggregation by updating the
// allocation on each slave with an applied offer operation (creating
// a persistent volume).
TEST(SorterTest, MultipleSlavesUpdateAllocation)
{
  DRFSorter sorter;

  SlaveID slaveA;
  slaveA.set_value("agentA");

  SlaveID slaveB;
  slaveB.set_value("agentB");

  sorter.add("framework");

  Resources slaveResources =
    Resources::parse("cpus:2;mem:512;disk:10;ports:[31000-32000]").get();

  sorter.add(slaveA, slaveResources);
  sorter.add(slaveB, slaveResources);

  sorter.allocated("framework", slaveA, slaveResources);
  sorter.allocated("framework", slaveB, slaveResources);

  // Construct an offer operation.
  Resource volume = Resources::parse("disk", "5", "*").get();
  volume.mutable_disk()->mutable_persistence()->set_id("ID");
  volume.mutable_disk()->mutable_volume()->set_container_path("data");

  // Compute the updated allocation.
  Try<Resources> newAllocation = slaveResources.apply(CREATE(volume));
  ASSERT_SOME(newAllocation);

  // Update the resources for the client.
  sorter.update("framework", slaveA, slaveResources, newAllocation.get());
  sorter.update("framework", slaveB, slaveResources, newAllocation.get());

  EXPECT_EQ(2u, sorter.allocation("framework").size());
  EXPECT_EQ(newAllocation.get(), sorter.allocation("framework", slaveA));
  EXPECT_EQ(newAllocation.get(), sorter.allocation("framework", slaveB));
}
TEST(SorterTest, WDRFSorter)
{
  DRFSorter sorter;

  SlaveID slaveId;
  slaveId.set_value("agentId");

  sorter.add(slaveId, Resources::parse("cpus:100;mem:100").get());

  sorter.add("a");
  sorter.allocated("a", slaveId, Resources::parse("cpus:5;mem:5").get());

  sorter.add("b", 2);
  sorter.allocated("b", slaveId, Resources::parse("cpus:6;mem:6").get());

  // shares: a = .05, b = .03
  EXPECT_EQ(list<string>({"b", "a"}), sorter.sort());

  sorter.add("c");
  sorter.allocated("c", slaveId, Resources::parse("cpus:4;mem:4").get());

  // shares: a = .05, b = .03, c = .04
  EXPECT_EQ(list<string>({"b", "c", "a"}), sorter.sort());

  sorter.add("d", 10);
  sorter.allocated("d", slaveId, Resources::parse("cpus:10;mem:20").get());

  // shares: a = .05, b = .03, c = .04, d = .02
  EXPECT_EQ(list<string>({"d", "b", "c", "a"}), sorter.sort());

  sorter.remove("b");

  EXPECT_EQ(list<string>({"d", "c", "a"}), sorter.sort());

  sorter.allocated("d", slaveId, Resources::parse("cpus:10;mem:25").get());

  // shares: a = .05, c = .04, d = .045
  EXPECT_EQ(list<string>({"c", "d", "a"}), sorter.sort());

  sorter.add("e", .1);
  sorter.allocated("e", slaveId, Resources::parse("cpus:1;mem:1").get());

  // shares: a = .05, c = .04, d = .045, e = .1
  EXPECT_EQ(list<string>({"c", "d", "a", "e"}), sorter.sort());

  sorter.remove("a");

  EXPECT_EQ(list<string>({"c", "d", "e"}), sorter.sort());
}
// This test measures the performance of the `master::call::GetState`
// v1 API (and also measures the master's v0 '/state' endpoint as the
// baseline). We set up a lot of master state from artificial agents,
// similar to the master failover benchmark.
TEST_P(MasterStateQuery_BENCHMARK_Test, GetState)
{
  size_t agentCount;
  size_t frameworksPerAgent;
  size_t tasksPerFramework;
  size_t completedFrameworksPerAgent;
  size_t tasksPerCompletedFramework;

  tie(agentCount,
      frameworksPerAgent,
      tasksPerFramework,
      completedFrameworksPerAgent,
      tasksPerCompletedFramework) = GetParam();

  // Disable authentication to avoid the overhead, since we don't care
  // about it in this test.
  master::Flags masterFlags = CreateMasterFlags();
  masterFlags.authenticate_agents = false;

  Try<Owned<cluster::Master>> master = StartMaster(masterFlags);
  ASSERT_SOME(master);

  vector<Owned<TestSlave>> slaves;

  for (size_t i = 0; i < agentCount; i++) {
    SlaveID slaveId;
    slaveId.set_value("agent" + stringify(i));

    slaves.push_back(Owned<TestSlave>(new TestSlave(
        master.get()->pid,
        slaveId,
        frameworksPerAgent,
        tasksPerFramework,
        completedFrameworksPerAgent,
        tasksPerCompletedFramework)));
  }

  cout << "Test setup: " << agentCount << " agents with a total of "
       << frameworksPerAgent * tasksPerFramework * agentCount
       << " running tasks and "
       << completedFrameworksPerAgent * tasksPerCompletedFramework * agentCount
       << " completed tasks" << endl;

  list<Future<Nothing>> reregistered;

  foreach (const Owned<TestSlave>& slave, slaves) {
    reregistered.push_back(slave->reregister());
  }
// Ensures that the driver can handle the FAILURE event.
TEST_F(SchedulerDriverEventTest, Failure)
{
  Try<PID<Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<Message> frameworkRegisteredMessage =
    FUTURE_MESSAGE(Eq(FrameworkRegisteredMessage().GetTypeName()), _, _);

  driver.start();

  AWAIT_READY(frameworkRegisteredMessage);
  UPID frameworkPid = frameworkRegisteredMessage.get().to;

  // Send a failure for an executor; this should be dropped to match
  // the existing behavior of the scheduler driver.
  SlaveID slaveId;
  slaveId.set_value("S");

  Event event;
  event.set_type(Event::FAILURE);
  event.mutable_failure()->mutable_slave_id()->CopyFrom(slaveId);
  event.mutable_failure()->mutable_executor_id()->set_value("E");

  process::post(master.get(), frameworkPid, event);

  // Now, post a failure for a slave and expect a 'slaveLost'.
  event.mutable_failure()->clear_executor_id();

  Future<Nothing> slaveLost;
  EXPECT_CALL(sched, slaveLost(&driver, slaveId))
    .WillOnce(FutureSatisfy(&slaveLost));

  process::post(master.get(), frameworkPid, event);

  AWAIT_READY(slaveLost);
}
// This test verifies that revocable resources are properly accounted
// for in the DRF sorter.
TEST(SorterTest, RevocableResources)
{
  DRFSorter sorter;

  SlaveID slaveId;
  slaveId.set_value("agentId");

  sorter.add("a");
  sorter.add("b");

  // Create a total resource pool of 10 revocable cpus, 10 regular
  // cpus, and 100 MB mem.
  Resource revocable = Resources::parse("cpus", "10", "*").get();
  revocable.mutable_revocable(); // Marks the resource as revocable.
  Resources total = Resources::parse("cpus:10;mem:100").get() + revocable;

  sorter.add(slaveId, total);

  // Dominant share of "a" is 0.1 (cpus).
  Resources a = Resources::parse("cpus:2;mem:1").get();
  sorter.allocated("a", slaveId, a);

  // Dominant share of "b" is 0.5 (cpus).
  revocable = Resources::parse("cpus", "9", "*").get();
  revocable.mutable_revocable();
  Resources b = Resources::parse("cpus:1;mem:1").get() + revocable;
  sorter.allocated("b", slaveId, b);

  // Check that the allocations are correct.
  ASSERT_EQ(a, sorter.allocation("a", slaveId));
  ASSERT_EQ(b, sorter.allocation("b", slaveId));

  // Check that the sort is correct.
  list<string> sorted = sorter.sort();
  ASSERT_EQ(2u, sorted.size());
  EXPECT_EQ("a", sorted.front());
  EXPECT_EQ("b", sorted.back());
}
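// Share arithmetic behind the expectation (author's summary): for
// dominant-share purposes, revocable and non-revocable cpus pool
// together into 20 cpus total. "a" holds 2/20 = 0.1 and "b" holds
// (1 + 9)/20 = 0.5, matching the inline comments above.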
// This test verifies that when the total pool of resources is updated
// the sorting order of clients reflects the new total.
TEST(SorterTest, UpdateTotal)
{
  DRFSorter sorter;

  SlaveID slaveId;
  slaveId.set_value("agentId");

  sorter.add("a");
  sorter.add("b");

  sorter.add(slaveId, Resources::parse("cpus:10;mem:100").get());

  // Dominant share of "a" is 0.2 (cpus).
  sorter.allocated(
      "a", slaveId, Resources::parse("cpus:2;mem:1").get());

  // Dominant share of "b" is 0.1 (cpus).
  sorter.allocated(
      "b", slaveId, Resources::parse("cpus:1;mem:2").get());

  list<string> sorted = sorter.sort();
  ASSERT_EQ(2u, sorted.size());
  EXPECT_EQ("b", sorted.front());
  EXPECT_EQ("a", sorted.back());

  // Update the total resources by removing the previous total and
  // adding back the new total.
  sorter.remove(slaveId, Resources::parse("cpus:10;mem:100").get());
  sorter.add(slaveId, Resources::parse("cpus:100;mem:10").get());

  // Now the dominant share of "a" is 0.1 (mem) and "b" is 0.2 (mem),
  // which should change the sort order.
  sorted = sorter.sort();
  ASSERT_EQ(2u, sorted.size());
  EXPECT_EQ("a", sorted.front());
  EXPECT_EQ("b", sorted.back());
}
TEST_F(RegistrarTest, recover)
{
  Registrar registrar(state);

  SlaveInfo slave;
  slave.set_hostname("localhost");

  SlaveID id;
  id.set_value("1");
  slave.mutable_id()->CopyFrom(id);

  // Operations preceding recovery will fail.
  AWAIT_EXPECT_FAILED(registrar.admit(slave));
  AWAIT_EXPECT_FAILED(registrar.readmit(slave));
  AWAIT_EXPECT_FAILED(registrar.remove(slave));

  MasterInfo info;
  info.set_id("foobar");
  info.set_ip(0);
  info.set_port(5050);
  info.set_pid("0:5050");

  Future<Registry> registry = registrar.recover(info);

  // Before waiting for the recovery to complete, invoke some
  // operations to ensure they do not fail.
  Future<bool> admit = registrar.admit(slave);
  Future<bool> readmit = registrar.readmit(slave);
  Future<bool> remove = registrar.remove(slave);

  AWAIT_READY(registry);
  EXPECT_EQ(info, registry.get().master().info());

  AWAIT_EQ(true, admit);
  AWAIT_EQ(true, readmit);
  AWAIT_EQ(true, remove);
}
// Ensures the scheduler driver can handle the UPDATE event.
TEST_F(SchedulerDriverEventTest, Update)
{
  Try<PID<Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<Message> frameworkRegisteredMessage =
    FUTURE_MESSAGE(Eq(FrameworkRegisteredMessage().GetTypeName()), _, _);

  driver.start();

  AWAIT_READY(frameworkRegisteredMessage);
  UPID frameworkPid = frameworkRegisteredMessage.get().to;

  FrameworkRegisteredMessage message;
  ASSERT_TRUE(message.ParseFromString(frameworkRegisteredMessage.get().body));

  FrameworkID frameworkId = message.framework_id();

  SlaveID slaveId;
  slaveId.set_value("S");

  TaskID taskId;
  taskId.set_value("T");

  ExecutorID executorId;
  executorId.set_value("E");

  // Generate an update that needs no acknowledgement.
  Event event;
  event.set_type(Event::UPDATE);
  event.mutable_update()->mutable_status()->CopyFrom(
      protobuf::createStatusUpdate(
          frameworkId,
          slaveId,
          taskId,
          TASK_RUNNING,
          TaskStatus::SOURCE_MASTER,
          None(),
          "message",
          None(),
          executorId).status());

  Future<Nothing> statusUpdate;
  Future<Nothing> statusUpdate2;
  EXPECT_CALL(sched, statusUpdate(&driver, event.update().status()))
    .WillOnce(FutureSatisfy(&statusUpdate))
    .WillOnce(FutureSatisfy(&statusUpdate2));

  process::post(master.get(), frameworkPid, event);

  AWAIT_READY(statusUpdate);

  // Generate an update that requires acknowledgement.
  event.mutable_update()->mutable_status()->set_uuid(UUID::random().toBytes());

  Future<mesos::scheduler::Call> acknowledgement = DROP_CALL(
      mesos::scheduler::Call(), mesos::scheduler::Call::ACKNOWLEDGE, _, _);

  process::post(master.get(), frameworkPid, event);

  AWAIT_READY(statusUpdate2);
  AWAIT_READY(acknowledgement);
}
TEST(SorterTest, DRFSorter)
{
  DRFSorter sorter;

  SlaveID slaveId;
  slaveId.set_value("agentId");

  Resources totalResources = Resources::parse("cpus:100;mem:100").get();
  sorter.add(slaveId, totalResources);

  sorter.add("a");
  Resources aResources = Resources::parse("cpus:5;mem:5").get();
  sorter.allocated("a", slaveId, aResources);

  Resources bResources = Resources::parse("cpus:6;mem:6").get();
  sorter.add("b");
  sorter.allocated("b", slaveId, bResources);

  // shares: a = .05, b = .06
  EXPECT_EQ(list<string>({"a", "b"}), sorter.sort());

  Resources cResources = Resources::parse("cpus:1;mem:1").get();
  sorter.add("c");
  sorter.allocated("c", slaveId, cResources);

  Resources dResources = Resources::parse("cpus:3;mem:1").get();
  sorter.add("d");
  sorter.allocated("d", slaveId, dResources);

  // shares: a = .05, b = .06, c = .01, d = .03
  EXPECT_EQ(list<string>({"c", "d", "a", "b"}), sorter.sort());

  sorter.remove("a");

  Resources bUnallocated = Resources::parse("cpus:4;mem:4").get();
  sorter.unallocated("b", slaveId, bUnallocated);

  // shares: b = .02, c = .01, d = .03
  EXPECT_EQ(list<string>({"c", "b", "d"}), sorter.sort());

  Resources eResources = Resources::parse("cpus:1;mem:5").get();
  sorter.add("e");
  sorter.allocated("e", slaveId, eResources);

  Resources removedResources = Resources::parse("cpus:50;mem:0").get();
  sorter.remove(slaveId, removedResources);
  // total resources is now cpus = 50, mem = 100

  // shares: b = .04, c = .02, d = .06, e = .05
  EXPECT_EQ(list<string>({"c", "b", "e", "d"}), sorter.sort());

  Resources addedResources = Resources::parse("cpus:0;mem:100").get();
  sorter.add(slaveId, addedResources);
  // total resources is now cpus = 50, mem = 200

  Resources fResources = Resources::parse("cpus:5;mem:1").get();
  sorter.add("f");
  sorter.allocated("f", slaveId, fResources);

  Resources cResources2 = Resources::parse("cpus:0;mem:15").get();
  sorter.allocated("c", slaveId, cResources2);

  // shares: b = .04, c = .08, d = .06, e = .025, f = .1
  EXPECT_EQ(list<string>({"e", "b", "d", "c", "f"}), sorter.sort());

  EXPECT_TRUE(sorter.contains("b"));
  EXPECT_FALSE(sorter.contains("a"));

  EXPECT_EQ(5, sorter.count());

  sorter.deactivate("d");
  EXPECT_TRUE(sorter.contains("d"));
  EXPECT_EQ(list<string>({"e", "b", "c", "f"}), sorter.sort());
  EXPECT_EQ(5, sorter.count());

  sorter.activate("d");
  EXPECT_EQ(list<string>({"e", "b", "d", "c", "f"}), sorter.sort());
}
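// A minimal sketch (hypothetical helper, not part of the sorter API)
// of the dominant-share computation that all of the expectations above
// rely on, specialized to a two-resource pool. Requires <algorithm>.
static double dominantShare(
    double allocatedCpus,
    double allocatedMem,
    double totalCpus,
    double totalMem)
{
  // DRF: a client's share is the maximum of its per-resource shares.
  return std::max(allocatedCpus / totalCpus, allocatedMem / totalMem);
}

// For example, after the final allocations above, "c" holds cpus:1 and
// mem:16 against a cpus:50;mem:200 pool:
//   dominantShare(1, 16, 50, 200) == std::max(0.02, 0.08) == 0.08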
template <class T>
void CephSchedulerAgent<T>::resourceOffers(
    T* driver,
    const vector<Offer>& offers)
{
  LOG(INFO) << "Received " << offers.size() << " offers!";

  TaskType taskType;
  int token;
  int isInitialMonNode = 0;

  // Handle waiting OSD tasks: give them an osdID so they can start
  // their docker containers.
  handleWaitingOSDTasks(driver);

  Phase currentPhase = stateMachine->getCurrentPhase();

  // Try to start a new node for each offer.
  foreach (const Offer& offer, offers) {
    // Check that the offer carries the correct role.
    LOG(INFO) << "Hostname: " << offer.hostname();
    if (!hasRole(offer, config->role)) {
      LOG(INFO) << "Decline this offer. Host " << offer.hostname()
                << " doesn't have the correct role: " << config->role;
      Filters refuse;
      refuse.set_refuse_seconds(86400.0);
      driver->declineOffer(offer.id(), refuse);
      continue;
    }

    // Reload an existing host config or create a new one.
    stateMachine->addConfig(offer.hostname());
    tryLaunchDiskTask(driver, offer, offer.hostname());

    bool accept = stateMachine->nextMove(taskType, token, offer.hostname());
    if (!accept) {
      LOG(INFO) << "In the " << static_cast<int>(currentPhase)
                << " staging phase, cannot accept offers from "
                << offer.hostname() << " in this phase";
      driver->declineOffer(offer.id());
      continue;
    }

    LOG(INFO) << "Check offer's resources from " << offer.hostname();
    if (offerNotEnoughResources(offer, taskType)) {
      LOG(INFO) << "Not enough resources; declining offer from "
                << offer.hostname();
      driver->declineOffer(offer.id());
      continue;
    }

    if (currentPhase == Phase::WAINTING_REQUEST) {
      accept = fetchPendingRESTfulRequest();
      if (!accept) {
        LOG(INFO) << "No pending OSD RESTful request.";
        driver->declineOffer(offer.id());
        stateMachine->decreaseOSDIndex();
        continue;
      }
    }

    LOG(INFO) << "Accepted offer from " << offer.hostname() << ", launch "
              << static_cast<int>(taskType) << ":" << token << " node";

    if (taskType == TaskType::MON && token == 0) {
      LOG(INFO) << "This is the initial MON";
      isInitialMonNode = 1;
    }

    string taskId;
    string executorId;
    launchNode(
        driver,
        offer,
        taskType,
        token,
        isInitialMonNode,
        taskId,
        executorId);

    stateMachine->addStagingTask(
        taskId,
        executorId,
        taskType,
        offer.hostname(),
        offer.slave_id().value());

    if (!isInitialMonNode && taskType == TaskType::OSD) {
      ceph::TaskState initialMon = stateMachine->getInitialMon();
      const string m = lexical_cast<string>(
          static_cast<int>(MessageToExecutor::REGISTER_OSD));

      ExecutorID eId;
      eId.set_value(initialMon.executorId);

      SlaveID sId;
      sId.set_value(initialMon.slaveId);

      driver->sendFrameworkMessage(eId, sId, m);
    } // end if
  } // end foreach
}
// This test ensures we don't break the API when it comes to JSON
// representation of tasks. Also, we want to ensure that tasks are
// modeled the same way when using 'Task' vs. 'TaskInfo'.
TEST(HTTP, ModelTask)
{
  TaskID taskId;
  taskId.set_value("t");

  SlaveID slaveId;
  slaveId.set_value("s");

  ExecutorID executorId;
  executorId.set_value("t");

  FrameworkID frameworkId;
  frameworkId.set_value("f");

  TaskState state = TASK_RUNNING;

  vector<TaskStatus> statuses;

  TaskStatus status;
  status.mutable_task_id()->CopyFrom(taskId);
  status.set_state(state);
  status.mutable_slave_id()->CopyFrom(slaveId);
  status.mutable_executor_id()->CopyFrom(executorId);
  status.set_timestamp(0.0);

  statuses.push_back(status);

  TaskInfo task;
  task.set_name("task");
  task.mutable_task_id()->CopyFrom(taskId);
  task.mutable_slave_id()->CopyFrom(slaveId);
  task.mutable_command()->set_value("echo hello");

  Task task_ = protobuf::createTask(task, state, frameworkId);
  task_.add_statuses()->CopyFrom(statuses[0]);

  JSON::Value object = model(task, frameworkId, state, statuses);
  JSON::Value object_ = model(task_);

  Try<JSON::Value> expected = JSON::parse(
      "{"
      "  \"executor_id\":\"\","
      "  \"framework_id\":\"f\","
      "  \"id\":\"t\","
      "  \"name\":\"task\","
      "  \"resources\":"
      "  {"
      "    \"cpus\":0,"
      "    \"disk\":0,"
      "    \"mem\":0"
      "  },"
      "  \"slave_id\":\"s\","
      "  \"state\":\"TASK_RUNNING\","
      "  \"statuses\":"
      "  ["
      "    {"
      "      \"state\":\"TASK_RUNNING\","
      "      \"timestamp\":0"
      "    }"
      "  ]"
      "}");

  ASSERT_SOME(expected);

  EXPECT_EQ(expected.get(), object);
  EXPECT_EQ(expected.get(), object_);

  // Ensure both are modeled the same.
  EXPECT_EQ(object, object_);
}
// This test ensures we don't break the API when it comes to JSON
// representation of tasks.
TEST(HTTPTest, ModelTask)
{
  TaskID taskId;
  taskId.set_value("t");

  SlaveID slaveId;
  slaveId.set_value("s");

  ExecutorID executorId;
  executorId.set_value("t");

  FrameworkID frameworkId;
  frameworkId.set_value("f");

  TaskState state = TASK_RUNNING;

  vector<TaskStatus> statuses;

  TaskStatus status;
  status.mutable_task_id()->CopyFrom(taskId);
  status.set_state(state);
  status.mutable_slave_id()->CopyFrom(slaveId);
  status.mutable_executor_id()->CopyFrom(executorId);
  status.set_timestamp(0.0);

  statuses.push_back(status);

  Labels labels;
  labels.add_labels()->CopyFrom(createLabel("ACTION", "port:7987 DENY"));

  Ports ports;
  Port* port = ports.add_ports();
  port->set_number(80);
  port->mutable_labels()->CopyFrom(labels);

  DiscoveryInfo discovery;
  discovery.set_visibility(DiscoveryInfo::CLUSTER);
  discovery.set_name("discover");
  discovery.mutable_ports()->CopyFrom(ports);

  TaskInfo taskInfo;
  taskInfo.set_name("task");
  taskInfo.mutable_task_id()->CopyFrom(taskId);
  taskInfo.mutable_slave_id()->CopyFrom(slaveId);
  taskInfo.mutable_command()->set_value("echo hello");
  taskInfo.mutable_discovery()->CopyFrom(discovery);

  Task task = createTask(taskInfo, state, frameworkId);
  task.add_statuses()->CopyFrom(statuses[0]);

  JSON::Value object = model(task);

  Try<JSON::Value> expected = JSON::parse(
      "{"
      "  \"executor_id\":\"\","
      "  \"framework_id\":\"f\","
      "  \"id\":\"t\","
      "  \"name\":\"task\","
      "  \"resources\":"
      "  {"
      "    \"cpus\":0,"
      "    \"disk\":0,"
      "    \"gpus\":0,"
      "    \"mem\":0"
      "  },"
      "  \"slave_id\":\"s\","
      "  \"state\":\"TASK_RUNNING\","
      "  \"statuses\":"
      "  ["
      "    {"
      "      \"state\":\"TASK_RUNNING\","
      "      \"timestamp\":0"
      "    }"
      "  ],"
      "  \"discovery\":"
      "  {"
      "    \"name\":\"discover\","
      "    \"ports\":"
      "    {"
      "      \"ports\":"
      "      ["
      "        {"
      "          \"number\":80,"
      "          \"labels\":"
      "          {"
      "            \"labels\":"
      "            ["
      "              {"
      "                \"key\":\"ACTION\","
      "                \"value\":\"port:7987 DENY\""
      "              }"
      "            ]"
      "          }"
      "        }"
      "      ]"
      "    },"
      "    \"visibility\":\"CLUSTER\""
      "  }"
      "}");

  ASSERT_SOME(expected);

  EXPECT_EQ(expected.get(), object);
}