TEST(SorterTest, Update)
{
  DRFSorter sorter;

  sorter.add("a");
  sorter.add("b");

  sorter.add(Resources::parse("cpus:10;mem:10;disk:10").get());

  sorter.allocated("a", Resources::parse("cpus:10;mem:10;disk:10").get());

  // Construct an offer operation.
  Resource volume = Resources::parse("disk", "5", "*").get();
  volume.mutable_disk()->mutable_persistence()->set_id("ID");
  volume.mutable_disk()->mutable_volume()->set_container_path("data");

  Offer::Operation create;
  create.set_type(Offer::Operation::CREATE);
  create.mutable_create()->add_volumes()->CopyFrom(volume);

  // Compute the updated allocation.
  Resources allocation = sorter.allocation("a");
  Try<Resources> newAllocation = allocation.apply(create);
  ASSERT_SOME(newAllocation);

  // Update the resources for the client.
  sorter.update("a", allocation, newAllocation.get());

  EXPECT_EQ(newAllocation.get(), sorter.allocation("a"));
}

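// The sketch below is not part of the original suite; the test name and
// the `expected` value are illustrative assumptions. It isolates what the
// CREATE operation in the test above is expected to do to the allocation:
// `Resources::apply` converts 5 of the 10 plain disk into a persistent
// volume, so the scalar totals are unchanged.
TEST(SorterTest, ApplyCreateSketch)
{
  Resource volume = Resources::parse("disk", "5", "*").get();
  volume.mutable_disk()->mutable_persistence()->set_id("ID");
  volume.mutable_disk()->mutable_volume()->set_container_path("data");

  Offer::Operation create;
  create.set_type(Offer::Operation::CREATE);
  create.mutable_create()->add_volumes()->CopyFrom(volume);

  Resources before = Resources::parse("cpus:10;mem:10;disk:10").get();

  Try<Resources> after = before.apply(create);
  ASSERT_SOME(after);

  // 5 disk remain plain and 5 disk now carry the persistence and volume
  // metadata.
  EXPECT_EQ(
      Resources::parse("cpus:10;mem:10;disk:5").get() + volume,
      after.get());
}
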
// We aggregate resources from multiple slaves into the sorter.
// Since non-scalar resources don't aggregate well across slaves,
// we need to keep track of the SlaveIDs of the resources. This
// tests that no resources vanish in the process of aggregation
// by inspecting the result of 'allocation'.
TEST(SorterTest, MultipleSlaves)
{
  DRFSorter sorter;

  SlaveID slaveA;
  slaveA.set_value("agentA");

  SlaveID slaveB;
  slaveB.set_value("agentB");

  sorter.add("framework");

  Resources slaveResources =
    Resources::parse("cpus:2;mem:512;ports:[31000-32000]").get();

  sorter.add(slaveA, slaveResources);
  sorter.add(slaveB, slaveResources);

  sorter.allocated("framework", slaveA, slaveResources);
  sorter.allocated("framework", slaveB, slaveResources);

  EXPECT_EQ(2u, sorter.allocation("framework").size());
  EXPECT_EQ(slaveResources, sorter.allocation("framework", slaveA));
  EXPECT_EQ(slaveResources, sorter.allocation("framework", slaveB));
}

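// NOTE (illustrative, not from the original file): the reason the sorter
// keys allocations by SlaveID is that non-scalar resources such as
// `ports:[31000-32000]` are identical on both agents; merging them into a
// single Resources object would collapse the two copies into one range and
// under-count the framework's allocation. Tracking per-agent allocations,
// as the test above checks, preserves both copies.
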
TEST(SorterTest, UpdateAllocation)
{
  DRFSorter sorter;

  SlaveID slaveId;
  slaveId.set_value("agentId");

  sorter.add("a");
  sorter.add("b");

  sorter.add(slaveId, Resources::parse("cpus:10;mem:10;disk:10").get());

  sorter.allocated(
      "a", slaveId, Resources::parse("cpus:10;mem:10;disk:10").get());

  // Construct an offer operation.
  Resource volume = Resources::parse("disk", "5", "*").get();
  volume.mutable_disk()->mutable_persistence()->set_id("ID");
  volume.mutable_disk()->mutable_volume()->set_container_path("data");

  // Compute the updated allocation.
  Resources oldAllocation = sorter.allocation("a", slaveId);
  Try<Resources> newAllocation = oldAllocation.apply(CREATE(volume));
  ASSERT_SOME(newAllocation);

  // Update the resources for the client.
  sorter.update("a", slaveId, oldAllocation, newAllocation.get());

  hashmap<SlaveID, Resources> allocation = sorter.allocation("a");
  EXPECT_EQ(1u, allocation.size());
  EXPECT_EQ(newAllocation.get(), allocation[slaveId]);
  EXPECT_EQ(newAllocation.get(), sorter.allocation("a", slaveId));
}

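// NOTE: `CREATE(volume)` above is assumed to be the test helper that wraps
// the volume in an `Offer::Operation` of type CREATE, i.e. the same
// operation that the first test constructs by hand.
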
// We aggregate resources from multiple slaves into the sorter. Since
// non-scalar resources don't aggregate well across slaves, we need to
// keep track of the SlaveIDs of the resources. This tests that no
// resources vanish in the process of aggregation by performing updates
// that convert each agent's plain disk resources into persistent volumes.
TEST(SorterTest, MultipleSlavesUpdateAllocation)
{
  DRFSorter sorter;

  SlaveID slaveA;
  slaveA.set_value("agentA");

  SlaveID slaveB;
  slaveB.set_value("agentB");

  sorter.add("framework");

  Resources slaveResources =
    Resources::parse("cpus:2;mem:512;disk:10;ports:[31000-32000]").get();

  sorter.add(slaveA, slaveResources);
  sorter.add(slaveB, slaveResources);

  sorter.allocated("framework", slaveA, slaveResources);
  sorter.allocated("framework", slaveB, slaveResources);

  // Construct an offer operation.
  Resource volume = Resources::parse("disk", "5", "*").get();
  volume.mutable_disk()->mutable_persistence()->set_id("ID");
  volume.mutable_disk()->mutable_volume()->set_container_path("data");

  // Compute the updated allocation.
  Try<Resources> newAllocation = slaveResources.apply(CREATE(volume));
  ASSERT_SOME(newAllocation);

  // Update the resources for the client.
  sorter.update("framework", slaveA, slaveResources, newAllocation.get());
  sorter.update("framework", slaveB, slaveResources, newAllocation.get());

  EXPECT_EQ(2u, sorter.allocation("framework").size());
  EXPECT_EQ(newAllocation.get(), sorter.allocation("framework", slaveA));
  EXPECT_EQ(newAllocation.get(), sorter.allocation("framework", slaveB));
}

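// NOTE (illustrative): the update is applied once per agent, so after the
// two `update()` calls each per-agent allocation carries its own copy of
// the persistent volume, and the aggregate view returned by
// `allocation("framework")` still has exactly two entries.
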
// This test verifies that revocable resources are properly accounted
// for in the DRF sorter.
TEST(SorterTest, RevocableResources)
{
  DRFSorter sorter;

  SlaveID slaveId;
  slaveId.set_value("agentId");

  sorter.add("a");
  sorter.add("b");

  // Create a total resource pool of 10 revocable cpus, 10 cpus and
  // 100 MB mem.
  Resource revocable = Resources::parse("cpus", "10", "*").get();
  revocable.mutable_revocable();
  Resources total = Resources::parse("cpus:10;mem:100").get() + revocable;

  sorter.add(slaveId, total);

  // Dominant share of "a" is 0.1 (cpus).
  Resources a = Resources::parse("cpus:2;mem:1").get();
  sorter.allocated("a", slaveId, a);

  // Dominant share of "b" is 0.5 (cpus).
  revocable = Resources::parse("cpus", "9", "*").get();
  revocable.mutable_revocable();
  Resources b = Resources::parse("cpus:1;mem:1").get() + revocable;
  sorter.allocated("b", slaveId, b);

  // Check that the allocations are correct.
  ASSERT_EQ(a, sorter.allocation("a", slaveId));
  ASSERT_EQ(b, sorter.allocation("b", slaveId));

  // Check that the sort is correct.
  list<string> sorted = sorter.sort();
  ASSERT_EQ(2u, sorted.size());
  EXPECT_EQ("a", sorted.front());
  EXPECT_EQ("b", sorted.back());
}
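
// NOTE (illustrative, assuming the DRF sorter counts revocable and
// non-revocable scalars under the same resource name): the shares in the
// test above come out as follows.
//
//   Total pool:  cpus = 10 + 10 (revocable) = 20,  mem = 100
//
//   "a": cpus 2/20 = 0.10,      mem 1/100 = 0.01  -> dominant share 0.10
//   "b": cpus (1 + 9)/20 = 0.50, mem 1/100 = 0.01  -> dominant share 0.50
//
// Hence "a" sorts ahead of "b".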