// Verifies that listContainers() returns the nested containers whose
// directories exist on disk as well as their (implied) parents.
TEST_F(ProvisionerPathTest, ListProvisionerContainers)
{
  // Layout under the provisioner directory:
  //   parent1/child1, parent1/child2, parent2/child3.
  ContainerID child1;
  ContainerID child2;
  ContainerID child3;
  ContainerID parent1;
  ContainerID parent2;

  child1.set_value("child1");
  child1.mutable_parent()->set_value("parent1");
  child2.set_value("child2");
  child2.mutable_parent()->set_value("parent1");
  child3.set_value("child3");
  child3.mutable_parent()->set_value("parent2");
  parent1.set_value("parent1");
  parent2.set_value("parent2");

  const string provisionerDir = os::getcwd();

  // Only the child container directories are created explicitly;
  // getContainerDir() is expected to nest them under their parents,
  // so the recursive mkdir creates the parent directories as well.
  const string dir1 = paths::getContainerDir(provisionerDir, child1);
  const string dir2 = paths::getContainerDir(provisionerDir, child2);
  const string dir3 = paths::getContainerDir(provisionerDir, child3);

  ASSERT_SOME(os::mkdir(dir1));
  ASSERT_SOME(os::mkdir(dir2));
  ASSERT_SOME(os::mkdir(dir3));

  Try<hashset<ContainerID>> containers =
    paths::listContainers(provisionerDir);

  ASSERT_SOME(containers);

  // Both parents and all three children are listed: 5 in total.
  EXPECT_TRUE(containers->contains(parent1));
  EXPECT_TRUE(containers->contains(parent2));
  EXPECT_TRUE(containers->contains(child1));
  EXPECT_TRUE(containers->contains(child2));
  EXPECT_TRUE(containers->contains(child3));
  EXPECT_EQ(5u, containers->size());
}
// Checks that ldd() resolves the shared library dependencies of
// /bin/sh: the binary itself is not listed, and libc is found.
TEST(Ldd, BinSh)
{
  Try<vector<ldcache::Entry>> cache = ldcache::parse();
  ASSERT_SOME(cache);

  Try<hashset<string>> dependencies = ldd("/bin/sh", cache.get());
  ASSERT_SOME(dependencies);

  // A binary is never its own dependency.
  EXPECT_FALSE(dependencies->contains("/bin/sh"));

  // On most Linux systems, libc would be in libc.so.6, but
  // checking the unversioned prefix is robust and is enough
  // to know that ldd() worked.
  bool foundLibc = false;
  for (const string& dependency : *dependencies) {
    if (strings::startsWith(Path(dependency).basename(), "libc.so")) {
      foundLibc = true;
      break;
    }
  }

  EXPECT_TRUE(foundLibc);
}
// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "copy";
  flags.work_dir = "work_dir";

  Fetcher fetcher;

  Try<Owned<Provisioner>> provisioner1 = Provisioner::create(flags, &fetcher);
  ASSERT_SOME(provisioner1);

  // Create a simple image in the store:
  // <store>
  // |--images
  //    |--<id>
  //       |--manifest
  //       |--rootfs/tmp/test
  JSON::Value manifest = JSON::parse(
      "{"
      " \"acKind\": \"ImageManifest\","
      " \"acVersion\": \"0.6.1\","
      " \"name\": \"foo.com/bar\""
      "}").get();

  // The 'imageId' below has the correct format but it's not computed
  // by hashing the tarball of the image. It's OK here as we assume
  // the images under 'images' have passed such check when they are
  // downloaded and validated.
  string imageId =
    "sha512-e77d96aa0240eedf134b8c90baeaf76dca8e78691836301d7498c84020446042e"
    "797b296d6ab296e0954c2626bfb264322ebeb8f447dac4fac6511ea06bc61f0";

  string imagePath = path::join(flags.appc_store_dir, "images", imageId);

  ASSERT_SOME(os::mkdir(path::join(imagePath, "rootfs", "tmp")));
  ASSERT_SOME(
      os::write(path::join(imagePath, "rootfs", "tmp", "test"), "test"));
  ASSERT_SOME(
      os::write(path::join(imagePath, "manifest"), stringify(manifest)));

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner1.get()->recover({}, {}));

  Image image;
  image.mutable_appc()->set_name("foo.com/bar");

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  // Provision once with the first provisioner so there is on-disk
  // state for the second provisioner to recover.
  Future<string> rootfs = provisioner1.get()->provision(containerId, image);
  AWAIT_READY(rootfs);

  // Create a new provisioner to recover the state from the container.
  Try<Owned<Provisioner>> provisioner2 = Provisioner::create(flags, &fetcher);
  ASSERT_SOME(provisioner2);

  mesos::slave::ContainerState state;

  // Here we are using an ExecutorInfo in the ContainerState without a
  // ContainerInfo. This is the situation where the Image is specified
  // via --default_container_info so it's not part of the recovered
  // ExecutorInfo.
  state.mutable_container_id()->CopyFrom(containerId);

  AWAIT_READY(provisioner2.get()->recover({state}, {}));

  // It's possible for the user to provision two different rootfses
  // from the same image.
  AWAIT_READY(provisioner2.get()->provision(containerId, image));

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned. Two rootfses
  // are expected: one from before the restart and one from after.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());

  Future<bool> destroy = provisioner2.get()->destroy(containerId);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
// This test verifies that the provisioner can provision an rootfs
// from an image that is already put into the store directory.
TEST_F(ProvisionerAppcTest, ROOT_Provision)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "bind";
  flags.work_dir = "work_dir";

  Fetcher fetcher;

  Try<Owned<Provisioner>> provisioner = Provisioner::create(flags, &fetcher);
  ASSERT_SOME(provisioner);

  // Create a simple image in the store:
  // <store>
  // |--images
  //    |--<id>
  //       |--manifest
  //       |--rootfs/tmp/test
  JSON::Value manifest = JSON::parse(
      "{"
      " \"acKind\": \"ImageManifest\","
      " \"acVersion\": \"0.6.1\","
      " \"name\": \"foo.com/bar\","
      " \"labels\": ["
      " {"
      " \"name\": \"version\","
      " \"value\": \"1.0.0\""
      " },"
      " {"
      " \"name\": \"arch\","
      " \"value\": \"amd64\""
      " },"
      " {"
      " \"name\": \"os\","
      " \"value\": \"linux\""
      " }"
      " ],"
      " \"annotations\": ["
      " {"
      " \"name\": \"created\","
      " \"value\": \"1438983392\""
      " }"
      " ]"
      "}").get();

  // The 'imageId' below has the correct format but it's not computed
  // by hashing the tarball of the image. It's OK here as we assume
  // the images under 'images' have passed such check when they are
  // downloaded and validated.
  string imageId =
    "sha512-e77d96aa0240eedf134b8c90baeaf76dca8e78691836301d7498c84020446042e"
    "797b296d6ab296e0954c2626bfb264322ebeb8f447dac4fac6511ea06bc61f0";

  string imagePath = path::join(flags.appc_store_dir, "images", imageId);

  ASSERT_SOME(os::mkdir(path::join(imagePath, "rootfs", "tmp")));
  ASSERT_SOME(
      os::write(path::join(imagePath, "rootfs", "tmp", "test"), "test"));
  ASSERT_SOME(
      os::write(path::join(imagePath, "manifest"), stringify(manifest)));

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner.get()->recover({}, {}));

  // Simulate a task that requires an image.
  Image image;
  image.mutable_appc()->set_name("foo.com/bar");

  ContainerID containerId;
  containerId.set_value("12345");

  Future<string> rootfs = provisioner.get()->provision(containerId, image);
  AWAIT_READY(rootfs);

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned: exactly one
  // rootfs under the configured backend, whose basename matches the
  // path returned by provision().
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size());
  EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(),
            Path(rootfs.get()).basename());

  Future<bool> destroy = provisioner.get()->destroy(containerId);
  AWAIT_READY(destroy);

  // One rootfs is destroyed.
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
// Test that simultaneous requests on a single endpoint for two // different principals return different results. TEST_F(MasterLoadTest, Principals) { // Set up a proper authorizer for this test. master::Flags flags = CreateMasterFlags(); { // Default principal is allowed to view frameworks. mesos::ACL::ViewFramework* acl = flags.acls->add_view_frameworks(); acl->mutable_principals()->add_values(DEFAULT_CREDENTIAL.principal()); acl->mutable_users()->set_type(mesos::ACL::Entity::ANY); } { // Default principal 2 is not allowed to view frameworks. mesos::ACL::ViewFramework* acl = flags.acls->add_view_frameworks(); acl->mutable_principals()->add_values(DEFAULT_CREDENTIAL_2.principal()); acl->mutable_users()->set_type(mesos::ACL::Entity::NONE); } Authorizer* localAuthorizer = Authorizer::create(flags.acls.get()).get(); prepareCluster(localAuthorizer); // Set up the requests with correct principals. RequestDescriptor descriptor1; descriptor1.endpoint = "/frameworks"; descriptor1.principal = DEFAULT_CREDENTIAL.principal(); descriptor1.headers = createBasicAuthHeaders(DEFAULT_CREDENTIAL); RequestDescriptor descriptor2 = descriptor1; descriptor2.principal = DEFAULT_CREDENTIAL_2.principal(); descriptor2.headers = createBasicAuthHeaders(DEFAULT_CREDENTIAL_2); auto responses = launchSimultaneousRequests({descriptor1, descriptor2}); JSON::Value expected = JSON::parse( "{" "\"frameworks\": [{" "\"id\": \"" + stringify(frameworkId_) + "\"" "}]" "}" ).get(); foreachpair ( const RequestDescriptor& request, Future<Response>& response, responses) { AWAIT_READY(response); Try<JSON::Value> jsonResponse = JSON::parse(response->body); ASSERT_SOME(jsonResponse); if (request.principal == DEFAULT_CREDENTIAL.principal()) { EXPECT_TRUE(jsonResponse->contains(expected)) << "Principal " << request.principal << " got HTTP response: " << response->body; } else { EXPECT_FALSE(jsonResponse->contains(expected)) << "Principal " << request.principal << " got HTTP response: " << response->body; } }
// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "copy";
  flags.work_dir = "work_dir";

  Try<Owned<Provisioner>> provisioner1 = Provisioner::create(flags);
  ASSERT_SOME(provisioner1);

  // Put a test image into the store so recover() has something to load.
  Try<string> createImage = createTestImage(
      flags.appc_store_dir,
      getManifest());

  ASSERT_SOME(createImage);

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner1.get()->recover({}, {}));

  Image image;
  image.mutable_appc()->CopyFrom(getTestImage());

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  // Provision once with the first provisioner so there is on-disk
  // state for the second provisioner to recover.
  Future<slave::ProvisionInfo> provisionInfo =
    provisioner1.get()->provision(containerId, image);
  AWAIT_READY(provisionInfo);

  // Create a new provisioner to recover the state from the container.
  Try<Owned<Provisioner>> provisioner2 = Provisioner::create(flags);
  ASSERT_SOME(provisioner2);

  mesos::slave::ContainerState state;

  // Here we are using an ExecutorInfo in the ContainerState without a
  // ContainerInfo. This is the situation where the Image is specified
  // via --default_container_info so it's not part of the recovered
  // ExecutorInfo.
  state.mutable_container_id()->CopyFrom(containerId);

  AWAIT_READY(provisioner2.get()->recover({state}, {}));

  // It's possible for the user to provision two different rootfses
  // from the same image.
  AWAIT_READY(provisioner2.get()->provision(containerId, image));

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned: one rootfs
  // from before the restart plus one provisioned after recovery.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());

  Future<bool> destroy = provisioner2.get()->destroy(containerId);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
// This test verifies that the provisioner can provision an rootfs // from an image that is already put into the store directory. TEST_F(ProvisionerAppcTest, ROOT_Provision) { // Create provisioner. slave::Flags flags; flags.image_providers = "APPC"; flags.appc_store_dir = path::join(os::getcwd(), "store"); flags.image_provisioner_backend = "bind"; flags.work_dir = "work_dir"; Try<Owned<Provisioner>> provisioner = Provisioner::create(flags); ASSERT_SOME(provisioner); Try<string> createImage = createTestImage( flags.appc_store_dir, getManifest()); ASSERT_SOME(createImage); // Recover. This is when the image in the store is loaded. AWAIT_READY(provisioner.get()->recover({}, {})); // Simulate a task that requires an image. Image image; image.mutable_appc()->CopyFrom(getTestImage()); ContainerID containerId; containerId.set_value("12345"); Future<slave::ProvisionInfo> provisionInfo = provisioner.get()->provision(containerId, image); AWAIT_READY(provisionInfo); string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir); string containerDir = slave::provisioner::paths::getContainerDir( provisionerDir, containerId); Try<hashmap<string, hashset<string>>> rootfses = slave::provisioner::paths::listContainerRootfses( provisionerDir, containerId); ASSERT_SOME(rootfses); // Verify that the rootfs is successfully provisioned. ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend)); ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size()); EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(), Path(provisionInfo.get().rootfs).basename()); Future<bool> destroy = provisioner.get()->destroy(containerId); AWAIT_READY(destroy); // One rootfs is destroyed. EXPECT_TRUE(destroy.get()); // The container directory is successfully cleaned up. EXPECT_FALSE(os::exists(containerDir)); }
// Verifies that os::killtree() kills a full process tree even after
// an intermediate process has exited, by following sessions and
// process groups across the break in the tree.
TEST_F(OsTest, Killtree)
{
  // `dosetsid` places that process in its own session, so killtree
  // must follow sessions/groups to reach it.
  Try<ProcessTree> tree =
    Fork(&dosetsid, // Child.
         Fork(None(), // Grandchild.
              Fork(None(), // Great-grandchild.
                   Fork(&dosetsid, // Great-great-granchild.
                        Exec("sleep 10")),
                   Exec("sleep 10")),
              Exec("exit 0")),
         Exec("sleep 10"))();

  ASSERT_SOME(tree);

  // The process tree we instantiate initially looks like this:
  //
  //  -+- child sleep 10
  //   \-+- grandchild exit 0
  //     \-+- greatGrandchild sleep 10
  //       \--- greatGreatGrandchild sleep 10
  //
  // But becomes two process trees after the grandchild exits:
  //
  //  -+- child sleep 10
  //   \--- grandchild (exit 0)
  //
  //  -+- greatGrandchild sleep 10
  //   \--- greatGreatGrandchild sleep 10

  // Grab the pids from the instantiated process tree.
  ASSERT_EQ(1u, tree.get().children.size());
  ASSERT_EQ(1u, tree.get().children.front().children.size());
  ASSERT_EQ(1u, tree.get().children.front().children.front().children.size());

  pid_t child = tree.get();
  pid_t grandchild = tree.get().children.front();
  pid_t greatGrandchild = tree.get().children.front().children.front();
  pid_t greatGreatGrandchild =
    tree.get().children.front().children.front().children.front();

  // Now wait for the grandchild to exit splitting the process tree.
  Duration elapsed = Duration::zero();
  while (true) {
    Result<os::Process> process = os::process(grandchild);
    ASSERT_FALSE(process.isError());

    // Done once the grandchild is gone or has been zombied.
    if (process.isNone() || process.get().zombie) {
      break;
    }

    if (elapsed > Seconds(10)) {
      FAIL() << "Granchild process '" << process.get().pid << "' "
             << "(" << process.get().command << ") did not terminate";
    }

    os::sleep(Milliseconds(5));
    elapsed += Milliseconds(5);
  }

  // Kill the process tree and follow sessions and groups to make sure
  // we cross the broken link due to the grandchild.
  Try<list<ProcessTree> > trees = os::killtree(child, SIGKILL, true, true);

  ASSERT_SOME(trees);

  // Two disjoint trees are expected after the split (see above).
  EXPECT_EQ(2u, trees.get().size()) << stringify(trees.get());

  foreach (const ProcessTree& tree, trees.get()) {
    if (tree.process.pid == child) {
      // The 'grandchild' _might_ still be in the tree, just zombied,
      // unless the 'child' reaps the 'grandchild', which may happen
      // if the shell "sticks around" (i.e., some invocations of 'sh
      // -c' will 'exec' the command which will likely not do any
      // reaping, but in other cases an invocation of 'sh -c' will not
      // 'exec' the command, for example when the command is a
      // sequence of commands separated by ';').
      EXPECT_FALSE(tree.contains(greatGrandchild)) << tree;
      EXPECT_FALSE(tree.contains(greatGreatGrandchild)) << tree;
    } else if (tree.process.pid == greatGrandchild) {
      EXPECT_TRUE(tree.contains(greatGreatGrandchild)) << tree;
    } else {
      FAIL()
        << "Not expecting a process tree rooted at "
        << tree.process.pid << "\n" << tree;
    }
  }

  // All processes should be reaped since we've killed everything.
  // The direct child must be reaped by us below.
  elapsed = Duration::zero();
  while (true) {
    Result<os::Process> _child = os::process(child);
    ASSERT_SOME(_child);

    // Done once every descendant is gone and the child is zombied.
    if (os::process(greatGreatGrandchild).isNone() &&
        os::process(greatGrandchild).isNone() &&
        os::process(grandchild).isNone() &&
        _child.get().zombie) {
      break;
    }

    if (elapsed > Seconds(10)) {
      FAIL() << "Processes were not reaped after killtree invocation";
    }

    os::sleep(Milliseconds(5));
    elapsed += Milliseconds(5);
  }

  // Expect the pids to be wiped!
  EXPECT_NONE(os::process(greatGreatGrandchild));
  EXPECT_NONE(os::process(greatGrandchild));
  EXPECT_NONE(os::process(grandchild));
  EXPECT_SOME(os::process(child));
  EXPECT_TRUE(os::process(child).get().zombie);

  // We have to reap the child for running the tests in repetition.
  ASSERT_EQ(child, waitpid(child, NULL, 0));
}
// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = COPY_BACKEND;
  flags.work_dir = path::join(sandbox.get(), "work_dir");

  Try<Owned<Provisioner>> provisioner = Provisioner::create(flags);
  ASSERT_SOME(provisioner);

  // Put a test image into the store so recover() has something to load.
  Try<string> createImage = createTestImage(
      flags.appc_store_dir,
      getManifest());

  ASSERT_SOME(createImage);

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner.get()->recover({}));

  Image image;
  image.mutable_appc()->CopyFrom(getTestImage());

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  // Provision once so there is on-disk state to recover later.
  Future<slave::ProvisionInfo> provisionInfo =
    provisioner.get()->provision(containerId, image);
  AWAIT_READY(provisionInfo);

  // Destroy the first provisioner to simulate an agent restart.
  provisioner->reset();

  // Create a new provisioner to recover the state from the container.
  provisioner = Provisioner::create(flags);
  ASSERT_SOME(provisioner);

  AWAIT_READY(provisioner.get()->recover({containerId}));

  // It's possible for the user to provision two different rootfses
  // from the same image.
  AWAIT_READY(provisioner.get()->provision(containerId, image));

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned: one from
  // before the simulated restart plus one provisioned afterwards.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());

  Future<bool> destroy = provisioner.get()->destroy(containerId);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
// This test verifies that the provisioner can provision an rootfs // from an image for a child container. TEST_F(ProvisionerAppcTest, ROOT_ProvisionNestedContainer) { slave::Flags flags; flags.image_providers = "APPC"; flags.appc_store_dir = path::join(os::getcwd(), "store"); flags.image_provisioner_backend = BIND_BACKEND; flags.work_dir = path::join(sandbox.get(), "work_dir"); Try<Owned<Provisioner>> provisioner = Provisioner::create(flags); ASSERT_SOME(provisioner); Try<string> createImage = createTestImage( flags.appc_store_dir, getManifest()); ASSERT_SOME(createImage); // Recover. This is when the image in the store is loaded. AWAIT_READY(provisioner.get()->recover({})); Image image; image.mutable_appc()->CopyFrom(getTestImage()); ContainerID parent; ContainerID child; parent.set_value(UUID::random().toString()); child.set_value(UUID::random().toString()); child.mutable_parent()->CopyFrom(parent); Future<slave::ProvisionInfo> provisionInfo = provisioner.get()->provision(child, image); AWAIT_READY(provisionInfo); const string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir); const string containerDir = slave::provisioner::paths::getContainerDir( provisionerDir, child); Try<hashmap<string, hashset<string>>> rootfses = slave::provisioner::paths::listContainerRootfses( provisionerDir, child); ASSERT_SOME(rootfses); // Verify that the rootfs is successfully provisioned. ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend)); ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size()); EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(), Path(provisionInfo.get().rootfs).basename()); // TODO(jieyu): Verify that 'containerDir' is nested under its // parent container's 'containerDir'. Future<bool> destroy = provisioner.get()->destroy(child); AWAIT_READY(destroy); EXPECT_TRUE(destroy.get()); EXPECT_FALSE(os::exists(containerDir)); }