TEST(AgentCallValidationTest, RemoveNestedContainer)
{
  // Missing `remove_nested_container`.
  agent::Call call;
  call.set_type(agent::Call::REMOVE_NESTED_CONTAINER);

  Option<Error> error = validation::agent::call::validate(call);
  EXPECT_SOME(error);

  // Expecting a `container_id.parent`.
  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  agent::Call::RemoveNestedContainer* removeNestedContainer =
    call.mutable_remove_nested_container();

  removeNestedContainer->mutable_container_id()->CopyFrom(containerId);

  error = validation::agent::call::validate(call);
  EXPECT_SOME(error);

  // Test the valid case.
  ContainerID parentContainerId;
  parentContainerId.set_value(UUID::random().toString());

  removeNestedContainer->mutable_container_id()->mutable_parent()->CopyFrom(
      parentContainerId);

  error = validation::agent::call::validate(call);
  EXPECT_NONE(error);
}
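// For reference, a validator consistent with the expectations above
// could look like the following minimal sketch. This is an assumption
// for illustration, not the actual `validation::agent::call::validate`
// implementation (which dispatches on every call type); `Option`,
// `Error`, `None` and the protobuf types come from Mesos/stout.
Option<Error> validateRemoveNestedContainer(const agent::Call& call)
{
  if (!call.has_remove_nested_container()) {
    return Error("Expecting 'remove_nested_container' to be present");
  }

  // A nested container is identified by a `ContainerID` with a parent.
  if (!call.remove_nested_container().container_id().has_parent()) {
    return Error("Expecting 'container_id.parent' to be present");
  }

  return None();
}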
TEST(AgentValidationTest, ContainerID)
{
  ContainerID containerId;
  Option<Error> error;

  // No empty IDs.
  containerId.set_value("");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // No slashes.
  containerId.set_value("/");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  containerId.set_value("\\");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // No spaces.
  containerId.set_value("redis backup");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // No periods.
  containerId.set_value("redis.backup");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // Cannot be '.'.
  containerId.set_value(".");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // Cannot be '..'.
  containerId.set_value("..");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // Valid.
  containerId.set_value("redis");
  error = validation::container::validateContainerId(containerId);
  EXPECT_NONE(error);

  // Valid with invalid parent (empty `ContainerID.value`).
  containerId.set_value("backup");
  containerId.mutable_parent();
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // Valid with valid parent.
  containerId.set_value("backup");
  containerId.mutable_parent()->set_value("redis");
  error = validation::container::validateContainerId(containerId);
  EXPECT_NONE(error);
}
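// The rules exercised above can be captured in a short recursive
// validator. This is a minimal sketch under the assumption that the
// rules are exactly those tested; the real
// `validation::container::validateContainerId` may check more.
Option<Error> validateContainerIdSketch(const ContainerID& containerId)
{
  const string& id = containerId.value();

  if (id.empty()) {
    return Error("'ContainerID.value' cannot be empty");
  }

  // Rejecting '.' outright also rejects "." and "..".
  for (size_t i = 0; i < id.size(); i++) {
    const char c = id[i];
    if (c == '/' || c == '\\' || c == ' ' || c == '.') {
      return Error("'ContainerID.value' contains an invalid character");
    }
  }

  // Parents must satisfy the same rules, recursively.
  if (containerId.has_parent()) {
    return validateContainerIdSketch(containerId.parent());
  }

  return None();
}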
Example #3
// This test verifies that we can successfully launch a container with
// a big (>= 10 cpus) cpu quota. This is to catch the regression
// observed in MESOS-1049.
// TODO(vinod): Revisit this if/when the isolator restricts the number
// of cpus that an executor can use based on the slave cpus.
TEST_F(LimitedCpuIsolatorTest, ROOT_CGROUPS_Cfs_Big_Quota)
{
  Flags flags;

  // Enable CFS to cap CPU utilization.
  flags.cgroups_enable_cfs = true;

  Try<Isolator*> isolator = CgroupsCpushareIsolatorProcess::create(flags);
  CHECK_SOME(isolator);

  Try<Launcher*> launcher = LinuxLauncher::create(flags);
  CHECK_SOME(launcher);

  // Set the executor's resources to 100.5 cpu.
  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("cpus:100.5").get());

  ContainerID containerId;
  containerId.set_value("mesos_test_cfs_big_cpu_limit");

  AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));

  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));

  lambda::function<int()> inChild = lambda::bind(&execute, "exit 0", pipes);

  Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
  ASSERT_SOME(pid);

  // Reap the forked child.
  Future<Option<int> > status = process::reap(pid.get());

  // Continue in the parent.
  ::close(pipes[0]);

  // Isolate the forked child.
  AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

  // Now signal the child to continue.
  int buf = 0;
  ASSERT_LT(0, ::write(pipes[1], &buf, sizeof(buf)));
  ::close(pipes[1]);

  // Wait for the command to complete successfully.
  AWAIT_READY(status);
  ASSERT_SOME_EQ(0, status.get());

  // Ensure all processes are killed.
  AWAIT_READY(launcher.get()->destroy(containerId));

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
  delete launcher.get();
}
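// Several tests in this file bind helpers like `execute` or
// `childSetup` into `fork` together with a pipe: the child blocks
// until the parent has isolated it. A minimal sketch of the child
// side, assuming the same `pipes` convention as above (illustrative,
// not the helpers' actual source):
#include <unistd.h>

#include <cerrno>

static int childSetupSketch(int pipes[2])
{
  // The child only reads; close the unused write end.
  ::close(pipes[1]);

  // Block until the parent writes a byte, signaling that
  // `isolator->isolate()` has completed.
  char dummy;
  while (::read(pipes[0], &dummy, sizeof(dummy)) == -1 && errno == EINTR) {}

  ::close(pipes[0]);

  // Returning 0 lets the launcher proceed to exec the command.
  return 0;
}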
Example #4
TEST_F(NamespacesPidIsolatorTest, ROOT_PidNamespace)
{
    slave::Flags flags = CreateSlaveFlags();
    flags.isolation = "namespaces/pid";

    string directory = os::getcwd(); // We're inside a temporary sandbox.

    Try<MesosContainerizer*> containerizer =
        MesosContainerizer::create(flags, false);
    ASSERT_SOME(containerizer);

    ContainerID containerId;
    containerId.set_value("test_container");

    // Write the command's pid namespace inode and init name to files.
    const string command =
        "stat -c %i /proc/self/ns/pid > ns && (cat /proc/1/comm > init)";

    process::Future<bool> launch = containerizer.get()->launch(
                                       containerId,
                                       CREATE_EXECUTOR_INFO("executor", command),
                                       directory,
                                       None(),
                                       SlaveID(),
                                       process::PID<Slave>(),
                                       false);
    AWAIT_READY(launch);
    ASSERT_TRUE(launch.get());

    // Wait on the container.
    process::Future<containerizer::Termination> wait =
        containerizer.get()->wait(containerId);
    AWAIT_READY(wait);

    // Check the executor exited correctly.
    EXPECT_TRUE(wait.get().has_status());
    EXPECT_EQ(0, wait.get().status());

    // Check that the command was run in a different pid namespace.
    Try<ino_t> testPidNamespace = ns::getns(::getpid(), "pid");
    ASSERT_SOME(testPidNamespace);

    Try<string> containerPidNamespace = os::read(path::join(directory, "ns"));
    ASSERT_SOME(containerPidNamespace);

    EXPECT_NE(stringify(testPidNamespace.get()),
              strings::trim(containerPidNamespace.get()));

    // Check that 'sh' is the container's 'init' process.
    // This verifies that /proc has been correctly mounted for the container.
    Try<string> init = os::read(path::join(directory, "init"));
    ASSERT_SOME(init);

    EXPECT_EQ("sh", strings::trim(init.get()));

    delete containerizer.get();
}
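// The namespace check above relies on a Linux convention: a process's
// pid namespace is exposed as /proc/<pid>/ns/pid, and two processes
// share a namespace iff those files have the same inode. A minimal
// sketch of that comparison (an assumption about what `ns::getns`
// boils down to, for illustration):
#include <sys/stat.h>
#include <sys/types.h>

#include <string>

ino_t pidNamespaceInode(pid_t pid)
{
  const std::string path = "/proc/" + std::to_string(pid) + "/ns/pid";

  struct stat s;
  if (::stat(path.c_str(), &s) == -1) {
    return 0;  // Error handling elided in this sketch.
  }

  return s.st_ino;  // Same inode <=> same pid namespace.
}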
Example #5
TYPED_TEST(MemIsolatorTest, MemUsage)
{
  slave::Flags flags;

  Try<Isolator*> isolator = TypeParam::create(flags);
  CHECK_SOME(isolator);

  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("mem:1024").get());

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  // Use a relative temporary directory so it gets cleaned up
  // automatically with the test.
  Try<string> dir = os::mkdtemp(path::join(os::getcwd(), "XXXXXX"));
  ASSERT_SOME(dir);

  ContainerConfig containerConfig;
  containerConfig.mutable_executor_info()->CopyFrom(executorInfo);
  containerConfig.set_directory(dir.get());

  AWAIT_READY(isolator.get()->prepare(
      containerId,
      containerConfig));

  MemoryTestHelper helper;
  ASSERT_SOME(helper.spawn());
  ASSERT_SOME(helper.pid());

  // Set up the reaper to wait on the subprocess.
  Future<Option<int>> status = process::reap(helper.pid().get());

  // Isolate the subprocess.
  AWAIT_READY(isolator.get()->isolate(containerId, helper.pid().get()));

  const Bytes allocation = Megabytes(128);
  EXPECT_SOME(helper.increaseRSS(allocation));

  Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
  AWAIT_READY(usage);

  EXPECT_GE(usage.get().mem_rss_bytes(), allocation.bytes());

  // Ensure the process is killed.
  helper.cleanup();

  // Make sure the subprocess was reaped.
  AWAIT_READY(status);

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
}
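// `MemoryTestHelper::increaseRSS` must do more than allocate: memory
// only counts toward `mem_rss_bytes()` once it is resident. A minimal
// sketch of the idea, assuming a helper that touches one byte per
// page (illustrative, not the helper's actual implementation):
#include <cstddef>

#include <vector>

static std::vector<char> resident;  // Keeps the pages alive.

void increaseRSSSketch(std::size_t bytes)
{
  const std::size_t offset = resident.size();
  resident.resize(offset + bytes);

  // Write one byte per page so the kernel backs each page with
  // physical memory; 4096 is an assumed page size.
  for (std::size_t i = offset; i < resident.size(); i += 4096) {
    resident[i] = 1;
  }
}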
// This test checks that if destroy is called while a container is
// being launched, the composing containerizer still calls the
// underlying containerizer's destroy and skips calling the rest of
// the containerizers.
TEST_F(ComposingContainerizerTest, DestroyWhileLaunching)
{
  vector<Containerizer*> containerizers;

  MockContainerizer* mockContainerizer = new MockContainerizer();
  MockContainerizer* mockContainerizer2 = new MockContainerizer();

  containerizers.push_back(mockContainerizer);
  containerizers.push_back(mockContainerizer2);

  ComposingContainerizer containerizer(containerizers);
  ContainerID containerId;
  containerId.set_value("container");
  TaskInfo taskInfo;
  ExecutorInfo executorInfo;
  SlaveID slaveId;
  std::map<std::string, std::string> environment;

  Promise<bool> launchPromise;

  EXPECT_CALL(*mockContainerizer, launch(_, _, _, _, _, _, _, _))
    .WillOnce(Return(launchPromise.future()));

  Future<Nothing> destroy;

  EXPECT_CALL(*mockContainerizer, destroy(_))
    .WillOnce(FutureSatisfy(&destroy));

  Future<bool> launch = containerizer.launch(
      containerId,
      taskInfo,
      executorInfo,
      "dir",
      "user",
      slaveId,
      environment,
      false);

  Resources resources = Resources::parse("cpus:1;mem:256").get();

  EXPECT_TRUE(launch.isPending());

  containerizer.destroy(containerId);

  EXPECT_CALL(*mockContainerizer2, launch(_, _, _, _, _, _, _, _))
    .Times(0);

  // We make sure destroy is called on the first containerizer. The
  // second containerizer shouldn't be called either, since the
  // container is already destroyed.
  AWAIT_READY(destroy);

  launchPromise.set(false);
  AWAIT_FAILED(launch);
}
// This test verifies that ContainerID is properly set in the
// ContainerStatus returned from 'status()' method.
TEST_F(MesosContainerizerTest, StatusWithContainerID)
{
  slave::Flags flags = CreateSlaveFlags();
  flags.launcher = "posix";
  flags.isolation = "posix/cpu";

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create = MesosContainerizer::create(
      flags,
      true,
      &fetcher);

  ASSERT_SOME(create);

  Owned<MesosContainerizer> containerizer(create.get());

  SlaveState state;
  state.id = SlaveID();

  AWAIT_READY(containerizer->recover(state));

  ContainerID containerId;
  containerId.set_value(id::UUID::random().toString());

  Try<string> directory = environment->mkdtemp();
  ASSERT_SOME(directory);

  Future<Containerizer::LaunchResult> launch = containerizer->launch(
      containerId,
      createContainerConfig(
          None(),
          createExecutorInfo("executor", "sleep 1000", "cpus:1"),
          directory.get()),
      map<string, string>(),
      None());

  AWAIT_ASSERT_EQ(Containerizer::LaunchResult::SUCCESS, launch);

  Future<ContainerStatus> status = containerizer->status(containerId);
  AWAIT_READY(status);

  EXPECT_EQ(containerId, status->container_id());

  Future<Option<ContainerTermination>> wait = containerizer->wait(containerId);

  containerizer->destroy(containerId);

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait.get()->has_status());
  EXPECT_WTERMSIG_EQ(SIGKILL, wait.get()->status());
}
// This test ensures that destroy can be called at the end of the
// launch loop. The composing containerizer still calls the
// underlying containerizer's destroy (because it's not sure
// whether that containerizer can handle the type of container being
// launched). If the launch is not supported by any of the
// containerizers, both the launch and destroy futures should be false.
TEST_F(ComposingContainerizerTest, DestroyAfterLaunchLoop)
{
  vector<Containerizer*> containerizers;

  MockContainerizer* mockContainerizer1 = new MockContainerizer();
  containerizers.push_back(mockContainerizer1);

  ComposingContainerizer containerizer(containerizers);
  ContainerID containerId;
  containerId.set_value("container");
  TaskInfo taskInfo;
  ExecutorInfo executorInfo;
  SlaveID slaveId;
  std::map<std::string, std::string> environment;

  Promise<bool> launchPromise;

  EXPECT_CALL(*mockContainerizer1, launch(_, _, _, _, _, _, _, _))
    .WillOnce(Return(launchPromise.future()));

  Future<Nothing> destroy;
  Promise<bool> destroyPromise;
  EXPECT_CALL(*mockContainerizer1, destroy(_))
    .WillOnce(DoAll(FutureSatisfy(&destroy),
                    Return(destroyPromise.future())));

  Future<bool> launched = containerizer.launch(
      containerId,
      taskInfo,
      executorInfo,
      "dir",
      "user",
      slaveId,
      environment,
      false);

  Resources resources = Resources::parse("cpus:1;mem:256").get();

  EXPECT_TRUE(launched.isPending());

  Future<bool> destroyed = containerizer.destroy(containerId);

  // We make sure the destroy is being called on the containerizer.
  AWAIT_READY(destroy);

  launchPromise.set(false);
  destroyPromise.set(false);

  // `launched` should be false and `destroyed` should be false
  // because none of the containerizers supported the launch.
  AWAIT_EXPECT_EQ(false, launched);
  AWAIT_EXPECT_EQ(false, destroyed);
}
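// The composing-containerizer tests above hinge on the same
// launch-loop semantics. A minimal synchronous sketch of that loop
// (an assumption for illustration; the real ComposingContainerizer is
// asynchronous and libprocess-based):
#include <functional>

#include <vector>

bool composingLaunchSketch(
    const std::vector<std::function<bool()>>& backends,
    const std::function<bool()>& destroyed)
{
  for (const std::function<bool()>& tryLaunch : backends) {
    // A destroy observed mid-loop aborts the remaining attempts.
    if (destroyed()) {
      return false;
    }

    // `false` from a backend means "container type unsupported,
    // try the next containerizer".
    if (tryLaunch()) {
      return true;
    }
  }

  // No containerizer supported the container: launch (and any
  // pending destroy) resolve to false.
  return false;
}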
// This test verifies that launching a task with a non-existent Seccomp profile
// leads to failure.
TEST_F(
    LinuxSeccompIsolatorTest,
    ROOT_SECCOMP_LaunchWithOverriddenNonExistentProfile)
{
  slave::Flags flags = CreateSlaveFlags();
  flags.seccomp_profile_name = createProfile(TEST_SECCOMP_PROFILE);

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, false, &fetcher);

  ASSERT_SOME(create);

  Owned<MesosContainerizer> containerizer(create.get());

  SlaveState state;
  state.id = SlaveID();

  AWAIT_READY(containerizer->recover(state));

  ContainerID containerId;
  containerId.set_value(id::UUID::random().toString());

  Try<string> directory = environment->mkdtemp();
  ASSERT_SOME(directory);

  auto containerConfig = createContainerConfig(
      None(),
      createExecutorInfo("executor", "exit 0", "cpus:1"),
      directory.get());

  ContainerInfo* container = containerConfig.mutable_container_info();
  container->set_type(ContainerInfo::MESOS);

  // Set a non-existent Seccomp profile for this particular task.
  SeccompInfo* seccomp = container->mutable_linux_info()->mutable_seccomp();
  seccomp->set_profile_name("absent");

  Future<Containerizer::LaunchResult> launch = containerizer->launch(
      containerId,
      containerConfig,
      map<string, string>(),
      None());

  AWAIT_FAILED(launch);
}
// Ensures the containerizer responds correctly (false Future) to
// a request to destroy an unknown container.
TEST_F(ComposingContainerizerTest, DestroyUnknownContainer)
{
  vector<Containerizer*> containerizers;

  MockContainerizer* mockContainerizer1 = new MockContainerizer();
  MockContainerizer* mockContainerizer2 = new MockContainerizer();

  containerizers.push_back(mockContainerizer1);
  containerizers.push_back(mockContainerizer2);

  ComposingContainerizer containerizer(containerizers);

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  AWAIT_EXPECT_FALSE(containerizer.destroy(containerId));
}
// This test verifies that we can launch shell commands when the default
// Seccomp profile is enabled.
TEST_F(LinuxSeccompIsolatorTest, ROOT_SECCOMP_LaunchWithDefaultProfile)
{
  slave::Flags flags = CreateSlaveFlags();
  flags.seccomp_profile_name = createProfile(TEST_SECCOMP_PROFILE);

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, false, &fetcher);

  ASSERT_SOME(create);

  Owned<MesosContainerizer> containerizer(create.get());

  SlaveState state;
  state.id = SlaveID();

  AWAIT_READY(containerizer->recover(state));

  ContainerID containerId;
  containerId.set_value(id::UUID::random().toString());

  Try<string> directory = environment->mkdtemp();
  ASSERT_SOME(directory);

  const string command = "id && env && uname && hostname";

  Future<Containerizer::LaunchResult> launch = containerizer->launch(
      containerId,
      createContainerConfig(
          None(),
          createExecutorInfo("executor", command, "cpus:1"),
          directory.get()),
      map<string, string>(),
      None());

  AWAIT_ASSERT_EQ(Containerizer::LaunchResult::SUCCESS, launch);

  Future<Option<ContainerTermination>> wait = containerizer->wait(containerId);

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait.get()->has_status());
  EXPECT_WEXITSTATUS_EQ(0, wait.get()->status());
}
// Ensures the containerizer responds correctly (returns None)
// to a request to wait on an unknown container.
TEST_F(ComposingContainerizerTest, WaitUnknownContainer)
{
  vector<Containerizer*> containerizers;

  MockContainerizer* mockContainerizer1 = new MockContainerizer();
  MockContainerizer* mockContainerizer2 = new MockContainerizer();

  containerizers.push_back(mockContainerizer1);
  containerizers.push_back(mockContainerizer2);

  ComposingContainerizer containerizer(containerizers);

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  Future<Option<ContainerTermination>> wait = containerizer.wait(containerId);

  AWAIT_READY(wait);
  EXPECT_NONE(wait.get());
}
Example #13
// Test that the environment decorator hook adds a new environment
// variable to the executor runtime. The test hook adds the variable
// "FOO" with the value "bar" to the executor; we validate the hook
// by verifying the value of this environment variable.
TEST_F(HookTest, VerifySlaveExecutorEnvironmentDecorator)
{
  const string& directory = os::getcwd(); // We're inside a temporary sandbox.
  Fetcher fetcher;

  Try<MesosContainerizer*> _containerizer =
    MesosContainerizer::create(CreateSlaveFlags(), false, &fetcher);

  ASSERT_SOME(_containerizer);
  Owned<MesosContainerizer> containerizer(_containerizer.get());

  ContainerID containerId;
  containerId.set_value("test_container");

  // Test hook adds a new environment variable "FOO" to the executor
  // with a value "bar". A '0' (success) exit status for the following
  // command validates the hook.
  process::Future<bool> launch = containerizer->launch(
      containerId,
      CREATE_EXECUTOR_INFO("executor", "test $FOO = 'bar'"),
      directory,
      None(),
      SlaveID(),
      process::PID<Slave>(),
      false);
  AWAIT_READY(launch);
  ASSERT_TRUE(launch.get());

  // Wait on the container.
  process::Future<containerizer::Termination> wait =
    containerizer->wait(containerId);
  AWAIT_READY(wait);

  // Check the executor exited correctly.
  EXPECT_TRUE(wait.get().has_status());
  EXPECT_EQ(0, wait.get().status());
}
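// What the test hook contributes can be sketched as a decorator that
// returns extra environment variables for the executor. This is an
// assumption for illustration; the real hook module implements the
// slave executor environment decorator interface and may merge the
// result with the executor's existing environment:
Environment environmentDecoratorSketch()
{
  Environment environment;

  Environment::Variable* variable = environment.add_variables();
  variable->set_name("FOO");
  variable->set_value("bar");

  return environment;
}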
// This test verifies that the image specified in the volume will be
// properly provisioned and mounted into the container if the container
// root filesystem is not specified.
TEST_P(VolumeImageIsolatorTest, ROOT_ImageInVolumeWithoutRootFilesystem)
{
  string registry = path::join(sandbox.get(), "registry");
  AWAIT_READY(DockerArchive::create(registry, "test_image"));

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "filesystem/linux,volume/image,docker/runtime";
  flags.docker_registry = registry;
  flags.docker_store_dir = path::join(sandbox.get(), "store");
  flags.image_providers = "docker";

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, true, &fetcher);

  ASSERT_SOME(create);

  Owned<Containerizer> containerizer(create.get());

  ContainerID containerId;
  containerId.set_value(id::UUID::random().toString());

  ContainerInfo container = createContainerInfo(
      None(),
      {createVolumeFromDockerImage("rootfs", "test_image", Volume::RW)});

  CommandInfo command = createCommandInfo("test -d rootfs/bin");

  ExecutorInfo executor = createExecutorInfo(
      "test_executor",
      nesting ? createCommandInfo("sleep 1000") : command);

  if (!nesting) {
    executor.mutable_container()->CopyFrom(container);
  }

  string directory = path::join(flags.work_dir, "sandbox");
  ASSERT_SOME(os::mkdir(directory));

  Future<Containerizer::LaunchResult> launch = containerizer->launch(
      containerId,
      createContainerConfig(None(), executor, directory),
      map<string, string>(),
      None());

  AWAIT_ASSERT_EQ(Containerizer::LaunchResult::SUCCESS, launch);

  Future<Option<ContainerTermination>> wait = containerizer->wait(containerId);

  if (nesting) {
    ContainerID nestedContainerId;
    nestedContainerId.mutable_parent()->CopyFrom(containerId);
    nestedContainerId.set_value(id::UUID::random().toString());

    launch = containerizer->launch(
        nestedContainerId,
        createContainerConfig(command, container),
        map<string, string>(),
        None());

    AWAIT_ASSERT_EQ(Containerizer::LaunchResult::SUCCESS, launch);

    wait = containerizer->wait(nestedContainerId);
  }

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait->get().has_status());
  EXPECT_WEXITSTATUS_EQ(0, wait->get().status());

  if (nesting) {
    Future<Option<ContainerTermination>> termination =
      containerizer->destroy(containerId);

    AWAIT_READY(termination);
    ASSERT_SOME(termination.get());
    ASSERT_TRUE(termination->get().has_status());
    EXPECT_WTERMSIG_EQ(SIGKILL, termination.get()->status());
  }
}
Example #15
TEST_F(SharedFilesystemIsolatorTest, ROOT_AbsoluteVolume)
{
    slave::Flags flags = CreateSlaveFlags();
    flags.isolation = "filesystem/shared";

    Try<Isolator*> isolator = SharedFilesystemIsolatorProcess::create(flags);
    CHECK_SOME(isolator);

    Try<Launcher*> launcher = LinuxLauncher::create(flags);
    CHECK_SOME(launcher);

    // We'll mount the absolute test work directory as /var/tmp in the
    // container.
    const string hostPath = flags.work_dir;
    const string containerPath = "/var/tmp";

    ContainerInfo containerInfo;
    containerInfo.set_type(ContainerInfo::MESOS);
    containerInfo.add_volumes()->CopyFrom(
        CREATE_VOLUME(containerPath, hostPath, Volume::RW));

    ExecutorInfo executorInfo;
    executorInfo.mutable_container()->CopyFrom(containerInfo);

    ContainerID containerId;
    containerId.set_value(UUID::random().toString());

    Future<Option<CommandInfo> > prepare =
        isolator.get()->prepare(containerId, executorInfo, flags.work_dir, None());
    AWAIT_READY(prepare);
    ASSERT_SOME(prepare.get());

    // Test the volume mounting by touching a file in the container's
    // /var/tmp, which should then appear in flags.work_dir.
    const string filename = UUID::random().toString();
    ASSERT_FALSE(os::exists(path::join(containerPath, filename)));

    vector<string> args;
    args.push_back("/bin/sh");
    args.push_back("-x");
    args.push_back("-c");
    args.push_back(prepare.get().get().value() +
                   " && touch " +
                   path::join(containerPath, filename));

    Try<pid_t> pid = launcher.get()->fork(
                         containerId,
                         "/bin/sh",
                         args,
                         Subprocess::FD(STDIN_FILENO),
                         Subprocess::FD(STDOUT_FILENO),
                         Subprocess::FD(STDERR_FILENO),
                         None(),
                         None(),
                         None());
    ASSERT_SOME(pid);

    // Set up the reaper to wait on the forked child.
    Future<Option<int> > status = process::reap(pid.get());

    AWAIT_READY(status);
    EXPECT_SOME_EQ(0, status.get());

    // Check the file was created in flags.work_dir.
    EXPECT_TRUE(os::exists(path::join(hostPath, filename)));

    // Check it didn't get created in the host's view of containerPath.
    EXPECT_FALSE(os::exists(path::join(containerPath, filename)));

    delete launcher.get();
    delete isolator.get();
}
Example #16
// Test that a container can create a private view of a system
// directory (/var/tmp). Check that a file written by a process inside
// the container doesn't appear on the host filesystem but does appear
// under the container's work directory.
TEST_F(SharedFilesystemIsolatorTest, ROOT_RelativeVolume)
{
    slave::Flags flags = CreateSlaveFlags();
    flags.isolation = "filesystem/shared";

    Try<Isolator*> isolator = SharedFilesystemIsolatorProcess::create(flags);
    CHECK_SOME(isolator);

    Try<Launcher*> launcher = LinuxLauncher::create(flags);
    CHECK_SOME(launcher);

    // Use /var/tmp so we don't mask the work directory (under /tmp).
    const string containerPath = "/var/tmp";
    ASSERT_TRUE(os::isdir(containerPath));

    // Use a host path relative to the container work directory.
    const string hostPath = strings::remove(containerPath, "/", strings::PREFIX);

    ContainerInfo containerInfo;
    containerInfo.set_type(ContainerInfo::MESOS);
    containerInfo.add_volumes()->CopyFrom(
        CREATE_VOLUME(containerPath, hostPath, Volume::RW));

    ExecutorInfo executorInfo;
    executorInfo.mutable_container()->CopyFrom(containerInfo);

    ContainerID containerId;
    containerId.set_value(UUID::random().toString());

    Future<Option<CommandInfo> > prepare =
        isolator.get()->prepare(containerId, executorInfo, flags.work_dir, None());
    AWAIT_READY(prepare);
    ASSERT_SOME(prepare.get());

    // The test will touch a file in the container path.
    const string file = path::join(containerPath, UUID::random().toString());
    ASSERT_FALSE(os::exists(file));

    // Manually run the isolator's preparation command first, then touch
    // the file.
    vector<string> args;
    args.push_back("/bin/sh");
    args.push_back("-x");
    args.push_back("-c");
    args.push_back(prepare.get().get().value() + " && touch " + file);

    Try<pid_t> pid = launcher.get()->fork(
                         containerId,
                         "/bin/sh",
                         args,
                         Subprocess::FD(STDIN_FILENO),
                         Subprocess::FD(STDOUT_FILENO),
                         Subprocess::FD(STDERR_FILENO),
                         None(),
                         None(),
                         None());
    ASSERT_SOME(pid);

    // Set up the reaper to wait on the forked child.
    Future<Option<int> > status = process::reap(pid.get());

    AWAIT_READY(status);
    EXPECT_SOME_EQ(0, status.get());

    // Check the correct hierarchy was created under the container work
    // directory.
    string dir = "/";
    foreach (const string& subdir, strings::tokenize(containerPath, "/")) {
        dir = path::join(dir, subdir);

        struct stat hostStat;
        EXPECT_EQ(0, ::stat(dir.c_str(), &hostStat));

        struct stat containerStat;
        EXPECT_EQ(0,
                  ::stat(path::join(flags.work_dir, dir).c_str(), &containerStat));

        EXPECT_EQ(hostStat.st_mode, containerStat.st_mode);
        EXPECT_EQ(hostStat.st_uid, containerStat.st_uid);
        EXPECT_EQ(hostStat.st_gid, containerStat.st_gid);
    }

    // Check it did *not* create a file in the host namespace.
    EXPECT_FALSE(os::exists(file));

    // Check it did create the file under the container's work directory
    // on the host.
    EXPECT_TRUE(os::exists(path::join(flags.work_dir, file)));

    delete launcher.get();
    delete isolator.get();
}
Example #17
TYPED_TEST(MemIsolatorTest, MemUsage)
{
    slave::Flags flags;

    Try<Isolator*> isolator = TypeParam::create(flags);
    CHECK_SOME(isolator);

    // A PosixLauncher is sufficient even when testing a cgroups isolator.
    Try<Launcher*> launcher = PosixLauncher::create(flags);
    CHECK_SOME(launcher);

    ExecutorInfo executorInfo;
    executorInfo.mutable_resources()->CopyFrom(
        Resources::parse("mem:1024").get());

    ContainerID containerId;
    containerId.set_value("memory_usage");

    // Use a relative temporary directory so it gets cleaned up
    // automatically with the test.
    Try<string> dir = os::mkdtemp(path::join(os::getcwd(), "XXXXXX"));
    ASSERT_SOME(dir);

    AWAIT_READY(
        isolator.get()->prepare(containerId, executorInfo, dir.get(), None()));

    int pipes[2];
    ASSERT_NE(-1, ::pipe(pipes));

    Try<pid_t> pid = launcher.get()->fork(
                         containerId,
                         "/bin/sh",
                         vector<string>(),
                         Subprocess::FD(STDIN_FILENO),
                         Subprocess::FD(STDOUT_FILENO),
                         Subprocess::FD(STDERR_FILENO),
                         None(),
                         None(),
                         lambda::bind(&consumeMemory, Megabytes(256), Seconds(10), pipes));

    ASSERT_SOME(pid);

    // Set up the reaper to wait on the forked child.
    Future<Option<int> > status = process::reap(pid.get());

    // Continue in the parent.
    ASSERT_SOME(os::close(pipes[0]));

    // Isolate the forked child.
    AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

    // Now signal the child to continue.
    char dummy;
    ASSERT_LT(0, ::write(pipes[1], &dummy, sizeof(dummy)));

    ASSERT_SOME(os::close(pipes[1]));

    // Wait up to 5 seconds for the child process to consume 256 MB of memory.
    ResourceStatistics statistics;
    Bytes threshold = Megabytes(256);
    Duration waited = Duration::zero();
    do {
        Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
        AWAIT_READY(usage);

        statistics = usage.get();

        // If we meet our usage expectations, we're done!
        if (statistics.mem_rss_bytes() >= threshold.bytes()) {
            break;
        }

        os::sleep(Seconds(1));
        waited += Seconds(1);
    } while (waited < Seconds(5));

    EXPECT_LE(threshold.bytes(), statistics.mem_rss_bytes());

    // Ensure all processes are killed.
    AWAIT_READY(launcher.get()->destroy(containerId));

    // Make sure the child was reaped.
    AWAIT_READY(status);

    // Let the isolator clean up.
    AWAIT_READY(isolator.get()->cleanup(containerId));

    delete isolator.get();
    delete launcher.get();
}
Example #18
// This test verifies that we can successfully launch a container with
// a big (>= 10 cpus) cpu quota. This is to catch the regression
// observed in MESOS-1049.
// TODO(vinod): Revisit this if/when the isolator restricts the number
// of cpus that an executor can use based on the slave cpus.
TEST_F(LimitedCpuIsolatorTest, ROOT_CGROUPS_Cfs_Big_Quota)
{
    slave::Flags flags;

    // Enable CFS to cap CPU utilization.
    flags.cgroups_enable_cfs = true;

    Try<Isolator*> isolator = CgroupsCpushareIsolatorProcess::create(flags);
    CHECK_SOME(isolator);

    Try<Launcher*> launcher = LinuxLauncher::create(flags);
    CHECK_SOME(launcher);

    // Set the executor's resources to 100.5 cpu.
    ExecutorInfo executorInfo;
    executorInfo.mutable_resources()->CopyFrom(
        Resources::parse("cpus:100.5").get());

    ContainerID containerId;
    containerId.set_value("mesos_test_cfs_big_cpu_limit");

    // Use a relative temporary directory so it gets cleaned up
    // automatically with the test.
    Try<string> dir = os::mkdtemp(path::join(os::getcwd(), "XXXXXX"));
    ASSERT_SOME(dir);

    AWAIT_READY(
        isolator.get()->prepare(containerId, executorInfo, dir.get(), None()));

    int pipes[2];
    ASSERT_NE(-1, ::pipe(pipes));

    vector<string> argv(3);
    argv[0] = "sh";
    argv[1] = "-c";
    argv[2] = "exit 0";

    Try<pid_t> pid = launcher.get()->fork(
                         containerId,
                         "/bin/sh",
                         argv,
                         Subprocess::FD(STDIN_FILENO),
                         Subprocess::FD(STDOUT_FILENO),
                         Subprocess::FD(STDERR_FILENO),
                         None(),
                         None(),
                         lambda::bind(&childSetup, pipes));

    ASSERT_SOME(pid);

    // Reap the forked child.
    Future<Option<int> > status = process::reap(pid.get());

    // Continue in the parent.
    ASSERT_SOME(os::close(pipes[0]));

    // Isolate the forked child.
    AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

    // Now signal the child to continue.
    char dummy;
    ASSERT_LT(0, ::write(pipes[1], &dummy, sizeof(dummy)));

    ASSERT_SOME(os::close(pipes[1]));

    // Wait for the command to complete successfully.
    AWAIT_READY(status);
    ASSERT_SOME_EQ(0, status.get());

    // Ensure all processes are killed.
    AWAIT_READY(launcher.get()->destroy(containerId));

    // Let the isolator clean up.
    AWAIT_READY(isolator.get()->cleanup(containerId));

    delete isolator.get();
    delete launcher.get();
}
Example #19
TYPED_TEST(CpuIsolatorTest, UserCpuUsage)
{
    slave::Flags flags;

    Try<Isolator*> isolator = TypeParam::create(flags);
    CHECK_SOME(isolator);

    // A PosixLauncher is sufficient even when testing a cgroups isolator.
    Try<Launcher*> launcher = PosixLauncher::create(flags);
    CHECK_SOME(launcher);

    ExecutorInfo executorInfo;
    executorInfo.mutable_resources()->CopyFrom(
        Resources::parse("cpus:1.0").get());

    ContainerID containerId;
    containerId.set_value("user_cpu_usage");

    // Use a relative temporary directory so it gets cleaned up
    // automatically with the test.
    Try<string> dir = os::mkdtemp(path::join(os::getcwd(), "XXXXXX"));
    ASSERT_SOME(dir);

    AWAIT_READY(
        isolator.get()->prepare(containerId, executorInfo, dir.get(), None()));

    const string& file = path::join(dir.get(), "mesos_isolator_test_ready");

    // Max out a single core in userspace. This will run for at most one second.
    string command = "while true ; do true ; done &"
                     "touch " + file + "; " // Signals the command is running.
                     "sleep 60";

    int pipes[2];
    ASSERT_NE(-1, ::pipe(pipes));

    vector<string> argv(3);
    argv[0] = "sh";
    argv[1] = "-c";
    argv[2] = command;

    Try<pid_t> pid = launcher.get()->fork(
                         containerId,
                         "/bin/sh",
                         argv,
                         Subprocess::FD(STDIN_FILENO),
                         Subprocess::FD(STDOUT_FILENO),
                         Subprocess::FD(STDERR_FILENO),
                         None(),
                         None(),
                         lambda::bind(&childSetup, pipes));

    ASSERT_SOME(pid);

    // Reap the forked child.
    Future<Option<int> > status = process::reap(pid.get());

    // Continue in the parent.
    ASSERT_SOME(os::close(pipes[0]));

    // Isolate the forked child.
    AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

    // Now signal the child to continue.
    char dummy;
    ASSERT_LT(0, ::write(pipes[1], &dummy, sizeof(dummy)));

    ASSERT_SOME(os::close(pipes[1]));

    // Wait for the command to start.
    while (!os::exists(file));

    // Wait up to 1 second for the child process to induce 1/8 of a second of
    // user cpu time.
    ResourceStatistics statistics;
    Duration waited = Duration::zero();
    do {
        Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
        AWAIT_READY(usage);

        statistics = usage.get();

        // If we meet our usage expectations, we're done!
        if (statistics.cpus_user_time_secs() >= 0.125) {
            break;
        }

        os::sleep(Milliseconds(200));
        waited += Milliseconds(200);
    } while (waited < Seconds(1));

    EXPECT_LE(0.125, statistics.cpus_user_time_secs());

    // Ensure all processes are killed.
    AWAIT_READY(launcher.get()->destroy(containerId));

    // Make sure the child was reaped.
    AWAIT_READY(status);

    // Let the isolator clean up.
    AWAIT_READY(isolator.get()->cleanup(containerId));

    delete isolator.get();
    delete launcher.get();
}
Example #20
TEST_F(LimitedCpuIsolatorTest, ROOT_CGROUPS_Cfs)
{
  Flags flags;

  // Enable CFS to cap CPU utilization.
  flags.cgroups_enable_cfs = true;

  Try<Isolator*> isolator = CgroupsCpushareIsolatorProcess::create(flags);
  CHECK_SOME(isolator);

  Try<Launcher*> launcher = LinuxLauncher::create(flags);
  CHECK_SOME(launcher);

  // Set the executor's resources to 0.5 cpu.
  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("cpus:0.5").get());

  ContainerID containerId;
  containerId.set_value("mesos_test_cfs_cpu_limit");

  AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));

  // Generate random numbers to max out a single core. We'll run this for 0.5
  // seconds of wall time so it should consume approximately 250 ms of total
  // cpu time when limited to 0.5 cpu. We use /dev/urandom to prevent blocking
  // on Linux when there's insufficient entropy.
  string command = "cat /dev/urandom > /dev/null & "
    "export MESOS_TEST_PID=$! && "
    "sleep 0.5 && "
    "kill $MESOS_TEST_PID";

  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));

  lambda::function<int()> inChild = lambda::bind(&execute, command, pipes);

  Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
  ASSERT_SOME(pid);

  // Reap the forked child.
  Future<Option<int> > status = process::reap(pid.get());

  // Continue in the parent.
  ::close(pipes[0]);

  // Isolate the forked child.
  AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

  // Now signal the child to continue.
  int buf = 0;
  ASSERT_LT(0, ::write(pipes[1], &buf, sizeof(buf)));
  ::close(pipes[1]);

  // Wait for the command to complete.
  AWAIT_READY(status);

  Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
  AWAIT_READY(usage);

  // Expect that no more than 300 ms of cpu time has been consumed. We also
  // check that at least 50 ms of cpu time has been consumed so this test will
  // fail if the host system is very heavily loaded. This behavior is correct
  // because under such conditions we aren't actually testing the CFS cpu
  // limiter.
  double cpuTime = usage.get().cpus_system_time_secs() +
                   usage.get().cpus_user_time_secs();

  EXPECT_GE(0.30, cpuTime);
  EXPECT_LE(0.05, cpuTime);

  // Ensure all processes are killed.
  AWAIT_READY(launcher.get()->destroy(containerId));

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
  delete launcher.get();
}
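// The cap this test relies on is plain CFS arithmetic: with a
// bandwidth period of 100ms (the conventional default; an assumption
// here, not read from the isolator), N cpus become a quota of
// N * 100ms of cpu time per period. A minimal sketch:
#include <stdint.h>

const uint64_t CPU_CFS_PERIOD_US = 100000;  // 100ms period (assumed).

uint64_t cfsQuotaUs(double cpus)
{
  // E.g. cfsQuotaUs(0.5) == 50000us per period, so over the ~0.5s the
  // command above runs, roughly 250ms of cpu time can be consumed.
  return static_cast<uint64_t>(cpus * CPU_CFS_PERIOD_US);
}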
Example #21
TYPED_TEST(MemIsolatorTest, MemUsage)
{
  Flags flags;

  Try<Isolator*> isolator = TypeParam::create(flags);
  CHECK_SOME(isolator);

  // A PosixLauncher is sufficient even when testing a cgroups isolator.
  Try<Launcher*> launcher = PosixLauncher::create(flags);
  CHECK_SOME(launcher);

  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("mem:1024").get());

  ContainerID containerId;
  containerId.set_value("memory_usage");

  AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));

  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));

  lambda::function<int()> inChild = lambda::bind(
      &consumeMemory,
      Megabytes(256),
      Seconds(10),
      pipes);

  Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
  ASSERT_SOME(pid);

  // Set up the reaper to wait on the forked child.
  Future<Option<int> > status = process::reap(pid.get());

  // Continue in the parent.
  ::close(pipes[0]);

  // Isolate the forked child.
  AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

  // Now signal the child to continue.
  int buf = 0;
  ASSERT_LT(0, ::write(pipes[1], &buf, sizeof(buf)));
  ::close(pipes[1]);

  // Wait up to 5 seconds for the child process to consume 256 MB of memory.
  ResourceStatistics statistics;
  Bytes threshold = Megabytes(256);
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    if (statistics.mem_rss_bytes() >= threshold.bytes()) {
      break;
    }

    os::sleep(Seconds(1));
    waited += Seconds(1);
  } while (waited < Seconds(5));

  EXPECT_LE(threshold.bytes(), statistics.mem_rss_bytes());

  // Ensure all processes are killed.
  AWAIT_READY(launcher.get()->destroy(containerId));

  // Make sure the child was reaped.
  AWAIT_READY(status);

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
  delete launcher.get();
}
// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "copy";
  flags.work_dir = "work_dir";

  Try<Owned<Provisioner>> provisioner1 = Provisioner::create(flags);
  ASSERT_SOME(provisioner1);

  Try<string> createImage = createTestImage(
      flags.appc_store_dir,
      getManifest());

  ASSERT_SOME(createImage);

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner1.get()->recover({}, {}));

  Image image;
  image.mutable_appc()->CopyFrom(getTestImage());

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  Future<slave::ProvisionInfo> provisionInfo =
    provisioner1.get()->provision(containerId, image);
  AWAIT_READY(provisionInfo);

  // Create a new provisioner to recover the state from the container.
  Try<Owned<Provisioner>> provisioner2 = Provisioner::create(flags);
  ASSERT_SOME(provisioner2);

  mesos::slave::ContainerState state;

  // Here we are using an ExecutorInfo in the ContainerState without a
  // ContainerInfo. This is the situation where the Image is specified
  // via --default_container_info so it's not part of the recovered
  // ExecutorInfo.
  state.mutable_container_id()->CopyFrom(containerId);

  AWAIT_READY(provisioner2.get()->recover({state}, {}));

  // It's possible for the user to provision two different rootfses
  // from the same image.
  AWAIT_READY(provisioner2.get()->provision(containerId, image));

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());

  Future<bool> destroy = provisioner2.get()->destroy(containerId);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
// This test verifies that the provisioner can provision a rootfs
// from an image that has already been put into the store directory.
TEST_F(ProvisionerAppcTest, ROOT_Provision)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "bind";
  flags.work_dir = "work_dir";

  Try<Owned<Provisioner>> provisioner = Provisioner::create(flags);
  ASSERT_SOME(provisioner);

  Try<string> createImage = createTestImage(
      flags.appc_store_dir,
      getManifest());

  ASSERT_SOME(createImage);

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner.get()->recover({}, {}));

  // Simulate a task that requires an image.
  Image image;
  image.mutable_appc()->CopyFrom(getTestImage());

  ContainerID containerId;
  containerId.set_value("12345");

  Future<slave::ProvisionInfo> provisionInfo =
    provisioner.get()->provision(containerId, image);
  AWAIT_READY(provisionInfo);

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size());
  EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(),
            Path(provisionInfo.get().rootfs).basename());

  Future<bool> destroy = provisioner.get()->destroy(containerId);
  AWAIT_READY(destroy);

  // One rootfs is destroyed.
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
// This test ensures that destroy can be called while in the
// launch loop. The composing containerizer still calls the
// underlying containerizer's destroy (because it's not sure
// whether that containerizer can handle the type of container being
// launched). If the launch is not supported by the first containerizer,
// the composing containerizer should stop the launch loop and
// set the destroy future's value to true.
TEST_F(ComposingContainerizerTest, DestroyDuringUnsupportedLaunchLoop)
{
  vector<Containerizer*> containerizers;

  MockContainerizer* mockContainerizer1 = new MockContainerizer();
  MockContainerizer* mockContainerizer2 = new MockContainerizer();

  containerizers.push_back(mockContainerizer1);
  containerizers.push_back(mockContainerizer2);

  ComposingContainerizer containerizer(containerizers);
  ContainerID containerId;
  containerId.set_value("container");
  TaskInfo taskInfo;
  ExecutorInfo executorInfo;
  SlaveID slaveId;
  std::map<std::string, std::string> environment;

  Promise<bool> launchPromise;

  EXPECT_CALL(*mockContainerizer1, launch(_, _, _, _, _, _, _, _))
    .WillOnce(Return(launchPromise.future()));

  Future<Nothing> destroy;
  Promise<bool> destroyPromise;
  EXPECT_CALL(*mockContainerizer1, destroy(_))
    .WillOnce(DoAll(FutureSatisfy(&destroy),
                    Return(destroyPromise.future())));

  Future<bool> launched = containerizer.launch(
      containerId,
      taskInfo,
      executorInfo,
      "dir",
      "user",
      slaveId,
      environment,
      false);

  Resources resources = Resources::parse("cpus:1;mem:256").get();

  EXPECT_TRUE(launched.isPending());

  Future<bool> destroyed = containerizer.destroy(containerId);

  EXPECT_CALL(*mockContainerizer2, launch(_, _, _, _, _, _, _, _))
    .Times(0);

  // We make sure destroy is called on the first containerizer. The
  // second containerizer shouldn't be called either, since the
  // container is already destroyed.
  AWAIT_READY(destroy);

  launchPromise.set(false);
  destroyPromise.set(false);

  // `launched` should fail and `destroyed` should be true because the
  // destroy stopped the launch from being tried on the second
  // containerizer.
  AWAIT_FAILED(launched);
  AWAIT_EXPECT_EQ(true, destroyed);
}
Example #25
TYPED_TEST(CpuIsolatorTest, SystemCpuUsage)
{
  Flags flags;

  Try<Isolator*> isolator = TypeParam::create(flags);
  CHECK_SOME(isolator);

  // A PosixLauncher is sufficient even when testing a cgroups isolator.
  Try<Launcher*> launcher = PosixLauncher::create(flags);
  CHECK_SOME(launcher);

  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("cpus:1.0").get());

  ContainerID containerId;
  containerId.set_value("system_cpu_usage");

  AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));

  Try<string> dir = os::mkdtemp();
  ASSERT_SOME(dir);
  const string& file = path::join(dir.get(), "mesos_isolator_test_ready");

  // Generating random numbers is done by the kernel and will max out a single
  // core and run almost exclusively in the kernel, i.e., system time.
  string command = "cat /dev/urandom > /dev/null & "
    "touch " + file + "; " // Signals the command is running.
    "sleep 60";

  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));

  lambda::function<int()> inChild = lambda::bind(&execute, command, pipes);

  Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
  ASSERT_SOME(pid);

  // Reap the forked child.
  Future<Option<int> > status = process::reap(pid.get());

  // Continue in the parent.
  ::close(pipes[0]);

  // Isolate the forked child.
  AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

  // Now signal the child to continue.
  int buf = 0;
  ASSERT_LT(0, ::write(pipes[1], &buf, sizeof(buf)));
  ::close(pipes[1]);

  // Wait for the command to start.
  while (!os::exists(file));

  // Wait up to 1 second for the child process to induce 1/8 of a second of
  // system cpu time.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
    AWAIT_READY(usage);

    statistics = usage.get();

    // If we meet our usage expectations, we're done!
    if (statistics.cpus_system_time_secs() >= 0.125) {
      break;
    }

    os::sleep(Milliseconds(200));
    waited += Milliseconds(200);
  } while (waited < Seconds(1));

  EXPECT_LE(0.125, statistics.cpus_system_time_secs());

  // Ensure all processes are killed.
  AWAIT_READY(launcher.get()->destroy(containerId));

  // Make sure the child was reaped.
  AWAIT_READY(status);

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
  delete launcher.get();

  CHECK_SOME(os::rmdir(dir.get()));
}
Example #26
TYPED_TEST(UserCgroupIsolatorTest, ROOT_CGROUPS_UserCgroup)
{
    slave::Flags flags;
    flags.perf_events = "cpu-cycles"; // Needed for CgroupsPerfEventIsolator.

    Try<Isolator*> isolator = TypeParam::create(flags);
    CHECK_SOME(isolator);

    ExecutorInfo executorInfo;
    executorInfo.mutable_resources()->CopyFrom(
        Resources::parse("mem:1024;cpus:1").get()); // For cpu/mem isolators.

    ContainerID containerId;
    containerId.set_value("container");

    AWAIT_READY(isolator.get()->prepare(
                    containerId,
                    executorInfo,
                    os::getcwd(),
                    UNPRIVILEGED_USERNAME));

    // Isolators don't provide a way to determine the cgroups they use
    // so we'll inspect the cgroups for an isolated dummy process.
    pid_t pid = fork();
    if (pid == 0) {
        // Child just sleeps.
        ::sleep(100);

        ABORT("Child process should not reach here");
    }
    ASSERT_GT(pid, 0);

    AWAIT_READY(isolator.get()->isolate(containerId, pid));

    // Get the container's cgroups from /proc/$PID/cgroup. We're only
    // interested in the non-root cgroups, i.e., we exclude those with
    // paths "/", e.g., only cpu and cpuacct from this example:
    // 6:blkio:/
    // 5:perf_event:/
    // 4:memory:/
    // 3:freezer:/
    // 2:cpuacct:/mesos
    // 1:cpu:/mesos
    // awk will then output "cpuacct/mesos\ncpu/mesos" as the cgroup(s).
    ostringstream output;
    Try<int> status = os::shell(
                          &output,
                          "grep -v '/$' /proc/" +
                          stringify(pid) +
                          "/cgroup | awk -F ':' '{print $2$3}'");

    ASSERT_SOME(status);

    // Kill the dummy child process.
    ::kill(pid, SIGKILL);
    int exitStatus;
    EXPECT_NE(-1, ::waitpid(pid, &exitStatus, 0));

    vector<string> cgroups = strings::tokenize(output.str(), "\n");
    ASSERT_FALSE(cgroups.empty());

    foreach (const string& cgroup, cgroups) {
        // Check the user cannot manipulate the container's cgroup control
        // files.
        EXPECT_NE(0, os::system(
                      "su - " + UNPRIVILEGED_USERNAME +
                      " -c 'echo $$ >" +
                      path::join(flags.cgroups_hierarchy, cgroup, "cgroup.procs") +
                      "'"));

        // Check the user can create a cgroup under the container's
        // cgroup.
        string userCgroup = path::join(cgroup, "user");

        EXPECT_EQ(0, os::system(
                      "su - " +
                      UNPRIVILEGED_USERNAME +
                      " -c 'mkdir " +
                      path::join(flags.cgroups_hierarchy, userCgroup) +
                      "'"));

        // Check the user can manipulate control files in the created
        // cgroup.
        EXPECT_EQ(0, os::system(
                      "su - " +
                      UNPRIVILEGED_USERNAME +
                      " -c 'echo $$ >" +
                      path::join(flags.cgroups_hierarchy, userCgroup, "cgroup.procs") +
                      "'"));
    }
}
// This test verifies that the provisioner can provision a rootfs
// from an image that has already been put into the store directory.
TEST_F(ProvisionerAppcTest, ROOT_Provision)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "bind";
  flags.work_dir = "work_dir";

  Fetcher fetcher;

  Try<Owned<Provisioner>> provisioner = Provisioner::create(flags, &fetcher);
  ASSERT_SOME(provisioner);

  // Create a simple image in the store:
  // <store>
  // |--images
  //    |--<id>
  //       |--manifest
  //       |--rootfs/tmp/test
  JSON::Value manifest = JSON::parse(
      "{"
      "  \"acKind\": \"ImageManifest\","
      "  \"acVersion\": \"0.6.1\","
      "  \"name\": \"foo.com/bar\","
      "  \"labels\": ["
      "    {"
      "      \"name\": \"version\","
      "      \"value\": \"1.0.0\""
      "    },"
      "    {"
      "      \"name\": \"arch\","
      "      \"value\": \"amd64\""
      "    },"
      "    {"
      "      \"name\": \"os\","
      "      \"value\": \"linux\""
      "    }"
      "  ],"
      "  \"annotations\": ["
      "    {"
      "      \"name\": \"created\","
      "      \"value\": \"1438983392\""
      "    }"
      "  ]"
      "}").get();

  // The 'imageId' below has the correct format but it's not computed
  // by hashing the tarball of the image. It's OK here as we assume
  // the images under 'images' have passed such check when they are
  // downloaded and validated.
  string imageId =
    "sha512-e77d96aa0240eedf134b8c90baeaf76dca8e78691836301d7498c84020446042e"
    "797b296d6ab296e0954c2626bfb264322ebeb8f447dac4fac6511ea06bc61f0";

  string imagePath = path::join(flags.appc_store_dir, "images", imageId);

  ASSERT_SOME(os::mkdir(path::join(imagePath, "rootfs", "tmp")));
  ASSERT_SOME(
      os::write(path::join(imagePath, "rootfs", "tmp", "test"), "test"));
  ASSERT_SOME(
      os::write(path::join(imagePath, "manifest"), stringify(manifest)));

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner.get()->recover({}, {}));

  // Simulate a task that requires an image.
  Image image;
  image.mutable_appc()->set_name("foo.com/bar");

  ContainerID containerId;
  containerId.set_value("12345");

  Future<string> rootfs = provisioner.get()->provision(containerId, image);
  AWAIT_READY(rootfs);

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size());
  EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(),
            Path(rootfs.get()).basename());

  Future<bool> destroy = provisioner.get()->destroy(containerId);
  AWAIT_READY(destroy);

  // One rootfs is destroyed.
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
Example #28
// A test to verify the number of processes and threads in a
// container.
TEST_F(LimitedCpuIsolatorTest, ROOT_CGROUPS_Pids_and_Tids)
{
  slave::Flags flags;
  flags.cgroups_cpu_enable_pids_and_tids_count = true;

  Try<Isolator*> isolator = CgroupsCpushareIsolatorProcess::create(flags);
  CHECK_SOME(isolator);

  Try<Launcher*> launcher = LinuxLauncher::create(flags);
  CHECK_SOME(launcher);

  ExecutorInfo executorInfo;
  executorInfo.mutable_resources()->CopyFrom(
      Resources::parse("cpus:0.5;mem:512").get());

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  // Use a relative temporary directory so it gets cleaned up
  // automatically with the test.
  Try<string> dir = os::mkdtemp(path::join(os::getcwd(), "XXXXXX"));
  ASSERT_SOME(dir);

  ContainerConfig containerConfig;
  containerConfig.mutable_executor_info()->CopyFrom(executorInfo);
  containerConfig.set_directory(dir.get());

  Future<Option<ContainerLaunchInfo>> prepare =
    isolator.get()->prepare(
        containerId,
        containerConfig);

  AWAIT_READY(prepare);

  // Right after the creation of the cgroup, which happens in
  // 'prepare', we check that it is empty.
  Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
  AWAIT_READY(usage);
  EXPECT_EQ(0U, usage.get().processes());
  EXPECT_EQ(0U, usage.get().threads());

  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));
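
  // The pipe implements a handshake: `childSetup` blocks the child on a
  // read from pipes[0] before it exec's, so the parent can place the
  // pre-exec process into the cgroup first and release it later by
  // writing a byte to pipes[1].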

  vector<string> argv(1);
  argv[0] = "cat";

  Try<pid_t> pid = launcher.get()->fork(
      containerId,
      "cat",
      argv,
      Subprocess::FD(STDIN_FILENO),
      Subprocess::FD(STDOUT_FILENO),
      Subprocess::FD(STDERR_FILENO),
      None(),
      None(),
      lambda::bind(&childSetup, pipes),
      prepare.get().isSome() ? prepare.get().get().namespaces() : 0);

  ASSERT_SOME(pid);

  // Reap the forked child.
  Future<Option<int>> status = process::reap(pid.get());

  // Continue in the parent.
  ASSERT_SOME(os::close(pipes[0]));

  // Before isolation, the cgroup is empty.
  usage = isolator.get()->usage(containerId);
  AWAIT_READY(usage);
  EXPECT_EQ(0U, usage.get().processes());
  EXPECT_EQ(0U, usage.get().threads());

  // Isolate the forked child.
  AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

  // After the isolation, the cgroup is not empty, even though the
  // process hasn't exec'd yet.
  usage = isolator.get()->usage(containerId);
  AWAIT_READY(usage);
  EXPECT_EQ(1U, usage.get().processes());
  EXPECT_EQ(1U, usage.get().threads());

  // Now signal the child to continue.
  char dummy;
  ASSERT_LT(0, ::write(pipes[1], &dummy, sizeof(dummy)));

  ASSERT_SOME(os::close(pipes[1]));

  // Process count should still be 1 since 'cat' is blocked reading
  // from stdin.
  usage = isolator.get()->usage(containerId);
  AWAIT_READY(usage);
  EXPECT_EQ(1U, usage.get().processes());
  EXPECT_EQ(1U, usage.get().threads());

  // Ensure all processes are killed.
  AWAIT_READY(launcher.get()->destroy(containerId));

  // Wait for the command to complete.
  AWAIT_READY(status);

  // After the process is killed, the cgroup should be empty again.
  usage = isolator.get()->usage(containerId);
  AWAIT_READY(usage);
  EXPECT_EQ(0U, usage.get().processes());
  EXPECT_EQ(0U, usage.get().threads());

  // Let the isolator clean up.
  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
  delete launcher.get();
}
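
The `processes()` and `threads()` counts asserted above conceptually come from the container's cgroup membership files. Below is a minimal standalone sketch, assuming a cgroups-v1 hierarchy mounted at /sys/fs/cgroup/cpu and a placeholder cgroup path, that derives the same two numbers by counting the PIDs in 'cgroup.procs' and the TIDs in 'tasks':

#include <fstream>
#include <iostream>
#include <string>

// Count non-empty lines in a file; cgroup membership files list one
// PID (cgroup.procs) or TID (tasks) per line.
static size_t countLines(const std::string& path)
{
  std::ifstream file(path);
  std::string line;
  size_t count = 0;
  while (std::getline(file, line)) {
    if (!line.empty()) {
      ++count;
    }
  }
  return count;
}

int main()
{
  // Placeholder path; the actual cgroup path depends on the hierarchy
  // layout and the container id.
  const std::string cgroup = "/sys/fs/cgroup/cpu/mesos/<container-id>";

  std::cout << "processes: " << countLines(cgroup + "/cgroup.procs") << "\n"
            << "threads:   " << countLines(cgroup + "/tasks") << "\n";

  return 0;
}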
Example #29
TEST_F(PerfEventIsolatorTest, ROOT_CGROUPS_Sample)
{
  Flags flags;

  flags.perf_events = "cycles,task-clock";
  flags.perf_duration = Milliseconds(250);
  flags.perf_interval = Milliseconds(500);

  Try<Isolator*> isolator = CgroupsPerfEventIsolatorProcess::create(flags);
  CHECK_SOME(isolator);

  ExecutorInfo executorInfo;

  ContainerID containerId;
  containerId.set_value("test");

  AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));

  // This first sample is likely to be empty because perf hasn't
  // completed yet, but we should still have the required fields.
  Future<ResourceStatistics> statistics1 = isolator.get()->usage(containerId);
  AWAIT_READY(statistics1);
  ASSERT_TRUE(statistics1.get().has_perf());
  EXPECT_TRUE(statistics1.get().perf().has_timestamp());
  EXPECT_TRUE(statistics1.get().perf().has_duration());

  // Wait until we get the next sample. We use a generous timeout of
  // two seconds because we currently have a one second reap interval;
  // when running perf with perf_duration of 250ms we won't notice the
  // exit for up to one second.
  ResourceStatistics statistics2;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> statistics = isolator.get()->usage(containerId);
    AWAIT_READY(statistics);

    statistics2 = statistics.get();

    ASSERT_TRUE(statistics2.has_perf());

    if (statistics1.get().perf().timestamp() !=
        statistics2.perf().timestamp()) {
      break;
    }

    os::sleep(Milliseconds(250));
    waited += Milliseconds(250);
  } while (waited < Seconds(2));

  EXPECT_NE(statistics1.get().perf().timestamp(),
            statistics2.perf().timestamp());

  EXPECT_TRUE(statistics2.perf().has_cycles());
  EXPECT_LE(0u, statistics2.perf().cycles());

  EXPECT_TRUE(statistics2.perf().has_task_clock());
  EXPECT_LE(0.0, statistics2.perf().task_clock());

  AWAIT_READY(isolator.get()->cleanup(containerId));

  delete isolator.get();
}
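
The perf statistics asserted above ultimately come from running perf(1) against the container. As a rough standalone illustration (not the isolator's actual invocation, which scopes events to the container's cgroup), the same counters can be sampled with `perf stat` in CSV mode over a fixed window:

#include <cstdio>
#include <iostream>
#include <string>

int main()
{
  // -x, selects CSV output; the event list and 250ms window mirror the
  // test's flags. perf writes its counts to stderr, hence the redirect.
  const std::string command =
    "perf stat -x, -e cycles,task-clock -- sleep 0.25 2>&1";

  FILE* pipe = popen(command.c_str(), "r");
  if (pipe == nullptr) {
    return 1;
  }

  // Each line looks roughly like "<count>,<unit>,<event>,...".
  char buffer[256];
  while (fgets(buffer, sizeof(buffer), pipe) != nullptr) {
    std::cout << buffer;
  }

  return pclose(pipe);
}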
// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "copy";
  flags.work_dir = "work_dir";

  Fetcher fetcher;
  Try<Owned<Provisioner>> provisioner1 = Provisioner::create(flags, &fetcher);
  ASSERT_SOME(provisioner1);

  // Create a simple image in the store:
  // <store>
  // |--images
  //    |--<id>
  //       |--manifest
  //       |--rootfs/tmp/test
  JSON::Value manifest = JSON::parse(
      "{"
      "  \"acKind\": \"ImageManifest\","
      "  \"acVersion\": \"0.6.1\","
      "  \"name\": \"foo.com/bar\""
      "}").get();

  // The 'imageId' below has the correct format, but it's not computed
  // by hashing the tarball of the image. That's fine here because we
  // assume the images under 'images' have already passed that check
  // when they were downloaded and validated.
  string imageId =
    "sha512-e77d96aa0240eedf134b8c90baeaf76dca8e78691836301d7498c84020446042e"
    "797b296d6ab296e0954c2626bfb264322ebeb8f447dac4fac6511ea06bc61f0";

  string imagePath = path::join(flags.appc_store_dir, "images", imageId);

  ASSERT_SOME(os::mkdir(path::join(imagePath, "rootfs", "tmp")));
  ASSERT_SOME(
      os::write(path::join(imagePath, "rootfs", "tmp", "test"), "test"));
  ASSERT_SOME(
      os::write(path::join(imagePath, "manifest"), stringify(manifest)));

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner1.get()->recover({}, {}));

  Image image;
  image.mutable_appc()->set_name("foo.com/bar");

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  Future<string> rootfs = provisioner1.get()->provision(containerId, image);
  AWAIT_READY(rootfs);

  // Create a new provisioner to recover the state from the container.
  Try<Owned<Provisioner>> provisioner2 = Provisioner::create(flags, &fetcher);
  ASSERT_SOME(provisioner2);

  mesos::slave::ContainerState state;

  // Here we are using an ExecutorInfo in the ContainerState without a
  // ContainerInfo. This is the situation where the Image is specified
  // via --default_container_info, so it's not part of the recovered
  // ExecutorInfo.
  state.mutable_container_id()->CopyFrom(containerId);

  AWAIT_READY(provisioner2.get()->recover({state}, {}));

  // It's possible for the user to provision two different rootfses
  // from the same image.
  AWAIT_READY(provisioner2.get()->provision(containerId, image));

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());

  Future<bool> destroy = provisioner2.get()->destroy(containerId);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
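
For intuition, `listContainerRootfses` can be pictured as walking a layout of the form <provisioner>/containers/<id>/backends/<backend>/rootfses/<rootfs> and grouping rootfs directories by backend. The sketch below is a standalone approximation under that assumed layout, not Mesos's actual implementation:

#include <filesystem>
#include <iostream>
#include <map>
#include <set>
#include <string>

namespace fs = std::filesystem;

// Map each backend name to the set of rootfs ids provisioned for the
// given container, mirroring the shape of the test's `rootfses` result.
std::map<std::string, std::set<std::string>> listRootfses(
    const fs::path& provisionerDir,
    const std::string& containerId)
{
  std::map<std::string, std::set<std::string>> result;

  const fs::path backends =
    provisionerDir / "containers" / containerId / "backends";

  if (!fs::exists(backends)) {
    return result;
  }

  for (const auto& backend : fs::directory_iterator(backends)) {
    const fs::path rootfses = backend.path() / "rootfses";
    if (!fs::exists(rootfses)) {
      continue;
    }

    for (const auto& rootfs : fs::directory_iterator(rootfses)) {
      result[backend.path().filename().string()]
        .insert(rootfs.path().filename().string());
    }
  }

  return result;
}

int main()
{
  // Placeholder paths matching the flags used in the tests above.
  const auto rootfses = listRootfses("work_dir/provisioner", "12345");

  for (const auto& [backend, ids] : rootfses) {
    std::cout << backend << ": " << ids.size() << " rootfs(es)\n";
  }

  return 0;
}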