// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "copy";
  flags.work_dir = "work_dir";

  Try<Owned<Provisioner>> provisioner1 = Provisioner::create(flags);
  ASSERT_SOME(provisioner1);

  Try<string> createImage = createTestImage(
      flags.appc_store_dir,
      getManifest());

  ASSERT_SOME(createImage);

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner1.get()->recover({}, {}));

  Image image;
  image.mutable_appc()->CopyFrom(getTestImage());

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  Future<slave::ProvisionInfo> provisionInfo =
    provisioner1.get()->provision(containerId, image);
  AWAIT_READY(provisionInfo);

  // Create a new provisioner to recover the state from the container.
  Try<Owned<Provisioner>> provisioner2 = Provisioner::create(flags);
  ASSERT_SOME(provisioner2);

  mesos::slave::ContainerState state;

  // Here we are using an ExecutorInfo in the ContainerState without a
  // ContainerInfo. This is the situation where the Image is specified
  // via --default_container_info so it's not part of the recovered
  // ExecutorInfo.
  state.mutable_container_id()->CopyFrom(containerId);

  AWAIT_READY(provisioner2.get()->recover({state}, {}));

  // It's possible for the user to provision two different rootfses
  // from the same image.
  AWAIT_READY(provisioner2.get()->provision(containerId, image));

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());

  Future<bool> destroy = provisioner2.get()->destroy(containerId);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
Example #2
Try<RunState> RunState::recover(
    const string& rootDir,
    const SlaveID& slaveId,
    const FrameworkID& frameworkId,
    const ExecutorID& executorId,
    const ContainerID& containerId,
    bool strict)
{
  RunState state;
  state.id = containerId;
  string message;

  // See if the sentinel file exists. This is done first so it is
  // known even if partial state is returned, e.g., if the libprocess
  // pid file is not recovered. It indicates the slave removed the
  // executor.
  string path = paths::getExecutorSentinelPath(
      rootDir, slaveId, frameworkId, executorId, containerId);

  state.completed = os::exists(path);

  // Find the tasks.
  Try<list<string> > tasks = paths::getTaskPaths(
      rootDir,
      slaveId,
      frameworkId,
      executorId,
      containerId);

  if (tasks.isError()) {
    return Error(
        "Failed to find tasks for executor run " + containerId.value() +
        ": " + tasks.error());
  }

  // Recover tasks.
  foreach (const string& path, tasks.get()) {
    TaskID taskId;
    taskId.set_value(Path(path).basename());

    Try<TaskState> task = TaskState::recover(
        rootDir, slaveId, frameworkId, executorId, containerId, taskId, strict);

    if (task.isError()) {
      return Error(
          "Failed to recover task " + taskId.value() + ": " + task.error());
    }

    state.tasks[taskId] = task.get();
    state.errors += task.get().errors;
  }

  // Read the forked pid.
  path = paths::getForkedPidPath(
      rootDir, slaveId, frameworkId, executorId, containerId);
  if (!os::exists(path)) {
    // This could happen if the slave died before the isolator
    // checkpointed the forked pid.
    LOG(WARNING) << "Failed to find executor forked pid file '" << path << "'";
    return state;
  }

  Try<string> pid = os::read(path);

  if (pid.isError()) {
    message = "Failed to read executor forked pid from '" + path +
              "': " + pid.error();

    if (strict) {
      return Error(message);
    } else {
      LOG(WARNING) << message;
      state.errors++;
      return state;
    }
  }

  if (pid.get().empty()) {
    // This could happen if the slave died after opening the file for
    // writing but before it checkpointed anything.
    LOG(WARNING) << "Found empty executor forked pid file '" << path << "'";
    return state;
  }

  Try<pid_t> forkedPid = numify<pid_t>(pid.get());
  if (forkedPid.isError()) {
    return Error("Failed to parse forked pid " + pid.get() +
                 ": " + forkedPid.error());
  }

  state.forkedPid = forkedPid.get();

  // Read the libprocess pid.
  path = paths::getLibprocessPidPath(
      rootDir, slaveId, frameworkId, executorId, containerId);

  if (!os::exists(path)) {
    // This could happen if the slave died before the executor
    // registered with the slave.
    LOG(WARNING)
      << "Failed to find executor libprocess pid file '" << path << "'";
    return state;
  }

  pid = os::read(path);

  if (pid.isError()) {
    message = "Failed to read executor libprocess pid from '" + path +
              "': " + pid.error();

    if (strict) {
      return Error(message);
    } else {
      LOG(WARNING) << message;
      state.errors++;
      return state;
    }
  }

  if (pid.get().empty()) {
    // This could happen if the slave died after opening the file for
    // writing but before it checkpointed anything.
    LOG(WARNING) << "Found empty executor libprocess pid file '" << path << "'";
    return state;
  }

  state.libprocessPid = process::UPID(pid.get());

  return state;
}
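// The difference between strict and non-strict recovery is easiest to see
// at a call site. Below is a hypothetical usage sketch (the wrapper function
// and its arguments are illustrative, not taken from the source): in strict
// mode a checkpoint read failure becomes an Error, while in non-strict mode
// partial state is returned and 'errors' is incremented instead.
void recoverRunSketch(
    const string& rootDir,
    const SlaveID& slaveId,
    const FrameworkID& frameworkId,
    const ExecutorID& executorId,
    const ContainerID& containerId)
{
  Try<RunState> run = RunState::recover(
      rootDir, slaveId, frameworkId, executorId, containerId, false);

  if (run.isError()) {
    LOG(ERROR) << "Failed to recover executor run: " << run.error();
    return;
  }

  if (run.get().errors > 0) {
    LOG(WARNING) << "Recovered executor run with "
                 << run.get().errors << " checkpoint error(s)";
  }
}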
// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = COPY_BACKEND;
  flags.work_dir = path::join(sandbox.get(), "work_dir");

  Try<Owned<Provisioner>> provisioner = Provisioner::create(flags);
  ASSERT_SOME(provisioner);

  Try<string> createImage = createTestImage(
      flags.appc_store_dir,
      getManifest());

  ASSERT_SOME(createImage);

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner.get()->recover({}));

  Image image;
  image.mutable_appc()->CopyFrom(getTestImage());

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  Future<slave::ProvisionInfo> provisionInfo =
    provisioner.get()->provision(containerId, image);
  AWAIT_READY(provisionInfo);

  provisioner->reset();

  // Create a new provisioner to recover the state from the container.
  provisioner = Provisioner::create(flags);
  ASSERT_SOME(provisioner);

  AWAIT_READY(provisioner.get()->recover({containerId}));

  // It's possible for the user to provision two different rootfses
  // from the same image.
  AWAIT_READY(provisioner.get()->provision(containerId, image));

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());

  Future<bool> destroy = provisioner.get()->destroy(containerId);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
TEST(AgentCallValidationTest, LaunchNestedContainerSession)
{
  // Missing `launch_nested_container_session`.
  agent::Call call;
  call.set_type(agent::Call::LAUNCH_NESTED_CONTAINER_SESSION);

  Option<Error> error = validation::agent::call::validate(call);
  EXPECT_SOME(error);

  // `container_id` is not valid.
  ContainerID badContainerId;
  badContainerId.set_value("no spaces allowed");

  agent::Call::LaunchNestedContainerSession* launch =
    call.mutable_launch_nested_container_session();

  launch->mutable_container_id()->CopyFrom(badContainerId);

  error = validation::agent::call::validate(call);
  EXPECT_SOME(error);

  // Valid `container_id` but missing `container_id.parent`.
  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  launch->mutable_container_id()->CopyFrom(containerId);

  error = validation::agent::call::validate(call);
  EXPECT_SOME(error);

  // Valid `container_id.parent` but invalid `command.environment`. Set
  // an invalid environment variable to check that the common validation
  // code for the command's environment is being executed.
  ContainerID parentContainerId;
  parentContainerId.set_value(UUID::random().toString());

  launch->mutable_container_id()->mutable_parent()->CopyFrom(parentContainerId);
  launch->mutable_command()->CopyFrom(createCommandInfo("exit 0"));

  Environment::Variable* variable = launch
    ->mutable_command()
    ->mutable_environment()
    ->mutable_variables()
    ->Add();
  variable->set_name("ENV_VAR_KEY");
  variable->set_type(mesos::Environment::Variable::VALUE);

  error = validation::agent::call::validate(call);
  EXPECT_SOME(error);
  EXPECT_EQ(
      "'launch_nested_container_session.command' is invalid: Environment "
      "variable 'ENV_VAR_KEY' of type 'VALUE' must have a value set",
      error->message);

  // Test the valid case.
  variable->set_value("env_var_value");
  error = validation::agent::call::validate(call);
  EXPECT_NONE(error);

  // Any number of parents is valid.
  ContainerID grandparentContainerId;
  grandparentContainerId.set_value(UUID::random().toString());

  launch->mutable_container_id()->mutable_parent()->mutable_parent()->CopyFrom(
      grandparentContainerId);

  error = validation::agent::call::validate(call);
  EXPECT_NONE(error);
}
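// For reference, a minimal sketch (not part of the original suite) of a call
// that passes validation in one go, assuming the same helpers
// (createCommandInfo) and validation entry point exercised above.
TEST(AgentCallValidationTest, LaunchNestedContainerSessionValidSketch)
{
  agent::Call call;
  call.set_type(agent::Call::LAUNCH_NESTED_CONTAINER_SESSION);

  agent::Call::LaunchNestedContainerSession* launch =
    call.mutable_launch_nested_container_session();

  launch->mutable_container_id()->set_value(UUID::random().toString());
  launch->mutable_container_id()->mutable_parent()->set_value(
      UUID::random().toString());
  launch->mutable_command()->CopyFrom(createCommandInfo("exit 0"));

  EXPECT_NONE(validation::agent::call::validate(call));
}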
// This test verifies that sandbox path volume allows two containers
// nested under the same parent container to share data.
// TODO(jieyu): Parameterize this test to test both linux and posix
// launcher and filesystem isolator.
TEST_F(VolumeSandboxPathIsolatorTest, SharedVolume)
{
  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "volume/sandbox_path";

  Fetcher fetcher;

  Try<MesosContainerizer*> create = MesosContainerizer::create(
      flags,
      true,
      &fetcher);

  ASSERT_SOME(create);

  Owned<MesosContainerizer> containerizer(create.get());

  SlaveState state;
  state.id = SlaveID();

  AWAIT_READY(containerizer->recover(state));

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  ExecutorInfo executor = createExecutorInfo("executor", "sleep 99", "cpus:1");

  Try<string> directory = environment->mkdtemp();
  ASSERT_SOME(directory);

  Future<bool> launch = containerizer->launch(
      containerId,
      None(),
      executor,
      directory.get(),
      None(),
      state.id,
      map<string, string>(),
      true); // TODO(benh): Ever want to check not-checkpointing?

  AWAIT_ASSERT_TRUE(launch);

  ContainerID nestedContainerId1;
  nestedContainerId1.mutable_parent()->CopyFrom(containerId);
  nestedContainerId1.set_value(UUID::random().toString());

  ContainerInfo containerInfo;
  containerInfo.set_type(ContainerInfo::MESOS);

  Volume* volume = containerInfo.add_volumes();
  volume->set_mode(Volume::RW);
  volume->set_container_path("parent");

  Volume::Source* source = volume->mutable_source();
  source->set_type(Volume::Source::SANDBOX_PATH);

  Volume::Source::SandboxPath* sandboxPath = source->mutable_sandbox_path();
  sandboxPath->set_type(Volume::Source::SandboxPath::PARENT);
  sandboxPath->set_path("shared");

  launch = containerizer->launch(
      nestedContainerId1,
      createCommandInfo("touch parent/file; sleep 1000"),
      containerInfo,
      None(),
      state.id);

  AWAIT_ASSERT_TRUE(launch);

  ContainerID nestedContainerId2;
  nestedContainerId2.mutable_parent()->CopyFrom(containerId);
  nestedContainerId2.set_value(UUID::random().toString());

  launch = containerizer->launch(
      nestedContainerId2,
      createCommandInfo(
        "while true; do if [ -f parent/file ]; then exit 0; fi; done"),
      containerInfo,
      None(),
      state.id);

  AWAIT_ASSERT_TRUE(launch);

  Future<Option<ContainerTermination>> wait =
    containerizer->wait(nestedContainerId2);

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait.get()->has_status());
  EXPECT_WEXITSTATUS_EQ(0, wait.get()->status());

  wait = containerizer->wait(containerId);

  containerizer->destroy(containerId);

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait.get()->has_status());
  EXPECT_WTERMSIG_EQ(SIGKILL, wait.get()->status());
}
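// For orientation, a rough sketch of the sandbox layout the test above sets
// up (paths are illustrative): the PARENT-typed SANDBOX_PATH volume exposes
// the 'shared' directory of the parent container's sandbox at 'parent'
// inside each nested container's sandbox (via a bind mount or symlink,
// depending on the platform), so the file created by the first nested
// container becomes visible to the second:
//
//   <parent sandbox>/shared/file   <- written by nested container 1
//   <nested 1 sandbox>/parent      -> <parent sandbox>/shared
//   <nested 2 sandbox>/parent      -> <parent sandbox>/shared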
Example #6
  Future<bool> launch(
      const ContainerID& containerId,
      const ContainerConfig& containerConfig,
      const map<string, string>& environment,
      const Option<string>& pidCheckpointPath)
  {
    CHECK(!terminatedContainers.contains(containerId))
      << "Failed to launch nested container " << containerId
      << " for executor '" << containerConfig.executor_info().executor_id()
      << "' of framework " << containerConfig.executor_info().framework_id()
      << " because this ContainerID is being re-used with"
      << " a previously terminated container";

    CHECK(!containers_.contains(containerId))
      << "Failed to launch container " << containerId
      << " for executor '" << containerConfig.executor_info().executor_id()
      << "' of framework " << containerConfig.executor_info().framework_id()
      << " because it is already launched";

    containers_[containerId] = Owned<ContainerData>(new ContainerData());

    if (containerId.has_parent()) {
      // Launching a nested container via the test containerizer is a
      // no-op for now.
      return true;
    }

    CHECK(executors.contains(containerConfig.executor_info().executor_id()))
      << "Failed to launch executor '"
      << containerConfig.executor_info().executor_id()
      << "' of framework " << containerConfig.executor_info().framework_id()
      << " because it is unknown to the containerizer";

    containers_.at(containerId)->executorId =
      containerConfig.executor_info().executor_id();

    containers_.at(containerId)->frameworkId =
      containerConfig.executor_info().framework_id();

    // We need to synchronize all reads and writes to the environment
    // as this is global state.
    //
    // TODO(jmlvanre): Even this is not sufficient, as other aspects
    // of the code may read an environment variable while we are
    // manipulating it. The better solution is to pass the environment
    // variables into the fork, or to set them on the command line.
    // See MESOS-3475.
    static std::mutex mutex;

    synchronized(mutex) {
      // Since the constructor for `MesosExecutorDriver` reads
      // environment variables to load flags, even it needs to
      // be within this synchronization section.
      //
      // Prepare additional environment variables for the executor.
      // TODO(benh): Need to get flags passed into the TestContainerizer
      // in order to properly use here.
      slave::Flags flags;
      flags.recovery_timeout = Duration::zero();

      // We need to save the original set of environment variables so we
      // can reset the environment after calling 'driver->start()' below.
      hashmap<string, string> original = os::environment();

      foreachpair (const string& name, const string& variable, environment) {
        os::setenv(name, variable);
      }

      // TODO(benh): Can this be removed and done exclusively in the
      // 'executorEnvironment()' function? There are other places in the
      // code where we do this as well and it's likely we can do this once
      // in 'executorEnvironment()'.
      foreach (const Environment::Variable& variable,
               containerConfig.executor_info()
                 .command().environment().variables()) {
        os::setenv(variable.name(), variable.value());
      }

      os::setenv("MESOS_LOCAL", "1");

      const Owned<ExecutorData>& executorData =
        executors.at(containerConfig.executor_info().executor_id());

      if (executorData->executor != nullptr) {
        executorData->driver = Owned<MesosExecutorDriver>(
            new MesosExecutorDriver(executorData->executor));
        executorData->driver->start();
      } else {
        shared_ptr<v1::MockHTTPExecutor> executor =
          executorData->v1ExecutorMock;
        executorData->v1Library = Owned<v1::executor::TestMesos>(
          new v1::executor::TestMesos(ContentType::PROTOBUF, executor));
      }

      os::unsetenv("MESOS_LOCAL");

      // Unset the environment variables we set by resetting them to their
      // original values and also removing any that were not part of the
      // original environment.
      foreachpair (const string& name, const string& value, original) {
        os::setenv(name, value);
      }
// This test verifies that the provisioner can provision a rootfs
// from an image that has already been put into the store directory.
TEST_F(ProvisionerAppcTest, ROOT_Provision)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "bind";
  flags.work_dir = "work_dir";

  Fetcher fetcher;

  Try<Owned<Provisioner>> provisioner = Provisioner::create(flags, &fetcher);
  ASSERT_SOME(provisioner);

  // Create a simple image in the store:
  // <store>
  // |--images
  //    |--<id>
  //       |--manifest
  //       |--rootfs/tmp/test
  JSON::Value manifest = JSON::parse(
      "{"
      "  \"acKind\": \"ImageManifest\","
      "  \"acVersion\": \"0.6.1\","
      "  \"name\": \"foo.com/bar\","
      "  \"labels\": ["
      "    {"
      "      \"name\": \"version\","
      "      \"value\": \"1.0.0\""
      "    },"
      "    {"
      "      \"name\": \"arch\","
      "      \"value\": \"amd64\""
      "    },"
      "    {"
      "      \"name\": \"os\","
      "      \"value\": \"linux\""
      "    }"
      "  ],"
      "  \"annotations\": ["
      "    {"
      "      \"name\": \"created\","
      "      \"value\": \"1438983392\""
      "    }"
      "  ]"
      "}").get();

  // The 'imageId' below has the correct format, but it's not computed
  // by hashing the tarball of the image. That's OK here, as we assume
  // the images under 'images' have already passed such a check when
  // they were downloaded and validated.
  string imageId =
    "sha512-e77d96aa0240eedf134b8c90baeaf76dca8e78691836301d7498c84020446042e"
    "797b296d6ab296e0954c2626bfb264322ebeb8f447dac4fac6511ea06bc61f0";

  string imagePath = path::join(flags.appc_store_dir, "images", imageId);

  ASSERT_SOME(os::mkdir(path::join(imagePath, "rootfs", "tmp")));
  ASSERT_SOME(
      os::write(path::join(imagePath, "rootfs", "tmp", "test"), "test"));
  ASSERT_SOME(
      os::write(path::join(imagePath, "manifest"), stringify(manifest)));

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner.get()->recover({}, {}));

  // Simulate a task that requires an image.
  Image image;
  image.mutable_appc()->set_name("foo.com/bar");

  ContainerID containerId;
  containerId.set_value("12345");

  Future<string> rootfs = provisioner.get()->provision(containerId, image);
  AWAIT_READY(rootfs);

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size());
  EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(),
            Path(rootfs.get()).basename());

  Future<bool> destroy = provisioner.get()->destroy(containerId);
  AWAIT_READY(destroy);

  // One rootfs is destroyed.
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
Example #8
Future<Nothing> NetworkCniIsolatorProcess::_attach(
    const ContainerID& containerId,
    const string& networkName,
    const string& plugin,
    const tuple<Future<Option<int>>, Future<string>>& t)
{
  CHECK(infos.contains(containerId));
  CHECK(infos[containerId]->containerNetworks.contains(networkName));

  Future<Option<int>> status = std::get<0>(t);
  if (!status.isReady()) {
    return Failure(
        "Failed to get the exit status of the CNI plugin '" +
        plugin + "' subprocess: " +
        (status.isFailed() ? status.failure() : "discarded"));
  }

  if (status->isNone()) {
    return Failure(
        "Failed to reap the CNI plugin '" + plugin + "' subprocess");
  }

  // The CNI plugin prints its result (on success) or an error (on
  // failure) to stdout.
  Future<string> output = std::get<1>(t);
  if (!output.isReady()) {
    return Failure(
        "Failed to read stdout from the CNI plugin '" +
        plugin + "' subprocess: " +
        (output.isFailed() ? output.failure() : "discarded"));
  }

  if (status.get() != 0) {
    return Failure(
        "The CNI plugin '" + plugin + "' failed to attach container " +
        containerId.value() + " to CNI network '" + networkName +
        "': " + output.get());
  }

  // Parse the output of CNI plugin.
  Try<spec::NetworkInfo> parse = spec::parseNetworkInfo(output.get());
  if (parse.isError()) {
    return Failure(
        "Failed to parse the output of the CNI plugin '" +
        plugin + "': " + parse.error());
  }

  if (parse.get().has_ip4()) {
    LOG(INFO) << "Got assigned IPv4 address '" << parse.get().ip4().ip()
              << "' from CNI network '" << networkName
              << "' for container " << containerId;
  }

  if (parse.get().has_ip6()) {
    LOG(INFO) << "Got assigned IPv6 address '" << parse.get().ip6().ip()
              << "' from CNI network '" << networkName
              << "' for container " << containerId;
  }

  // Checkpoint the output of CNI plugin.
  // The destruction of the container cannot happen in the middle of
  // 'attach()' and '_attach()' because the containerizer will wait
  // for 'isolate()' to finish before destroying the container.
  ContainerNetwork& containerNetwork =
      infos[containerId]->containerNetworks[networkName];

  const string networkInfoPath = paths::getNetworkInfoPath(
      rootDir.get(),
      containerId.value(),
      networkName,
      containerNetwork.ifName);

  Try<Nothing> write = os::write(networkInfoPath, output.get());
  if (write.isError()) {
    return Failure(
        "Failed to checkpoint the output of CNI plugin'" +
        output.get() + "': " + write.error());
  }

  containerNetwork.cniNetworkInfo = parse.get();

  return Nothing();
}
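// For context, a hedged example of the kind of output an old-style CNI
// plugin might print to stdout on success; the exact JSON is illustrative,
// but 'spec::parseNetworkInfo' above expects the pre-0.3.x CNI result
// format with optional 'ip4'/'ip6' blocks:
//
//   {
//     "cniVersion": "0.2.0",
//     "ip4": { "ip": "192.168.1.2/24" }
//   }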
Example #9
Future<ExecutorInfo> ExternalContainerizerProcess::launch(
    const ContainerID& containerId,
    const TaskInfo& taskInfo,
    const FrameworkID& frameworkId,
    const std::string& directory,
    const Option<std::string>& user,
    const SlaveID& slaveId,
    const PID<Slave>& slavePid,
    bool checkpoint)
{
  LOG(INFO) << "Launching container '" << containerId << "'";

  // Get the executor from our task. If no executor is associated with
  // the given task, this function renders an ExecutorInfo using the
  // mesos-executor as its command.
  ExecutorInfo executor = containerExecutorInfo(flags, taskInfo, frameworkId);
  executor.mutable_resources()->MergeFrom(taskInfo.resources());

  if (containers.contains(containerId)) {
    return Failure("Cannot start already running container '"
      + containerId.value() + "'");
  }

  sandboxes.put(containerId, Owned<Sandbox>(new Sandbox(directory, user)));

  map<string, string> environment = executorEnvironment(
      executor,
      directory,
      slaveId,
      slavePid,
      checkpoint,
      flags.recovery_timeout);

  if (!flags.hadoop_home.empty()) {
    environment["HADOOP_HOME"] = flags.hadoop_home;
  }

  TaskInfo task;
  task.CopyFrom(taskInfo);
  CommandInfo* command = task.has_executor()
    ? task.mutable_executor()->mutable_command()
    : task.mutable_command();
  // When the selected command has no container attached, use the
  // default from the slave startup flags, if available.
  if (!command->has_container()) {
    if (flags.default_container_image.isSome()) {
      command->mutable_container()->set_image(
          flags.default_container_image.get());
    } else {
      LOG(INFO) << "No container specified in task and no default given. "
                << "The external containerizer will have to fill in "
                << "defaults.";
    }
  }

  ExternalTask external;
  external.mutable_task()->CopyFrom(task);
  external.set_mesos_executor_path(
      path::join(flags.launcher_dir, "mesos-executor"));

  stringstream output;
  external.SerializeToOstream(&output);

  Try<Subprocess> invoked = invoke(
      "launch",
      containerId,
      output.str(),
      environment);

  if (invoked.isError()) {
    return Failure("Launch of container '" + containerId.value()
      + "' failed (error: " + invoked.error() + ")");
  }

  // Record the process.
  containers.put(
      containerId,
      Owned<Container>(new Container(invoked.get().pid())));

  VLOG(2) << "Now awaiting data from pipe...";

  // Read from the result-pipe and invoke callbacks when reaching EOF.
  return await(read(invoked.get().out()), invoked.get().status())
    .then(defer(
        PID<ExternalContainerizerProcess>(this),
        &ExternalContainerizerProcess::_launch,
        containerId,
        frameworkId,
        executor,
        slaveId,
        checkpoint,
        lambda::_1));
}
Example #10
inline std::size_t hash_value(const ContainerID& containerId)
{
  size_t seed = 0;
  boost::hash_combine(seed, containerId.value());
  return seed;
}
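// Because the overload above is found via argument-dependent lookup,
// boost::hash<ContainerID> can use it directly. The sketch below is
// illustrative only. Note that only 'value()' feeds the hash, so nested
// ContainerIDs that differ only in their parents fall into the same bucket
// and are disambiguated by operator== (see Example #15).
size_t hashContainerIdSketch()
{
  ContainerID containerId;
  containerId.set_value("redis");

  boost::hash<ContainerID> hasher;
  return hasher(containerId);  // Equivalent to hash_value(containerId).
}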
Example #11
Future<Nothing> NetworkCniIsolatorProcess::attach(
    const ContainerID& containerId,
    const std::string& networkName,
    const std::string& netNsHandle)
{
  CHECK(infos.contains(containerId));
  CHECK(infos[containerId]->containerNetworks.contains(networkName));

  const ContainerNetwork& containerNetwork =
      infos[containerId]->containerNetworks[networkName];

  const string ifDir = paths::getInterfaceDir(
      rootDir.get(),
      containerId.value(),
      networkName,
      containerNetwork.ifName);

  Try<Nothing> mkdir = os::mkdir(ifDir);
  if (mkdir.isError()) {
    return Failure(
        "Failed to create interface directory for the interface '" +
        containerNetwork.ifName + "' of the network '" +
        containerNetwork.networkName + "': " + mkdir.error());
  }

  // Prepare environment variables for CNI plugin.
  map<string, string> environment;
  environment["CNI_COMMAND"] = "ADD";
  environment["CNI_CONTAINERID"] = containerId.value();
  environment["CNI_PATH"] = pluginDir.get();
  environment["CNI_IFNAME"] = containerNetwork.ifName;
  environment["CNI_NETNS"] = netNsHandle;

  // Some CNI plugins need to run "iptables" to set up IP Masquerade,
  // so we need to set the "PATH" environment variable so that the
  // plugin can locate the "iptables" executable file.
  Option<string> value = os::getenv("PATH");
  if (value.isSome()) {
    environment["PATH"] = value.get();
  } else {
    environment["PATH"] =
        "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin";
  }

  const NetworkConfigInfo& networkConfig =
      networkConfigs[containerNetwork.networkName];

  // Invoke the CNI plugin.
  const string& plugin = networkConfig.config.type();
  Try<Subprocess> s = subprocess(
      path::join(pluginDir.get(), plugin),
      {plugin},
      Subprocess::PATH(networkConfig.path),
      Subprocess::PIPE(),
      Subprocess::PATH("/dev/null"),
      NO_SETSID,
      None(),
      environment);

  if (s.isError()) {
    return Failure(
        "Failed to execute the CNI plugin '" + plugin + "': " + s.error());
  }

  return await(s->status(), io::read(s->out().get()))
    .then(defer(
        PID<NetworkCniIsolatorProcess>(this),
        &NetworkCniIsolatorProcess::_attach,
        containerId,
        networkName,
        plugin,
        lambda::_1));
}
// This test verifies that the image specified in the volume will be
// properly provisioned and mounted into the container if the container
// root filesystem is not specified.
TEST_P(VolumeImageIsolatorTest, ROOT_ImageInVolumeWithoutRootFilesystem)
{
  string registry = path::join(sandbox.get(), "registry");
  AWAIT_READY(DockerArchive::create(registry, "test_image"));

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "filesystem/linux,volume/image,docker/runtime";
  flags.docker_registry = registry;
  flags.docker_store_dir = path::join(sandbox.get(), "store");
  flags.image_providers = "docker";

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, true, &fetcher);

  ASSERT_SOME(create);

  Owned<Containerizer> containerizer(create.get());

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  ContainerInfo container = createContainerInfo(
      None(),
      {createVolumeFromDockerImage("rootfs", "test_image", Volume::RW)});

  CommandInfo command = createCommandInfo("test -d rootfs/bin");

  ExecutorInfo executor = createExecutorInfo(
      "test_executor",
      nesting ? createCommandInfo("sleep 1000") : command);

  if (!nesting) {
    executor.mutable_container()->CopyFrom(container);
  }

  string directory = path::join(flags.work_dir, "sandbox");
  ASSERT_SOME(os::mkdir(directory));

  Future<bool> launch = containerizer->launch(
      containerId,
      None(),
      executor,
      directory,
      None(),
      SlaveID(),
      map<string, string>(),
      false);

  AWAIT_ASSERT_TRUE(launch);

  Future<Option<ContainerTermination>> wait = containerizer->wait(containerId);

  if (nesting) {
    ContainerID nestedContainerId;
    nestedContainerId.mutable_parent()->CopyFrom(containerId);
    nestedContainerId.set_value(UUID::random().toString());

    launch = containerizer->launch(
        nestedContainerId,
        command,
        container,
        None(),
        SlaveID());

    AWAIT_ASSERT_TRUE(launch);

    wait = containerizer->wait(nestedContainerId);
  }

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait->get().has_status());
  EXPECT_WEXITSTATUS_EQ(0, wait->get().status());

  if (nesting) {
    wait = containerizer->wait(containerId);

    containerizer->destroy(containerId);

    AWAIT_READY(wait);
    ASSERT_SOME(wait.get());
    ASSERT_TRUE(wait->get().has_status());
    EXPECT_WTERMSIG_EQ(SIGKILL, wait.get()->status());
  }
}
Example #13
Future<Nothing> LinuxFilesystemIsolatorProcess::update(
    const ContainerID& containerId,
    const Resources& resources)
{
  if (containerId.has_parent()) {
    return Failure("Not supported for nested containers");
  }

  // Mount persistent volumes. We do this in the host namespace and
  // rely on mount propagation for them to be visible inside the
  // container.
  if (!infos.contains(containerId)) {
    return Failure("Unknown container");
  }

  const Owned<Info>& info = infos[containerId];

  Resources current = info->resources;

  // We first remove unneeded persistent volumes.
  foreach (const Resource& resource, current.persistentVolumes()) {
    // This is enforced by the master.
    CHECK(resource.disk().has_volume());

    // Ignore absolute and nested paths.
    const string& containerPath = resource.disk().volume().container_path();
    if (strings::contains(containerPath, "/")) {
      LOG(WARNING) << "Skipping updating mount for persistent volume "
                   << resource << " of container " << containerId
                   << " because the container path '" << containerPath
                   << "' contains slash";
      continue;
    }

    if (resources.contains(resource)) {
      continue;
    }

    // Determine the target of the mount.
    string target = path::join(info->directory, containerPath);

    LOG(INFO) << "Removing mount '" << target << "' for persistent volume "
              << resource << " of container " << containerId;

    // The unmount will fail if the task/executor is still using files
    // or directories under 'target'.
    Try<Nothing> unmount = fs::unmount(target);
    if (unmount.isError()) {
      return Failure(
          "Failed to unmount unneeded persistent volume at '" +
          target + "': " + unmount.error());
    }

    // NOTE: This is a non-recursive rmdir.
    Try<Nothing> rmdir = os::rmdir(target, false);
    if (rmdir.isError()) {
      return Failure(
          "Failed to remove persistent volume mount point at '" +
          target + "': " + rmdir.error());
    }
  }

  // Get user and group info for this task based on the task's sandbox.
  struct stat s;
  if (::stat(info->directory.c_str(), &s) < 0) {
    return Failure("Failed to get ownership for '" + info->directory +
                   "': " + os::strerror(errno));
  }

  const uid_t uid = s.st_uid;
  const gid_t gid = s.st_gid;

  // We then mount new persistent volumes.
  foreach (const Resource& resource, resources.persistentVolumes()) {
    // This is enforced by the master.
    CHECK(resource.disk().has_volume());

    // Ignore absolute and nested paths.
    const string& containerPath = resource.disk().volume().container_path();
    if (strings::contains(containerPath, "/")) {
      LOG(WARNING) << "Skipping updating mount for persistent volume "
                   << resource << " of container " << containerId
                   << " because the container path '" << containerPath
                   << "' contains slash";
      continue;
    }

    if (current.contains(resource)) {
      continue;
    }

    // Determine the source of the mount.
    string source = paths::getPersistentVolumePath(flags.work_dir, resource);

    bool isVolumeInUse = false;

    foreachvalue (const Owned<Info>& info, infos) {
      if (info->resources.contains(resource)) {
        isVolumeInUse = true;
        break;
      }
    }

    // Set the ownership of the persistent volume to match that of the sandbox
    // directory if the volume is not already in use. If the volume is
    // currently in use by other containers, tasks in this container may fail
    // to read from or write to the persistent volume due to incompatible
    // ownership and file system permissions.
    if (!isVolumeInUse) {
      LOG(INFO) << "Changing the ownership of the persistent volume at '"
                << source << "' with uid " << uid << " and gid " << gid;

      Try<Nothing> chown = os::chown(uid, gid, source, false);

      if (chown.isError()) {
        return Failure(
            "Failed to change the ownership of the persistent volume at '" +
            source + "' with uid " + stringify(uid) +
            " and gid " + stringify(gid) + ": " + chown.error());
      }
    }

    // Determine the target of the mount.
    string target = path::join(info->directory, containerPath);

    if (os::exists(target)) {
      // NOTE: There are two scenarios in which the mount target may
      // already exist:
      // 1. This is possible because 'info->resources' will be reset
      //    when slave restarts and recovers. When the slave calls
      //    'containerizer->update' after the executor re-registers,
      //    we'll try to re-mount all the already mounted volumes.
      // 2. There may be multiple references to the persistent
      //    volume's mount target. E.g., a host volume and a
      //    persistent volume are both specified, and the source
      //    of the host volume is the same as the container path
      //    of the persistent volume.

      // Check that the source of the mount matches the entry with the
      // same target in the mount table, if one can be found. If not,
      // mount the persistent volume as we do below. This is possible
      // because the slave could crash after it unmounts the volume but
      // before it is able to delete the mount point.
      Try<fs::MountInfoTable> table = fs::MountInfoTable::read();
      if (table.isError()) {
        return Failure("Failed to get mount table: " + table.error());
      }

      // Check whether this particular persistent volume is already mounted.
      bool volumeMounted = false;

      foreach (const fs::MountInfoTable::Entry& entry, table->entries) {
        // TODO(gilbert): Check source of the mount matches the entry's
        // root. Note that the root is relative to the root of its parent
        // mount. See:
        // http://man7.org/linux/man-pages/man5/proc.5.html
        if (target == entry.target) {
          volumeMounted = true;
          break;
        }
      }

      if (volumeMounted) {
        continue;
      }
    }

    Try<Nothing> mkdir = os::mkdir(target);
    if (mkdir.isError()) {
      return Failure(
          "Failed to create persistent volume mount point at '" +
          target + "': " + mkdir.error());
    }

    LOG(INFO) << "Mounting '" << source << "' to '" << target
              << "' for persistent volume " << resource
              << " of container " << containerId;

    Try<Nothing> mount = fs::mount(source, target, None(), MS_BIND, nullptr);
    if (mount.isError()) {
      return Failure(
          "Failed to mount persistent volume from '" +
          source + "' to '" + target + "': " + mount.error());
    }

    // If the mount needs to be read-only, do a remount.
    if (resource.disk().volume().mode() == Volume::RO) {
      mount = fs::mount(
          None(), target, None(), MS_BIND | MS_RDONLY | MS_REMOUNT, nullptr);

      if (mount.isError()) {
        return Failure(
            "Failed to remount persistent volume as read-only from '" +
            source + "' to '" + target + "': " + mount.error());
      }
    }
  }

  // Store the new resources.
  info->resources = resources;

  return Nothing();
}
// This test verifies that the provisioner can recover the rootfses
// of child containers even when no image is specified for their
// parent container.
TEST_F(ProvisionerAppcTest, RecoverNestedContainerNoParentImage)
{
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = COPY_BACKEND;
  flags.work_dir = path::join(sandbox.get(), "work_dir");

  Try<Owned<Provisioner>> provisioner = Provisioner::create(flags);
  ASSERT_SOME(provisioner);

  Try<string> createImage = createTestImage(
      flags.appc_store_dir,
      getManifest());

  ASSERT_SOME(createImage);

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner.get()->recover({}));

  Image image;
  image.mutable_appc()->CopyFrom(getTestImage());

  ContainerID parent;
  ContainerID child;

  parent.set_value(UUID::random().toString());
  child.set_value(UUID::random().toString());
  child.mutable_parent()->CopyFrom(parent);

  AWAIT_READY(provisioner.get()->provision(child, image));

  provisioner->reset();

  // Create a new provisioner to recover the state from the container.
  provisioner = Provisioner::create(flags);
  ASSERT_SOME(provisioner);

  AWAIT_READY(provisioner.get()->recover({parent, child}));
  AWAIT_READY(provisioner.get()->provision(child, image));

  const string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        child);

  Future<bool> destroy = provisioner.get()->destroy(child);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());
  EXPECT_FALSE(os::exists(containerDir));

  containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        parent);

  destroy = provisioner.get()->destroy(parent);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());
  EXPECT_FALSE(os::exists(containerDir));
}
Example #15
bool operator==(const ContainerID& left, const ContainerID& right)
{
  return left.value() == right.value() &&
    left.has_parent() == right.has_parent() &&
    (!left.has_parent() || left.parent() == right.parent());
}
Example #16
Future<ExecutorInfo> ExternalContainerizerProcess::_launch(
    const ContainerID& containerId,
    const FrameworkID& frameworkId,
    const ExecutorInfo executorInfo,
    const SlaveID& slaveId,
    bool checkpoint,
    const Future<ResultFutures>& future)
{
  VLOG(1) << "Launch callback triggered on container '" << containerId << "'";

  if (!containers.contains(containerId)) {
    return Failure("Container '" + containerId.value() + "' not running");
  }

  string result;
  Try<bool> support = commandSupported(future, result);
  if (support.isError()) {
    terminate(containerId);
    return Failure(support.error());
  }

  if (!support.get()) {
    // We generally need to use an internal implementation in these
    // cases. For the specific case of a launch, however, there cannot
    // be an internal implementation for an external containerizer,
    // hence we need to fail or even abort at this point.
    // TODO(tillt): Consider using posix-isolator as a fall back.
    terminate(containerId);
    return Failure("External containerizer does not support launch");
  }

  VLOG(1) << "Launch supported by external containerizer";

  ExternalStatus ps;
  if (!ps.ParseFromString(result)) {
    // TODO(tillt): Consider not terminating the containerizer due
    // to protocol breach but only fail the operation.
    terminate(containerId);
    return Failure("Could not parse launch result protobuf (error: "
      + protobufError(ps) + ")");
  }

  VLOG(2) << "Launch result: '" << ps.message() << "'";
  VLOG(2) << "Executor pid: " << ps.pid();

  containers[containerId]->pid = ps.pid();

  // Observe the executor process and install a callback for status
  // changes.
  process::reap(ps.pid())
    .onAny(defer(
        PID<ExternalContainerizerProcess>(this),
        &ExternalContainerizerProcess::reaped,
        containerId,
        lambda::_1));

  // Checkpoint the container's pid if requested.
  if (checkpoint) {
    const string& path = slave::paths::getForkedPidPath(
        slave::paths::getMetaRootDir(flags.work_dir),
        slaveId,
        frameworkId,
        executorInfo.executor_id(),
        containerId);

    LOG(INFO) << "Checkpointing containerized executor '" << containerId
              << "' pid " << ps.pid() << " to '" << path <<  "'";

    Try<Nothing> checkpointed =
        slave::state::checkpoint(path, stringify(ps.pid()));

    if (checkpointed.isError()) {
      terminate(containerId);
      return Failure("Failed to checkpoint containerized executor '"
        + containerId.value() + "' pid " + stringify(ps.pid()) + " to '" + path
        + "'");
    }
  }

  VLOG(1) << "Launch finishing up for container '" << containerId << "'";
  return executorInfo;
}
Example #17
Future<Nothing> NvidiaGpuIsolatorProcess::update(
    const ContainerID& containerId,
    const Resources& resources)
{
  if (containerId.has_parent()) {
    return Failure("Not supported for nested containers");
  }

  if (!infos.contains(containerId)) {
    return Failure("Unknown container");
  }

  Info* info = CHECK_NOTNULL(infos[containerId]);

  Option<double> gpus = resources.gpus();

  // Make sure that the `gpus` resource is not fractional.
  // We rely on scalar resources only having 3 digits of precision.
  if (static_cast<long long>(gpus.getOrElse(0.0) * 1000.0) % 1000 != 0) {
    return Failure("The 'gpus' resource must be an unsigned integer");
  }

  size_t requested = static_cast<size_t>(resources.gpus().getOrElse(0.0));

  // Update the GPU allocation to reflect the new total.
  if (requested > info->allocated.size()) {
    size_t additional = requested - info->allocated.size();

    return allocator.allocate(additional)
      .then(defer(PID<NvidiaGpuIsolatorProcess>(this),
                  &NvidiaGpuIsolatorProcess::_update,
                  containerId,
                  lambda::_1));
  } else if (requested < info->allocated.size()) {
    size_t fewer = info->allocated.size() - requested;

    set<Gpu> deallocated;

    for (size_t i = 0; i < fewer; i++) {
      const auto gpu = info->allocated.begin();

      cgroups::devices::Entry entry;
      entry.selector.type = Entry::Selector::Type::CHARACTER;
      entry.selector.major = gpu->major;
      entry.selector.minor = gpu->minor;
      entry.access.read = true;
      entry.access.write = true;
      entry.access.mknod = true;

      Try<Nothing> deny = cgroups::devices::deny(
          hierarchy, info->cgroup, entry);

      if (deny.isError()) {
        return Failure("Failed to deny cgroups access to GPU device"
                       " '" + stringify(entry) + "': " + deny.error());
      }

      deallocated.insert(*gpu);
      info->allocated.erase(gpu);
    }

    return allocator.deallocate(deallocated);
  }

  return Nothing();
}
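// A quick worked example of the fractional 'gpus' check above (values are
// illustrative): with three digits of precision, gpus = 1.5 becomes
// 1500 % 1000 == 500 and is rejected, while gpus = 2.0 becomes
// 2000 % 1000 == 0, passes, and is then truncated to requested = 2.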
Example #18
TYPED_TEST(CpuIsolatorTest, SystemCpuUsage)
{
    Flags flags;

    Try<Isolator*> isolator = TypeParam::create(flags);
    CHECK_SOME(isolator);

    // A PosixLauncher is sufficient even when testing a cgroups isolator.
    Try<Launcher*> launcher = PosixLauncher::create(flags);

    ExecutorInfo executorInfo;
    executorInfo.mutable_resources()->CopyFrom(
        Resources::parse("cpus:1.0").get());

    ContainerID containerId;
    containerId.set_value("system_cpu_usage");

    AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));

    Try<string> dir = os::mkdtemp();
    ASSERT_SOME(dir);
    const string& file = path::join(dir.get(), "mesos_isolator_test_ready");

    // Generating random numbers is done by the kernel and will max out a single
    // core and run almost exclusively in the kernel, i.e., system time.
    string command = "cat /dev/urandom > /dev/null & "
                     "touch " + file + "; " // Signals the command is running.
                     "sleep 60";

    int pipes[2];
    ASSERT_NE(-1, ::pipe(pipes));

    lambda::function<int()> inChild = lambda::bind(&execute, command, pipes);

    Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
    ASSERT_SOME(pid);

    // Reap the forked child.
    Future<Option<int> > status = process::reap(pid.get());

    // Continue in the parent.
    ::close(pipes[0]);

    // Isolate the forked child.
    AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

    // Now signal the child to continue.
    int buf;
    ASSERT_LT(0, ::write(pipes[1],  &buf, sizeof(buf)));
    ::close(pipes[1]);

    // Wait for the command to start.
    while (!os::exists(file));

    // Wait up to 1 second for the child process to induce 1/8 of a second of
    // system cpu time.
    ResourceStatistics statistics;
    Duration waited = Duration::zero();
    do {
        Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
        AWAIT_READY(usage);

        statistics = usage.get();

        // If we meet our usage expectations, we're done!
        if (statistics.cpus_system_time_secs() >= 0.125) {
            break;
        }

        os::sleep(Milliseconds(200));
        waited += Milliseconds(200);
    } while (waited < Seconds(1));

    EXPECT_LE(0.125, statistics.cpus_system_time_secs());

    // Shouldn't be any appreciable user time.
    EXPECT_GT(0.025, statistics.cpus_user_time_secs());

    // Ensure all processes are killed.
    AWAIT_READY(launcher.get()->destroy(containerId));

    // Make sure the child was reaped.
    AWAIT_READY(status);

    // Let the isolator clean up.
    AWAIT_READY(isolator.get()->cleanup(containerId));

    delete isolator.get();
    delete launcher.get();

    CHECK_SOME(os::rmdir(dir.get()));
}
Example #19
string LinuxLauncher::cgroup(const ContainerID& containerId)
{
  return path::join(flags.cgroups_root, containerId.value());
}
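// For illustration (the flag value here is hypothetical): with
// flags.cgroups_root = "mesos" and a container ID of "0f5a7ec1", this
// helper returns the relative cgroup path "mesos/0f5a7ec1".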
Example #20
TYPED_TEST(MemIsolatorTest, MemUsage)
{
    Flags flags;

    Try<Isolator*> isolator = TypeParam::create(flags);
    CHECK_SOME(isolator);

    // A PosixLauncher is sufficient even when testing a cgroups isolator.
    Try<Launcher*> launcher = PosixLauncher::create(flags);

    ExecutorInfo executorInfo;
    executorInfo.mutable_resources()->CopyFrom(
        Resources::parse("mem:1024").get());

    ContainerID containerId;
    containerId.set_value("memory_usage");

    AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));

    int pipes[2];
    ASSERT_NE(-1, ::pipe(pipes));

    lambda::function<int()> inChild = lambda::bind(
                                          &consumeMemory,
                                          Megabytes(256),
                                          Seconds(10),
                                          pipes);

    Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
    ASSERT_SOME(pid);

    // Set up the reaper to wait on the forked child.
    Future<Option<int> > status = process::reap(pid.get());

    // Continue in the parent.
    ::close(pipes[0]);

    // Isolate the forked child.
    AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));

    // Now signal the child to continue.
    int buf;
    ASSERT_LT(0, ::write(pipes[1], &buf, sizeof(buf)));
    ::close(pipes[1]);

    // Wait up to 5 seconds for the child process to consume 256 MB of memory.
    ResourceStatistics statistics;
    Bytes threshold = Megabytes(256);
    Duration waited = Duration::zero();
    do {
        Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
        AWAIT_READY(usage);

        statistics = usage.get();

        // If we meet our usage expectations, we're done!
        if (statistics.mem_rss_bytes() >= threshold.bytes()) {
            break;
        }

        os::sleep(Seconds(1));
        waited += Seconds(1);
    } while (waited < Seconds(5));

    EXPECT_LE(threshold.bytes(), statistics.mem_rss_bytes());

    // Ensure all processes are killed.
    AWAIT_READY(launcher.get()->destroy(containerId));

    // Make sure the child was reaped.
    AWAIT_READY(status);

    // Let the isolator clean up.
    AWAIT_READY(isolator.get()->cleanup(containerId));

    delete isolator.get();
    delete launcher.get();
}
// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "copy";
  flags.work_dir = "work_dir";

  Fetcher fetcher;
  Try<Owned<Provisioner>> provisioner1 = Provisioner::create(flags, &fetcher);
  ASSERT_SOME(provisioner1);

  // Create a simple image in the store:
  // <store>
  // |--images
  //    |--<id>
  //       |--manifest
  //       |--rootfs/tmp/test
  JSON::Value manifest = JSON::parse(
      "{"
      "  \"acKind\": \"ImageManifest\","
      "  \"acVersion\": \"0.6.1\","
      "  \"name\": \"foo.com/bar\""
      "}").get();

  // The 'imageId' below has the correct format, but it's not computed
  // by hashing the tarball of the image. That's OK here, as we assume
  // the images under 'images' have already passed such a check when
  // they were downloaded and validated.
  string imageId =
    "sha512-e77d96aa0240eedf134b8c90baeaf76dca8e78691836301d7498c84020446042e"
    "797b296d6ab296e0954c2626bfb264322ebeb8f447dac4fac6511ea06bc61f0";

  string imagePath = path::join(flags.appc_store_dir, "images", imageId);

  ASSERT_SOME(os::mkdir(path::join(imagePath, "rootfs", "tmp")));
  ASSERT_SOME(
      os::write(path::join(imagePath, "rootfs", "tmp", "test"), "test"));
  ASSERT_SOME(
      os::write(path::join(imagePath, "manifest"), stringify(manifest)));

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner1.get()->recover({}, {}));

  Image image;
  image.mutable_appc()->set_name("foo.com/bar");

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  Future<string> rootfs = provisioner1.get()->provision(containerId, image);
  AWAIT_READY(rootfs);

  // Create a new provisioner to recover the state from the container.
  Try<Owned<Provisioner>> provisioner2 = Provisioner::create(flags, &fetcher);
  ASSERT_SOME(provisioner2);

  mesos::slave::ContainerState state;

  // Here we are using an ExecutorInfo in the ContainerState without a
  // ContainerInfo. This is the situation where the Image is specified
  // via --default_container_info so it's not part of the recovered
  // ExecutorInfo.
  state.mutable_container_id()->CopyFrom(containerId);

  AWAIT_READY(provisioner2.get()->recover({state}, {}));

  // It's possible for the user to provision two different rootfses
  // from the same image.
  AWAIT_READY(provisioner2.get()->provision(containerId, image));

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());

  Future<bool> destroy = provisioner2.get()->destroy(containerId);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
// This test verifies that persistent volumes are properly mounted if
// the container does not specify a root filesystem.
TEST_F(LinuxFilesystemIsolatorTest, ROOT_PersistentVolumeWithoutRootFilesystem)
{
  string registry = path::join(sandbox.get(), "registry");
  AWAIT_READY(DockerArchive::create(registry, "test_image"));

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "filesystem/linux,docker/runtime";
  flags.docker_registry = registry;
  flags.docker_store_dir = path::join(sandbox.get(), "store");
  flags.image_providers = "docker";

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, true, &fetcher);

  ASSERT_SOME(create);

  Owned<Containerizer> containerizer(create.get());

  ContainerID containerId;
  containerId.set_value(id::UUID::random().toString());

  ExecutorInfo executor = createExecutorInfo(
      "test_executor",
      "echo abc > volume/file");

  executor.add_resources()->CopyFrom(createPersistentVolume(
      Megabytes(32),
      "test_role",
      "persistent_volume_id",
      "volume"));

  // Create a persistent volume.
  string volume = slave::paths::getPersistentVolumePath(
      flags.work_dir,
      "test_role",
      "persistent_volume_id");

  ASSERT_SOME(os::mkdir(volume));

  string directory = path::join(flags.work_dir, "sandbox");
  ASSERT_SOME(os::mkdir(directory));

  Future<Containerizer::LaunchResult> launch = containerizer->launch(
      containerId,
      createContainerConfig(None(), executor, directory),
      map<string, string>(),
      None());

  AWAIT_ASSERT_EQ(Containerizer::LaunchResult::SUCCESS, launch);

  Future<Option<ContainerTermination>> wait = containerizer->wait(containerId);

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait->get().has_status());
  EXPECT_WEXITSTATUS_EQ(0, wait->get().status());

  EXPECT_SOME_EQ("abc\n", os::read(path::join(volume, "file")));
}
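// `createPersistentVolume` above is a test helper; a minimal sketch of
// the disk resource it is assumed to build (the construction below is
// an illustration mirroring the arguments passed in the test, not the
// helper's actual code):
static Resource createPersistentVolumeSketch()
{
  // 32 MB of "disk" reserved for "test_role".
  Resource volume = Resources::parse("disk", "32", "test_role").get();

  volume.mutable_disk()->mutable_persistence()->set_id(
      "persistent_volume_id");

  volume.mutable_disk()->mutable_volume()->set_container_path("volume");
  volume.mutable_disk()->mutable_volume()->set_mode(Volume::RW);

  return volume;
}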
TEST(AgentValidationTest, ContainerID)
{
  ContainerID containerId;
  Option<Error> error;

  // No empty IDs.
  containerId.set_value("");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // No slashes.
  containerId.set_value("/");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  containerId.set_value("\\");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // No spaces.
  containerId.set_value("redis backup");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // No periods.
  containerId.set_value("redis.backup");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // Cannot be '.'.
  containerId.set_value(".");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // Cannot be '..'.
  containerId.set_value("..");
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // Valid.
  containerId.set_value("redis");
  error = validation::container::validateContainerId(containerId);
  EXPECT_NONE(error);

  // Valid with invalid parent (empty `ContainerID.value`).
  containerId.set_value("backup");
  containerId.mutable_parent();
  error = validation::container::validateContainerId(containerId);
  EXPECT_SOME(error);

  // Valid with valid parent.
  containerId.set_value("backup");
  containerId.mutable_parent()->set_value("redis");
  error = validation::container::validateContainerId(containerId);
  EXPECT_NONE(error);
}
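// A rough sketch of the kind of checks the test above exercises
// (assumes <algorithm> and <cctype>); this is an assumption about the
// shape of the logic, not the actual implementation behind
// `validation::container::validateContainerId`:
Option<Error> validateContainerIdSketch(const ContainerID& containerId)
{
  const string& id = containerId.value();

  // Allow only alphanumerics, '-' and '_', which also rules out
  // slashes, backslashes, spaces, periods, '.' and '..'.
  auto allowed = [](unsigned char c) {
    return isalnum(c) || c == '-' || c == '_';
  };

  if (id.empty() || !std::all_of(id.begin(), id.end(), allowed)) {
    return Error("Invalid ContainerID '" + id + "'");
  }

  // A parent, if present, must itself be a valid ContainerID.
  if (containerId.has_parent()) {
    return validateContainerIdSketch(containerId.parent());
  }

  return None();
}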
// This test tries to catch the regression described in MESOS-7366. It
// verifies that the persistent volume mount points in the sandbox are
// cleaned up even if there is still a reference to the volume.
TEST_F(LinuxFilesystemIsolatorTest, ROOT_PersistentVolumeMountPointCleanup)
{
  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "filesystem/linux";

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, true, &fetcher);

  ASSERT_SOME(create);

  Owned<Containerizer> containerizer(create.get());

  ContainerID containerId;
  containerId.set_value(id::UUID::random().toString());

  ExecutorInfo executor = createExecutorInfo(
      "test_executor",
      "sleep 1000");

  // Create a persistent volume.
  executor.add_resources()->CopyFrom(createPersistentVolume(
      Megabytes(32),
      "test_role",
      "persistent_volume_id",
      "volume"));

  string volume = slave::paths::getPersistentVolumePath(
      flags.work_dir,
      "test_role",
      "persistent_volume_id");

  ASSERT_SOME(os::mkdir(volume));

  string directory = path::join(flags.work_dir, "sandbox");
  ASSERT_SOME(os::mkdir(directory));

  Future<Containerizer::LaunchResult> launch = containerizer->launch(
      containerId,
      createContainerConfig(None(), executor, directory),
      map<string, string>(),
      None());

  AWAIT_ASSERT_EQ(Containerizer::LaunchResult::SUCCESS, launch);

  ASSERT_SOME(os::touch(path::join(directory, "volume", "abc")));

  // This keeps a reference to the persistent volume mount.
  Try<int_fd> fd = os::open(
      path::join(directory, "volume", "abc"),
      O_WRONLY | O_TRUNC | O_CLOEXEC,
      S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

  ASSERT_SOME(fd);

  containerizer->destroy(containerId);

  Future<Option<ContainerTermination>> wait = containerizer->wait(containerId);

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait->get().has_status());
  EXPECT_WTERMSIG_EQ(SIGKILL, wait.get()->status());

  // Verify that the mount point has been removed.
  EXPECT_FALSE(os::exists(path::join(directory, "volume", "abc")));

  os::close(fd.get());
}
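// The cleanup verified above is consistent with a lazy unmount of the
// volume mount point: the mount disappears from the sandbox even while
// an open file descriptor still references it. A minimal sketch of such
// an unmount at the syscall level (assumes <sys/mount.h>; this is an
// assumption about the mechanism, not code taken from the
// filesystem/linux isolator):
static Try<Nothing> lazyUnmount(const string& target)
{
  // MNT_DETACH detaches the mount immediately and cleans it up once
  // the last reference is dropped.
  if (::umount2(target.c_str(), MNT_DETACH) != 0) {
    return ErrnoError("Failed to lazily unmount '" + target + "'");
  }

  return Nothing();
}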
Example #25
inline bool operator==(const ContainerID& left, const std::string& right)
{
  return left.value() == right;
}
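// With the overload above, a ContainerID can be compared directly
// against a string in assertions; a hypothetical usage sketch (not a
// test from the code base):
TEST(ContainerIDComparisonSketch, EqualsString)
{
  ContainerID containerId;
  containerId.set_value("redis");

  EXPECT_TRUE(containerId == "redis");
}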
// This test verifies that the provisioner can provision a rootfs
// from an image that has already been put into the store directory.
TEST_F(ProvisionerAppcTest, ROOT_Provision)
{
  // Create provisioner.
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = "bind";
  flags.work_dir = "work_dir";

  Try<Owned<Provisioner>> provisioner = Provisioner::create(flags);
  ASSERT_SOME(provisioner);

  Try<string> createImage = createTestImage(
      flags.appc_store_dir,
      getManifest());

  ASSERT_SOME(createImage);

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner.get()->recover({}, {}));

  // Simulate a task that requires an image.
  Image image;
  image.mutable_appc()->CopyFrom(getTestImage());

  ContainerID containerId;
  containerId.set_value("12345");

  Future<slave::ProvisionInfo> provisionInfo =
    provisioner.get()->provision(containerId, image);
  AWAIT_READY(provisionInfo);

  string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        containerId);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        containerId);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size());
  EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(),
            Path(provisionInfo.get().rootfs).basename());

  Future<bool> destroy = provisioner.get()->destroy(containerId);
  AWAIT_READY(destroy);

  // One rootfs is destroyed.
  EXPECT_TRUE(destroy.get());

  // The container directory is successfully cleaned up.
  EXPECT_FALSE(os::exists(containerDir));
}
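// `getTestImage()` is a fixture helper returning an `Image::Appc`; a
// minimal sketch of building such a message by hand (the name and label
// values are made up for illustration, not the fixture's values):
static Image::Appc getTestImageSketch()
{
  Image::Appc appc;
  appc.set_name("example.com/test-image");

  Label* label = appc.mutable_labels()->add_labels();
  label->set_key("os");
  label->set_value("linux");

  return appc;
}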
Example #27
inline bool operator!=(const ContainerID& left, const ContainerID& right)
{
  return left.value() != right.value();
}
// This test verifies that the provisioner can provision a rootfs
// from an image for a child container.
TEST_F(ProvisionerAppcTest, ROOT_ProvisionNestedContainer)
{
  slave::Flags flags;
  flags.image_providers = "APPC";
  flags.appc_store_dir = path::join(os::getcwd(), "store");
  flags.image_provisioner_backend = BIND_BACKEND;
  flags.work_dir = path::join(sandbox.get(), "work_dir");

  Try<Owned<Provisioner>> provisioner = Provisioner::create(flags);
  ASSERT_SOME(provisioner);

  Try<string> createImage = createTestImage(
      flags.appc_store_dir,
      getManifest());

  ASSERT_SOME(createImage);

  // Recover. This is when the image in the store is loaded.
  AWAIT_READY(provisioner.get()->recover({}));

  Image image;
  image.mutable_appc()->CopyFrom(getTestImage());

  ContainerID parent;
  ContainerID child;

  parent.set_value(UUID::random().toString());
  child.set_value(UUID::random().toString());
  child.mutable_parent()->CopyFrom(parent);

  Future<slave::ProvisionInfo> provisionInfo =
    provisioner.get()->provision(child, image);

  AWAIT_READY(provisionInfo);

  const string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);

  const string containerDir =
    slave::provisioner::paths::getContainerDir(
        provisionerDir,
        child);

  Try<hashmap<string, hashset<string>>> rootfses =
    slave::provisioner::paths::listContainerRootfses(
        provisionerDir,
        child);

  ASSERT_SOME(rootfses);

  // Verify that the rootfs is successfully provisioned.
  ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
  ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size());
  EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(),
            Path(provisionInfo.get().rootfs).basename());

  // TODO(jieyu): Verify that 'containerDir' is nested under its
  // parent container's 'containerDir'.

  Future<bool> destroy = provisioner.get()->destroy(child);
  AWAIT_READY(destroy);
  EXPECT_TRUE(destroy.get());
  EXPECT_FALSE(os::exists(containerDir));
}
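// A sketch of what the TODO above might check, reusing `provisionerDir`,
// `parent` and `containerDir` from the test; the nesting layout itself
// is an assumption based on the provisioner path helpers, not verified
// here:
//
//   const string parentDir =
//     slave::provisioner::paths::getContainerDir(provisionerDir, parent);
//
//   EXPECT_TRUE(strings::startsWith(containerDir, parentDir));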