Example #1
TEST_F(DockerTest, ROOT_DOCKER_CheckPortResource)
{
  const string containerName = NAME_PREFIX + "-port-resource-test";
  Owned<Docker> docker(Docker::create(tests::flags.docker,
                                     tests::flags.docker_socket,
                                     false).get());

  // Make sure the container is removed.
  Future<Nothing> remove = docker->rm(containerName, true);

  ASSERT_TRUE(process::internal::await(remove, Seconds(10)));

  ContainerInfo containerInfo;
  containerInfo.set_type(ContainerInfo::DOCKER);

  ContainerInfo::DockerInfo dockerInfo;
  dockerInfo.set_image("busybox");
  dockerInfo.set_network(ContainerInfo::DockerInfo::BRIDGE);

  ContainerInfo::DockerInfo::PortMapping portMapping;
  portMapping.set_host_port(10000);
  portMapping.set_container_port(80);

  dockerInfo.add_port_mappings()->CopyFrom(portMapping);
  containerInfo.mutable_docker()->CopyFrom(dockerInfo);

  CommandInfo commandInfo;
  commandInfo.set_shell(false);
  commandInfo.set_value("true");

  Resources resources =
    Resources::parse("ports:[9998-9999];ports:[10001-11000]").get();

  Future<Nothing> run = docker->run(
      containerInfo,
      commandInfo,
      containerName,
      "dir",
      "/mnt/mesos/sandbox",
      resources);

  // The host port is outside of the provided ranges, so the run should fail.
  AWAIT_EXPECT_FAILED(run);

  resources = Resources::parse("ports:[9998-9999];ports:[10000-11000]").get();

  Try<string> directory = environment->mkdtemp();
  CHECK_SOME(directory) << "Failed to create temporary directory";

  run = docker->run(
      containerInfo,
      commandInfo,
      containerName,
      directory.get(),
      "/mnt/mesos/sandbox",
      resources);

  AWAIT_READY(run);
}
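The failure above hinges on range containment: host port 10000 is not inside any offered "ports" range. A hedged illustration using the same Resources API as the test (the port values are simply the ones used above):

// Illustrative only: the offered ranges exclude port 10000, so the
// containment check behind the expected failure returns false.
Resources offered = Resources::parse("ports:[9998-9999];ports:[10001-11000]").get();
Resources needed = Resources::parse("ports:[10000-10000]").get();
bool contained = offered.contains(needed); // false here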
Example #2
	TaskInfo buildTask(string hostname, string id, const SlaveID& slave) {
		hostProfile profile = hostList[hostname];

		// Define the Docker container.
		/* Since there is no "executor" to manage the tasks, the
		   container is built and attached directly to the task below. */
		ContainerInfo container;
		container.set_type(ContainerInfo::DOCKER);
		ContainerInfo::DockerInfo docker;
		docker.set_image(DOCKER_IMAGE);
		container.mutable_docker()->MergeFrom(docker);

		// Mount local volume inside Container
		Volume * volume = container.add_volumes();
		volume->set_container_path("/mnt");
		volume->set_host_path("/local/mesos");
		volume->set_mode(Volume_Mode_RW);

		// Define the task
		TaskInfo task;
		task.set_name("K3-" + k3binary);
		task.mutable_task_id()->set_value(id);
		task.mutable_slave_id()->MergeFrom(slave);
		task.mutable_container()->MergeFrom(container);
		//task.set_data(stringify(localTasks));

		// Define include files for the command
		CommandInfo command;

		CommandInfo_URI * k3_bin = command.add_uris();
		k3_bin->set_value(fileServer + "/" + k3binary);
		k3_bin->set_executable(true);
		k3_bin->set_extract(false);

//		CommandInfo_URI * k3_args = command.add_uris();
//		k3_args->set_value(runpath + "/k3input.yaml");
		
//		command.set_value("$MESOS_SANDBOX/" + k3binary + " -l INFO -p " +
//				"$MESOS_SANDBOX/k3input.yaml");
		task.mutable_command()->MergeFrom(command);

		// Option A for doing resource management (see the scheduler for option B)
		Resource* resource;

		resource = task.add_resources();
		resource->set_name("cpus");
		resource->set_type(Value::SCALAR);
		resource->mutable_scalar()->set_value(profile.cpu);

		resource = task.add_resources();
		resource->set_name("mem");
		resource->set_type(Value::SCALAR);
		resource->mutable_scalar()->set_value(profile.mem);
		
		return task;
	}
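A hedged usage sketch for buildTask(): the call site below is hypothetical (the task id and the surrounding resourceOffers() scope with 'driver' and 'offer' are assumptions, not part of the original example), but it mirrors the scheduler pattern used elsewhere in this collection.

// Hypothetical call site inside resourceOffers(); 'driver' and 'offer' are
// the usual SchedulerDriver* and Offer from the offer loop.
TaskInfo task = buildTask(offer.hostname(), "k3-task-1", offer.slave_id());

vector<TaskInfo> tasks;
tasks.push_back(task);

driver->launchTasks(offer.id(), tasks);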
Example #3
  virtual void resourceOffers(
      SchedulerDriver* driver,
      const vector<Offer>& offers)
  {
    static const Try<Resources> TASK_RESOURCES = Resources::parse(resources);

    if (TASK_RESOURCES.isError()) {
      cerr << "Failed to parse resources '" << resources
           << "': " << TASK_RESOURCES.error() << endl;
      driver->abort();
      return;
    }

    foreach (const Offer& offer, offers) {
      if (!launched &&
          Resources(offer.resources()).contains(TASK_RESOURCES.get())) {
        TaskInfo task;
        task.set_name(name);
        task.mutable_task_id()->set_value(name);
        task.mutable_slave_id()->MergeFrom(offer.slave_id());
        task.mutable_resources()->CopyFrom(TASK_RESOURCES.get());
        task.mutable_command()->set_value(command);
        if (uri.isSome()) {
          task.mutable_command()->add_uris()->set_value(uri.get());
        }

        if (dockerImage.isSome()) {
          ContainerInfo containerInfo;
          containerInfo.set_type(ContainerInfo::DOCKER);

          ContainerInfo::DockerInfo dockerInfo;
          dockerInfo.set_image(dockerImage.get());

          containerInfo.mutable_docker()->CopyFrom(dockerInfo);
          task.mutable_container()->CopyFrom(containerInfo);
        }

        vector<TaskInfo> tasks;
        tasks.push_back(task);

        driver->launchTasks(offer.id(), tasks);
        cout << "task " << name << " submitted to slave "
             << offer.slave_id() << endl;

        launched = true;
      } else {
        driver->declineOffer(offer.id());
      }
    }
  }
Example #4
TEST(G_ContainerInfoTests, IsEmpty)
{
	ASSERT_TRUE(DatabasePrepare());
	
	ContainerInfo info = cont->GetInfo();
	cont->Clear();
	EXPECT_TRUE(info->IsEmpty());

	FolderGuard root = cont->GetRoot();
	ElementGuard ce = root->CreateChild("folder", ElementTypeFolder);
	EXPECT_FALSE(info->IsEmpty());

	ce->Remove();
	EXPECT_TRUE(info->IsEmpty());
}
// This test verifies that launching a task with a non-existent Seccomp profile
// leads to failure.
TEST_F(
    LinuxSeccompIsolatorTest,
    ROOT_SECCOMP_LaunchWithOverriddenNonExistentProfile)
{
  slave::Flags flags = CreateSlaveFlags();
  flags.seccomp_profile_name = createProfile(TEST_SECCOMP_PROFILE);

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, false, &fetcher);

  ASSERT_SOME(create);

  Owned<MesosContainerizer> containerizer(create.get());

  SlaveState state;
  state.id = SlaveID();

  AWAIT_READY(containerizer->recover(state));

  ContainerID containerId;
  containerId.set_value(id::UUID::random().toString());

  Try<string> directory = environment->mkdtemp();
  ASSERT_SOME(directory);

  auto containerConfig = createContainerConfig(
      None(),
      createExecutorInfo("executor", "exit 0", "cpus:1"),
      directory.get());

  ContainerInfo* container = containerConfig.mutable_container_info();
  container->set_type(ContainerInfo::MESOS);

  // Set a non-existent Seccomp profile for this particular task.
  SeccompInfo* seccomp = container->mutable_linux_info()->mutable_seccomp();
  seccomp->set_profile_name("absent");

  Future<Containerizer::LaunchResult> launch = containerizer->launch(
      containerId,
      containerConfig,
      map<string, string>(),
      None());

  AWAIT_FAILED(launch);
}
Example #6
// This test verifies that mounting an absolute path works when
// running a docker container.
TEST_F(DockerTest, ROOT_DOCKER_MountAbsolute)
{
  Owned<Docker> docker = Docker::create(
      tests::flags.docker,
      tests::flags.docker_socket,
      false).get();

  ContainerInfo containerInfo;
  containerInfo.set_type(ContainerInfo::DOCKER);

  Try<string> directory = environment->mkdtemp();
  CHECK_SOME(directory) << "Failed to create temporary directory";

  const string testFile = path::join(directory.get(), "test_file");
  EXPECT_SOME(os::write(testFile, "data"));

  Volume* volume = containerInfo.add_volumes();
  volume->set_host_path(testFile);
  volume->set_container_path("/tmp/test_file");
  volume->set_mode(Volume::RO);

  ContainerInfo::DockerInfo dockerInfo;
  dockerInfo.set_image("busybox");

  containerInfo.mutable_docker()->CopyFrom(dockerInfo);

  CommandInfo commandInfo;
  commandInfo.set_shell(true);
  commandInfo.set_value("ls /tmp/test_file");

  Future<Nothing> run = docker->run(
      containerInfo,
      commandInfo,
      NAME_PREFIX + "-mount-absolute-test",
      directory.get(),
      directory.get());

  AWAIT_READY(run);
}
Example #7
TEST(G_ContainerInfoTests, TotalDataSize)
{
	ASSERT_TRUE(DatabasePrepare());
	cont->Clear();
	ContainerInfo info = cont->GetInfo();
	
	FolderGuard root = cont->GetRoot();

	std::vector<std::string> data;
	data.push_back("0123456789");
	data.push_back("01234567890123456789");
	data.push_back("01234567890123456789");
	data.push_back("012345678901234");
	data.push_back("01234");
	unsigned totalSize(0);
	const std::string fileBaseName("file");
	for (size_t i = 0; i < data.size(); ++i)
	{
		// Files are named "filea", "fileb", ... (97 is the ASCII code of 'a').
		FileGuard cf = root->CreateFile(fileBaseName + std::string(1, i + 97));
		EXPECT_EQ(0, cf->Size());
		std::stringstream strm;
		strm << data[i];
		cf->Write(strm, data[i].size());
		
		totalSize += data[i].size();
		EXPECT_EQ(totalSize, info->UsedSpace());
	}
	for (size_t i = 0; i < data.size(); ++i)
	{
		ElementGuard ce = root->GetChild(fileBaseName + std::string(1, i + 97));
		EXPECT_EQ(data[i].size(), ce->AsFile()->Size());
		ASSERT_NO_THROW(ce->Remove());
		totalSize -= data[i].size();
		EXPECT_EQ(totalSize, info->UsedSpace());
	}
	EXPECT_EQ(0, info->UsedSpace());
}
Example #8
  virtual void resourceOffers(
      SchedulerDriver* driver,
      const vector<Offer>& offers)
  {
    static const Try<Resources> TASK_RESOURCES = Resources::parse(resources);

    if (TASK_RESOURCES.isError()) {
      cerr << "Failed to parse resources '" << resources
           << "': " << TASK_RESOURCES.error() << endl;
      driver->abort();
      return;
    }

    foreach (const Offer& offer, offers) {
      if (!launched &&
          Resources(offer.resources()).contains(TASK_RESOURCES.get())) {
        TaskInfo task;
        task.set_name(name);
        task.mutable_task_id()->set_value(name);
        task.mutable_slave_id()->MergeFrom(offer.slave_id());
        task.mutable_resources()->CopyFrom(TASK_RESOURCES.get());

        CommandInfo* commandInfo = task.mutable_command();
        commandInfo->set_value(command);
        if (environment.isSome()) {
          Environment* environment_ = commandInfo->mutable_environment();
          foreachpair (const std::string& name,
                       const std::string& value,
                       environment.get()) {
            Environment_Variable* environmentVariable =
              environment_->add_variables();
            environmentVariable->set_name(name);
            environmentVariable->set_value(value);
          }
        }

        if (uri.isSome()) {
          task.mutable_command()->add_uris()->set_value(uri.get());
        }

        if (dockerImage.isSome()) {
          ContainerInfo containerInfo;

          if (containerizer == "mesos") {
            containerInfo.set_type(ContainerInfo::MESOS);

            ContainerInfo::MesosInfo mesosInfo;

            Image mesosImage;
            mesosImage.set_type(Image::DOCKER);
            mesosImage.mutable_docker()->set_name(dockerImage.get());
            mesosInfo.mutable_image()->CopyFrom(mesosImage);

            containerInfo.mutable_mesos()->CopyFrom(mesosInfo);
          } else if (containerizer == "docker") {
            containerInfo.set_type(ContainerInfo::DOCKER);

            ContainerInfo::DockerInfo dockerInfo;
            dockerInfo.set_image(dockerImage.get());

            containerInfo.mutable_docker()->CopyFrom(dockerInfo);
          } else {
            cerr << "Unsupported containerizer: " << containerizer << endl;;

            driver->abort();

            return;
          }

          task.mutable_container()->CopyFrom(containerInfo);
        }

        vector<TaskInfo> tasks;
        tasks.push_back(task);

        driver->launchTasks(offer.id(), tasks);
        cout << "task " << name << " submitted to slave "
             << offer.slave_id() << endl;

        launched = true;
      } else {
        driver->declineOffer(offer.id());
      }
    }
  }
// This test verifies that sandbox path volume allows two containers
// nested under the same parent container to share data.
// TODO(jieyu): Parameterize this test to test both linux and posix
// launcher and filesystem isolator.
TEST_F(VolumeSandboxPathIsolatorTest, SharedVolume)
{
  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "volume/sandbox_path";

  Fetcher fetcher;

  Try<MesosContainerizer*> create = MesosContainerizer::create(
      flags,
      true,
      &fetcher);

  ASSERT_SOME(create);

  Owned<MesosContainerizer> containerizer(create.get());

  SlaveState state;
  state.id = SlaveID();

  AWAIT_READY(containerizer->recover(state));

  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  ExecutorInfo executor = createExecutorInfo("executor", "sleep 99", "cpus:1");

  Try<string> directory = environment->mkdtemp();
  ASSERT_SOME(directory);

  Future<bool> launch = containerizer->launch(
      containerId,
      None(),
      executor,
      directory.get(),
      None(),
      state.id,
      map<string, string>(),
      true); // TODO(benh): Ever want to check not-checkpointing?

  AWAIT_ASSERT_TRUE(launch);

  ContainerID nestedContainerId1;
  nestedContainerId1.mutable_parent()->CopyFrom(containerId);
  nestedContainerId1.set_value(UUID::random().toString());

  ContainerInfo containerInfo;
  containerInfo.set_type(ContainerInfo::MESOS);

  Volume* volume = containerInfo.add_volumes();
  volume->set_mode(Volume::RW);
  volume->set_container_path("parent");

  Volume::Source* source = volume->mutable_source();
  source->set_type(Volume::Source::SANDBOX_PATH);

  Volume::Source::SandboxPath* sandboxPath = source->mutable_sandbox_path();
  sandboxPath->set_type(Volume::Source::SandboxPath::PARENT);
  sandboxPath->set_path("shared");

  launch = containerizer->launch(
      nestedContainerId1,
      createCommandInfo("touch parent/file; sleep 1000"),
      containerInfo,
      None(),
      state.id);

  AWAIT_ASSERT_TRUE(launch);

  ContainerID nestedContainerId2;
  nestedContainerId2.mutable_parent()->CopyFrom(containerId);
  nestedContainerId2.set_value(UUID::random().toString());

  launch = containerizer->launch(
      nestedContainerId2,
      createCommandInfo(
        "while true; do if [ -f parent/file ]; then exit 0; fi; done"),
      containerInfo,
      None(),
      state.id);

  AWAIT_ASSERT_TRUE(launch);

  Future<Option<ContainerTermination>> wait =
    containerizer->wait(nestedContainerId2);

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait.get()->has_status());
  EXPECT_WEXITSTATUS_EQ(0, wait.get()->status());

  wait = containerizer->wait(containerId);

  containerizer->destroy(containerId);

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait.get()->has_status());
  EXPECT_WTERMSIG_EQ(SIGKILL, wait.get()->status());
}
Example #10
// This test launches a container which has an image and joins host
// network, and then verifies that the container can access Internet.
TEST_F(CniIsolatorTest, ROOT_INTERNET_CURL_LaunchContainerInHostNetwork)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "docker/runtime,filesystem/linux";
  flags.image_providers = "docker";
  flags.docker_store_dir = path::join(sandbox.get(), "store");

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers->size());

  const Offer& offer = offers.get()[0];

  // NOTE: We use a non-shell command here because 'sh' might not be
  // in the PATH. 'alpine' does not specify env PATH in the image.
  CommandInfo command;
  command.set_shell(false);
  command.set_value("/bin/ping");
  command.add_arguments("/bin/ping");
  command.add_arguments("-c1");
  command.add_arguments("google.com");

  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128").get(),
      command);

  Image image;
  image.set_type(Image::DOCKER);
  image.mutable_docker()->set_name("alpine");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);
  container->mutable_mesos()->mutable_image()->CopyFrom(image);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(task.task_id(), statusFinished->task_id());
  EXPECT_EQ(TASK_FINISHED, statusFinished->state());

  driver.stop();
  driver.join();
}
Example #11
// This test launches a command task with checkpointing enabled. The agent
// is terminated while the task is running; after the agent restarts, we
// kill the task and verify that TASK_KILLED is received for it.
TEST_F(CniIsolatorTest, ROOT_SlaveRecovery)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "network/cni";

  flags.network_cni_plugins_dir = cniPluginDir;
  flags.network_cni_config_dir = cniConfigDir;

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;

  // Enable checkpointing for the framework.
  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
  frameworkInfo.set_checkpoint(true);

  MesosSchedulerDriver driver(
      &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers->size());

  const Offer& offer = offers.get()[0];

  CommandInfo command;
  command.set_value("sleep 1000");

  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128").get(),
      command);

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);

  // Make sure the container joins the mock CNI network.
  container->add_network_infos()->set_name("__MESOS_TEST__");

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusKilled;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusKilled));

  EXPECT_CALL(sched, offerRescinded(&driver, _))
    .Times(AtMost(1));

  Future<Nothing> ack =
    FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY(statusRunning);
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  // Wait for the ACK to be checkpointed.
  AWAIT_READY(ack);

  // Stop the slave after TASK_RUNNING is received.
  slave.get()->terminate();

  // Restart the slave.
  slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  // Kill the task.
  driver.killTask(task.task_id());

  AWAIT_READY(statusKilled);
  EXPECT_EQ(task.task_id(), statusKilled->task_id());
  EXPECT_EQ(TASK_KILLED, statusKilled->state());

  driver.stop();
  driver.join();
}
Example #12
// This test verifies that docker image default cmd is executed correctly.
// This corresponds to the case in runtime isolator logic table: sh=0,
// value=0, argv=1, entrypoint=0, cmd=1.
TEST_F(DockerRuntimeIsolatorTest, ROOT_DockerDefaultCmdLocalPuller)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  const string directory = path::join(os::getcwd(), "archives");

  Future<Nothing> testImage =
    DockerArchive::create(directory, "alpine", "null", "[\"sh\"]");

  AWAIT_READY(testImage);

  ASSERT_TRUE(os::exists(path::join(directory, "alpine.tar")));

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "docker/runtime,filesystem/linux";
  flags.image_providers = "docker";
  flags.docker_registry = directory;

  // Use a temporary directory as the docker store directory. Because the
  // manifest of the test image can change, images cached by previous tests
  // should never be reused.
  flags.docker_store_dir = path::join(os::getcwd(), "store");

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers->size());

  const Offer& offer = offers.get()[0];

  TaskInfo task;
  task.set_name("test-task");
  task.mutable_task_id()->set_value(UUID::random().toString());
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(Resources::parse("cpus:1;mem:128").get());
  task.mutable_command()->set_shell(false);
  task.mutable_command()->add_arguments("-c");
  task.mutable_command()->add_arguments("echo 'hello world'");

  Image image;
  image.set_type(Image::DOCKER);
  image.mutable_docker()->set_name("alpine");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);
  container->mutable_mesos()->mutable_image()->CopyFrom(image);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(task.task_id(), statusFinished->task_id());
  EXPECT_EQ(TASK_FINISHED, statusFinished->state());

  driver.stop();
  driver.join();
}
// This test checks the behavior when invalid limits are passed.
TEST_F(PosixRLimitsIsolatorTest, InvalidLimits)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/rlimits";

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;

  MesosSchedulerDriver driver(
      &sched,
      DEFAULT_FRAMEWORK_INFO,
      master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;

  EXPECT_CALL(sched, resourceOffers(_, _))
      .WillOnce(FutureArg<1>(&offers))
      .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_NE(0u, offers->size());

  TaskInfo task = createTask(
      offers.get()[0].slave_id(),
      offers.get()[0].resources(),
      "true");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);

  // Set an impossible limit: soft > hard.
  RLimitInfo rlimitInfo;
  RLimitInfo::RLimit* rlimit = rlimitInfo.add_rlimits();
  rlimit->set_type(RLimitInfo::RLimit::RLMT_CPU);
  rlimit->set_soft(100);
  rlimit->set_hard(1);

  container->mutable_rlimit_info()->CopyFrom(rlimitInfo);

  Future<TaskStatus> taskStatus;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
      .WillOnce(FutureArg<1>(&taskStatus));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(taskStatus);
  EXPECT_EQ(task.task_id(), taskStatus->task_id());
  EXPECT_EQ(TASK_FAILED, taskStatus->state());
  EXPECT_EQ(TaskStatus::REASON_EXECUTOR_TERMINATED, taskStatus->reason());

  driver.stop();
  driver.join();
}
Example #14
bool operator == (const ContainerInfo& left, const ContainerInfo& right)
{
    // Order of volumes is not important.
    if (left.volumes().size() != right.volumes().size()) {
        return false;
    }

    for (int i = 0; i < left.volumes().size(); i++) {
        bool found = false;
        for (int j = 0; j < right.volumes().size(); j++) {
            if (left.volumes().Get(i) == right.volumes().Get(j)) {
                found = true;
                break;
            }
        }
        if (!found) {
            return false;
        }
    }

    return left.type() == right.type() &&
           left.hostname() == right.hostname() &&
           left.docker() == right.docker();
}
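A minimal usage sketch for the comparison above, as a hypothetical test. It assumes only what the operator itself already requires: the Mesos ContainerInfo/Volume protobuf types and their own equality operators being in scope.

// Hypothetical test: the same two volumes added in opposite order should
// still compare equal, since the operator ignores volume ordering.
TEST(ContainerInfoEqualityTest, VolumeOrderIsIgnored)
{
  ContainerInfo left;
  left.set_type(ContainerInfo::DOCKER);

  Volume* data = left.add_volumes();
  data->set_container_path("/data");
  data->set_host_path("/var/data");
  data->set_mode(Volume::RW);

  Volume* logs = left.add_volumes();
  logs->set_container_path("/logs");
  logs->set_host_path("/var/logs");
  logs->set_mode(Volume::RO);

  ContainerInfo right;
  right.set_type(ContainerInfo::DOCKER);
  right.add_volumes()->CopyFrom(*logs);
  right.add_volumes()->CopyFrom(*data);

  EXPECT_TRUE(left == right);
}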
Example #15
// Test that a container can create a private view of a system
// directory (/var/tmp). Check that a file written by a process inside
// the container doesn't appear on the host filesystem but does appear
// under the container's work directory.
TEST_F(SharedFilesystemIsolatorTest, ROOT_RelativeVolume)
{
    slave::Flags flags = CreateSlaveFlags();
    flags.isolation = "filesystem/shared";

    Try<Isolator*> isolator = SharedFilesystemIsolatorProcess::create(flags);
    CHECK_SOME(isolator);

    Try<Launcher*> launcher = LinuxLauncher::create(flags);
    CHECK_SOME(launcher);

    // Use /var/tmp so we don't mask the work directory (under /tmp).
    const string containerPath = "/var/tmp";
    ASSERT_TRUE(os::isdir(containerPath));

    // Use a host path relative to the container work directory.
    const string hostPath = strings::remove(containerPath, "/", strings::PREFIX);

    ContainerInfo containerInfo;
    containerInfo.set_type(ContainerInfo::MESOS);
    containerInfo.add_volumes()->CopyFrom(
        CREATE_VOLUME(containerPath, hostPath, Volume::RW));

    ExecutorInfo executorInfo;
    executorInfo.mutable_container()->CopyFrom(containerInfo);

    ContainerID containerId;
    containerId.set_value(UUID::random().toString());

    Future<Option<CommandInfo> > prepare =
        isolator.get()->prepare(containerId, executorInfo, flags.work_dir, None());
    AWAIT_READY(prepare);
    ASSERT_SOME(prepare.get());

    // The test will touch a file in the container path.
    const string file = path::join(containerPath, UUID::random().toString());
    ASSERT_FALSE(os::exists(file));

    // Manually run the isolator's preparation command first, then touch
    // the file.
    vector<string> args;
    args.push_back("/bin/sh");
    args.push_back("-x");
    args.push_back("-c");
    args.push_back(prepare.get().get().value() + " && touch " + file);

    Try<pid_t> pid = launcher.get()->fork(
                         containerId,
                         "/bin/sh",
                         args,
                         Subprocess::FD(STDIN_FILENO),
                         Subprocess::FD(STDOUT_FILENO),
                         Subprocess::FD(STDERR_FILENO),
                         None(),
                         None(),
                         None());
    ASSERT_SOME(pid);

    // Set up the reaper to wait on the forked child.
    Future<Option<int> > status = process::reap(pid.get());

    AWAIT_READY(status);
    EXPECT_SOME_EQ(0, status.get());

    // Check the correct hierarchy was created under the container work
    // directory.
    string dir = "/";
    foreach (const string& subdir, strings::tokenize(containerPath, "/")) {
        dir = path::join(dir, subdir);

        struct stat hostStat;
        EXPECT_EQ(0, ::stat(dir.c_str(), &hostStat));

        struct stat containerStat;
        EXPECT_EQ(0,
                  ::stat(path::join(flags.work_dir, dir).c_str(), &containerStat));

        EXPECT_EQ(hostStat.st_mode, containerStat.st_mode);
        EXPECT_EQ(hostStat.st_uid, containerStat.st_uid);
        EXPECT_EQ(hostStat.st_gid, containerStat.st_gid);
    }

    // Check it did *not* create a file in the host namespace.
    EXPECT_FALSE(os::exists(file));

    // Check it did create the file under the container's work directory
    // on the host.
    EXPECT_TRUE(os::exists(path::join(flags.work_dir, file)));

    delete launcher.get();
    delete isolator.get();
}
Example #16
TEST(G_ContainerInfoTests, TotalElements)
{
	ASSERT_TRUE(DatabasePrepare());
	cont->Clear();
	ContainerInfo info = cont->GetInfo();

	EXPECT_EQ(1, info->TotalElements()); // the root folder is always present, even in an empty container
	EXPECT_EQ(1, info->TotalElements(ElementTypeFolder));
	EXPECT_EQ(0, info->TotalElements(ElementTypeFile));
	FolderGuard root = cont->GetRoot();
	FolderGuard cfold = root->CreateFolder("folder1");
	EXPECT_EQ(2, info->TotalElements());

	cfold->CreateChild("file1", ElementTypeFile);
	EXPECT_EQ(3, info->TotalElements());

	EXPECT_EQ(2, info->TotalElements(ElementTypeFolder));
	EXPECT_EQ(1, info->TotalElements(ElementTypeFile));

	root->CreateFolder("folder2")->CreateFile("file2");

	EXPECT_EQ(5, info->TotalElements());
	EXPECT_EQ(3, info->TotalElements(ElementTypeFolder));
	EXPECT_EQ(2, info->TotalElements(ElementTypeFile));

	cfold->Remove();
	ASSERT_FALSE(cfold->Exists());

	EXPECT_EQ(3, info->TotalElements());
	EXPECT_EQ(2, info->TotalElements(ElementTypeFolder));
	EXPECT_EQ(1, info->TotalElements(ElementTypeFile));

	cont->Clear();
	EXPECT_EQ(1, info->TotalElements());
	EXPECT_EQ(1, info->TotalElements(ElementTypeFolder));
}
Example #17
// This test exercises the functionality of the Docker interface.
TEST(DockerTest, ROOT_DOCKER_interface)
{
  string containerName = "mesos-docker-test";
  Resources resources = Resources::parse("cpus:1;mem:512").get();
  Docker docker = Docker::create(tests::flags.docker, false).get();

  // Cleaning up the container first if it exists.
  Future<Nothing> status = docker.rm(containerName, true);
  ASSERT_TRUE(status.await(Seconds(10)));

  // Verify that we do not see the container.
  Future<list<Docker::Container> > containers = docker.ps(true, containerName);
  AWAIT_READY(containers);
  foreach (const Docker::Container& container, containers.get()) {
    EXPECT_NE("/" + containerName, container.name);
  }

  Try<string> directory = environment->mkdtemp();
  CHECK_SOME(directory) << "Failed to create temporary directory";

  ContainerInfo containerInfo;
  containerInfo.set_type(ContainerInfo::DOCKER);

  ContainerInfo::DockerInfo dockerInfo;
  dockerInfo.set_image("busybox");
  containerInfo.mutable_docker()->CopyFrom(dockerInfo);

  CommandInfo commandInfo;
  commandInfo.set_value("sleep 120");

  // Start the container.
  status = docker.run(
      containerInfo,
      commandInfo,
      containerName,
      directory.get(),
      "/mnt/mesos/sandbox",
      resources);

  AWAIT_READY(status);

  // Should be able to see the container now.
  containers = docker.ps();
  AWAIT_READY(containers);
  bool found = false;
  foreach (const Docker::Container& container, containers.get()) {
    if ("/" + containerName == container.name) {
      found = true;
      break;
    }
  }
  EXPECT_TRUE(found);

  Future<Docker::Container> container = docker.inspect(containerName);
  AWAIT_READY(container);

  // Test some fields of the container.
  EXPECT_NE("", container.get().id);
  EXPECT_EQ("/" + containerName, container.get().name);
  EXPECT_SOME(container.get().pid);

  // Kill the container.
  status = docker.kill(containerName);
  AWAIT_READY(status);

  // Now, the container should not appear in the result of ps().
  // But it should appear in the result of ps(true).
  containers = docker.ps();
  AWAIT_READY(containers);
  foreach (const Docker::Container& container, containers.get()) {
    EXPECT_NE("/" + containerName, container.name);
  }

  containers = docker.ps(true, containerName);
  AWAIT_READY(containers);
  found = false;
  foreach (const Docker::Container& container, containers.get()) {
    if ("/" + containerName == container.name) {
      found = true;
      break;
    }
  }
  EXPECT_TRUE(found);

  // Check the container's info, both id and name should remain the
  // same since we haven't removed it, but the pid should be none
  // since it's not running.
  container = docker.inspect(containerName);
  AWAIT_READY(container);

  EXPECT_NE("", container.get().id);
  EXPECT_EQ("/" + containerName, container.get().name);
  EXPECT_NONE(container.get().pid);

  // Remove the container.
  status = docker.rm(containerName);
  AWAIT_READY(status);

  // Should not be able to inspect the container.
  container = docker.inspect(containerName);
  AWAIT_FAILED(container);

  // Also, now we should not be able to see the container by invoking
  // ps(true).
  containers = docker.ps(true, containerName);
  AWAIT_READY(containers);
  foreach (const Docker::Container& container, containers.get()) {
    EXPECT_NE("/" + containerName, container.name);
  }

  // Start the container again; this time we will do a "rm -f"
  // directly, instead of killing it and then removing it.
  status = docker.run(
      containerInfo,
      commandInfo,
      containerName,
      directory.get(),
      "/mnt/mesos/sandbox",
      resources);

  AWAIT_READY(status);

  // Verify that the container is there.
  containers = docker.ps();
  AWAIT_READY(containers);
  found = false;
  foreach (const Docker::Container& container, containers.get()) {
    if ("/" + containerName == container.name) {
      found = true;
      break;
    }
  }
  EXPECT_TRUE(found);

  // Then do a "rm -f".
  status = docker.rm(containerName, true);
  AWAIT_READY(status);

  // Verify that the container is totally removed, that is we can't
  // find it by ps() or ps(true).
  containers = docker.ps();
  AWAIT_READY(containers);
  foreach (const Docker::Container& container, containers.get()) {
    EXPECT_NE("/" + containerName, container.name);
  }
  containers = docker.ps(true, containerName);
  AWAIT_READY(containers);
  foreach (const Docker::Container& container, containers.get()) {
    EXPECT_NE("/" + containerName, container.name);
  }
}
// This test verifies that we can launch a task container with overridden
// Seccomp profile.
TEST_F(LinuxSeccompIsolatorTest, ROOT_SECCOMP_LaunchWithOverriddenProfile)
{
  const string config =
    R"~(
    {
      "defaultAction": "SCMP_ACT_ALLOW",
      "archMap": [
        {
          "architecture": "SCMP_ARCH_X86_64",
          "subArchitectures": [
            "SCMP_ARCH_X86",
            "SCMP_ARCH_X32"
          ]
        }
      ],
      "syscalls": [
        {
          "names": ["uname"],
          "action": "SCMP_ACT_ERRNO",
          "args": [],
          "includes": {},
          "excludes": {}
        }
      ]
    })~";

  slave::Flags flags = CreateSlaveFlags();
  flags.seccomp_profile_name = createProfile(TEST_SECCOMP_PROFILE);

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, false, &fetcher);

  ASSERT_SOME(create);

  Owned<MesosContainerizer> containerizer(create.get());

  SlaveState state;
  state.id = SlaveID();

  AWAIT_READY(containerizer->recover(state));

  ContainerID containerId;
  containerId.set_value(id::UUID::random().toString());

  Try<string> directory = environment->mkdtemp();
  ASSERT_SOME(directory);

  auto containerConfig = createContainerConfig(
      None(),
      createExecutorInfo("executor", "uname", "cpus:1"),
      directory.get());

  ContainerInfo* container = containerConfig.mutable_container_info();
  container->set_type(ContainerInfo::MESOS);

  // Set the Seccomp profile name for this particular task.
  SeccompInfo* seccomp = container->mutable_linux_info()->mutable_seccomp();
  seccomp->set_profile_name(createProfile(config));

  Future<Containerizer::LaunchResult> launch = containerizer->launch(
      containerId,
      containerConfig,
      map<string, string>(),
      None());

  AWAIT_ASSERT_EQ(Containerizer::LaunchResult::SUCCESS, launch);

  Future<Option<ContainerTermination>> wait = containerizer->wait(containerId);

  AWAIT_READY(wait);
  ASSERT_SOME(wait.get());
  ASSERT_TRUE(wait.get()->has_status());
  EXPECT_WEXITSTATUS_NE(0, wait.get()->status());
}
Example #19
TEST_F(SharedFilesystemIsolatorTest, ROOT_AbsoluteVolume)
{
    slave::Flags flags = CreateSlaveFlags();
    flags.isolation = "filesystem/shared";

    Try<Isolator*> isolator = SharedFilesystemIsolatorProcess::create(flags);
    CHECK_SOME(isolator);

    Try<Launcher*> launcher = LinuxLauncher::create(flags);
    CHECK_SOME(launcher);

    // We'll mount the absolute test work directory as /var/tmp in the
    // container.
    const string hostPath = flags.work_dir;
    const string containerPath = "/var/tmp";

    ContainerInfo containerInfo;
    containerInfo.set_type(ContainerInfo::MESOS);
    containerInfo.add_volumes()->CopyFrom(
        CREATE_VOLUME(containerPath, hostPath, Volume::RW));

    ExecutorInfo executorInfo;
    executorInfo.mutable_container()->CopyFrom(containerInfo);

    ContainerID containerId;
    containerId.set_value(UUID::random().toString());

    Future<Option<CommandInfo> > prepare =
        isolator.get()->prepare(containerId, executorInfo, flags.work_dir, None());
    AWAIT_READY(prepare);
    ASSERT_SOME(prepare.get());

    // Test the volume mounting by touching a file in the container's
    // /var/tmp, which should then appear in flags.work_dir.
    const string filename = UUID::random().toString();
    ASSERT_FALSE(os::exists(path::join(containerPath, filename)));

    vector<string> args;
    args.push_back("/bin/sh");
    args.push_back("-x");
    args.push_back("-c");
    args.push_back(prepare.get().get().value() +
                   " && touch " +
                   path::join(containerPath, filename));

    Try<pid_t> pid = launcher.get()->fork(
                         containerId,
                         "/bin/sh",
                         args,
                         Subprocess::FD(STDIN_FILENO),
                         Subprocess::FD(STDOUT_FILENO),
                         Subprocess::FD(STDERR_FILENO),
                         None(),
                         None(),
                         None());
    ASSERT_SOME(pid);

    // Set up the reaper to wait on the forked child.
    Future<Option<int> > status = process::reap(pid.get());

    AWAIT_READY(status);
    EXPECT_SOME_EQ(0, status.get());

    // Check the file was created in flags.work_dir.
    EXPECT_TRUE(os::exists(path::join(hostPath, filename)));

    // Check it didn't get created in the host's view of containerPath.
    EXPECT_FALSE(os::exists(path::join(containerPath, filename)));

    delete launcher.get();
    delete isolator.get();
}
Example #20
  virtual void resourceOffers(SchedulerDriver* driver,
                              const vector<Offer>& offers)
  {
    cout << "." << flush;
    for (size_t i = 0; i < offers.size(); i++) {
      const Offer& offer = offers[i];

      // Lookup resources we care about.
      // TODO(benh): It would be nice to ultimately have some helper
      // functions for looking up resources.
      double cpus = 0;
      double mem = 0;

      for (int j = 0; j < offer.resources_size(); j++) {
        const Resource& resource = offer.resources(j);
        if (resource.name() == "cpus" &&
            resource.type() == Value::SCALAR) {
          cpus = resource.scalar().value();
        } else if (resource.name() == "mem" &&
                   resource.type() == Value::SCALAR) {
          mem = resource.scalar().value();
        }
      }

      // Launch tasks.
      vector<TaskInfo> tasks;
      while (tasksLaunched < totalTasks &&
             cpus >= CPUS_PER_TASK &&
             mem >= MEM_PER_TASK) {
        int taskId = tasksLaunched++;

        cout << "Starting task " << taskId << " on "
             << offer.hostname() << endl;

        TaskInfo task;
        task.set_name("Task " + lexical_cast<string>(taskId));
        task.mutable_task_id()->set_value(lexical_cast<string>(taskId));
        task.mutable_slave_id()->MergeFrom(offer.slave_id());
        task.mutable_command()->set_value("echo hello");

        // Use Docker to run the task.
        ContainerInfo containerInfo;
        containerInfo.set_type(ContainerInfo::DOCKER);

        ContainerInfo::DockerInfo dockerInfo;
        dockerInfo.set_image("busybox");

        containerInfo.mutable_docker()->CopyFrom(dockerInfo);
        task.mutable_container()->CopyFrom(containerInfo);

        Resource* resource;

        resource = task.add_resources();
        resource->set_name("cpus");
        resource->set_type(Value::SCALAR);
        resource->mutable_scalar()->set_value(CPUS_PER_TASK);

        resource = task.add_resources();
        resource->set_name("mem");
        resource->set_type(Value::SCALAR);
        resource->mutable_scalar()->set_value(MEM_PER_TASK);

        tasks.push_back(task);

        cpus -= CPUS_PER_TASK;
        mem -= MEM_PER_TASK;
      }

      driver->launchTasks(offer.id(), tasks);
    }
  }
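The TODO above asks for resource lookup helpers; a minimal sketch using the same protobuf accessors as this example (the helper name is an assumption, not an existing Mesos function):

// Hypothetical helper: returns the scalar value of a named resource in an
// offer, or 0 if no matching scalar resource is present.
static double getScalarResource(const Offer& offer, const string& name)
{
  for (int i = 0; i < offer.resources_size(); i++) {
    const Resource& resource = offer.resources(i);
    if (resource.name() == name && resource.type() == Value::SCALAR) {
      return resource.scalar().value();
    }
  }
  return 0.0;
}

// With such a helper, the lookup loop above reduces to:
//   double cpus = getScalarResource(offer, "cpus");
//   double mem = getScalarResource(offer, "mem");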
Example #21
// Test that the pre-launch docker hook executes before a docker container
// is launched. The test hook creates a file "foo" in the sandbox directory.
// When the docker container is launched, the sandbox directory is mounted
// into it. We validate the hook by verifying whether the "foo" file exists
// inside the docker container.
TEST_F(HookTest, ROOT_DOCKER_VerifySlavePreLaunchDockerHook)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockDocker* mockDocker =
    new MockDocker(tests::flags.docker, tests::flags.docker_socket);

  Shared<Docker> docker(mockDocker);

  slave::Flags flags = CreateSlaveFlags();

  Fetcher fetcher;

  Try<ContainerLogger*> logger =
    ContainerLogger::create(flags.container_logger);

  ASSERT_SOME(logger);

  MockDockerContainerizer containerizer(
      flags,
      &fetcher,
      Owned<ContainerLogger>(logger.get()),
      docker);

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), &containerizer, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);

  AWAIT_READY(offers);
  ASSERT_NE(0u, offers.get().size());

  const Offer& offer = offers.get()[0];

  SlaveID slaveId = offer.slave_id();

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(offer.resources());

  CommandInfo command;
  command.set_value("test -f " + path::join(flags.sandbox_directory, "foo"));

  ContainerInfo containerInfo;
  containerInfo.set_type(ContainerInfo::DOCKER);

  // TODO(tnachen): Use local image to test if possible.
  ContainerInfo::DockerInfo dockerInfo;
  dockerInfo.set_image("alpine");
  containerInfo.mutable_docker()->CopyFrom(dockerInfo);

  task.mutable_command()->CopyFrom(command);
  task.mutable_container()->CopyFrom(containerInfo);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<ContainerID> containerId;
  EXPECT_CALL(containerizer, launch(_, _, _, _, _, _, _, _))
    .WillOnce(DoAll(FutureArg<0>(&containerId),
                    Invoke(&containerizer,
                           &MockDockerContainerizer::_launch)));

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished))
    .WillRepeatedly(DoDefault());

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY_FOR(containerId, Seconds(60));
  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(TASK_RUNNING, statusRunning.get().state());
  AWAIT_READY_FOR(statusFinished, Seconds(60));
  EXPECT_EQ(TASK_FINISHED, statusFinished.get().state());

  Future<containerizer::Termination> termination =
    containerizer.wait(containerId.get());

  driver.stop();
  driver.join();

  AWAIT_READY(termination);

  Future<list<Docker::Container>> containers =
    docker.get()->ps(true, slave::DOCKER_NAME_PREFIX);

  AWAIT_READY(containers);

  // Cleanup all mesos launched containers.
  foreach (const Docker::Container& container, containers.get()) {
    AWAIT_READY_FOR(docker.get()->rm(container.id, true), Seconds(30));
  }
}
Example #22
// This test verifies that docker image default entrypoint is executed
// correctly using registry puller. This corresponds to the case in runtime
// isolator logic table: sh=0, value=0, argv=1, entrypoint=1, cmd=0.
TEST_F(DockerRuntimeIsolatorTest,
       ROOT_CURL_INTERNET_DockerDefaultEntryptRegistryPuller)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "docker/runtime,filesystem/linux";
  flags.image_providers = "docker";
  flags.docker_store_dir = path::join(os::getcwd(), "store");

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers->size());

  const Offer& offer = offers.get()[0];

  TaskInfo task;
  task.set_name("test-task");
  task.mutable_task_id()->set_value(UUID::random().toString());
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(Resources::parse("cpus:1;mem:128").get());
  task.mutable_command()->set_shell(false);
  task.mutable_command()->add_arguments("hello world");

  Image image;
  image.set_type(Image::DOCKER);

  // The 'mesosphere/inky' image is used in the docker containerizer tests;
  // its entrypoint is 'echo' and its cmd is null.
  image.mutable_docker()->set_name("mesosphere/inky");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);
  container->mutable_mesos()->mutable_image()->CopyFrom(image);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(task.task_id(), statusFinished->task_id());
  EXPECT_EQ(TASK_FINISHED, statusFinished->state());

  driver.stop();
  driver.join();
}
// This test confirms that if a task exceeds configured resource
// limits it is forcibly terminated.
TEST_F(PosixRLimitsIsolatorTest, TaskExceedingLimit)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/rlimits";

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;

  MesosSchedulerDriver driver(
      &sched,
      DEFAULT_FRAMEWORK_INFO,
      master.get()->pid,
      DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;

  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  // The task attempts to use an infinite amount of CPU time.
  TaskInfo task = createTask(
      offers.get()[0].slave_id(),
      offers.get()[0].resources(),
      "while true; do true; done");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);

  // Limit the process to use maximally 1 second of CPU time.
  RLimitInfo rlimitInfo;
  RLimitInfo::RLimit* cpuLimit = rlimitInfo.add_rlimits();
  cpuLimit->set_type(RLimitInfo::RLimit::RLMT_CPU);
  cpuLimit->set_soft(1);
  cpuLimit->set_hard(1);

  container->mutable_rlimit_info()->CopyFrom(rlimitInfo);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFailed;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFailed));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(statusRunning);
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFailed);
  EXPECT_EQ(task.task_id(), statusFailed->task_id());
  EXPECT_EQ(TASK_FAILED, statusFailed->state());

  driver.stop();
  driver.join();
}
// This test confirms that setting no values for the soft and hard
// limits implies an unlimited resource.
TEST_F(PosixRLimitsIsolatorTest, UnsetLimits)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/rlimits";

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;

  MesosSchedulerDriver driver(
      &sched,
      DEFAULT_FRAMEWORK_INFO,
      master.get()->pid,
      DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;

  EXPECT_CALL(sched, resourceOffers(_, _))
      .WillOnce(FutureArg<1>(&offers))
      .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_NE(0u, offers->size());

  TaskInfo task = createTask(
      offers.get()[0].slave_id(),
      offers.get()[0].resources(),
      "exit `ulimit -c | grep -q unlimited`");

  // Force usage of C locale as we interpret a potentially translated
  // string in the task's command.
  mesos::Environment::Variable* locale =
      task.mutable_command()->mutable_environment()->add_variables();
  locale->set_name("LC_ALL");
  locale->set_value("C");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);

  // Setting rlimit for core without soft or hard limit signifies
  // unlimited range.
  RLimitInfo rlimitInfo;
  RLimitInfo::RLimit* rlimit = rlimitInfo.add_rlimits();
  rlimit->set_type(RLimitInfo::RLimit::RLMT_CORE);

  container->mutable_rlimit_info()->CopyFrom(rlimitInfo);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinal;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
      .WillOnce(FutureArg<1>(&statusRunning))
      .WillOnce(FutureArg<1>(&statusFinal));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(statusRunning);
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinal);
  EXPECT_EQ(task.task_id(), statusFinal->task_id());
  EXPECT_EQ(TASK_FINISHED, statusFinal->state());

  driver.stop();
  driver.join();
}