virtual void resourceOffers(SchedulerDriver* driver,
                              const vector<Offer>& offers)
  {
    foreach (const Offer& offer, offers) {
      LOG(INFO) << "Received offer " << offer.id() << " with "
                << offer.resources();

      // If the framework receives this offer for the first time, the state
      // is `State::INIT`; the framework will reserve the resources (by
      // sending a RESERVE operation to the master) in this loop.
      if (!states.contains(offer.slave_id())) {
        // If all tasks were launched, do not reserve more resources; wait
        // for them to finish and unreserve resources.
        if (tasksLaunched == totalTasks) {
          continue;
        }

        states[offer.slave_id()] = State::INIT;
      }

      const State state = states[offer.slave_id()];

      Filters filters;
      filters.set_refuse_seconds(0);

      switch (state) {
        case State::INIT: {
          // The framework reserves resources from this offer for only one
          // task; the task will be dispatched when the reserved resources
          // are re-offered to this framework.
          Resources resources = offer.resources();
          Offer::Operation reserve = RESERVE(taskResources);

          Try<Resources> apply = resources.apply(reserve);
          if (apply.isError()) {
            LOG(INFO) << "Failed to reserve resources for task in offer "
                      << stringify(offer.id()) << ": " << apply.error();
            break;
          }

          driver->acceptOffers({offer.id()}, {reserve}, filters);
          states[offer.slave_id()] = State::RESERVING;
          break;
        }
        case State::RESERVING: {
          Resources resources = offer.resources();
          Resources reserved = resources.reserved(role);
          if (!reserved.contains(taskResources)) {
            break;
          }
          states[offer.slave_id()] = State::RESERVED;

          // We fall through here to save an offer cycle.
        }
        case State::RESERVED: {
          Resources resources = offer.resources();
          Resources reserved = resources.reserved(role);

          CHECK(reserved.contains(taskResources));

          // If all tasks were launched, unreserve those resources.
          if (tasksLaunched == totalTasks) {
            driver->acceptOffers(
                {offer.id()}, {UNRESERVE(taskResources)}, filters);
            states[offer.slave_id()] = State::UNRESERVING;
            break;
          }

          // Framework dispatches task on the reserved resources.
          CHECK(tasksLaunched < totalTasks);

          // Launch tasks on reserved resources.
          const string& taskId = stringify(tasksLaunched++);
          LOG(INFO) << "Launching task " << taskId << " using offer "
                    << offer.id();
          TaskInfo task;
          task.set_name("Task " + taskId + ": " + command);
          task.mutable_task_id()->set_value(taskId);
          task.mutable_slave_id()->MergeFrom(offer.slave_id());
          task.mutable_command()->set_shell(true);
          task.mutable_command()->set_value(command);
          task.mutable_resources()->MergeFrom(taskResources);
          driver->launchTasks(offer.id(), {task}, filters);
          states[offer.slave_id()] = State::TASK_RUNNING;
          break;
        }
        case State::TASK_RUNNING:
          LOG(INFO) << "The task on " << offer.slave_id()
                    << " is running, waiting for task done";
          break;
        case State::UNRESERVING: {
          Resources resources = offer.resources();
          Resources reserved = resources.reserved(role);
          if (!reserved.contains(taskResources)) {
            states[offer.slave_id()] = State::UNRESERVED;
          }
          break;
        }
        case State::UNRESERVED:
          // If the state of the slave is UNRESERVED, ignore it. The driver is
          // stopped when all tasks are done and all resources are unreserved.
          break;
      }
    }
  }
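
// The switch above relies on RESERVE() and UNRESERVE() helpers that are not
// part of this excerpt. A minimal sketch, under the assumption that
// `taskResources` already carries the framework's role and reservation
// principal, of how such helpers could build the corresponding
// Offer::Operation messages:
Offer::Operation RESERVE(const Resources& resources)
{
  Offer::Operation operation;
  operation.set_type(Offer::Operation::RESERVE);
  operation.mutable_reserve()->mutable_resources()->CopyFrom(resources);
  return operation;
}

Offer::Operation UNRESERVE(const Resources& resources)
{
  Offer::Operation operation;
  operation.set_type(Offer::Operation::UNRESERVE);
  operation.mutable_unreserve()->mutable_resources()->CopyFrom(resources);
  return operation;
}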
Example #2
Try<RunState> RunState::recover(
    const string& rootDir,
    const SlaveID& slaveId,
    const FrameworkID& frameworkId,
    const ExecutorID& executorId,
    const UUID& uuid,
    bool strict)
{
  RunState state;
  state.id = uuid;
  string message;

  // Find the tasks.
  const Try<list<string> >& tasks = os::glob(strings::format(
      paths::TASK_PATH,
      rootDir,
      slaveId,
      frameworkId,
      executorId,
      uuid.toString(),
      "*").get());

  if (tasks.isError()) {
    return Error("Failed to find tasks for executor run " + uuid.toString() +
                 ": " + tasks.error());
  }

  // Recover tasks.
  foreach (const string& path, tasks.get()) {
    TaskID taskId;
    taskId.set_value(os::basename(path).get());

    const Try<TaskState>& task = TaskState::recover(
        rootDir, slaveId, frameworkId, executorId, uuid, taskId, strict);

    if (task.isError()) {
      return Error(
          "Failed to recover task " + taskId.value() + ": " + task.error());
    }

    state.tasks[taskId] = task.get();
  }

  // Read the forked pid.
  string path = paths::getForkedPidPath(
      rootDir, slaveId, frameworkId, executorId, uuid);

  Try<string> pid = os::read(path);

  if (pid.isError()) {
    message = "Failed to read executor's forked pid from '" + path +
              "': " + pid.error();

    if (strict) {
      return Error(message);
    } else {
      LOG(WARNING) << message;
      return state;
    }
  }

  Try<pid_t> forkedPid = numify<pid_t>(pid.get());
  if (forkedPid.isError()) {
    return Error("Failed to parse forked pid " + pid.get() +
                 ": " + forkedPid.error());
  }

  state.forkedPid = forkedPid.get();

  // Read the libprocess pid.
  path = paths::getLibprocessPidPath(
      rootDir, slaveId, frameworkId, executorId, uuid);

  pid = os::read(path);

  if (pid.isError()) {
    message = "Failed to read executor's libprocess pid from '" + path +
              "': " + pid.error();

    if (strict) {
      return Error(message);
    } else {
      LOG(WARNING) << message;
      return state;
    }
  }

  state.libprocessPid = process::UPID(pid.get());

  return state;
}
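
// A hedged usage sketch for the recovery path above: a caller would check the
// returned Try before touching the state (the identifiers `rootDir`,
// `slaveId`, `frameworkId`, `executorId`, `uuid` and `strict` are assumed to
// be in scope).
Try<RunState> run = RunState::recover(
    rootDir, slaveId, frameworkId, executorId, uuid, strict);

if (run.isError()) {
  LOG(ERROR) << "Failed to recover executor run: " << run.error();
} else {
  LOG(INFO) << "Recovered " << run.get().tasks.size() << " task(s)";
}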
Example #3
Future<size_t> RegistryClientProcess::getBlob(
    const Image::Name& imageName,
    const Option<string>& digest,
    const Path& filePath)
{
  Try<Nothing> mkdir = os::mkdir(filePath.dirname(), true);
  if (mkdir.isError()) {
    return Failure(
        "Failed to create directory to download blob: " + mkdir.error());
  }

  const string blobURLPath = getRepositoryPath(imageName) + "/blobs/" +
                             digest.getOrElse("");

  http::URL blobURL(registryServer_);
  blobURL.path = blobURLPath;

  return doHttpGet(blobURL, None(), true, true, None())
    .then([this, blobURLPath, digest, filePath](
        const http::Response& response) -> Future<size_t> {
      Try<int> fd = os::open(
          filePath.value,
          O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC,
          S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

      if (fd.isError()) {
        return Failure("Failed to open file '" + filePath.value + "': " +
                       fd.error());
      }

      Try<Nothing> nonblock = os::nonblock(fd.get());
      if (nonblock.isError()) {
        Try<Nothing> close = os::close(fd.get());
        if (close.isError()) {
          LOG(WARNING) << "Failed to close the file descriptor for file '"
                       << stringify(filePath) << "': " << close.error();
        }

        return Failure(
            "Failed to set non-blocking mode for file: " + filePath.value);
      }

      // TODO(jojy): Add blob validation.
      // TODO(jojy): Add check for max size.

      Option<Pipe::Reader> reader = response.reader;
      if (reader.isNone()) {
        Try<Nothing> close = os::close(fd.get());
        if (close.isError()) {
          LOG(WARNING) << "Failed to close the file descriptor for file '"
                       << stringify(filePath) << "': " << close.error();
        }

        return Failure("Failed to get streaming reader from blob response");
      }

      return saveBlob(fd.get(), reader.get())
        .onAny([blobURLPath, digest, filePath, fd](
            const Future<size_t>& future) {
          Try<Nothing> close = os::close(fd.get());
          if (close.isError()) {
            LOG(WARNING) << "Failed to close the file descriptor for blob '"
                         << stringify(filePath) << "': " << close.error();
          }

          if (future.isFailed()) {
            LOG(WARNING) << "Failed to save blob requested from '"
                         << blobURLPath << "' to path '"
                         << stringify(filePath) << "': " << future.failure();
          }

          if (future.isDiscarded()) {
            LOG(WARNING) << "Failed to save blob requested from '"
                         << blobURLPath << "' to path '" << stringify(filePath)
                         << "': future discarded";
          }
        });
    });
}
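
// A sketch of how a caller might consume the Future<size_t> returned above.
// The names `registryClient`, `imageName`, `digest` and `blobPath` are
// illustrative assumptions, not the actual call sites.
registryClient.getBlob(imageName, digest, blobPath)
  .then([](size_t size) {
    LOG(INFO) << "Downloaded blob of " << size << " bytes";
    return Nothing();
  })
  .onFailed([](const string& failure) {
    LOG(ERROR) << "Failed to download blob: " << failure;
  });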
Example #4
Future<http::Response> ResourceProviderManagerProcess::api(
    const http::Request& request,
    const Option<Principal>& principal)
{
  if (request.method != "POST") {
    return MethodNotAllowed({"POST"}, request.method);
  }

  v1::resource_provider::Call v1Call;

  // TODO(anand): Content type values are case-insensitive.
  Option<string> contentType = request.headers.get("Content-Type");

  if (contentType.isNone()) {
    return BadRequest("Expecting 'Content-Type' to be present");
  }

  if (contentType.get() == APPLICATION_PROTOBUF) {
    if (!v1Call.ParseFromString(request.body)) {
      return BadRequest("Failed to parse body into Call protobuf");
    }
  } else if (contentType.get() == APPLICATION_JSON) {
    Try<JSON::Value> value = JSON::parse(request.body);
    if (value.isError()) {
      return BadRequest("Failed to parse body into JSON: " + value.error());
    }

    Try<v1::resource_provider::Call> parse =
      ::protobuf::parse<v1::resource_provider::Call>(value.get());

    if (parse.isError()) {
      return BadRequest("Failed to convert JSON into Call protobuf: " +
                        parse.error());
    }

    v1Call = parse.get();
  } else {
    return UnsupportedMediaType(
        string("Expecting 'Content-Type' of ") +
        APPLICATION_JSON + " or " + APPLICATION_PROTOBUF);
  }

  Call call = devolve(v1Call);

  Option<Error> error = validate(call);
  if (error.isSome()) {
    return BadRequest(
        "Failed to validate resource_provider::Call: " + error->message);
  }

  ContentType acceptType;
  if (request.acceptsMediaType(APPLICATION_JSON)) {
    acceptType = ContentType::JSON;
  } else if (request.acceptsMediaType(APPLICATION_PROTOBUF)) {
    acceptType = ContentType::PROTOBUF;
  } else {
    return NotAcceptable(
        string("Expecting 'Accept' to allow ") +
        "'" + APPLICATION_PROTOBUF + "' or '" + APPLICATION_JSON + "'");
  }

  switch (call.type()) {
    case Call::UNKNOWN: {
      return NotImplemented();
    }

    case Call::SUBSCRIBE: {
      Pipe pipe;
      OK ok;

      ok.headers["Content-Type"] = stringify(acceptType);
      ok.type = http::Response::PIPE;
      ok.reader = pipe.reader();

      HttpConnection http(pipe.writer(), acceptType);
      subscribe(http, call.subscribe());

      return ok;
    }

    case Call::UPDATE: {
      if (!resourceProviders.contains(call.resource_provider_id())) {
        return BadRequest("Resource provider cannot be found");
      }

      auto resourceProvider = resourceProviders.at(call.resource_provider_id());

      update(&resourceProvider, call.update());
      return Accepted();
    }
  }

  UNREACHABLE();
}
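
// A rough sketch of how a client could exercise this endpoint with a
// protobuf-encoded request. The `manager` UPID and the endpoint path are
// assumptions for illustration; a real SUBSCRIBE would also fill in
// `call.mutable_subscribe()`.
v1::resource_provider::Call call;
call.set_type(v1::resource_provider::Call::SUBSCRIBE);

http::Headers headers;
headers["Accept"] = APPLICATION_PROTOBUF;

Future<http::Response> response = http::post(
    manager,                      // UPID of the manager process (assumed).
    "api/v1/resource_provider",   // Hypothetical endpoint path.
    headers,
    call.SerializeAsString(),
    APPLICATION_PROTOBUF);        // Sets the 'Content-Type' header.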
Example #5
int main(int argc, char** argv)
{
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  master::Flags flags;

  // The following flags are executable specific (e.g., since we only
  // have one instance of libprocess per execution, we only want to
  // advertise the IP and port option once, here).
  Option<string> ip;
  flags.add(&ip,
            "ip",
            "IP address to listen on. This cannot be used in conjunction\n"
            "with `--ip_discovery_command`.");

  uint16_t port;
  flags.add(&port,
            "port",
            "Port to listen on.",
            MasterInfo().port());

  Option<string> advertise_ip;
  flags.add(&advertise_ip,
            "advertise_ip",
            "IP address advertised to reach this Mesos master.\n"
            "The master does not bind using this IP address.\n"
            "However, this IP address may be used to access this master.");

  Option<string> advertise_port;
  flags.add(&advertise_port,
            "advertise_port",
            "Port advertised to reach Mesos master (along with\n"
            "`advertise_ip`). The master does not bind to this port.\n"
            "However, this port (along with `advertise_ip`) may be used to\n"
            "access this master.");

  Option<string> zk;
  flags.add(&zk,
            "zk",
            "ZooKeeper URL (used for leader election amongst masters)\n"
            "May be one of:\n"
            "  `zk://host1:port1,host2:port2,.../path`\n"
            "  `zk://username:password@host1:port1,host2:port2,.../path`\n"
            "  `file:///path/to/file` (where file contains one of the above)\n"
            "NOTE: Not required if master is run in standalone mode (non-HA).");

  // Optional IP discovery script that will set the Master IP.
  // If set, its output is expected to be a valid parseable IP string.
  Option<string> ip_discovery_command;
  flags.add(&ip_discovery_command,
            "ip_discovery_command",
            "Optional IP discovery binary: if set, it is expected to emit\n"
            "the IP address which the master will try to bind to.\n"
            "Cannot be used in conjunction with `--ip`.");

  Try<Nothing> load = flags.load("MESOS_", argc, argv);

  if (load.isError()) {
    cerr << flags.usage(load.error()) << endl;
    return EXIT_FAILURE;
  }

  if (flags.version) {
    version();
    return EXIT_SUCCESS;
  }

  if (flags.help) {
    cout << flags.usage() << endl;
    return EXIT_SUCCESS;
  }

  // Initialize modules. Note that since other subsystems may depend
  // upon modules, we should initialize modules before anything else.
  if (flags.modules.isSome()) {
    Try<Nothing> result = ModuleManager::load(flags.modules.get());
    if (result.isError()) {
      EXIT(EXIT_FAILURE) << "Error loading modules: " << result.error();
    }
  }

  // Initialize hooks.
  if (flags.hooks.isSome()) {
    Try<Nothing> result = HookManager::initialize(flags.hooks.get());
    if (result.isError()) {
      EXIT(EXIT_FAILURE) << "Error installing hooks: " << result.error();
    }
  }

  if (ip_discovery_command.isSome() && ip.isSome()) {
    EXIT(EXIT_FAILURE) << flags.usage(
        "Only one of `--ip` or `--ip_discovery_command` should be specified");
  }

  if (ip_discovery_command.isSome()) {
    Try<string> ipAddress = os::shell(ip_discovery_command.get());

    if (ipAddress.isError()) {
      EXIT(EXIT_FAILURE) << ipAddress.error();
    }

    os::setenv("LIBPROCESS_IP", strings::trim(ipAddress.get()));
  } else if (ip.isSome()) {
    os::setenv("LIBPROCESS_IP", ip.get());
  }

  os::setenv("LIBPROCESS_PORT", stringify(port));

  if (advertise_ip.isSome()) {
    os::setenv("LIBPROCESS_ADVERTISE_IP", advertise_ip.get());
  }

  if (advertise_port.isSome()) {
    os::setenv("LIBPROCESS_ADVERTISE_PORT", advertise_port.get());
  }

  // Initialize libprocess.
  process::initialize("master");

  logging::initialize(argv[0], flags, true); // Catch signals.

  spawn(new VersionProcess(), true);

  LOG(INFO) << "Build: " << build::DATE << " by " << build::USER;

  LOG(INFO) << "Version: " << MESOS_VERSION;

  if (build::GIT_TAG.isSome()) {
    LOG(INFO) << "Git tag: " << build::GIT_TAG.get();
  }

  if (build::GIT_SHA.isSome()) {
    LOG(INFO) << "Git SHA: " << build::GIT_SHA.get();
  }

  // Create an instance of allocator.
  const string allocatorName = flags.allocator;
  Try<Allocator*> allocator = Allocator::create(allocatorName);

  if (allocator.isError()) {
    EXIT(EXIT_FAILURE)
      << "Failed to create '" << allocatorName
      << "' allocator: " << allocator.error();
  }

  CHECK_NOTNULL(allocator.get());
  LOG(INFO) << "Using '" << allocatorName << "' allocator";

  state::Storage* storage = NULL;
  Log* log = NULL;

  if (flags.registry == "in_memory") {
    if (flags.registry_strict) {
      EXIT(EXIT_FAILURE)
        << "Cannot use '--registry_strict' when using in-memory storage"
        << " based registry";
    }
    storage = new state::InMemoryStorage();
  } else if (flags.registry == "replicated_log" ||
             flags.registry == "log_storage") {
    // TODO(bmahler): "log_storage" is present for backwards
    // compatibility, can be removed before 0.19.0.
    if (flags.work_dir.isNone()) {
      EXIT(EXIT_FAILURE)
        << "--work_dir needed for replicated log based registry";
    }

    Try<Nothing> mkdir = os::mkdir(flags.work_dir.get());
    if (mkdir.isError()) {
      EXIT(EXIT_FAILURE)
        << "Failed to create work directory '" << flags.work_dir.get()
        << "': " << mkdir.error();
    }

    if (zk.isSome()) {
      // Use replicated log with ZooKeeper.
      if (flags.quorum.isNone()) {
        EXIT(EXIT_FAILURE)
          << "Need to specify --quorum for replicated log based"
          << " registry when using ZooKeeper";
      }

      Try<zookeeper::URL> url = zookeeper::URL::parse(zk.get());
      if (url.isError()) {
        EXIT(EXIT_FAILURE) << "Error parsing ZooKeeper URL: " << url.error();
      }

      log = new Log(
          flags.quorum.get(),
          path::join(flags.work_dir.get(), "replicated_log"),
          url.get().servers,
          flags.zk_session_timeout,
          path::join(url.get().path, "log_replicas"),
          url.get().authentication,
          flags.log_auto_initialize);
    } else {
      // Use replicated log without ZooKeeper.
      log = new Log(
          1,
          path::join(flags.work_dir.get(), "replicated_log"),
          set<UPID>(),
          flags.log_auto_initialize);
    }
    storage = new state::LogStorage(log);
  } else {
    EXIT(EXIT_FAILURE)
      << "'" << flags.registry << "' is not a supported"
      << " option for registry persistence";
  }

  CHECK_NOTNULL(storage);

  state::protobuf::State* state = new state::protobuf::State(storage);
  Registrar* registrar = new Registrar(flags, state);
  Repairer* repairer = new Repairer();

  Files files;

  MasterContender* contender;
  MasterDetector* detector;

  Try<MasterContender*> contender_ = MasterContender::create(zk);
  if (contender_.isError()) {
    EXIT(EXIT_FAILURE)
      << "Failed to create a master contender: " << contender_.error();
  }
  contender = contender_.get();

  Try<MasterDetector*> detector_ = MasterDetector::create(zk);
  if (detector_.isError()) {
    EXIT(EXIT_FAILURE)
      << "Failed to create a master detector: " << detector_.error();
  }
  detector = detector_.get();

  Option<Authorizer*> authorizer_ = None();

  auto authorizerNames = strings::split(flags.authorizers, ",");
  if (authorizerNames.empty()) {
    EXIT(EXIT_FAILURE) << "No authorizer specified";
  }
  if (authorizerNames.size() > 1) {
    EXIT(EXIT_FAILURE) << "Multiple authorizers not supported";
  }
  string authorizerName = authorizerNames[0];

  // NOTE: The flag --authorizers overrides the flag --acls, i.e. if
  // a non default authorizer is requested, it will be used and
  // the contents of --acls will be ignored.
  // TODO(arojas): Consider adding support for multiple authorizers.
  Result<Authorizer*> authorizer((None()));
  if (authorizerName != master::DEFAULT_AUTHORIZER) {
    LOG(INFO) << "Creating '" << authorizerName << "' authorizer";

    authorizer = Authorizer::create(authorizerName);
  } else {
    // `authorizerName` is `DEFAULT_AUTHORIZER` at this point.
    if (flags.acls.isSome()) {
      LOG(INFO) << "Creating default '" << authorizerName << "' authorizer";

      authorizer = Authorizer::create(flags.acls.get());
    }
  }

  if (authorizer.isError()) {
    EXIT(EXIT_FAILURE) << "Could not create '" << authorizerName
                       << "' authorizer: " << authorizer.error();
  } else if (authorizer.isSome()) {
    authorizer_ = authorizer.get();
  }

  Option<shared_ptr<RateLimiter>> slaveRemovalLimiter = None();
  if (flags.slave_removal_rate_limit.isSome()) {
    // Parse the flag value.
    // TODO(vinod): Move this parsing logic to flags once we have a
    // 'Rate' abstraction in stout.
    vector<string> tokens =
      strings::tokenize(flags.slave_removal_rate_limit.get(), "/");

    if (tokens.size() != 2) {
      EXIT(EXIT_FAILURE)
        << "Invalid slave_removal_rate_limit: "
        << flags.slave_removal_rate_limit.get()
        << ". Format is <Number of slaves>/<Duration>";
    }

    Try<int> permits = numify<int>(tokens[0]);
    if (permits.isError()) {
      EXIT(EXIT_FAILURE)
        << "Invalid slave_removal_rate_limit: "
        << flags.slave_removal_rate_limit.get()
        << ". Format is <Number of slaves>/<Duration>"
        << ": " << permits.error();
    }

    Try<Duration> duration = Duration::parse(tokens[1]);
    if (duration.isError()) {
      EXIT(EXIT_FAILURE)
        << "Invalid slave_removal_rate_limit: "
        << flags.slave_removal_rate_limit.get()
        << ". Format is <Number of slaves>/<Duration>"
        << ": " << duration.error();
    }

    slaveRemovalLimiter = new RateLimiter(permits.get(), duration.get());
  }

  if (flags.firewall_rules.isSome()) {
    vector<Owned<FirewallRule>> rules;

    const Firewall firewall = flags.firewall_rules.get();

    if (firewall.has_disabled_endpoints()) {
      hashset<string> paths;

      foreach (const string& path, firewall.disabled_endpoints().paths()) {
        paths.insert(path);
      }

      rules.emplace_back(new DisabledEndpointsFirewallRule(paths));
    }
Example #6
Try<RunState> RunState::recover(
    const string& rootDir,
    const SlaveID& slaveId,
    const FrameworkID& frameworkId,
    const ExecutorID& executorId,
    const UUID& uuid,
    bool strict)
{
  RunState state;
  state.id = uuid;
  string message;

  // Find the tasks.
  const Try<list<string> >& tasks = os::glob(strings::format(
      paths::TASK_PATH,
      rootDir,
      slaveId,
      frameworkId,
      executorId,
      uuid.toString(),
      "*").get());

  if (tasks.isError()) {
    return Error("Failed to find tasks for executor run " + uuid.toString() +
                 ": " + tasks.error());
  }

  // Recover tasks.
  foreach (const string& path, tasks.get()) {
    TaskID taskId;
    taskId.set_value(os::basename(path).get());

    const Try<TaskState>& task = TaskState::recover(
        rootDir, slaveId, frameworkId, executorId, uuid, taskId, strict);

    if (task.isError()) {
      return Error(
          "Failed to recover task " + taskId.value() + ": " + task.error());
    }

    state.tasks[taskId] = task.get();
    state.errors += task.get().errors;
  }

  // Read the forked pid.
  string path = paths::getForkedPidPath(
      rootDir, slaveId, frameworkId, executorId, uuid);
  if (!os::exists(path)) {
    // This could happen if the slave died before the isolator
    // checkpointed the forked pid.
    LOG(WARNING) << "Failed to find executor forked pid file '" << path << "'";
    return state;
  }

  Try<string> pid = os::read(path);

  if (pid.isError()) {
    message = "Failed to read executor forked pid from '" + path +
              "': " + pid.error();

    if (strict) {
      return Error(message);
    } else {
      LOG(WARNING) << message;
      state.errors++;
      return state;
    }
  }

  if (pid.get().empty()) {
    // This could happen if the slave died after opening the file for
    // writing but before it checkpointed anything.
    LOG(WARNING) << "Found empty executor forked pid file '" << path << "'";
    return state;
  }

  Try<pid_t> forkedPid = numify<pid_t>(pid.get());
  if (forkedPid.isError()) {
    return Error("Failed to parse forked pid " + pid.get() +
                 ": " + forkedPid.error());
  }

  state.forkedPid = forkedPid.get();

  // Read the libprocess pid.
  path = paths::getLibprocessPidPath(
      rootDir, slaveId, frameworkId, executorId, uuid);

  if (!os::exists(path)) {
    // This could happen if the slave died before the executor
    // registered with the slave.
    LOG(WARNING)
      << "Failed to find executor libprocess pid file '" << path << "'";
    return state;
  }

  pid = os::read(path);

  if (pid.isError()) {
    message = "Failed to read executor libprocess pid from '" + path +
              "': " + pid.error();

    if (strict) {
      return Error(message);
    } else {
      LOG(WARNING) << message;
      state.errors++;
      return state;
    }
  }

  if (pid.get().empty()) {
    // This could happen if the slave died after opening the file for
    // writing but before it checkpointed anything.
    LOG(WARNING) << "Found empty executor libprocess pid file '" << path << "'";
    return state;
  }

  state.libprocessPid = process::UPID(pid.get());

  // See if the sentinel file exists.
  path = paths::getExecutorSentinelPath(
      rootDir, slaveId, frameworkId, executorId, uuid);

  state.completed = os::exists(path);

  return state;
}
Example #7
Try<Socket> Socket::create(Kind kind, Option<int> s)
{
  // If the caller passed in a file descriptor, we do
  // not own its life cycle and must not close it.
  bool owned = s.isNone();

  if (owned) {
    // Supported in Linux >= 2.6.27.
#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
    Try<int> fd =
      network::socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);

    if (fd.isError()) {
      return Error("Failed to create socket: " + fd.error());
    }
#else
    Try<int> fd = network::socket(AF_INET, SOCK_STREAM, 0);
    if (fd.isError()) {
      return Error("Failed to create socket: " + fd.error());
    }

    Try<Nothing> nonblock = os::nonblock(fd.get());
    if (nonblock.isError()) {
      os::close(fd.get());
      return Error("Failed to create socket, nonblock: " + nonblock.error());
    }

    Try<Nothing> cloexec = os::cloexec(fd.get());
    if (cloexec.isError()) {
      os::close(fd.get());
      return Error("Failed to create socket, cloexec: " + cloexec.error());
    }
#endif

    s = fd.get();
  }

  switch (kind) {
    case POLL: {
      Try<std::shared_ptr<Socket::Impl>> socket =
        PollSocketImpl::create(s.get());
      if (socket.isError()) {
        if (owned) {
          os::close(s.get());
        }
        return Error(socket.error());
      }
      return Socket(socket.get());
    }
#ifdef USE_SSL_SOCKET
    case SSL: {
      Try<std::shared_ptr<Socket::Impl>> socket =
        LibeventSSLSocketImpl::create(s.get());
      if (socket.isError()) {
        if (owned) {
          os::close(s.get());
        }
        return Error(socket.error());
      }
      return Socket(socket.get());
    }
#endif
    // By not setting a default we leverage the compiler errors when
    // the enumeration is augmented to find all the cases we need to
    // provide.
  }
}
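
// Typical use of the factory above (sketch): request a poll-based socket and
// surface the error if construction fails. The `#ifdef` fallback exists
// because SOCK_NONBLOCK and SOCK_CLOEXEC are only available on Linux >=
// 2.6.27; elsewhere the flags are applied after the socket is created.
Try<Socket> socket = Socket::create(Socket::POLL);

if (socket.isError()) {
  LOG(ERROR) << "Failed to create socket: " << socket.error();
}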
Example #8
  virtual void resourceOffers(
      SchedulerDriver* driver,
      const vector<Offer>& offers)
  {
    static const Try<Resources> TASK_RESOURCES = Resources::parse(resources);

    if (TASK_RESOURCES.isError()) {
      cerr << "Failed to parse resources '" << resources
           << "': " << TASK_RESOURCES.error() << endl;
      driver->abort();
      return;
    }

    foreach (const Offer& offer, offers) {
      if (!launched &&
          Resources(offer.resources()).contains(TASK_RESOURCES.get())) {
        TaskInfo task;
        task.set_name(name);
        task.mutable_task_id()->set_value(name);
        task.mutable_slave_id()->MergeFrom(offer.slave_id());
        task.mutable_resources()->CopyFrom(TASK_RESOURCES.get());

        CommandInfo* commandInfo = task.mutable_command();
        commandInfo->set_value(command);
        if (environment.isSome()) {
          Environment* environment_ = commandInfo->mutable_environment();
          foreachpair (
              const string& name, const string& value, environment.get()) {
            Environment_Variable* environmentVariable =
              environment_->add_variables();
            environmentVariable->set_name(name);
            environmentVariable->set_value(value);
          }
        }

        if (uri.isSome()) {
          task.mutable_command()->add_uris()->set_value(uri.get());
        }

        if (dockerImage.isSome()) {
          ContainerInfo containerInfo;

          if (containerizer == "mesos") {
            containerInfo.set_type(ContainerInfo::MESOS);

            ContainerInfo::MesosInfo mesosInfo;

            Image mesosImage;
            mesosImage.set_type(Image::DOCKER);
            mesosImage.mutable_docker()->set_name(dockerImage.get());
            mesosInfo.mutable_image()->CopyFrom(mesosImage);

            containerInfo.mutable_mesos()->CopyFrom(mesosInfo);
          } else if (containerizer == "docker") {
            containerInfo.set_type(ContainerInfo::DOCKER);

            ContainerInfo::DockerInfo dockerInfo;
            dockerInfo.set_image(dockerImage.get());

            containerInfo.mutable_docker()->CopyFrom(dockerInfo);
          } else {
            cerr << "Unsupported containerizer: " << containerizer << endl;;

            driver->abort();

            return;
          }

          task.mutable_container()->CopyFrom(containerInfo);
        }

        vector<TaskInfo> tasks;
        tasks.push_back(task);

        driver->launchTasks(offer.id(), tasks);
        cout << "task " << name << " submitted to slave "
             << offer.slave_id() << endl;

        launched = true;
      } else {
Example #9
    static void SetUpTestCase()
    {
        // We store the allocated objects in these results so that we can
        // have a consolidated 'cleanup()' function. This makes all the
        // 'EXIT()' calls more readable and less error prone.
        Result<EVP_PKEY*> private_key = None();
        Result<X509*> certificate = None();
        Result<EVP_PKEY*> scrap_key = None();
        Result<X509*> scrap_certificate = None();

        auto cleanup = [&private_key, &certificate, &scrap_key,
                        &scrap_certificate](bool failure = true) {
            if (private_key.isSome()) {
                EVP_PKEY_free(private_key.get());
            }
            if (certificate.isSome()) {
                X509_free(certificate.get());
            }
            if (scrap_key.isSome()) {
                EVP_PKEY_free(scrap_key.get());
            }
            if (scrap_certificate.isSome()) {
                X509_free(scrap_certificate.get());
            }

            // If we are under a failure condition, clean up any files we
            // already generated. The expected behavior is that they will be
            // cleaned up in 'TearDownTestCase()'; however, we call ABORT
            // during 'SetUpTestCase()' failures.
            if (failure) {
                cleanup_directories();
            }
        };

        // Generate the authority key.
        private_key = process::network::openssl::generate_private_rsa_key();
        if (private_key.isError()) {
            ABORT("Could not generate private key: " + private_key.error());
        }

        // Figure out the hostname that 'INADDR_LOOPBACK' will bind to.
        // Set the hostname of the certificate to this hostname so that
        // hostname verification of the certificate will pass.
        Try<std::string> hostname = net::getHostname(net::IP(INADDR_LOOPBACK));
        if (hostname.isError()) {
            cleanup();
            ABORT("Could not determine hostname of 'INADDR_LOOPBACK': " +
                  hostname.error());
        }

        // Generate an authorized certificate.
        certificate = process::network::openssl::generate_x509(
                          private_key.get(),
                          private_key.get(),
                          None(),
                          1,
                          365,
                          hostname.get());

        if (certificate.isError()) {
            cleanup();
            ABORT("Could not generate certificate: " + certificate.error());
        }

        // Write the authority key to disk.
        Try<Nothing> key_write =
            process::network::openssl::write_key_file(private_key.get(), key_path());

        if (key_write.isError()) {
            cleanup();
            ABORT("Could not write private key to disk: " + key_write.error());
        }

        // Write the authorized certificate to disk.
        Try<Nothing> certificate_write =
            process::network::openssl::write_certificate_file(
                certificate.get(),
                certificate_path());

        if (certificate_write.isError()) {
            cleanup();
            ABORT("Could not write certificate to disk: " +
                  certificate_write.error());
        }

        // Generate a scrap key.
        scrap_key = process::network::openssl::generate_private_rsa_key();
        if (scrap_key.isError()) {
            cleanup();
            ABORT("Could not generate a scrap private key: " + scrap_key.error());
        }

        // Write the scrap key to disk.
        key_write = process::network::openssl::write_key_file(
                        scrap_key.get(),
                        scrap_key_path());

        if (key_write.isError()) {
            cleanup();
            ABORT("Could not write scrap key to disk: " + key_write.error());
        }

        // Generate a scrap certificate.
        scrap_certificate = process::network::openssl::generate_x509(
                                scrap_key.get(),
                                scrap_key.get());

        if (scrap_certificate.isError()) {
            cleanup();
            ABORT("Could not generate a scrap certificate: " +
                  scrap_certificate.error());
        }

        // Write the scrap certificate to disk.
        certificate_write = process::network::openssl::write_certificate_file(
                                scrap_certificate.get(),
                                scrap_certificate_path());

        if (certificate_write.isError()) {
            cleanup();
            ABORT("Could not write scrap certificate to disk: " +
                  certificate_write.error());
        }

        // Since we successfully set up all our state, we call cleanup
        // with failure set to 'false'.
        cleanup(false);
    }
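
    // The cleanup lambda above notes that generated files are normally
    // removed in 'TearDownTestCase()'. A minimal sketch of such a companion
    // hook, assuming the same 'cleanup_directories()' helper:
    static void TearDownTestCase()
    {
        cleanup_directories();
    }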
Example #10
  Flags()
  {
    add(&Flags::environment_variable_prefix,
        "environment_variable_prefix",
        "Prefix for environment variables meant to modify the behavior of\n"
        "the container logger for the specific executor being launched.\n"
        "The logger will look for the prefixed environment variables in the\n"
        "'ExecutorInfo's 'CommandInfo's 'Environment':\n"
        "  * DESTINATION_TYPE\n"
        "  * LOGROTATE_MAX_STDOUT_SIZE\n"
        "  * LOGROTATE_STDOUT_OPTIONS\n"
        "  * LOGROTATE_MAX_STDERR_SIZE\n"
        "  * LOGROTATE_STDERR_OPTIONS\n"
        "If present, these variables will override the global values set\n"
        "via module parameters.",
        "CONTAINER_LOGGER_");

    add(&Flags::companion_dir,
        "companion_dir",
        None(),
        "Directory where this module's companion binary is located.\n"
        "The journald container logger will find the '" +
        mesos::journald::logger::NAME + "'\n"
        "binary file under this directory.",
        static_cast<const std::string*>(nullptr),
        [](const std::string& value) -> Option<Error> {
          std::string executablePath =
            path::join(value, mesos::journald::logger::NAME);

          if (!os::exists(executablePath)) {
            return Error("Cannot find: " + executablePath);
          }

          return None();
        });

    add(&Flags::logrotate_path,
        "logrotate_path",
        "If specified, the logrotate container logger will use the specified\n"
        "'logrotate' instead of the system's 'logrotate'.",
        "logrotate",
        [](const std::string& value) -> Option<Error> {
          // Check if `logrotate` exists via the help command.
          // TODO(josephw): Consider a more comprehensive check.
          Try<std::string> helpCommand =
            os::shell(value + " --help > /dev/null");

          if (helpCommand.isError()) {
            return Error(
                "Failed to check logrotate: " + helpCommand.error());
          }

          return None();
        });

    add(&Flags::max_label_payload_size,
        "max_label_payload_size",
        "Maximum size of the label data transferred to the\n"
        "logger companion binary. Can be at most one megabyte.",
        Kilobytes(10),
        [](const Bytes& value) -> Option<Error> {
          if (value > Megabytes(1)) {
            return Error(
                "Maximum --max_label_payload_size is one megabyte");
          }

          return None();
        });

    add(&Flags::libprocess_num_worker_threads,
        "libprocess_num_worker_threads",
        "Number of Libprocess worker threads.\n"
        "Defaults to 8.  Must be at least 1.",
        8u,
        [](const size_t& value) -> Option<Error> {
          if (value < 1u) {
            return Error(
                "Expected --libprocess_num_worker_threads of at least 1");
          }

          return None();
        });


    add(&Flags::fluentbit_ip,
        "fluentbit_ip",
        "IP of the Fluent Bit TCP listener.",
        [](const Option<net::IP>& value) -> Option<Error> {
          if (value.isNone()) {
            return Error("--fluentbit_ip is required");
          }

          return None();
        });

    add(&Flags::fluentbit_port,
        "fluentbit_port",
        "Port of the Fluent Bit TCP listener.");
  }
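
// The add() calls above bind to members of this Flags struct. A sketch of the
// corresponding declarations; the member types are inferred from the defaults
// and validators, so treat them as assumptions rather than the module's
// actual definition:
//
//   std::string environment_variable_prefix;
//   Option<std::string> companion_dir;
//   std::string logrotate_path;
//   Bytes max_label_payload_size;
//   size_t libprocess_num_worker_threads;
//   Option<net::IP> fluentbit_ip;
//   uint16_t fluentbit_port;   // Type assumed; not shown in the excerpt.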
Example #11
int main(int argc, char** argv)
{
  Flags flags;
  flags.setUsageMessage("Usage: " + Path(argv[0]).basename() + " <master>");

  // Load flags from environment and command line, and remove
  // them from argv.
  Try<flags::Warnings> load = flags.load(None(), &argc, &argv);

  if (flags.help) {
    cout << flags.usage() << endl;
    return EXIT_SUCCESS;
  }

  if (load.isError()) {
    cerr << flags.usage(load.error()) << endl;
    return EXIT_FAILURE;
  }

  // Log any flag warnings.
  foreach (const flags::Warning& warning, load->warnings) {
    cerr << warning.message << endl;
  }

  // 'master' argument must be the only argument left after parsing.
  if (argc != 2) {
    cerr << flags.usage("There must be only one argument: <master>") << endl;
    return EXIT_FAILURE;
  }

  string master = argv[1];
  Try<mesos::master::detector::MasterDetector*> detector =
    mesos::master::detector::MasterDetector::create(master);

  if (detector.isError()) {
    cerr << "Failed to create a master detector: " << detector.error() << endl;
    return EXIT_FAILURE;
  }

  Future<Option<MasterInfo>> masterInfo = detector.get()->detect();

  if (!masterInfo.await(flags.timeout)) {
    cerr << "Failed to detect master from '" << master
         << "' within " << flags.timeout << endl;
    return -1;
  } else {
    CHECK(!masterInfo.isDiscarded());

    if (masterInfo.isFailed()) {
      cerr << "Failed to detect master from '" << master
           << "': " << masterInfo.failure() << endl;
      return EXIT_FAILURE;
    }
  }

  // The future is not satisfied unless the result is Some.
  CHECK_SOME(masterInfo.get());
  cout << strings::remove(masterInfo.get().get().pid(), "master@") << endl;

  return EXIT_SUCCESS;
}
Example #12
Future<std::shared_ptr<SocketImpl>> PollSocketImpl::accept()
{
  // Need to hold a copy of `this` so that the underlying socket
  // doesn't end up getting reused before we return from the call to
  // `io::poll` and end up accepting a socket incorrectly.
  auto self = shared(this);

  Try<Address> address = network::address(get());
  if (address.isError()) {
    return Failure("Failed to get address: " + address.error());
  }

  int family = 0;
  if (address->family() == Address::Family::INET4) {
    family = AF_INET;
  } else if (address->family() == Address::Family::INET6) {
    family = AF_INET6;
  } else {
    return Failure("Unsupported address family. Windows only supports IP.");
  }

  Try<int_fd> accept_socket_ = net::socket(family, SOCK_STREAM, 0);
  if (accept_socket_.isError()) {
    return Failure(accept_socket_.error());
  }

  int_fd accept_socket = accept_socket_.get();

  return windows::accept(self->get(), accept_socket)
    .onAny([accept_socket](const Future<Nothing> future) {
      if (!future.isReady()) {
        os::close(accept_socket);
      }
    })
    .then([self, accept_socket]() -> Future<std::shared_ptr<SocketImpl>> {
      SOCKET listen = self->get();

      // Inherit from the listening socket.
      int res = ::setsockopt(
          accept_socket,
          SOL_SOCKET,
          SO_UPDATE_ACCEPT_CONTEXT,
          reinterpret_cast<char*>(&listen),
          sizeof(listen));

      if (res != 0) {
        const WindowsError error;
        os::close(accept_socket);
        return Failure("Failed to set accepted socket: " + error.message);
      }

      // Disable Nagle algorithm, since we care about latency more than
      // throughput. See https://en.wikipedia.org/wiki/Nagle%27s_algorithm
      // for more info.
      const int on = 1;
      res = ::setsockopt(
          accept_socket,
          SOL_TCP,
          TCP_NODELAY,
          reinterpret_cast<const char*>(&on),
          sizeof(on));

      if (res != 0) {
        const WindowsError error;
        os::close(accept_socket);
        return Failure(
            "Failed to turn off the Nagle algorithm: " + error.message);
      }

      Try<Nothing> error = io::prepare_async(accept_socket);
      if (error.isError()) {
        os::close(accept_socket);
        return Failure(
            "Failed to set socket for asynchronous IO: " + error.error());
      }

      Try<std::shared_ptr<SocketImpl>> impl = create(accept_socket);
      if (impl.isError()) {
        os::close(accept_socket);
        return Failure("Failed to create socket: " + impl.error());
      }

      return impl.get();
    });
}
Example #13
Result<T> Object::find(const std::string& path) const
{
  const std::vector<std::string>& names = strings::split(path, ".", 2);

  if (names.empty()) {
    return None();
  }

  std::string name = names[0];

  // Determine if we have an array subscript. If so, save it but
  // remove it from the name for doing the lookup.
  Option<size_t> subscript = None();
  size_t index = name.find('[');
  if (index != std::string::npos) {
    // Check for the closing bracket.
    if (name.at(name.length() - 1) != ']') {
      return Error("Malformed array subscript, expecting ']'");
    }

    // Now remove the closing bracket (last character) and everything
    // before and including the opening bracket.
    std::string s = name.substr(index + 1, name.length() - index - 2);

    // Now numify the subscript.
    Try<int> i = numify<int>(s);

    if (i.isError()) {
      return Error("Failed to numify array subscript '" + s + "'");
    } else if (i.get() < 0) {
      return Error("Array subscript '" + s + "' must be >= 0");
    }

    subscript = i.get();

    // And finally remove the array subscript from the name.
    name = name.substr(0, index);
  }

  std::map<std::string, Value>::const_iterator entry = values.find(name);

  if (entry == values.end()) {
    return None();
  }

  Value value = entry->second;

  if (value.is<Array>() && subscript.isSome()) {
    Array array = value.as<Array>();
    if (subscript.get() >= array.values.size()) {
      return None();
    }
    value = array.values[subscript.get()];
  }

  if (names.size() == 1) {
    if (!value.is<T>()) {
      // TODO(benh): Use a visitor to print out the type found.
      return Error("Found JSON value of wrong type");
    }
    return value.as<T>();
  } else if (!value.is<Object>()) {
    // TODO(benh): Use a visitor to print out the intermediate type.
    return Error("Intermediate JSON value not an object");
  }

  return value.as<Object>().find<T>(names[1]);
}
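
// A usage sketch for find<T>(): dotted paths descend into nested objects and
// a trailing "[N]" subscript indexes into an array. The JSON literal below is
// purely illustrative.
Try<JSON::Object> object = JSON::parse<JSON::Object>(
    "{ \"servers\": [ { \"host\": \"localhost\", \"port\": 8080 } ] }");

CHECK_SOME(object);

Result<JSON::Number> port =
  object.get().find<JSON::Number>("servers[0].port");

if (port.isSome()) {
  LOG(INFO) << "Found port " << port.get().as<int64_t>();  // Prints 8080.
}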
Example #14
process::Future<Option<ContainerLaunchInfo>> NetworkIsolatorProcess::prepare(
    const ContainerID& containerId,
    const ContainerConfig& containerConfig)
{
  LOG(INFO) << "NetworkIsolator::prepare for container: " << containerId;

  const ExecutorInfo executorInfo = containerConfig.executorinfo();
  if (!executorInfo.has_container()) {
    LOG(INFO) << "NetworkIsolator::prepare Ignoring request as "
              << "executorInfo.container is missing for container: "
              << containerId;
    return None();
  }

  if (executorInfo.container().network_infos().size() == 0) {
    LOG(INFO) << "NetworkIsolator::prepare Ignoring request as "
              << "executorInfo.container.network_infos is missing for "
              << "container: " << containerId;
    return None();
  }

  if (executorInfo.container().network_infos().size() > 1) {
    return Failure(
        "NetworkIsolator:: multiple NetworkInfos are not supported.");
  }

  NetworkInfo networkInfo = executorInfo.container().network_infos(0);

  if (networkInfo.has_protocol()) {
    return Failure(
      "NetworkIsolator: NetworkInfo.protocol is deprecated and unsupported.");
  }
  if (networkInfo.has_ip_address()) {
    return Failure(
      "NetworkIsolator: NetworkInfo.ip_address is deprecated and"
      " unsupported.");
  }

  string uid = UUID::random().toString();

  // Two IPAM commands:
  // 1) reserve for IPs the user has specifically asked for.
  // 2) auto-assign IPs.
  // Spin through all IPAddress messages once to get info for each command.
  // Then we'll issue each command if needed.
  IPAMReserveIPMessage reserveMessage;
  IPAMReserveIPMessage::Args* reserveArgs = reserveMessage.mutable_args();

  // Counter of IPs to auto assign.
  int numIPv4 = 0;
  foreach (const NetworkInfo::IPAddress& ipAddress, networkInfo.ip_addresses()) {
    if (ipAddress.has_ip_address() && ipAddress.has_protocol()) {
      return Failure("NetworkIsolator: Cannot include both ip_address and "
                     "protocol in a request.");
    }
    if (ipAddress.has_ip_address()) {
      // Store IP to attempt to reserve.
      reserveArgs->add_ipv4_addrs(ipAddress.ip_address());
    } else if (ipAddress.has_protocol() &&
               ipAddress.protocol() == NetworkInfo::IPv6){
      return Failure("NetworkIsolator: IPv6 is not supported at this time.");
    } else {
      // Either protocol is IPv4, or not included (in which case we default to
      // IPv4 anyway).
      numIPv4++;
    }
  }

  if (!(reserveArgs->ipv4_addrs_size() + numIPv4)) {
    return Failure(
      "NetworkIsolator: Container requires at least one IP address.");
  }

  // All the IP addresses, both reserved and allocated.
  vector<string> allAddresses;

  // Reserve provided IPs first.
  if (reserveArgs->ipv4_addrs_size()) {
    reserveArgs->set_hostname(slaveInfo.hostname());
    reserveArgs->set_uid(uid);
    reserveArgs->mutable_netgroups()->CopyFrom(networkInfo.groups());
    reserveArgs->mutable_labels()->CopyFrom(networkInfo.labels().labels());

    LOG(INFO) << "Sending IP reserve command to IPAM";
    Try<IPAMResponse> response =
      runCommand<IPAMReserveIPMessage, IPAMResponse>(
          ipamClientPath, reserveMessage);
    if (response.isError()) {
      return Failure("Error reserving IPs with IPAM: " + response.error());
    }

    string addresses = "";
    foreach (const string& addr, reserveArgs->ipv4_addrs()) {
      addresses = addresses + addr + " ";
      allAddresses.push_back(addr);
    }
    LOG(INFO) << "IP(s) " << addresses << "reserved with IPAM";
  }
Example #15
// TODO(josephw): Parse this string with a protobuf.
Try<Docker::Container> Docker::Container::create(const string& output)
{
  Try<JSON::Array> parse = JSON::parse<JSON::Array>(output);
  if (parse.isError()) {
    return Error("Failed to parse JSON: " + parse.error());
  }

  // TODO(benh): Handle the case where the short container ID was
  // not sufficiently unique and 'array.values.size() > 1'.
  JSON::Array array = parse.get();
  if (array.values.size() != 1) {
    return Error("Failed to find container");
  }

  CHECK(array.values.front().is<JSON::Object>());

  JSON::Object json = array.values.front().as<JSON::Object>();

  Result<JSON::String> idValue = json.find<JSON::String>("Id");
  if (idValue.isNone()) {
    return Error("Unable to find Id in container");
  } else if (idValue.isError()) {
    return Error("Error finding Id in container: " + idValue.error());
  }

  string id = idValue.get().value;

  Result<JSON::String> nameValue = json.find<JSON::String>("Name");
  if (nameValue.isNone()) {
    return Error("Unable to find Name in container");
  } else if (nameValue.isError()) {
    return Error("Error finding Name in container: " + nameValue.error());
  }

  string name = nameValue.get().value;

  Result<JSON::Object> stateValue = json.find<JSON::Object>("State");
  if (stateValue.isNone()) {
    return Error("Unable to find State in container");
  } else if (stateValue.isError()) {
    return Error("Error finding State in container: " + stateValue.error());
  }

  Result<JSON::Number> pidValue = stateValue.get().find<JSON::Number>("Pid");
  if (pidValue.isNone()) {
    return Error("Unable to find Pid in State");
  } else if (pidValue.isError()) {
    return Error("Error finding Pid in State: " + pidValue.error());
  }

  pid_t pid = pid_t(pidValue.get().as<int64_t>());

  Option<pid_t> optionalPid;
  if (pid != 0) {
    optionalPid = pid;
  }

  Result<JSON::String> startedAtValue =
    stateValue.get().find<JSON::String>("StartedAt");
  if (startedAtValue.isNone()) {
    return Error("Unable to find StartedAt in State");
  } else if (startedAtValue.isError()) {
    return Error("Error finding StartedAt in State: " + startedAtValue.error());
  }

  bool started = startedAtValue.get().value != "0001-01-01T00:00:00Z";

  Option<string> ipAddress;
  bool findDeprecatedIP = false;
  Result<JSON::String> networkMode =
    json.find<JSON::String>("HostConfig.NetworkMode");
  if (!networkMode.isSome()) {
    // We need to fall back to the old field as Docker added NetworkMode
    // in Docker remote API 1.15.
    VLOG(1) << "Unable to detect HostConfig.NetworkMode, "
            << "attempting deprecated IP field";
    findDeprecatedIP = true;
  } else {
    // We currently rely on the fact that we always set --net when
    // we shell out to docker run, and therefore the network mode
    // matches what --net is. Without --net, the network mode would be set
    // to 'default' and we won't be able to find the IP address as
    // it will be in 'Networks.bridge' key.
    string addressLocation = "NetworkSettings.Networks." +
                             networkMode->value + ".IPAddress";

    Result<JSON::String> ipAddressValue =
      json.find<JSON::String>(addressLocation);

    if (!ipAddressValue.isSome()) {
      // We also need to fall back to the old field as the IP address
      // field location also changed in Docker remote API 1.20.
      VLOG(1) << "Unable to detect IP Address at '" << addressLocation << "',"
              << " attempting deprecated field";
      findDeprecatedIP = true;
    } else if (!ipAddressValue->value.empty()) {
      ipAddress = ipAddressValue->value;
    }
  }

  if (findDeprecatedIP) {
    Result<JSON::String> ipAddressValue =
      json.find<JSON::String>("NetworkSettings.IPAddress");

    if (ipAddressValue.isNone()) {
      return Error("Unable to find NetworkSettings.IPAddress in container");
    } else if (ipAddressValue.isError()) {
      return Error(
        "Error finding NetworkSettings.IPAddress in container: " +
        ipAddressValue.error());
    } else if (!ipAddressValue->value.empty()) {
      ipAddress = ipAddressValue->value;
    }
  }

  return Docker::Container(output, id, name, optionalPid, started, ipAddress);
}
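
// The factory above parses the JSON emitted by `docker inspect <container>`.
// A hedged usage sketch, assuming `output` holds that JSON:
Try<Docker::Container> container = Docker::Container::create(output);

if (container.isError()) {
  LOG(WARNING) << "Failed to parse 'docker inspect' output: "
               << container.error();
}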
Example #16
Future<http::Response> Master::QuotaHandler::set(
    const http::Request& request,
    const Option<string>& principal) const
{
  VLOG(1) << "Setting quota from request: '" << request.body << "'";

  // Check that the request type is POST which is guaranteed by the master.
  CHECK_EQ("POST", request.method);

  // Validate request and extract JSON.
  // TODO(alexr): Create a type (e.g. a protobuf) for the request JSON. If we
  // move the `force` field out of the request JSON, we can reuse `QuotaInfo`.
  Try<JSON::Object> parse = JSON::parse<JSON::Object>(request.body);
  if (parse.isError()) {
    return BadRequest(
        "Failed to parse set quota request JSON '" + request.body + "': " +
        parse.error());
  }

  // Extract role from the request JSON.
  Result<JSON::String> roleJSON = parse.get().find<JSON::String>("role");

  if (roleJSON.isError()) {
    // An `Error` usually indicates that a search string is malformed
    // (which is not the case here), however it may also indicate that
    // the `role` field is not a string.
    return BadRequest(
        "Failed to extract 'role' from set quota request JSON '" +
        request.body + "': " + roleJSON.error());
  }

  if (roleJSON.isNone()) {
    return BadRequest(
        "Failed to extract 'role' from set quota request JSON '" +
        request.body + "': Field is missing");
  }

  string role = roleJSON.get().value;

  // Extract resources from the request JSON.
  Result<JSON::Array> resourcesJSON =
    parse.get().find<JSON::Array>("resources");

  if (resourcesJSON.isError()) {
    // An `Error` usually indicates that a search string is malformed
    // (which is not the case here), however it may also indicate that
    // the `resources` field is not an array.
    return BadRequest(
        "Failed to extract 'resources' from set quota request JSON '" +
        request.body + "': " + resourcesJSON.error());
  }

  if (resourcesJSON.isNone()) {
    return BadRequest(
        "Failed to extract 'resources' from set quota request JSON '" +
        request.body + "': Field is missing");
  }

  // Create protobuf representation of resources.
  Try<RepeatedPtrField<Resource>> resources =
    ::protobuf::parse<RepeatedPtrField<Resource>>(resourcesJSON.get());

  if (resources.isError()) {
    return BadRequest(
        "Failed to parse 'resources' from set quota request JSON '" +
        request.body + "': " + resources.error());
  }

  // Create the `QuotaInfo` protobuf message from the request JSON.
  Try<QuotaInfo> create = createQuotaInfo(role, resources.get());
  if (create.isError()) {
    return BadRequest(
        "Failed to create 'QuotaInfo' from set quota request JSON '" +
        request.body + "': " + create.error());
  }

  QuotaInfo quotaInfo = create.get();

  // Check that the `QuotaInfo` is a valid quota request.
  Option<Error> validateError = quota::validation::quotaInfo(quotaInfo);
  if (validateError.isSome()) {
    return BadRequest(
        "Failed to validate set quota request JSON '" + request.body + "': " +
        validateError.get().message);
  }

  // Check that the role is on the role whitelist, if it exists.
  if (!master->isWhitelistedRole(quotaInfo.role())) {
    return BadRequest(
        "Failed to validate set quota request JSON '" + request.body +
        "': Unknown role '" + quotaInfo.role() + "'");
  }

  // Check that we are not updating an existing quota.
  // TODO(joerg84): Update error message once quota update is in place.
  if (master->quotas.contains(quotaInfo.role())) {
    return BadRequest(
        "Failed to validate set quota request JSON '" + request.body +
        "': Can not set quota for a role that already has quota");
  }

  // The force flag can be used to overwrite the `capacityHeuristic` check.
  Result<JSON::Boolean> force = parse.get().find<JSON::Boolean>("force");
  if (force.isError()) {
    // An `Error` usually indicates that a search string is malformed
    // (which is not the case here), however it may also indicate that
    // the `force` field is not a boolean.
    return BadRequest(
        "Failed to extract 'force' from set quota request JSON '" +
        request.body + "': " + force.error());
  }

  const bool forced = force.isSome() ? force.get().value : false;

  if (principal.isSome()) {
    quotaInfo.set_principal(principal.get());
  }

  return authorizeSetQuota(principal, quotaInfo.role())
    .then(defer(master->self(), [=](bool authorized) -> Future<http::Response> {
      if (!authorized) {
        return Forbidden();
      }

      return _set(quotaInfo, forced);
    }));
}
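
// For illustration, a request body that the extraction above accepts looks
// roughly like the following (a sketch; the role name and resource values
// are hypothetical, and 'resources' uses the usual JSON rendering of the
// `Resource` protobuf):
//
//   {
//     "role": "analytics",
//     "resources": [
//       {"name": "cpus", "type": "SCALAR", "scalar": {"value": 4}},
//       {"name": "mem", "type": "SCALAR", "scalar": {"value": 2048}}
//     ],
//     "force": false
//   }
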
int main(int argc, char** argv)
{
  Flags flags;

  Try<Nothing> load = flags.load("MESOS_", argc, argv);

  if (load.isError()) {
    cerr << flags.usage(load.error()) << endl;
    return EXIT_FAILURE;
  }

  if (flags.help) {
    cout << flags.usage() << endl;
    return EXIT_SUCCESS;
  }

  if (flags.master.isNone()) {
    cerr << flags.usage("Missing required option --master") << endl;
    return EXIT_FAILURE;
  }

  if (flags.qps.isNone()) {
    cerr << flags.usage("Missing required option --qps") << endl;
    return EXIT_FAILURE;
  }

  if (flags.qps.get() <= 0) {
    cerr << flags.usage("--qps needs to be greater than zero") << endl;
    return EXIT_FAILURE;
  }

  // We want the logger to catch failure signals.
  mesos::internal::logging::initialize(argv[0], flags, true);

  LoadGeneratorScheduler scheduler(flags.qps.get(), flags.duration);

  FrameworkInfo framework;
  framework.set_user(""); // Have Mesos fill in the current user.
  framework.set_name("Load Generator Framework (C++)");

  const Option<string> checkpoint = os::getenv("MESOS_CHECKPOINT");
  if (checkpoint.isSome()) {
    framework.set_checkpoint(
        numify<bool>(checkpoint.get()).get());
  }

  MesosSchedulerDriver* driver;
  if (flags.authenticate) {
    cout << "Enabling authentication for the framework" << endl;

    if (flags.secret.isNone()) {
      cerr << "Expecting --secret when --authenticate is set" << endl;
      return EXIT_FAILURE;
    }

    string secret = flags.secret.get();

    Credential credential;
    credential.set_principal(flags.principal);
    credential.set_secret(strings::trim(secret));

    framework.set_principal(flags.principal);

    driver = new MesosSchedulerDriver(
        &scheduler, framework, flags.master.get(), credential);
  } else {
    framework.set_principal(flags.principal);

    driver = new MesosSchedulerDriver(
        &scheduler, framework, flags.master.get());
  }

  int status = driver->run() == DRIVER_STOPPED ? EXIT_SUCCESS : EXIT_FAILURE;

  // Ensure that the driver process terminates.
  driver->stop();

  delete driver;
  return status;
}
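
// Illustrative invocation of this load generator framework (a sketch; the
// binary name and flag values are hypothetical, but the flags correspond to
// the ones checked in main() above):
//
//   ./load-generator-framework --master=10.0.0.5:5050 \
//       --qps=100 --duration=60secs \
//       --authenticate --principal=load-gen --secret=<secret>
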
Example #18
// Recursive version of `RemoveDirectory`. NOTE: unlike `rmdir`, this requires
// Windows-formatted paths, and therefore should be in the `internal` namespace.
inline Try<Nothing> recursive_remove_directory(
    const std::string& path, bool removeRoot, bool continueOnError)
{
  // Appending a slash here if the path doesn't already have one simplifies
  // path join logic later, because (unlike Unix) Windows doesn't like double
  // slashes in paths.
  std::string current_path;

  if (!strings::endsWith(path, "\\")) {
    current_path = path + "\\";
  } else {
    current_path = path;
  }

  // Get first file matching pattern `X:\path\to\wherever\*`.
  WIN32_FIND_DATA found;
  const std::string search_pattern = current_path + "*";
  const SharedHandle search_handle(
      FindFirstFile(search_pattern.c_str(), &found),
      FindClose);

  if (search_handle.get() == INVALID_HANDLE_VALUE) {
    return WindowsError(
        "`os::internal::recursive_remove_directory` failed when searching "
        "for files with pattern '" + search_pattern + "'");
  }

  do {
    // NOTE: do-while is appropriate here because the folder is guaranteed to
    // have at least a file called `.` (and probably also one called `..`).
    const std::string current_file(found.cFileName);

    const bool is_current_directory = current_file.compare(".") == 0;
    const bool is_parent_directory = current_file.compare("..") == 0;

    // Don't try to delete `.` and `..` files in directory.
    if (is_current_directory || is_parent_directory) {
      continue;
    }

    // Path to remove.
    const std::string current_absolute_path = current_path + current_file;

    const bool is_directory = os::stat::isdir(current_absolute_path);

    // Delete current path, whether it's a directory, file, or symlink.
    if (is_directory) {
      Try<Nothing> removed = recursive_remove_directory(
          current_absolute_path, removeRoot, continueOnError);

      if (removed.isError()) {
        if (continueOnError) {
          LOG(WARNING) << "Failed to delete directory " << current_absolute_path
                       << " with error " << removed.error();
        } else {
          return Error(removed.error());
        }
      }
    } else {
      // NOTE: this also handles symbolic links.
      if (::remove(current_absolute_path.c_str()) != 0) {
        if (continueOnError) {
          LOG(WARNING)
              << "`os::internal::recursive_remove_directory`"
              << " attempted to delete file '"
              << current_absolute_path << "', but failed";
        } else {
          return WindowsError(
              "`os::internal::recursive_remove_directory` attempted to delete "
              "file '" + current_absolute_path + "', but failed");
        }
      }
    }
  } while (FindNextFile(search_handle.get(), &found));

  // Finally, remove current directory unless `removeRoot` is disabled.
  if (removeRoot && ::_rmdir(current_path.c_str()) == -1) {
    if (continueOnError) {
      LOG(WARNING) << "`os::internal::recursive_remove_directory`"
                   << " attempted to delete directory '"
                   << current_path << "', but failed";
      return ErrnoError("rmdir failed in 'continueOnError' mode");
    } else {
      return ErrnoError(
          "`os::internal::recursive_remove_directory` attempted to delete "
          "directory '" + current_path + "', but failed");
    }
  }

  return Nothing();
}
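
// Illustrative caller (a sketch; the path is hypothetical and, per the NOTE
// above, must already be Windows-formatted):
//
//   Try<Nothing> removed =
//     internal::recursive_remove_directory("C:\\tmp\\sandbox", true, false);
//
//   if (removed.isError()) {
//     LOG(WARNING) << "Failed to remove directory: " << removed.error();
//   }
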
Example #19
// This "fetcher program" is invoked by the slave's fetcher actor
// (Fetcher, FetcherProcess) to "fetch" URIs into the sandbox directory
// of a given task. Its parameters are provided in the form of the env
// var MESOS_FETCHER_INFO which contains a FetcherInfo (see
// fetcher.proto) object formatted in JSON. These are set by the actor
// to indicate what set of URIs to process and how to proceed with
// each one. A URI can be downloaded directly to the task's sandbox
// directory or it can be copied to a cache first or it can be reused
// from the cache, avoiding downloading. All cache management and
// bookkeeping is centralized in the slave's fetcher actor, which can
// have multiple instances of this fetcher program running at any
// given time. Exit code: 0 if entirely successful, otherwise 1.
int main(int argc, char* argv[])
{
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  mesos::internal::logging::Flags flags;

  Try<Nothing> load = flags.load("MESOS_", argc, argv);

  CHECK_SOME(load) << "Could not load flags: " << load.error();

  logging::initialize(argv[0], flags, true); // Catch signals.

  const Option<std::string> jsonFetcherInfo = os::getenv("MESOS_FETCHER_INFO");
  CHECK_SOME(jsonFetcherInfo)
    << "Missing MESOS_FETCHER_INFO environment variable";

  LOG(INFO) << "Fetcher Info: " << jsonFetcherInfo.get();

  Try<JSON::Object> parse = JSON::parse<JSON::Object>(jsonFetcherInfo.get());
  CHECK_SOME(parse) << "Failed to parse MESOS_FETCHER_INFO: " << parse.error();

  Try<FetcherInfo> fetcherInfo = ::protobuf::parse<FetcherInfo>(parse.get());
  CHECK_SOME(fetcherInfo)
    << "Failed to parse FetcherInfo: " << fetcherInfo.error();

  CHECK(!fetcherInfo.get().sandbox_directory().empty())
    << "Missing sandbox directory";

  const string sandboxDirectory = fetcherInfo.get().sandbox_directory();

  const Option<string> cacheDirectory =
    fetcherInfo.get().has_cache_directory() ?
      Option<string>::some(fetcherInfo.get().cache_directory()) :
        Option<string>::none();

  const Option<string> frameworksHome =
    fetcherInfo.get().has_frameworks_home() ?
      Option<string>::some(fetcherInfo.get().frameworks_home()) :
        Option<string>::none();

  // Fetch each URI to a local file, chmod, then chown if a user is provided.
  foreach (const FetcherInfo::Item& item, fetcherInfo.get().items()) {
    Try<string> fetched =
      fetch(item, cacheDirectory, sandboxDirectory, frameworksHome);
    if (fetched.isError()) {
      EXIT(1) << "Failed to fetch '" << item.uri().value()
              << "': " + fetched.error();
    } else {
      LOG(INFO) << "Fetched '" << item.uri().value()
                << "' to '" << fetched.get() << "'";
    }
  }

  // Recursively chown the sandbox directory if a user is provided.
  if (fetcherInfo.get().has_user()) {
    Try<Nothing> chowned = os::chown(
        fetcherInfo.get().user(),
        sandboxDirectory);
    if (chowned.isError()) {
      EXIT(1) << "Failed to chown " << sandboxDirectory
              << ": " << chowned.error();
    }
  }

  return 0;
}
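
// Illustrative MESOS_FETCHER_INFO payload (a sketch; the values are
// hypothetical and fetcher.proto remains the authoritative schema). Each
// entry in 'items' carries the URI read via item.uri() above:
//
//   {
//     "sandbox_directory": "/var/lib/mesos/slaves/S0/frameworks/F0/executors/E0/runs/latest",
//     "cache_directory": "/tmp/mesos/fetch/nobody",
//     "user": "nobody",
//     "items": [
//       {"uri": {"value": "http://example.com/app.tar.gz", "extract": true}}
//     ]
//   }
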
Example #20
void execute(const string& script)
{
  // Create a temporary directory for the test.
  Try<string> directory = environment->mkdtemp();

  CHECK_SOME(directory) << "Failed to create temporary directory";

  if (flags.verbose) {
    std::cerr << "Using temporary directory '"
              << directory.get() << "'" << std::endl;
  }

  // Determine the path for the script.
  Result<string> path = os::realpath(getTestScriptPath(script));

  if (!path.isSome()) {
    FAIL() << "Failed to locate script "
           << script << ": "
           << (path.isError() ? path.error() : "No such file or directory");
  }

  // Fork a process to change directory and run the test.
  pid_t pid;
  if ((pid = fork()) == -1) {
    FAIL() << "Failed to fork to launch script";
  }

  if (pid > 0) {
    // In parent process.
    int status;
    while (wait(&status) != pid || WIFSTOPPED(status));
    CHECK(WIFEXITED(status) || WIFSIGNALED(status));

    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
      FAIL() << script << " " << WSTRINGIFY(status);
    }
  } else {
    // In child process. DO NOT USE GLOG!

    // Start by cd'ing into the temporary directory.
    Try<Nothing> chdir = os::chdir(directory.get());
    if (chdir.isError()) {
      std::cerr << "Failed to chdir to '" << directory.get() << "': "
                << chdir.error() << std::endl;
      abort();
    }

    // Redirect output to /dev/null unless the test is verbose.
    if (!flags.verbose) {
      if (freopen("/dev/null", "w", stdout) == NULL ||
          freopen("/dev/null", "w", stderr) == NULL) {
        std::cerr << "Failed to redirect stdout/stderr to /dev/null:"
                  << os::strerror(errno) << std::endl;
        abort();
      }
    }

    // Set up the environment for executing the script. We might be running from
    // the Mesos source tree or from an installed version of the tests. In the
    // latter case, all of the variables below are swizzled to point to the
    // installed locations, except MESOS_SOURCE_DIR. Scripts that make use of
    // MESOS_SOURCE_DIR are expected to gracefully degrade if the Mesos source
    // is no longer present.
    os::setenv("MESOS_BUILD_DIR", flags.build_dir);
    os::setenv("MESOS_HELPER_DIR", getTestHelperDir());
    os::setenv("MESOS_LAUNCHER_DIR", getLauncherDir());
    os::setenv("MESOS_SBIN_DIR", getSbinDir());
    os::setenv("MESOS_SOURCE_DIR", flags.source_dir);
    os::setenv("MESOS_WEBUI_DIR", getWebUIDir());

    // Enable replicated log based registry.
    os::setenv("MESOS_REGISTRY", "replicated_log");

    // Enable authentication.
    os::setenv("MESOS_AUTHENTICATE_FRAMEWORKS", "true");

    // Create test credentials.
    const string& credentials =
      DEFAULT_CREDENTIAL.principal() + " " + DEFAULT_CREDENTIAL.secret();

    const string& credentialsPath =
      path::join(directory.get(), "credentials");

    CHECK_SOME(os::write(credentialsPath, credentials))
      << "Failed to write credentials to '" << credentialsPath << "'";

    os::setenv("MESOS_CREDENTIALS", "file://" + credentialsPath);

    // We set test credentials here for example frameworks to use.
    os::setenv("DEFAULT_PRINCIPAL", DEFAULT_CREDENTIAL.principal());
    os::setenv("DEFAULT_SECRET", DEFAULT_CREDENTIAL.secret());

    // TODO(bmahler): Update the example frameworks to use flags and
    // remove the special DEFAULT_* environment variables above.
    os::setenv("MESOS_PRINCIPAL", DEFAULT_CREDENTIAL.principal());
    os::setenv("MESOS_SECRET", DEFAULT_CREDENTIAL.secret());

    // Create test ACLs.
    ACLs acls;
    acls.set_permissive(false);

    mesos::ACL::RunTask* run = acls.add_run_tasks();
    run->mutable_principals()->add_values(DEFAULT_CREDENTIAL.principal());

    Result<string> user = os::user();
    CHECK_SOME(user) << "Failed to get current user name";
    run->mutable_users()->add_values(user.get());

    mesos::ACL::RegisterFramework* register_ = acls.add_register_frameworks();
    register_->mutable_principals()->add_values(DEFAULT_CREDENTIAL.principal());
    register_->mutable_roles()->add_values("*");

    const string& aclsPath = path::join(directory.get(), "acls");

    CHECK_SOME(os::write(aclsPath, stringify(JSON::protobuf(acls))))
      << "Failed to write ACLs to '" << aclsPath << "'";

    os::setenv("MESOS_ACLS", "file://" + aclsPath);

    // Now execute the script.
    execl(path.get().c_str(), path.get().c_str(), (char*) NULL);

    std::cerr << "Failed to execute '" << script << "': "
              << os::strerror(errno) << std::endl;
    abort();
  }
}
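
// Illustrative use from a test (a sketch; the fixture and script name are
// hypothetical, and the script path is resolved by getTestScriptPath() above):
//
//   TEST_F(ExamplesTest, SomeFramework)
//   {
//     execute("some_framework_test.sh");
//   }
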
Example #21
Option<Error> validate(const mesos::executor::Call& call)
{
  if (!call.IsInitialized()) {
    return Error("Not initialized: " + call.InitializationErrorString());
  }

  if (!call.has_type()) {
    return Error("Expecting 'type' to be present");
  }

  // All calls should have executor id set.
  if (!call.has_executor_id()) {
    return Error("Expecting 'executor_id' to be present");
  }

  // All calls should have framework id set.
  if (!call.has_framework_id()) {
    return Error("Expecting 'framework_id' to be present");
  }

  switch (call.type()) {
    case mesos::executor::Call::SUBSCRIBE: {
      if (!call.has_subscribe()) {
        return Error("Expecting 'subscribe' to be present");
      }
      return None();
    }

    case mesos::executor::Call::UPDATE: {
      if (!call.has_update()) {
        return Error("Expecting 'update' to be present");
      }

      const TaskStatus& status = call.update().status();

      if (!status.has_uuid()) {
        return Error("Expecting 'uuid' to be present");
      }

      Try<UUID> uuid = UUID::fromBytes(status.uuid());
      if (uuid.isError()) {
        return uuid.error();
      }

      if (status.has_executor_id() &&
          status.executor_id().value()
          != call.executor_id().value()) {
        return Error("ExecutorID in Call: " +
                     call.executor_id().value() +
                     " does not match ExecutorID in TaskStatus: " +
                     call.update().status().executor_id().value()
                     );
      }

      if (status.source() != TaskStatus::SOURCE_EXECUTOR) {
        return Error("Received Call from executor " +
                     call.executor_id().value() +
                     " of framework " +
                     call.framework_id().value() +
                     " with invalid source, expecting 'SOURCE_EXECUTOR'"
                     );
      }

      if (status.state() == TASK_STAGING) {
        return Error("Received TASK_STAGING from executor " +
                     call.executor_id().value() +
                     " of framework " +
                     call.framework_id().value() +
                     " which is not allowed"
                     );
      }

      // TODO(alexr): Validate `check_status` is present if
      // the corresponding `TaskInfo.check` has been defined.

      if (status.has_check_status()) {
        Option<Error> validate =
          checks::validation::checkStatusInfo(status.check_status());

        if (validate.isSome()) {
          return validate.get();
        }
      }

      return None();
    }

    case mesos::executor::Call::MESSAGE: {
      if (!call.has_message()) {
        return Error("Expecting 'message' to be present");
      }
      return None();
    }

    case mesos::executor::Call::UNKNOWN: {
      return None();
    }
  }
  UNREACHABLE();
}
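
// Illustrative caller (a sketch with hypothetical IDs) showing the checks
// above rejecting a call without a framework ID:
//
//   mesos::executor::Call call;
//   call.set_type(mesos::executor::Call::SUBSCRIBE);
//   call.mutable_executor_id()->set_value("default-executor");
//   // 'framework_id' intentionally left unset.
//
//   Option<Error> error = validate(call);
//   CHECK(error.isSome());  // "Expecting 'framework_id' to be present".
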
Example #22
// Executes a subprocess.
//
// NOTE: On Windows, components of the `path` and `argv` that need to be quoted
// are expected to have been quoted before they are passed to `subprocess`. For
// example, either of these may contain paths with spaces in them, like
// `C:\"Program Files"\foo.exe`, where notably the character sequence `\"` is
// not an escaped quote, but instead a path separator and the start of a path
// component. Since the semantics of quoting are shell-dependent, it is not
// practical to attempt to re-parse the command that is passed in and properly
// escape it. Therefore, incorrectly-quoted command arguments will probably
// lead the child process to terminate with an error.
Try<Subprocess> subprocess(
    const string& path,
    vector<string> argv,
    const Subprocess::IO& in,
    const Subprocess::IO& out,
    const Subprocess::IO& err,
    const flags::FlagsBase* flags,
    const Option<map<string, string>>& environment,
    const Option<lambda::function<
        pid_t(const lambda::function<int()>&)>>& _clone,
    const vector<Subprocess::ParentHook>& parent_hooks,
    const vector<Subprocess::ChildHook>& child_hooks)
{
  // File descriptors for redirecting stdin/stdout/stderr.
  // These file descriptors are used for different purposes depending
  // on the specified I/O modes.
  // See `Subprocess::PIPE`, `Subprocess::PATH`, and `Subprocess::FD`.
  InputFileDescriptors stdinfds;
  OutputFileDescriptors stdoutfds;
  OutputFileDescriptors stderrfds;

  // Prepare the file descriptor(s) for stdin.
  Try<InputFileDescriptors> input = in.input();
  if (input.isError()) {
    return Error(input.error());
  }

  stdinfds = input.get();

  // Prepare the file descriptor(s) for stdout.
  Try<OutputFileDescriptors> output = out.output();
  if (output.isError()) {
    process::internal::close(stdinfds, stdoutfds, stderrfds);
    return Error(output.error());
  }

  stdoutfds = output.get();

  // Prepare the file descriptor(s) for stderr.
  output = err.output();
  if (output.isError()) {
    process::internal::close(stdinfds, stdoutfds, stderrfds);
    return Error(output.error());
  }

  stderrfds = output.get();

#ifndef __WINDOWS__
  // TODO(jieyu): Consider using O_CLOEXEC for atomic close-on-exec.
  Try<Nothing> cloexec = internal::cloexec(stdinfds, stdoutfds, stderrfds);
  if (cloexec.isError()) {
    process::internal::close(stdinfds, stdoutfds, stderrfds);
    return Error("Failed to cloexec: " + cloexec.error());
  }
#endif // __WINDOWS__

  // Prepare the arguments. If the user specifies the 'flags', we will
  // stringify them and append them to the existing arguments.
  if (flags != nullptr) {
    foreachvalue (const flags::Flag& flag, *flags) {
      Option<string> value = flag.stringify(*flags);
      if (value.isSome()) {
        argv.push_back("--" + flag.effective_name().value + "=" + value.get());
      }
    }
  }
Example #23
// Reads from /proc/cpuinfo and returns a list of CPUs.
inline Try<std::list<CPU>> cpus()
{
  std::list<CPU> results;

  std::ifstream file("/proc/cpuinfo");

  if (!file.is_open()) {
    return Error("Failed to open /proc/cpuinfo");
  }

  // Placeholders as we parse the file.
  Option<unsigned int> id;
  Option<unsigned int> core;
  Option<unsigned int> socket;

  std::string line;
  while (std::getline(file, line)) {
    if (line.find("processor") == 0 ||
        line.find("physical id") == 0 ||
        line.find("core id") == 0) {
      // Get out and parse the value.
      std::vector<std::string> tokens = strings::tokenize(line, ": ");

      if (tokens.size() < 2) {
        return Error("Unexpected format in /proc/cpuinfo: " +
                     stringify(tokens));
      }

      Try<unsigned int> value = numify<unsigned int>(tokens.back());
      if (value.isError()) {
        return Error(value.error());
      }

      // Now save the value.
      if (line.find("processor") == 0) {
        if (id.isSome()) {
          // The physical id and core id are not present in this case.
          results.push_back(CPU(id.get(), 0, 0));
        }
        id = value.get();
      } else if (line.find("physical id") == 0) {
        if (socket.isSome()) {
          return Error("Unexpected format in /proc/cpuinfo");
        }
        socket = value.get();
      } else if (line.find("core id") == 0) {
        if (core.isSome()) {
          return Error("Unexpected format in /proc/cpuinfo");
        }
        core = value.get();
      }

      // And finally create a CPU if we have all the information.
      if (id.isSome() && core.isSome() && socket.isSome()) {
        results.push_back(CPU(id.get(), core.get(), socket.get()));
        id = None();
        core = None();
        socket = None();
      }
    }
  }

  // Add the last processor if the physical id and core id were not present.
  if (id.isSome()) {
    // The physical id and core id are not present.
    results.push_back(CPU(id.get(), 0, 0));
  }

  if (file.fail() && !file.eof()) {
    return Error("Failed to read /proc/cpuinfo");
  }

  return results;
}
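
// Illustrative /proc/cpuinfo excerpt that the parser above consumes (values
// are hypothetical; only the "processor", "physical id" and "core id" lines
// matter here):
//
//   processor       : 0
//   physical id     : 0
//   core id         : 0
//   processor       : 1
//   physical id     : 0
//   core id         : 1
//
// For this excerpt the function returns CPU(0, 0, 0) and CPU(1, 1, 0),
// i.e. (id, core, socket) for each logical processor.
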
Example #24
Future<Nothing> NetworkCniIsolatorProcess::_attach(
    const ContainerID& containerId,
    const string& networkName,
    const string& plugin,
    const tuple<Future<Option<int>>, Future<string>>& t)
{
  CHECK(infos.contains(containerId));
  CHECK(infos[containerId]->networkInfos.contains(networkName));

  Future<Option<int>> status = std::get<0>(t);
  if (!status.isReady()) {
    return Failure(
        "Failed to get the exit status of the CNI plugin '" +
        plugin + "' subprocess: " +
        (status.isFailed() ? status.failure() : "discarded"));
  }

  if (status->isNone()) {
    return Failure(
        "Failed to reap the CNI plugin '" + plugin + "' subprocess");
  }

  // CNI plugin will print result (in case of success) or error (in
  // case of failure) to stdout.
  Future<string> output = std::get<1>(t);
  if (!output.isReady()) {
    return Failure(
        "Failed to read stdout from the CNI plugin '" +
        plugin + "' subprocess: " +
        (output.isFailed() ? output.failure() : "discarded"));
  }

  if (status.get() != 0) {
    return Failure(
        "The CNI plugin '" + plugin + "' failed to attach container " +
        containerId.value() + " to CNI network '" + networkName +
        "': " + output.get());
  }

  // Parse the output of CNI plugin.
  Try<spec::NetworkInfo> parse = spec::parseNetworkInfo(output.get());
  if (parse.isError()) {
    return Failure(
        "Failed to parse the output of the CNI plugin '" +
        plugin + "': " + parse.error());
  }

  if (parse.get().has_ip4()) {
    LOG(INFO) << "Got assigned IPv4 address '" << parse.get().ip4().ip()
              << "' from CNI network '" << networkName
              << "' for container " << containerId;
  }

  if (parse.get().has_ip6()) {
    LOG(INFO) << "Got assigned IPv6 address '" << parse.get().ip6().ip()
              << "' from CNI network '" << networkName
              << "' for container " << containerId;
  }

  // Checkpoint the output of CNI plugin.
  // The destruction of the container cannot happen in the middle of
  // 'attach()' and '_attach()' because the containerizer will wait
  // for 'isolate()' to finish before destroying the container.
  NetworkInfo& networkInfo = infos[containerId]->networkInfos[networkName];

  const string networkInfoPath = paths::getNetworkInfoPath(
      rootDir.get(),
      containerId.value(),
      networkName,
      networkInfo.ifName);

  Try<Nothing> write = os::write(networkInfoPath, output.get());
  if (write.isError()) {
    return Failure(
        "Failed to checkpoint the output of CNI plugin'" +
        output.get() + "': " + write.error());
  }

  networkInfo.network = parse.get();

  return Nothing();
}
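
// Illustrative CNI plugin stdout accepted by spec::parseNetworkInfo() above
// (a sketch; the exact fields depend on the CNI spec version and the plugin):
//
//   {
//     "ip4": { "ip": "172.17.0.2/16" },
//     "dns": { "nameservers": ["8.8.8.8"] }
//   }
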
Example #25
inline Try<Nothing> FlagsBase::load(
    const Option<std::string>& prefix,
    int* argc,
    char*** argv,
    bool unknowns,
    bool duplicates)
{
  std::map<std::string, Option<std::string>> envValues;
  std::map<std::string, Option<std::string>> cmdValues;

  if (prefix.isSome()) {
    envValues = extract(prefix.get());
  }

  // Grab the program name from argv, without removing it.
  programName_ = *argc > 0 ? Path(*(argv[0])).basename() : "";

  // Keep the arguments that are not being processed as flags.
  std::vector<char*> args;

  // Read flags from the command line.
  for (int i = 1; i < *argc; i++) {
    const std::string arg(strings::trim((*argv)[i]));

    // Stop parsing flags after '--' is encountered.
    if (arg == "--") {
      // Save the rest of the arguments.
      for (int j = i + 1; j < *argc; j++) {
        args.push_back((*argv)[j]);
      }
      break;
    }

    // Skip anything that doesn't look like a flag.
    if (arg.find("--") != 0) {
      args.push_back((*argv)[i]);
      continue;
    }

    std::string name;
    Option<std::string> value = None();

    size_t eq = arg.find_first_of("=");
    if (eq == std::string::npos && arg.find("--no-") == 0) { // --no-name
      name = arg.substr(2);
    } else if (eq == std::string::npos) {                    // --name
      name = arg.substr(2);
    } else {                                                 // --name=value
      name = arg.substr(2, eq - 2);
      value = arg.substr(eq + 1);
    }

    name = strings::lower(name);

    if (!duplicates) {
      if (cmdValues.count(name) > 0 ||
          (name.find("no-") == 0 && cmdValues.count(name.substr(3)) > 0)) {
        return Error("Duplicate flag '" + name + "' on command line");
      }
    }

    cmdValues[name] = value;
  }

  cmdValues.insert(envValues.begin(), envValues.end());

  Try<Nothing> result = load(cmdValues, unknowns);

  // Update 'argc' and 'argv' if we successfully loaded the flags.
  if (!result.isError()) {
    CHECK_LE(args.size(), (size_t) *argc);
    size_t i = 1; // Start at '1' to skip argv[0].
    foreach (char* arg, args) {
      (*argv)[i++] = arg;
    }

    *argc = i;

    // Now null terminate the array. Note that we'll "leak" the
    // arguments that were processed here but it's not like they would
    // have gotten deleted in normal operations anyway.
    (*argv)[i++] = NULL;
  }

  return result;
}
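
// Illustrative caller (a sketch; `MyFlags` is a hypothetical FlagsBase
// subclass, and the trailing `unknowns`/`duplicates` arguments are assumed
// to have defaults in the declaration). This overload rewrites argc/argv so
// that only the arguments not consumed as flags remain:
//
//   MyFlags flags;
//   Try<Nothing> load = flags.load("MYAPP_", &argc, &argv);
//   if (load.isError()) {
//     std::cerr << flags.usage(load.error()) << std::endl;
//   }
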
Example #26
  Flags()
  {
    setUsageMessage(
      "Usage: " + NAME + " [options]\n"
      "\n"
      "This command pipes from STDIN to the given leading log file.\n"
      "When the leading log file reaches '--max_size', the command.\n"
      "uses 'logrotate' to rotate the logs.  All 'logrotate' options\n"
      "are supported.  See '--logrotate_options'.\n"
      "\n");

    add(&max_size,
        "max_size",
        "Maximum size, in bytes, of a single log file.\n"
        "Defaults to 10 MB.  Must be at least 1 (memory) page.",
        Megabytes(10),
        [](const Bytes& value) -> Option<Error> {
          if (value.bytes() < (size_t) sysconf(_SC_PAGE_SIZE)) {
            return Error(
                "Expected --max_size of at least " +
                stringify(sysconf(_SC_PAGE_SIZE)) + " bytes");
          }
          return None();
        });

    add(&logrotate_options,
        "logrotate_options",
        "Additional config options to pass into 'logrotate'.\n"
        "This string will be inserted into a 'logrotate' configuration file.\n"
        "i.e.\n"
        "  /path/to/<log_filename> {\n"
        "    <logrotate_options>\n"
        "    size <max_size>\n"
        "  }\n"
        "NOTE: The 'size' option will be overriden by this command.");

    add(&log_filename,
        "log_filename",
        "Absolute path to the leading log file.\n"
        "NOTE: This command will also create two files by appending\n"
        "'" + CONF_SUFFIX + "' and '" + STATE_SUFFIX + "' to the end of\n"
        "'--log_filename'.  These files are used by 'logrotate'.",
        [](const Option<std::string>& value) -> Option<Error> {
          if (value.isNone()) {
            return Error("Missing required option --log_filename");
          }

          if (!path::absolute(value.get())) {
            return Error("Expected --log_filename to be an absolute path");
          }

          return None();
        });

    add(&logrotate_path,
        "logrotate_path",
        "If specified, this command will use the specified\n"
        "'logrotate' instead of the system's 'logrotate'.",
        "logrotate",
        [](const std::string& value) -> Option<Error> {
          // Check if `logrotate` exists via the help command.
          // TODO(josephw): Consider a more comprehensive check.
          Try<std::string> helpCommand =
            os::shell(value + " --help > /dev/null");

          if (helpCommand.isError()) {
            return Error(
                "Failed to check logrotate: " + helpCommand.error());
          }

          return None();
        });
  }
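
// Illustrative invocation (a sketch; the binary name and values are
// hypothetical, the flags are the ones added above):
//
//   <logrotate-logger> --log_filename=/var/log/app/stdout \
//       --max_size=20MB \
//       --logrotate_options="rotate 5" \
//       --logrotate_path=/usr/sbin/logrotate
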
Example #27
Try<Manifest> Manifest::create(const string& jsonString)
{
    Try<JSON::Object> manifestJSON = JSON::parse<JSON::Object>(jsonString);

    if (manifestJSON.isError()) {
      return Error(manifestJSON.error());
    }

    Result<JSON::String> name = manifestJSON.get().find<JSON::String>("name");
    if (name.isNone()) {
      return Error("Failed to find \"name\" in manifest response");
    }

    Result<JSON::Array> fsLayersJSON =
      manifestJSON.get().find<JSON::Array>("fsLayers");

    if (fsLayersJSON.isNone()) {
      return Error("Failed to find \"fsLayers\" in manifest response");
    }

    Result<JSON::Array> historyArray =
      manifestJSON.get().find<JSON::Array>("history");

    if (historyArray.isNone()) {
      return Error("Failed to find \"history\" in manifest response");
    }

    if (historyArray.get().values.size() != fsLayersJSON.get().values.size()) {
      return Error(
          "\"history\" and \"fsLayers\" array count mismatch"
          "in manifest response");
    }

    vector<FileSystemLayerInfo> fsLayers;

    // We add layers in reverse order because 'fsLayers' in the manifest
    // response is ordered with the latest layer on the top. When we apply the
    // layer changes, we want the filesystem modification order to be the same
    // as its history(oldest layer applied first).
    for (size_t index = fsLayersJSON.get().values.size(); index-- > 0; ) {
      const JSON::Value& layer = fsLayersJSON.get().values[index];

      if (!layer.is<JSON::Object>()) {
        return Error(
            "Failed to parse layer as a JSON object for index: " +
            stringify(index));
      }

      const JSON::Object& layerInfoJSON = layer.as<JSON::Object>();

      // Get blobsum for layer.
      const Result<JSON::String> blobSumInfo =
        layerInfoJSON.find<JSON::String>("blobSum");

      if (blobSumInfo.isNone()) {
        return Error("Failed to find \"blobSum\" in manifest response");
      }

      // Get history for layer.
      if (!historyArray.get().values[index].is<JSON::Object>()) {
        return Error(
            "Failed to parse history as a JSON object for index: " +
            stringify(index));
      }
      const JSON::Object& historyObj =
        historyArray.get().values[index].as<JSON::Object>();

      // Get layer id.
      const Result<JSON::String> v1CompatibilityJSON =
        historyObj.find<JSON::String>("v1Compatibility");

      if (!v1CompatibilityJSON.isSome()) {
        return Error(
            "Failed to obtain layer v1 compability json in manifest for layer: "
            + stringify(index));
      }

      Try<JSON::Object> v1CompatibilityObj =
        JSON::parse<JSON::Object>(v1CompatibilityJSON.get().value);

      if (!v1CompatibilityObj.isSome()) {
        return Error(
            "Failed to parse v1 compability json in manifest for layer: "
            + stringify(index));
      }

      const Result<JSON::String> id =
        v1CompatibilityObj.get().find<JSON::String>("id");

      if (!id.isSome()) {
        return Error(
            "Failed to find \"id\" in manifest for layer: " + stringify(index));
      }

      fsLayers.emplace_back(
          FileSystemLayerInfo{
            blobSumInfo.get().value,
            id.get().value,
          });
    }

    return Manifest{name.get().value, fsLayers};
}
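
// Illustrative manifest excerpt (Docker v2, schema 1) that create() above
// parses; the repository name, digest, and layer id are hypothetical and
// truncated for brevity:
//
//   {
//     "name": "library/busybox",
//     "fsLayers": [
//       { "blobSum": "sha256:a3ed95caeb02..." }
//     ],
//     "history": [
//       { "v1Compatibility": "{\"id\": \"3d67229f3f74...\"}" }
//     ]
//   }
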
Example #28
void initialize(
    const string& _argv0,
    const Flags& flags,
    bool installFailureSignalHandler)
{
  static Once* initialized = new Once();

  if (initialized->once()) {
    return;
  }

  argv0 = _argv0;

  if (flags.logging_level != "INFO" &&
      flags.logging_level != "WARNING" &&
      flags.logging_level != "ERROR") {
    EXIT(1) << "'" << flags.logging_level << "' is not a valid logging level."
               " Possible values for 'logging_level' flag are: "
               " 'INFO', 'WARNING', 'ERROR'.";
  }

  FLAGS_minloglevel = getLogSeverity(flags.logging_level);

  if (flags.log_dir.isSome()) {
    Try<Nothing> mkdir = os::mkdir(flags.log_dir.get());
    if (mkdir.isError()) {
      EXIT(1) << "Could not initialize logging: Failed to create directory "
              << flags.log_dir.get() << ": " << mkdir.error();
    }
    FLAGS_log_dir = flags.log_dir.get();
    // Do not log to stderr instead of log files.
    FLAGS_logtostderr = false;
  } else {
    // Log to stderr instead of log files.
    FLAGS_logtostderr = true;
  }

  // Log everything to stderr IN ADDITION to log files unless
  // otherwise specified.
  if (flags.quiet) {
    FLAGS_stderrthreshold = 3; // FATAL.

    // FLAGS_stderrthreshold is ignored when logging to stderr instead
    // of log files. Setting the minimum log level gets around this issue.
    if (FLAGS_logtostderr) {
      FLAGS_minloglevel = 3; // FATAL.
    }
  } else {
    FLAGS_stderrthreshold = FLAGS_minloglevel;
  }

  FLAGS_logbufsecs = flags.logbufsecs;

  google::InitGoogleLogging(argv0.c_str());
  if (flags.log_dir.isSome()) {
    // Log this message in order to create the log file; this is because GLOG
    // creates the log file once the first log message occurs; also recreate
    // the file if it has been created on a previous run.
    LOG_AT_LEVEL(FLAGS_minloglevel)
      << google::GetLogSeverityName(FLAGS_minloglevel)
      << " level logging started!";
  }

  VLOG(1) << "Logging to " <<
    (flags.log_dir.isSome() ? flags.log_dir.get() : "STDERR");

  if (installFailureSignalHandler) {
    // Handles SIGSEGV, SIGILL, SIGFPE, SIGABRT, SIGBUS, SIGTERM
    // by default.
    google::InstallFailureSignalHandler();

    // Set up our custom signal handlers.
    struct sigaction action;
    action.sa_handler = handler;

    // Do not block additional signals while in the handler.
    sigemptyset(&action.sa_mask);
    action.sa_flags = 0;

    // Set up the SIGPIPE signal handler to escalate to SIGABRT
    // in order to have the glog handler catch it and print all
    // of its lovely information.
    if (sigaction(SIGPIPE, &action, NULL) < 0) {
      PLOG(FATAL) << "Failed to set sigaction";
    }

    // We also do not want SIGTERM to dump a stacktrace, as this
    // can imply that we crashed, when we were in fact terminated
    // by user request.
    if (sigaction(SIGTERM, &action, NULL) < 0) {
      PLOG(FATAL) << "Failed to set sigaction";
    }
  }

  initialized->done();
}
Example #29
int main(int argc, char** argv)
{
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  master::Flags flags;

  // The following flags are executable specific (e.g., since we only
  // have one instance of libprocess per execution, we only want to
  // advertise the IP and port option once, here).
  Option<string> ip;
  flags.add(&ip, "ip", "IP address to listen on");

  uint16_t port;
  flags.add(&port, "port", "Port to listen on", MasterInfo().port());

  Option<string> zk;
  flags.add(&zk,
            "zk",
            "ZooKeeper URL (used for leader election amongst masters)\n"
            "May be one of:\n"
            "  zk://host1:port1,host2:port2,.../path\n"
            "  zk://username:password@host1:port1,host2:port2,.../path\n"
            "  file:///path/to/file (where file contains one of the above)");

  bool help;
  flags.add(&help,
            "help",
            "Prints this help message",
            false);

  Try<Nothing> load = flags.load("MESOS_", argc, argv);

  if (load.isError()) {
    cerr << load.error() << endl;
    usage(argv[0], flags);
    exit(1);
  }

  if (flags.version) {
    version();
    exit(0);
  }

  if (help) {
    usage(argv[0], flags);
    exit(1);
  }

  // Initialize libprocess.
  if (ip.isSome()) {
    os::setenv("LIBPROCESS_IP", ip.get());
  }

  os::setenv("LIBPROCESS_PORT", stringify(port));

  process::initialize("master");

  logging::initialize(argv[0], flags, true); // Catch signals.

  LOG(INFO) << "Build: " << build::DATE << " by " << build::USER;

  LOG(INFO) << "Version: " << MESOS_VERSION;

  if (build::GIT_TAG.isSome()) {
    LOG(INFO) << "Git tag: " << build::GIT_TAG.get();
  }

  if (build::GIT_SHA.isSome()) {
    LOG(INFO) << "Git SHA: " << build::GIT_SHA.get();
  }

  allocator::AllocatorProcess* allocatorProcess =
    new allocator::HierarchicalDRFAllocatorProcess();
  allocator::Allocator* allocator =
    new allocator::Allocator(allocatorProcess);

  state::Storage* storage = NULL;
  Log* log = NULL;

  if (flags.registry == "in_memory") {
    if (flags.registry_strict) {
      EXIT(1) << "Cannot use '--registry_strict' when using in-memory storage"
              << " based registry";
    }
    storage = new state::InMemoryStorage();
  } else if (flags.registry == "replicated_log" ||
             flags.registry == "log_storage") {
    // TODO(bmahler): "log_storage" is present for backwards
    // compatibility, can be removed before 0.19.0.
    if (flags.work_dir.isNone()) {
      EXIT(1) << "--work_dir needed for replicated log based registry";
    }

    Try<Nothing> mkdir = os::mkdir(flags.work_dir.get());
    if (mkdir.isError()) {
      EXIT(1) << "Failed to create work directory '" << flags.work_dir.get()
              << "': " << mkdir.error();
    }

    if (zk.isSome()) {
      // Use replicated log with ZooKeeper.
      if (flags.quorum.isNone()) {
        EXIT(1) << "Need to specify --quorum for replicated log based"
                << " registry when using ZooKeeper";
      }

      string zk_;
      if (strings::startsWith(zk.get(), "file://")) {
        const string& path = zk.get().substr(7);
        const Try<string> read = os::read(path);
        if (read.isError()) {
          EXIT(1) << "Failed to read from file at '" + path + "': "
                  << read.error();
        }
        zk_ = read.get();
      } else {
        zk_ = zk.get();
      }

      Try<URL> url = URL::parse(zk_);
      if (url.isError()) {
        EXIT(1) << "Error parsing ZooKeeper URL: " << url.error();
      }

      log = new Log(
          flags.quorum.get(),
          path::join(flags.work_dir.get(), "replicated_log"),
          url.get().servers,
          flags.zk_session_timeout,
          path::join(url.get().path, "log_replicas"),
          url.get().authentication,
          flags.log_auto_initialize);
    } else {
      // Use replicated log without ZooKeeper.
      log = new Log(
          1,
          path::join(flags.work_dir.get(), "replicated_log"),
          set<UPID>(),
          flags.log_auto_initialize);
    }
    storage = new state::LogStorage(log);
  } else {
    EXIT(1) << "'" << flags.registry << "' is not a supported"
            << " option for registry persistence";
  }

  CHECK_NOTNULL(storage);

  state::protobuf::State* state = new state::protobuf::State(storage);
  Registrar* registrar = new Registrar(flags, state);
  Repairer* repairer = new Repairer();

  Files files;

  MasterContender* contender;
  MasterDetector* detector;

  // TODO(vinod): 'MasterContender::create()' should take
  // Option<string>.
  Try<MasterContender*> contender_ = MasterContender::create(zk.get(""));
  if (contender_.isError()) {
    EXIT(1) << "Failed to create a master contender: " << contender_.error();
  }
  contender = contender_.get();

  // TODO(vinod): 'MasterDetector::create()' should take
  // Option<string>.
  Try<MasterDetector*> detector_ = MasterDetector::create(zk.get(""));
  if (detector_.isError()) {
    EXIT(1) << "Failed to create a master detector: " << detector_.error();
  }
  detector = detector_.get();

  Option<Authorizer*> authorizer = None();
  if (flags.acls.isSome()) {
    Try<Owned<Authorizer> > authorizer_ = Authorizer::create(flags.acls.get());
    if (authorizer_.isError()) {
      EXIT(1) << "Failed to initialize the authorizer: "
              << authorizer_.error() << " (see --acls flag)";
    }
    Owned<Authorizer> authorizer__ = authorizer_.get();
    authorizer = authorizer__.release();
  }

  LOG(INFO) << "Starting Mesos master";

  Master* master =
    new Master(
      allocator,
      registrar,
      repairer,
      &files,
      contender,
      detector,
      authorizer,
      flags);

  if (zk.isNone()) {
    // It means we are using the standalone detector so we need to
    // appoint this Master as the leader.
    dynamic_cast<StandaloneMasterDetector*>(detector)->appoint(master->info());
  }

  process::spawn(master);

  process::wait(master->self());
  delete master;
  delete allocator;
  delete allocatorProcess;

  delete registrar;
  delete repairer;
  delete state;
  delete storage;
  delete log;

  delete contender;
  delete detector;

  if (authorizer.isSome()) {
    delete authorizer.get();
  }

  return 0;
}
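
// Illustrative invocation of this master binary (a sketch; addresses and
// paths are hypothetical, the flags are those referenced in main() above):
//
//   mesos-master --ip=10.0.0.5 --port=5050 \
//       --zk=zk://10.0.0.1:2181/mesos --quorum=1 \
//       --registry=replicated_log --work_dir=/var/lib/mesos
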
  // Forwards the status update on the specified update stream.
  //
  // If `checkpoint` is `false`, the update will be retried as long as it is in
  // memory, but it will not be checkpointed.
  process::Future<Nothing> update(
      const UpdateType& update,
      const IDType& streamId,
      bool checkpoint)
  {
    LOG(INFO) << "Received " << statusUpdateType << " " << update;

    if (!streams.contains(streamId)) {
      Try<Nothing> create =
        createStatusUpdateStream(
            streamId,
            update.has_framework_id()
              ? Option<FrameworkID>(update.framework_id())
              : None(),
            checkpoint);

      if (create.isError()) {
        return process::Failure(create.error());
      }
    }
    CHECK(streams.contains(streamId));
    StatusUpdateStream* stream = streams[streamId].get();

    if (update.has_latest_status()) {
      return process::Failure(
          "Expected " + statusUpdateType + " to not contain 'latest_status'");
    }

    // Verify that we didn't get a non-checkpointable update for a
    // stream that is checkpointable, and vice-versa.
    if (stream->checkpointed() != checkpoint) {
      return process::Failure(
          "Mismatched checkpoint value for " + statusUpdateType + " " +
          stringify(update) + " (expected checkpoint=" +
          stringify(stream->checkpointed()) + " actual checkpoint=" +
          stringify(checkpoint) + ")");
    }

    // Verify that the framework ID of the update matches the framework ID
    // of the stream.
    if (update.has_framework_id() != stream->frameworkId.isSome()) {
      return process::Failure(
          "Mismatched framework ID for " + statusUpdateType +
          " " + stringify(update) + " (expected " +
          (stream->frameworkId.isSome()
             ? stringify(stream->frameworkId.get())
             : "no framework ID") +
          " got " +
          (update.has_framework_id()
             ? stringify(update.framework_id())
             : "no framework ID") +
          ")");
    }

    if (update.has_framework_id() &&
        update.framework_id() != stream->frameworkId.get()) {
      return process::Failure(
          "Mismatched framework ID for " + statusUpdateType +
          " " + stringify(update) +
          " (expected " + stringify(stream->frameworkId.get()) +
          " actual " + stringify(update.framework_id()) + ")");
    }

    // Handle the status update.
    Try<bool> result = stream->update(update);
    if (result.isError()) {
      return process::Failure(result.error());
    }

    // This only happens if the status update is a duplicate.
    if (!result.get()) {
      return Nothing();
    }

    // Forward the status update if this is at the front of the queue.
    // Subsequent status updates will be sent in `acknowledgement()`.
    if (!paused && stream->pending.size() == 1) {
      CHECK_NONE(stream->timeout);

      const Result<UpdateType>& next = stream->next();
      if (next.isError()) {
        return process::Failure(next.error());
      }

      CHECK_SOME(next);
      stream->timeout =
        forward(stream, next.get(), slave::STATUS_UPDATE_RETRY_INTERVAL_MIN);
    }

    return Nothing();
  }