// This test ensures we don't break the API when it comes to JSON
// representation of NetworkInfo.
TEST(HTTP, SerializeNetworkInfo)
{
  NetworkInfo networkInfo;
  NetworkInfo::IPAddress* address = networkInfo.add_ip_addresses();
  address->set_protocol(NetworkInfo::IPv4);
  address->set_ip_address("10.0.0.1");
  networkInfo.set_protocol(NetworkInfo::IPv6);
  networkInfo.set_ip_address("10.0.0.2");
  networkInfo.add_groups("foo");
  networkInfo.add_groups("bar");

  JSON::Value object = JSON::protobuf(networkInfo);

  Try<JSON::Value> expected = JSON::parse(
      "{"
      "  \"ip_addresses\":"
      "  ["
      "    {"
      "      \"protocol\": \"IPv4\","
      "      \"ip_address\": \"10.0.0.1\""
      "    }"
      "  ],"
      "  \"protocol\": \"IPv6\","
      "  \"ip_address\": \"10.0.0.2\","
      "  \"groups\": ["
      "    \"foo\","
      "    \"bar\""
      "  ]"
      "}");

  ASSERT_SOME(expected);
  EXPECT_EQ(expected.get(), object);
}
virtual Result<TaskStatus> slaveTaskStatusDecorator(
    const FrameworkID& frameworkId,
    const TaskStatus& status)
{
  LOG(INFO) << "Executing 'slaveTaskStatusDecorator' hook";

  Labels labels;

  // Set one known label.
  Label* newLabel = labels.add_labels();
  newLabel->set_key("bar");
  newLabel->set_value("qux");

  // Remove the label which was set by the test.
  foreach (const Label& oldLabel, status.labels().labels()) {
    if (oldLabel.key() != "foo") {
      labels.add_labels()->CopyFrom(oldLabel);
    }
  }

  TaskStatus result;
  result.mutable_labels()->CopyFrom(labels);

  // Set an IP address, a network isolation group, and a known label
  // in network info. This data is later validated by the
  // 'HookTest.VerifySlaveTaskStatusDecorator' test.
  NetworkInfo* networkInfo =
    result.mutable_container_status()->add_network_infos();

  // TODO(CD): Deprecated -- remove after 0.27.0.
  networkInfo->set_ip_address("4.3.2.1");

  NetworkInfo::IPAddress* ipAddress = networkInfo->add_ip_addresses();
  ipAddress->set_ip_address("4.3.2.1");

  networkInfo->add_groups("public");

  Label* networkInfoLabel = networkInfo->mutable_labels()->add_labels();
  networkInfoLabel->set_key("net_foo");
  networkInfoLabel->set_value("net_bar");

  return result;
}
void launchTask(ExecutorDriver* driver, const TaskInfo& task)
{
  if (run.isSome()) {
    // TODO(alexr): Use `protobuf::createTaskStatus()`
    // instead of manually setting fields.
    TaskStatus status;
    status.mutable_task_id()->CopyFrom(task.task_id());
    status.set_state(TASK_FAILED);
    status.set_message(
        "Attempted to run multiple tasks using a \"docker\" executor");

    driver->sendStatusUpdate(status);
    return;
  }

  // Capture the TaskID.
  taskId = task.task_id();

  // Capture the kill policy.
  if (task.has_kill_policy()) {
    killPolicy = task.kill_policy();
  }

  LOG(INFO) << "Starting task " << taskId.get();

  CHECK(task.has_container());
  CHECK(task.has_command());

  CHECK(task.container().type() == ContainerInfo::DOCKER);

  Try<Docker::RunOptions> runOptions = Docker::RunOptions::create(
      task.container(),
      task.command(),
      containerName,
      sandboxDirectory,
      mappedDirectory,
      task.resources() + task.executor().resources(),
      cgroupsEnableCfs,
      taskEnvironment,
      None(), // No extra devices.
      defaultContainerDNS);

  if (runOptions.isError()) {
    // TODO(alexr): Use `protobuf::createTaskStatus()`
    // instead of manually setting fields.
    TaskStatus status;
    status.mutable_task_id()->CopyFrom(task.task_id());
    status.set_state(TASK_FAILED);
    status.set_message(
        "Failed to create docker run options: " + runOptions.error());

    driver->sendStatusUpdate(status);

    _stop();
    return;
  }

  // We're adding task and executor resources to launch docker since
  // the DockerContainerizer updates the container cgroup limits
  // directly and it expects it to be the sum of both task and
  // executor resources. This does lead to a bit of unaccounted
  // resources for running this executor, but we are assuming
  // this is just a very small amount of overcommit.
  run = docker->run(
      runOptions.get(),
      Subprocess::FD(STDOUT_FILENO),
      Subprocess::FD(STDERR_FILENO));

  run->onAny(defer(self(), &Self::reaped, lambda::_1));

  // Delay sending the TASK_RUNNING status update until we receive the
  // inspect output. Note that we store a future that completes
  // after the sending of the running update. This allows us to
  // ensure that the terminal update is sent after the running
  // update (see `reaped()`).
  inspect = docker->inspect(containerName, DOCKER_INSPECT_DELAY)
    .then(defer(self(), [=](const Docker::Container& container) {
      if (!killed) {
        containerPid = container.pid;

        // TODO(alexr): Use `protobuf::createTaskStatus()`
        // instead of manually setting fields.
        TaskStatus status;
        status.mutable_task_id()->CopyFrom(taskId.get());
        status.set_state(TASK_RUNNING);
        status.set_data(container.output);

        if (container.ipAddress.isSome()) {
          // TODO(karya): Deprecated -- Remove after 0.25.0 has shipped.
          Label* label = status.mutable_labels()->add_labels();
          label->set_key("Docker.NetworkSettings.IPAddress");
          label->set_value(container.ipAddress.get());

          NetworkInfo* networkInfo =
            status.mutable_container_status()->add_network_infos();

          // Copy the NetworkInfo if it is specified in the
          // ContainerInfo. A Docker container has at most one
          // NetworkInfo, which is validated in the containerizer.
          if (task.container().network_infos().size() > 0) {
            networkInfo->CopyFrom(task.container().network_infos(0));
            networkInfo->clear_ip_addresses();
          }

          NetworkInfo::IPAddress* ipAddress =
            networkInfo->add_ip_addresses();
          ipAddress->set_ip_address(container.ipAddress.get());

          containerNetworkInfo = *networkInfo;
        }
        driver->sendStatusUpdate(status);
      }

      return Nothing();
    }));

  inspect.onFailed(defer(self(), [=](const string& failure) {
    LOG(ERROR) << "Failed to inspect container '" << containerName << "'"
               << ": " << failure;

    // TODO(bmahler): This is fatal, try to shut down cleanly.
    // Since we don't have a container id, we can only discard
    // the run future.
  }));

  inspect.onReady(defer(self(), &Self::launchCheck, task));

  inspect.onReady(
      defer(self(), &Self::launchHealthCheck, containerName, task));
}