Image::Appc getTestImage() const
{
  Image::Appc appc;
  appc.set_name("foo.com/bar");

  Label version;
  version.set_key("version");
  version.set_value("1.0.0");

  Label arch;
  arch.set_key("arch");
  arch.set_value("amd64");

  Label os;
  os.set_key("os");
  os.set_value("linux");

  Labels labels;
  labels.add_labels()->CopyFrom(version);
  labels.add_labels()->CopyFrom(arch);
  labels.add_labels()->CopyFrom(os);

  appc.mutable_labels()->CopyFrom(labels);

  return appc;
}
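// The per-label boilerplate above (declare a Label, set_key(), set_value(),
// CopyFrom() into Labels) is what the createLabel() helper used by the tests
// further down abstracts away. A minimal sketch of such a helper, assuming it
// only needs to populate the two string fields of the Label protobuf (the
// real test harness may declare it differently):
inline Label createLabel(const std::string& key, const std::string& value)
{
  Label label;
  label.set_key(key);
  label.set_value(value);
  return label;
}

// With such a helper, getTestImage() could build each label in one line, e.g.:
//   labels.add_labels()->CopyFrom(createLabel("version", "1.0.0"));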
virtual Result<TaskStatus> slaveTaskStatusDecorator(
    const FrameworkID& frameworkId,
    const TaskStatus& status)
{
  LOG(INFO) << "Executing 'slaveTaskStatusDecorator' hook";

  Labels labels;

  // Set one known label.
  Label* newLabel = labels.add_labels();
  newLabel->set_key("bar");
  newLabel->set_value("qux");

  // Remove the "foo" label which was set by the test.
  foreach (const Label& oldLabel, status.labels().labels()) {
    if (oldLabel.key() != "foo") {
      labels.add_labels()->CopyFrom(oldLabel);
    }
  }

  TaskStatus result;
  result.mutable_labels()->CopyFrom(labels);

  // Set an IP address, a network isolation group, and a known label
  // in network info. This data is later validated by the
  // 'HookTest.VerifySlaveTaskStatusDecorator' test.
  NetworkInfo* networkInfo =
    result.mutable_container_status()->add_network_infos();

  // TODO(CD): Deprecated -- remove after 0.27.0.
  networkInfo->set_ip_address("4.3.2.1");

  NetworkInfo::IPAddress* ipAddress = networkInfo->add_ip_addresses();
  ipAddress->set_ip_address("4.3.2.1");

  networkInfo->add_groups("public");

  Label* networkInfoLabel = networkInfo->mutable_labels()->add_labels();
  networkInfoLabel->set_key("net_foo");
  networkInfoLabel->set_value("net_bar");

  return result;
}
virtual Result<Labels> slaveTaskStatusLabelDecorator(
    const FrameworkID& frameworkId,
    const TaskStatus& status)
{
  LOG(INFO) << "Executing 'slaveTaskStatusLabelDecorator' hook";

  Labels labels;

  // Set one known label.
  Label* newLabel = labels.add_labels();
  newLabel->set_key("bar");
  newLabel->set_value("qux");

  // Remove the "foo" label which was set by the test.
  foreach (const Label& oldLabel, status.labels().labels()) {
    if (oldLabel.key() != "foo") {
      labels.add_labels()->CopyFrom(oldLabel);
    }
  }

  return labels;
}
virtual Result<Labels> masterLaunchTaskLabelDecorator(
    const TaskInfo& taskInfo,
    const FrameworkInfo& frameworkInfo,
    const SlaveInfo& slaveInfo)
{
  LOG(INFO) << "Executing 'masterLaunchTaskLabelDecorator' hook";

  Labels labels;

  Label* label = labels.add_labels();
  label->set_key(testLabelKey);
  label->set_value(testLabelValue);

  return labels;
}
virtual Result<Labels> masterLaunchTaskLabelDecorator(
    const TaskInfo& taskInfo,
    const FrameworkInfo& frameworkInfo,
    const SlaveInfo& slaveInfo)
{
  LOG(INFO) << "Executing 'masterLaunchTaskLabelDecorator' hook";

  Labels labels;

  // Set one known label.
  Label* newLabel = labels.add_labels();
  newLabel->set_key(testLabelKey);
  newLabel->set_value(testLabelValue);

  // Remove the 'testRemoveLabelKey' label which was set by the test.
  foreach (const Label& oldLabel, taskInfo.labels().labels()) {
    if (oldLabel.key() != testRemoveLabelKey) {
      labels.add_labels()->CopyFrom(oldLabel);
    }
  }

  return labels;
}
// TODO(nnielsen): Split hook tests into multiple modules to avoid
// interference.
virtual Result<Labels> slaveRunTaskLabelDecorator(
    const TaskInfo& taskInfo,
    const ExecutorInfo& executorInfo,
    const FrameworkInfo& frameworkInfo,
    const SlaveInfo& slaveInfo)
{
  LOG(INFO) << "Executing 'slaveRunTaskLabelDecorator' hook";

  Labels labels;

  // Set one known label.
  Label* newLabel = labels.add_labels();
  newLabel->set_key("baz");
  newLabel->set_value("qux");

  // Remove the "foo" label which was set by the test.
  foreach (const Label& oldLabel, taskInfo.labels().labels()) {
    if (oldLabel.key() != "foo") {
      labels.add_labels()->CopyFrom(oldLabel);
    }
  }

  return labels;
}
virtual Result<Labels> slaveTaskStatusLabelDecorator(
    const FrameworkID& frameworkId,
    const TaskStatus& status)
{
  LOG(INFO) << "CalicoHook::task status label decorator";

  if (!status.has_executor_id()) {
    LOG(WARNING) << "CalicoHook:: task status has no valid executor id";
    return None();
  }

  const ExecutorID executorId = status.executor_id();
  if (!executors->contains(executorId)) {
    LOG(WARNING) << "CalicoHook:: no valid container id for: " << executorId;
    return None();
  }

  const ContainerID containerId = executors->at(executorId);
  if (infos == NULL || !infos->contains(containerId)) {
    LOG(WARNING) << "CalicoHook:: no valid infos for: " << containerId;
    return None();
  }

  const Info* info = (*infos)[containerId];
  if (info->ipAddress.isNone()) {
    LOG(WARNING) << "CalicoHook:: no valid IP address";
    return None();
  }

  Labels labels;
  if (status.has_labels()) {
    labels.CopyFrom(status.labels());
  }

  // Set the IP address label.
  Label* label = labels.add_labels();
  label->set_key(ipAddressLabelKey);
  label->set_value(info->ipAddress.get());

  LOG(INFO) << "CalicoHook:: added label "
            << label->key() << ":" << label->value();

  return labels;
}
// This test verifies that the slave task status label decorator can
// add and remove labels from a TaskStatus during the status update
// sequence. A TaskStatus with two labels ("foo":"bar" and
// "bar":"baz") is sent from the executor. The labels get modified by
// the slave hook to strip the "foo":"bar" pair and add a new
// "bar":"qux" pair.
TEST_F(HookTest, VerifySlaveTaskStatusDecorator)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);
  TestContainerizer containerizer(&exec);

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), &containerizer);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers.get().size());

  // Start a task.
  TaskInfo task = createTask(offers.get()[0], "", DEFAULT_EXECUTOR_ID);

  ExecutorDriver* execDriver;
  EXPECT_CALL(exec, registered(_, _, _, _))
    .WillOnce(SaveArg<0>(&execDriver));

  Future<TaskInfo> execTask;
  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(FutureArg<1>(&execTask));

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(execTask);

  // Now send a TASK_RUNNING update with two labels. The first label
  // ("foo":"bar") will be removed by the task status hook to ensure
  // that it can remove labels. The second label will be preserved
  // and forwarded to the master (and eventually to the framework).
  // The hook also adds a new label with the same key but a different
  // value ("bar":"qux").
  TaskStatus runningStatus;
  runningStatus.mutable_task_id()->MergeFrom(execTask.get().task_id());
  runningStatus.set_state(TASK_RUNNING);

  // Add two labels to the TaskStatus.
  Labels* labels = runningStatus.mutable_labels();
  labels->add_labels()->CopyFrom(createLabel("foo", "bar"));
  labels->add_labels()->CopyFrom(createLabel("bar", "baz"));

  execDriver->sendStatusUpdate(runningStatus);

  AWAIT_READY(status);

  // The hook strips one label and hangs an extra one off, so we still
  // expect exactly two labels on the forwarded status.
  const Labels& labels_ = status.get().labels();
  EXPECT_EQ(2, labels_.labels_size());

  // The test hook will prepend a new "bar":"qux" label.
  EXPECT_EQ("bar", labels_.labels(0).key());
  EXPECT_EQ("qux", labels_.labels(0).value());

  // We expect only the "foo":"bar" pair to be stripped by the module.
  // The last pair should be the original "bar":"baz" pair set by the
  // test.
  EXPECT_EQ("bar", labels_.labels(1).key());
  EXPECT_EQ("baz", labels_.labels(1).value());

  // Now validate TaskStatus.container_status. We must have received a
  // container_status with one network_info set by the test hook module.
  EXPECT_TRUE(status.get().has_container_status());
  EXPECT_EQ(1, status.get().container_status().network_infos().size());

  const NetworkInfo networkInfo =
    status.get().container_status().network_infos(0);

  // The hook module sets up '4.3.2.1' as the IP address and 'public' as the
  // network isolation group. The `ip_address` field is deprecated, but the
  // hook module should continue to set it as well as the new `ip_addresses`
  // field for now.
  EXPECT_TRUE(networkInfo.has_ip_address());
  EXPECT_EQ("4.3.2.1", networkInfo.ip_address());

  EXPECT_EQ(1, networkInfo.ip_addresses().size());
  EXPECT_TRUE(networkInfo.ip_addresses(0).has_ip_address());
  EXPECT_EQ("4.3.2.1", networkInfo.ip_addresses(0).ip_address());

  EXPECT_EQ(1, networkInfo.groups().size());
  EXPECT_EQ("public", networkInfo.groups(0));

  EXPECT_TRUE(networkInfo.has_labels());
  EXPECT_EQ(1, networkInfo.labels().labels().size());

  const Label networkInfoLabel = networkInfo.labels().labels(0);

  // Finally, validate the label set inside NetworkInfo by the hook module.
  EXPECT_EQ("net_foo", networkInfoLabel.key());
  EXPECT_EQ("net_bar", networkInfoLabel.value());

  EXPECT_CALL(exec, shutdown(_))
    .Times(AtMost(1));

  driver.stop();
  driver.join();
}
// This test verifies that the slave run task label decorator can add
// and remove labels from a task during the launch sequence. A task
// with two labels ("foo":"bar" and "bar":"baz") is launched and will
// get modified by the slave hook to strip the "foo":"bar" pair and
// add a new "baz":"qux" pair.
TEST_F(HookTest, VerifySlaveRunTaskHook)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);
  TestContainerizer containerizer(&exec);

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), &containerizer);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers.get().size());

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id());
  task.mutable_resources()->CopyFrom(offers.get()[0].resources());
  task.mutable_executor()->CopyFrom(DEFAULT_EXECUTOR_INFO);

  // Add two labels: (1) will be removed by the hook to ensure that
  // runTaskHook can remove labels, and (2) will be preserved to ensure
  // that the framework can add labels to the task and have those be
  // available by the end of the launch task sequence when hooks are
  // used (to protect against hooks removing labels completely).
  Labels* labels = task.mutable_labels();
  labels->add_labels()->CopyFrom(createLabel("foo", "bar"));
  labels->add_labels()->CopyFrom(createLabel("bar", "baz"));

  EXPECT_CALL(exec, registered(_, _, _, _));

  Future<TaskInfo> taskInfo;
  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(DoAll(
        FutureArg<1>(&taskInfo),
        SendStatusUpdateFromTask(TASK_RUNNING)));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(taskInfo);

  // The master hook will hang an extra label off, so we expect three
  // labels in total.
  const Labels& labels_ = taskInfo.get().labels();
  ASSERT_EQ(3, labels_.labels_size());

  // The slave run task hook will prepend a new "baz":"qux" label.
  EXPECT_EQ("baz", labels_.labels(0).key());
  EXPECT_EQ("qux", labels_.labels(0).value());

  // The master launch task hook will still hang off its test label.
  EXPECT_EQ(testLabelKey, labels_.labels(1).key());
  EXPECT_EQ(testLabelValue, labels_.labels(1).value());

  // We expect only the "foo":"bar" pair to be stripped by the module.
  // The last pair should be the original "bar":"baz" pair set by the
  // test.
  EXPECT_EQ("bar", labels_.labels(2).key());
  EXPECT_EQ("baz", labels_.labels(2).value());

  EXPECT_CALL(exec, shutdown(_))
    .Times(AtMost(1));

  driver.stop();
  driver.join();
}
// This test ensures we don't break the API when it comes to JSON
// representation of tasks.
TEST(HTTPTest, ModelTask)
{
  TaskID taskId;
  taskId.set_value("t");

  SlaveID slaveId;
  slaveId.set_value("s");

  ExecutorID executorId;
  executorId.set_value("t");

  FrameworkID frameworkId;
  frameworkId.set_value("f");

  TaskState state = TASK_RUNNING;

  vector<TaskStatus> statuses;

  TaskStatus status;
  status.mutable_task_id()->CopyFrom(taskId);
  status.set_state(state);
  status.mutable_slave_id()->CopyFrom(slaveId);
  status.mutable_executor_id()->CopyFrom(executorId);
  status.set_timestamp(0.0);

  statuses.push_back(status);

  Labels labels;
  labels.add_labels()->CopyFrom(createLabel("ACTION", "port:7987 DENY"));

  Ports ports;
  Port* port = ports.add_ports();
  port->set_number(80);
  port->mutable_labels()->CopyFrom(labels);

  DiscoveryInfo discovery;
  discovery.set_visibility(DiscoveryInfo::CLUSTER);
  discovery.set_name("discover");
  discovery.mutable_ports()->CopyFrom(ports);

  TaskInfo taskInfo;
  taskInfo.set_name("task");
  taskInfo.mutable_task_id()->CopyFrom(taskId);
  taskInfo.mutable_slave_id()->CopyFrom(slaveId);
  taskInfo.mutable_command()->set_value("echo hello");
  taskInfo.mutable_discovery()->CopyFrom(discovery);

  Task task = createTask(taskInfo, state, frameworkId);
  task.add_statuses()->CopyFrom(statuses[0]);

  JSON::Value object = model(task);

  Try<JSON::Value> expected = JSON::parse(
      "{"
      "  \"executor_id\":\"\","
      "  \"framework_id\":\"f\","
      "  \"id\":\"t\","
      "  \"name\":\"task\","
      "  \"resources\":"
      "  {"
      "    \"cpus\":0,"
      "    \"disk\":0,"
      "    \"gpus\":0,"
      "    \"mem\":0"
      "  },"
      "  \"slave_id\":\"s\","
      "  \"state\":\"TASK_RUNNING\","
      "  \"statuses\":"
      "  ["
      "    {"
      "      \"state\":\"TASK_RUNNING\","
      "      \"timestamp\":0"
      "    }"
      "  ],"
      "  \"discovery\":"
      "  {"
      "    \"name\":\"discover\","
      "    \"ports\":"
      "    {"
      "      \"ports\":"
      "      ["
      "        {"
      "          \"number\":80,"
      "          \"labels\":"
      "          {"
      "            \"labels\":"
      "            ["
      "              {"
      "                \"key\":\"ACTION\","
      "                \"value\":\"port:7987 DENY\""
      "              }"
      "            ]"
      "          }"
      "        }"
      "      ]"
      "    },"
      "    \"visibility\":\"CLUSTER\""
      "  }"
      "}");

  ASSERT_SOME(expected);

  EXPECT_EQ(expected.get(), object);
}
// Test that the label decorator hook hangs a new label off the
// TaskInfo message during master launch task.
TEST_F(HookTest, VerifyMasterLaunchTaskHook)
{
  Try<PID<Master>> master = StartMaster(CreateMasterFlags());
  ASSERT_SOME(master);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);
  TestContainerizer containerizer(&exec);

  // Start a mock slave since we aren't testing the slave hooks yet.
  Try<PID<Slave>> slave = StartSlave(&containerizer);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id());
  task.mutable_resources()->CopyFrom(offers.get()[0].resources());
  task.mutable_executor()->CopyFrom(DEFAULT_EXECUTOR_INFO);

  // Add a label which will be removed by the hook.
  Labels* labels = task.mutable_labels();
  Label* label = labels->add_labels();
  label->set_key(testRemoveLabelKey);
  label->set_value(testRemoveLabelValue);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<RunTaskMessage> runTaskMessage =
    FUTURE_PROTOBUF(RunTaskMessage(), _, _);

  EXPECT_CALL(exec, registered(_, _, _, _));

  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status))
    .WillRepeatedly(Return());

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(runTaskMessage);
  AWAIT_READY(status);

  // By the time launchTasks completes, the master's label decorator
  // hook should have been executed and we should see its labels now.
  // Also verify that the hook module has stripped the
  // 'testRemoveLabelKey' label: only one label should be present, and
  // it should be the new 'testLabelKey' label.
  const Labels& labels_ = runTaskMessage.get().task().labels();
  ASSERT_EQ(1, labels_.labels_size());

  EXPECT_EQ(labels_.labels().Get(0).key(), testLabelKey);
  EXPECT_EQ(labels_.labels().Get(0).value(), testLabelValue);

  driver.stop();
  driver.join();

  Shutdown(); // Must shutdown before 'containerizer' gets deallocated.
}