// This test verifies that an authentication request that comes from // the same instance of the framework (e.g., ZK blip) before // 'Master::_registerFramework()' from an earlier attempt, causes the // master to successfully register the framework. TEST_F(MasterAuthorizationTest, DuplicateRegistration) { MockAuthorizer authorizer; Try<PID<Master> > master = StartMaster(&authorizer); ASSERT_SOME(master); // Create a detector for the scheduler driver because we want the // spurious leading master change to be known by the scheduler // driver only. StandaloneMasterDetector detector(master.get()); MockScheduler sched; TestingMesosSchedulerDriver driver(&sched, &detector); Future<Nothing> registered; EXPECT_CALL(sched, registered(&driver, _, _)) .WillOnce(FutureSatisfy(®istered)); // Return pending futures from authorizer. Future<Nothing> future1; Promise<bool> promise1; Future<Nothing> future2; Promise<bool> promise2; EXPECT_CALL(authorizer, authorize(An<const mesos::ACL::ReceiveOffers&>())) .WillOnce(DoAll(FutureSatisfy(&future1), Return(promise1.future()))) .WillOnce(DoAll(FutureSatisfy(&future2), Return(promise2.future()))); driver.start(); // Wait until first authorization attempt is in progress. AWAIT_READY(future1); // Simulate a spurious leading master change at the scheduler. detector.appoint(master.get()); // Wait until second authorization attempt is in progress. AWAIT_READY(future2); // Now complete the first authorization attempt. promise1.set(true); // First registration request should succeed because the // framework PID did not change. AWAIT_READY(registered); Future<FrameworkRegisteredMessage> frameworkRegisteredMessage = FUTURE_PROTOBUF(FrameworkRegisteredMessage(), _, _); // Now complete the second authorization attempt. promise2.set(true); // Master should acknowledge the second registration attempt too. AWAIT_READY(frameworkRegisteredMessage); driver.stop(); driver.join(); Shutdown(); }
// Verifies that launching a task that declares no resources is
// rejected: the scheduler receives a TASK_LOST status update with the
// message "Task uses no resources".
TEST(ResourceOffersTest, TaskUsesNoResources)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  PID<Master> master = local::launch(1, 2, 1 * Gigabyte, false);

  MockScheduler sched;
  MesosSchedulerDriver driver(&sched, "", DEFAULT_EXECUTOR_INFO, master);

  vector<Offer> offers;
  trigger resourceOffersCall;

  EXPECT_CALL(sched, registered(&driver, _))
    .Times(1);

  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(DoAll(SaveArg<1>(&offers),
                    Trigger(&resourceOffersCall)))
    .WillRepeatedly(Return());  // Ignore subsequent offers.

  driver.start();

  WAIT_UNTIL(resourceOffersCall);

  EXPECT_NE(0, offers.size());

  // Build a task that intentionally sets no resources.
  TaskDescription task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers[0].slave_id());

  vector<TaskDescription> tasks;
  tasks.push_back(task);

  TaskStatus status;
  trigger statusUpdateCall;

  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(DoAll(SaveArg<1>(&status),
                    Trigger(&statusUpdateCall)));

  driver.launchTasks(offers[0].id(), tasks);

  WAIT_UNTIL(statusUpdateCall);

  // The resource-less task must be reported lost with an explanation.
  EXPECT_EQ(task.task_id(), status.task_id());
  EXPECT_EQ(TASK_LOST, status.state());
  EXPECT_TRUE(status.has_message());
  EXPECT_EQ("Task uses no resources", status.message());

  driver.stop();
  driver.join();

  local::shutdown();
}
// Verifies that sending a request with 'keepAlive == false' on a
// pipelined connection prevents any further sends and that the
// connection disconnects after both responses are delivered.
TEST(HTTPConnectionTest, ClosingRequest)
{
  Http http;

  http::URL url = http::URL(
      "http",
      http.process->self().address.ip,
      http.process->self().address.port,
      http.process->self().id + "/get");

  Future<http::Connection> connect = http::connect(url);
  AWAIT_READY(connect);

  http::Connection connection = connect.get();

  // Issue two pipelined requests, the second will not have
  // 'keepAlive' set. This prevents further requests and leads
  // to a disconnection upon receiving the second response.
  Promise<http::Response> promise1, promise2;
  Future<http::Request> get1, get2;

  EXPECT_CALL(*http.process, get(_))
    .WillOnce(DoAll(FutureArg<0>(&get1), Return(promise1.future())))
    .WillOnce(DoAll(FutureArg<0>(&get2), Return(promise2.future())));

  http::Request request1, request2;
  request1.method = "GET";
  request2.method = "GET";
  request1.url = url;
  request2.url = url;
  request1.keepAlive = true;
  request2.keepAlive = false;  // The "closing" request.

  Future<http::Response> response1 = connection.send(request1);
  Future<http::Response> response2 = connection.send(request2);

  // After a closing request, sends should fail.
  AWAIT_FAILED(connection.send(request1));

  // Complete the responses.
  promise1.set(http::OK("body"));
  promise2.set(http::OK("body"));

  AWAIT_READY(response1);
  AWAIT_READY(response2);

  AWAIT_READY(connection.disconnected());
}
// Fixture setup: wires a MockLibNice into a LibNiceConnection, sets
// the gmock expectations that a plain start() (no port range, no
// relay, no remote credentials yet) incurs, and starts the connection.
virtual void SetUp() {
  const std::string kArbitraryLocalCredentialUsername = "******";
  const std::string kArbitraryLocalCredentialPassword = "******";
  const std::string kArbitraryConnectionId = "a_connection_id";
  const std::string kArbitraryTransportName = "video";
  const std::string kArbitraryDataPacket = "test";

  // NOTE(review): raw 'new' for libnice/ice_config — ownership of
  // 'libnice' is handed to 'libnice_pointer'; 'ice_config' and
  // 'nice_connection' are presumably released in TearDown — confirm.
  libnice = new MockLibNice;
  libnice_pointer.reset(libnice);
  nice_listener = std::make_shared<MockLibNiceConnectionListener>();
  ice_config = new erizo::IceConfig();
  // strdup'd buffers are handed to libnice via SetArgPointee below.
  ufrag = strdup(kArbitraryLocalCredentialUsername.c_str());
  pass = strdup(kArbitraryLocalCredentialPassword.c_str());
  test_packet = strdup(kArbitraryDataPacket.c_str());

  ice_config->media_type = erizo::VIDEO_TYPE;
  ice_config->transport_name = kArbitraryTransportName;
  ice_config->ice_components = 1;
  ice_config->connection_id = kArbitraryConnectionId;

  // Expectations for what start() must (and must not) call.
  EXPECT_CALL(*libnice, NiceAgentNew(_)).Times(1);
  EXPECT_CALL(*libnice, NiceAgentAddStream(_, _)).Times(1).WillOnce(Return(1));
  EXPECT_CALL(*libnice, NiceAgentGetLocalCredentials(_, _, _, _)).Times(1).
    WillOnce(DoAll(SetArgPointee<2>(ufrag), SetArgPointee<3>(pass), Return(true)));
  EXPECT_CALL(*libnice, NiceAgentAttachRecv(_, _, _, _, _, _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(*libnice, NiceAgentGatherCandidates(_, _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(*libnice, NiceAgentSetRemoteCredentials(_, _, _, _)).Times(0);
  EXPECT_CALL(*libnice, NiceAgentSetPortRange(_, _, _, _, _)).Times(0);
  EXPECT_CALL(*libnice, NiceAgentSetRelayInfo(_, _, _, _, _, _, _)).Times(0);

  nice_connection = new erizo::LibNiceConnection(libnice_pointer, *ice_config);
  nice_connection->setIceListener(nice_listener);
  nice_connection->start();
}
// Verifies that when the IceConfig specifies a min/max port, start()
// forwards exactly that range to NiceAgentSetPortRange (and still
// does not configure relay info or remote credentials).
TEST_F(LibNiceConnectionStartTest, start_Configures_Libnice_With_Port_Range) {
  const std::string kArbitraryLocalCredentialUsername = "******";
  const std::string kArbitraryLocalCredentialPassword = "******";
  const std::string kArbitraryConnectionId = "a_connection_id";
  const std::string kArbitraryTransportName = "video";
  const unsigned int kArbitraryMinPort = 1240;
  const unsigned int kArbitraryMaxPort = 2504;

  // Credentials handed back to the connection via SetArgPointee —
  // ownership of the strdup'd buffers passes to the callee.
  char *ufrag = strdup(kArbitraryLocalCredentialUsername.c_str());
  char *pass = strdup(kArbitraryLocalCredentialPassword.c_str());

  ice_config->min_port = kArbitraryMinPort;
  ice_config->max_port = kArbitraryMaxPort;
  ice_config->transport_name = kArbitraryTransportName;
  ice_config->media_type = erizo::VIDEO_TYPE;
  ice_config->connection_id = kArbitraryConnectionId;
  ice_config->ice_components = 1;

  EXPECT_CALL(*libnice, NiceAgentNew(_)).Times(1);
  EXPECT_CALL(*libnice, NiceAgentAddStream(_, _)).Times(1);
  EXPECT_CALL(*libnice, NiceAgentGetLocalCredentials(_, _, _, _)).Times(1).
    WillOnce(DoAll(SetArgPointee<2>(ufrag), SetArgPointee<3>(pass), Return(true)));
  EXPECT_CALL(*libnice, NiceAgentAttachRecv(_, _, _, _, _, _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(*libnice, NiceAgentGatherCandidates(_, _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(*libnice, NiceAgentSetRemoteCredentials(_, _, _, _)).Times(0);
  // The configured range must be passed through verbatim.
  EXPECT_CALL(*libnice,
    NiceAgentSetPortRange(_, _, _, kArbitraryMinPort, kArbitraryMaxPort)).Times(1);
  EXPECT_CALL(*libnice, NiceAgentSetRelayInfo(_, _, _, _, _, _, _)).Times(0);

  erizo::LibNiceConnection nice(libnice_pointer, *ice_config);
  nice.setIceListener(nice_listener);
  nice.start();
}
// Verifies that a registrar operation fails once the underlying state
// store has not completed within 'registry_store_timeout', and that
// the registrar then rejects all subsequent operations.
TEST_P(RegistrarTest, storeTimeout)
{
  Clock::pause();

  MockStorage storage;
  State state(&storage);
  Registrar registrar(flags, &state);

  EXPECT_CALL(storage, get(_))
    .WillOnce(Return(None()));

  Future<Nothing> set;
  EXPECT_CALL(storage, set(_, _))
    .WillOnce(DoAll(FutureSatisfy(&set),
                    Return(Future<bool>())));  // A future that never completes.

  Future<Registry> recover = registrar.recover(master);

  // Wait until the store is attempted, then fire the timeout.
  AWAIT_READY(set);

  Clock::advance(flags.registry_store_timeout);

  AWAIT_FAILED(recover);

  Clock::resume();

  // Ensure the registrar fails subsequent operations.
  AWAIT_FAILED(registrar.apply(Owned<Operation>(new AdmitSlave(slave))));
}
// Issues a DFU_GETSTATE control request against the device under
// test. On OUTCOME_OK the mock expects the one-byte state to be sent
// back via ControlSend (copied into 'state'); otherwise the mock
// expects the device to stall the request with an error status.
void USBHost::GetDFUState(Outcome outcome, uint8_t *state) {
  Mock::VerifyAndClearExpectations(m_usb_mock);

  if (outcome != OUTCOME_OK) {
    // The device rejects the request.
    EXPECT_CALL(*m_usb_mock,
                ControlStatus(m_usb_handle, USB_DEVICE_CONTROL_STATUS_ERROR))
        .WillOnce(Return(USB_DEVICE_CONTROL_TRANSFER_RESULT_SUCCESS));
  } else {
    // The device answers with a single state byte.
    EXPECT_CALL(*m_usb_mock, ControlSend(m_usb_handle, _, sizeof(uint8_t)))
        .WillOnce(DoAll(WithArgs<1, 2>(CopyDataTo(state, sizeof(uint8_t))),
                        Return(USB_DEVICE_CONTROL_TRANSFER_RESULT_SUCCESS)));
  }

  USB_SETUP_PACKET setup;
  setup.bmRequestType = 0xa1;  // Device-to-host, class, interface.
  setup.bRequest = DFU_GETSTATE;
  setup.wValue = 0;
  setup.wIndex = INTERFACE;
  setup.wLength = 1;
  SetupRequest(&setup, sizeof(setup));

  Mock::VerifyAndClearExpectations(m_usb_mock);
}
// Verifies that when a slave terminates, the scheduler is notified of
// both the rescinded offer and the lost slave.
TEST(FaultToleranceTest, SlaveLost)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  // Spawn a master and a single slave with fixed resources.
  SimpleAllocator a;
  Master m(&a);
  PID<Master> master = process::spawn(&m);

  ProcessBasedIsolationModule isolationModule;
  Resources resources = Resources::parse("cpus:2;mem:1024");
  Slave s(resources, true, &isolationModule);
  PID<Slave> slave = process::spawn(&s);

  BasicMasterDetector detector(master, slave, true);

  MockScheduler sched;
  MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);

  vector<Offer> offers;
  trigger resourceOffersCall;

  EXPECT_CALL(sched, registered(&driver, _, _))
    .Times(1);

  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(DoAll(SaveArg<1>(&offers),
                    Trigger(&resourceOffersCall)))
    .WillRepeatedly(Return());  // Ignore subsequent offers.

  driver.start();

  WAIT_UNTIL(resourceOffersCall);

  EXPECT_EQ(1, offers.size());

  trigger offerRescindedCall, slaveLostCall;

  EXPECT_CALL(sched, offerRescinded(&driver, offers[0].id()))
    .WillOnce(Trigger(&offerRescindedCall));

  EXPECT_CALL(sched, slaveLost(&driver, offers[0].slave_id()))
    .WillOnce(Trigger(&slaveLostCall));

  // Kill the slave; the master should detect it and notify the
  // scheduler about both the offer and the slave.
  process::terminate(slave);

  WAIT_UNTIL(offerRescindedCall);
  WAIT_UNTIL(slaveLostCall);

  driver.stop();
  driver.join();

  process::wait(slave);
  process::terminate(master);
  process::wait(master);
}
// Verifies that resources declined (launched with an empty task list)
// by one framework get re-offered to a subsequently registered one.
TEST(ResourceOffersTest, ResourcesGetReofferedWhenUnused)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  PID<Master> master = local::launch(1, 2, 1 * Gigabyte, false);

  // First scheduler: receive an offer but use none of it.
  MockScheduler sched1;
  MesosSchedulerDriver driver1(&sched1, "", DEFAULT_EXECUTOR_INFO, master);

  vector<Offer> offers;
  trigger sched1ResourceOfferCall;

  EXPECT_CALL(sched1, registered(&driver1, _))
    .Times(1);

  EXPECT_CALL(sched1, resourceOffers(&driver1, _))
    .WillOnce(DoAll(SaveArg<1>(&offers),
                    Trigger(&sched1ResourceOfferCall)))
    .WillRepeatedly(Return());

  driver1.start();

  WAIT_UNTIL(sched1ResourceOfferCall);

  EXPECT_NE(0, offers.size());

  vector<TaskDescription> tasks; // Use nothing!

  driver1.launchTasks(offers[0].id(), tasks);

  driver1.stop();
  driver1.join();

  // Second scheduler: must receive the recycled resources.
  MockScheduler sched2;
  MesosSchedulerDriver driver2(&sched2, "", DEFAULT_EXECUTOR_INFO, master);

  trigger sched2ResourceOfferCall;

  EXPECT_CALL(sched2, registered(&driver2, _))
    .Times(1);

  EXPECT_CALL(sched2, resourceOffers(&driver2, _))
    .WillOnce(Trigger(&sched2ResourceOfferCall))
    .WillRepeatedly(Return());

  EXPECT_CALL(sched2, offerRescinded(&driver2, _))
    .Times(AtMost(1));

  driver2.start();

  WAIT_UNTIL(sched2ResourceOfferCall);

  driver2.stop();
  driver2.join();

  local::shutdown();
}
// Exercises two HTTP endpoints of the test process: '/body' via a raw
// HTTP/1.0 request over an explicit socket, and '/pipe' via http::get
// with a chunked (PIPE) response streamed through an http::Pipe.
TEST(HTTPTest, Endpoints)
{
  Http http;

  // First hit '/body' (using explicit sockets and HTTP/1.0).
  Try<Socket> create = Socket::create();
  ASSERT_SOME(create);

  Socket socket = create.get();

  AWAIT_READY(socket.connect(http.process->self().address));

  std::ostringstream out;
  out << "GET /" << http.process->self().id << "/body"
      << " HTTP/1.0\r\n"
      << "Connection: Keep-Alive\r\n"
      << "\r\n";

  const string data = out.str();

  EXPECT_CALL(*http.process, body(_))
    .WillOnce(Return(http::OK()));

  AWAIT_READY(socket.send(data));

  string response = "HTTP/1.1 200 OK";

  AWAIT_EXPECT_EQ(response, socket.recv(response.size()));

  // Now hit '/pipe' (by using http::get).
  http::Pipe pipe;
  http::OK ok;
  ok.type = http::Response::PIPE;
  ok.reader = pipe.reader();

  Future<Nothing> request;
  EXPECT_CALL(*http.process, pipe(_))
    .WillOnce(DoAll(FutureSatisfy(&request),
                    Return(ok)));

  Future<http::Response> future = http::get(http.process->self(), "pipe");

  AWAIT_READY(request);

  // Write the response.
  http::Pipe::Writer writer = pipe.writer();
  EXPECT_TRUE(writer.write("Hello World\n"));
  EXPECT_TRUE(writer.close());

  AWAIT_READY(future);
  EXPECT_EQ(http::Status::OK, future->code);
  EXPECT_EQ(http::Status::string(http::Status::OK), future->status);

  // A PIPE response must be delivered with chunked transfer encoding.
  EXPECT_SOME_EQ("chunked", future->headers.get("Transfer-Encoding"));
  EXPECT_EQ("Hello World\n", future->body);
}
// Verifies that a scheduler re-registers (rather than registers anew)
// with the master after a spurious "new master detected" event.
TEST(FaultToleranceTest, FrameworkReregister)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  // Install a message filter so we can capture the registration
  // message (and thus the scheduler's PID) without dropping anything.
  MockFilter filter;
  process::filter(&filter);

  EXPECT_MESSAGE(filter, _, _, _)
    .WillRepeatedly(Return(false));

  PID<Master> master = local::launch(1, 2, 1 * Gigabyte, false);

  MockScheduler sched;
  MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);

  trigger schedRegisteredCall, schedReregisteredCall;

  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(Trigger(&schedRegisteredCall));

  EXPECT_CALL(sched, reregistered(&driver, _))
    .WillOnce(Trigger(&schedReregisteredCall));

  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillRepeatedly(Return());

  EXPECT_CALL(sched, offerRescinded(&driver, _))
    .Times(AtMost(1));

  // Capture (but do not drop: Return(false)) the registered message
  // so we learn the scheduler's address for the post below.
  process::Message message;
  EXPECT_MESSAGE(filter, Eq(FrameworkRegisteredMessage().GetTypeName()), _, _)
    .WillOnce(DoAll(SaveArgField<0>(&process::MessageEvent::message, &message),
                    Return(false)));

  driver.start();

  WAIT_UNTIL(schedRegisteredCall); // Ensures registered message is received.

  // Simulate a spurious newMasterDetected event (e.g., due to ZooKeeper
  // expiration) at the scheduler.
  NewMasterDetectedMessage newMasterDetectedMsg;
  newMasterDetectedMsg.set_pid(master);

  process::post(message.to, newMasterDetectedMsg);

  WAIT_UNTIL(schedReregisteredCall);

  driver.stop();
  driver.join();

  local::shutdown();

  process::filter(NULL);
}
// Verifies that a 'Connection: close' response breaks the request
// pipeline: the first response succeeds, the second request's
// response fails, and the connection disconnects.
TEST(HTTPConnectionTest, ClosingResponse)
{
  Http http;

  http::URL url = http::URL(
      "http",
      http.process->self().address.ip,
      http.process->self().address.port,
      http.process->self().id + "/get");

  Future<http::Connection> connect = http::connect(url);
  AWAIT_READY(connect);

  http::Connection connection = connect.get();

  // Issue two pipelined requests; the server will respond
  // with a 'Connection: close' for the first response, which
  // will trigger a disconnection and break the pipeline. This
  // means that the second request arrives at the server but
  // the response cannot be received due to the disconnection.
  Promise<http::Response> promise1;
  Future<Nothing> get2;

  EXPECT_CALL(*http.process, get(_))
    .WillOnce(Return(promise1.future()))
    .WillOnce(DoAll(FutureSatisfy(&get2),
                    Return(http::OK())));

  http::Request request1, request2;
  request1.method = "GET";
  request2.method = "GET";
  request1.url = url;
  request2.url = url;
  request1.keepAlive = true;
  request2.keepAlive = true;

  Future<http::Response> response1 = connection.send(request1);
  Future<http::Response> response2 = connection.send(request2);

  http::Response close = http::OK("body");
  close.headers["Connection"] = "close";

  // Wait for both requests to arrive, then issue the closing response.
  AWAIT_READY(get2);

  promise1.set(close);

  // The second response will fail because of 'Connection: close'.
  AWAIT_READY(response1);
  AWAIT_FAILED(response2);

  AWAIT_READY(connection.disconnected());
}
// Verifies that the scheduler retries registration: the first
// FrameworkRegisteredMessage is deliberately dropped, and the
// scheduler still ends up registered after the retry interval.
TEST(FaultToleranceTest, FrameworkReliableRegistration)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  // Pause the clock so the retry can be fired deterministically.
  Clock::pause();

  MockFilter filter;
  process::filter(&filter);

  EXPECT_MESSAGE(filter, _, _, _)
    .WillRepeatedly(Return(false));

  PID<Master> master = local::launch(1, 2, 1 * Gigabyte, false);

  MockScheduler sched;
  MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);

  trigger schedRegisteredCall;

  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(Trigger(&schedRegisteredCall));

  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillRepeatedly(Return());

  EXPECT_CALL(sched, offerRescinded(&driver, _))
    .Times(AtMost(1));

  trigger frameworkRegisteredMsg;

  // Drop the first framework registered message, allow subsequent messages.
  EXPECT_MESSAGE(filter, Eq(FrameworkRegisteredMessage().GetTypeName()), _, _)
    .WillOnce(DoAll(Trigger(&frameworkRegisteredMsg),
                    Return(true)))  // Return(true) == drop the message.
    .WillRepeatedly(Return(false));

  driver.start();

  WAIT_UNTIL(frameworkRegisteredMsg);

  // Advance past the registration retry interval.
  Clock::advance(1.0); // TODO(benh): Pull out constant from SchedulerProcess.

  WAIT_UNTIL(schedRegisteredCall); // Ensures registered message is received.

  driver.stop();
  driver.join();

  local::shutdown();

  process::filter(NULL);

  Clock::resume();
}
// Like the 'remote' test but uses http::connect. TEST(ProcessTest, Http1) { ASSERT_TRUE(GTEST_IS_THREADSAFE); RemoteProcess process; spawn(process); http::URL url = http::URL( "http", process.self().address.ip, process.self().address.port, process.self().id + "/handler"); Future<http::Connection> connect = http::connect(url); AWAIT_READY(connect); http::Connection connection = connect.get(); Future<UPID> pid; Future<string> body; EXPECT_CALL(process, handler(_, _)) .WillOnce(DoAll(FutureArg<0>(&pid), FutureArg<1>(&body))); http::Request request; request.method = "POST"; request.url = url; request.headers["User-Agent"] = "libprocess/"; request.body = "hello world"; // Send the libprocess request. Note that we will not // receive a 202 due to the use of the `User-Agent` // header, therefore we need to explicitly disconnect! Future<http::Response> response = connection.send(request); AWAIT_READY(body); ASSERT_EQ("hello world", body.get()); AWAIT_READY(pid); ASSERT_EQ(UPID(), pid.get()); EXPECT_TRUE(response.isPending()); AWAIT_READY(connection.disconnect()); terminate(process); wait(process); }
// Verifies that getSelectedPair() queries libnice for the selected
// candidate pair and maps the local candidate onto the erizo* fields
// and the remote candidate onto the client* fields of the result.
TEST_F(LibNiceConnectionTest, getSelectedPair_Calls_Libnice_And_Returns_Pair) {
  const std::string kArbitraryRemoteIp = "192.168.1.2";
  const int kArbitraryRemotePort = 4242;
  const std::string kArbitraryLocalIp = "192.168.1.1";
  const int kArbitraryLocalPort = 2222;
  const int kArbitraryPriority = 1;
  const std::string kArbitraryLocalUsername = "******";
  const std::string kArbitraryLocalPassword = "******";
  const std::string kArbitraryRemoteUsername = "******";
  const std::string kArbitraryRemotePassword = "******";

  // Build the local host candidate the mocked agent will return.
  NiceCandidate* local_candidate = nice_candidate_new(NICE_CANDIDATE_TYPE_HOST);
  local_candidate->username = strdup(kArbitraryLocalUsername.c_str());
  local_candidate->password = strdup(kArbitraryLocalPassword.c_str());
  local_candidate->stream_id = (guint) 1;
  local_candidate->component_id = 1;
  local_candidate->priority = kArbitraryPriority;
  local_candidate->transport = NICE_CANDIDATE_TRANSPORT_UDP;
  nice_address_set_from_string(&local_candidate->addr, kArbitraryLocalIp.c_str());
  nice_address_set_from_string(&local_candidate->base_addr, kArbitraryLocalIp.c_str());
  nice_address_set_port(&local_candidate->addr, kArbitraryLocalPort);
  nice_address_set_port(&local_candidate->base_addr, kArbitraryLocalPort);

  // Build the corresponding remote host candidate.
  NiceCandidate* remote_candidate = nice_candidate_new(NICE_CANDIDATE_TYPE_HOST);
  remote_candidate->username = strdup(kArbitraryRemoteUsername.c_str());
  remote_candidate->password = strdup(kArbitraryRemotePassword.c_str());
  remote_candidate->stream_id = (guint) 1;
  remote_candidate->component_id = 1;
  remote_candidate->priority = kArbitraryPriority;
  remote_candidate->transport = NICE_CANDIDATE_TRANSPORT_UDP;
  nice_address_set_from_string(&remote_candidate->addr, kArbitraryRemoteIp.c_str());
  nice_address_set_port(&remote_candidate->addr, kArbitraryRemotePort);
  nice_address_set_from_string(&remote_candidate->base_addr, kArbitraryRemoteIp.c_str());
  nice_address_set_port(&remote_candidate->base_addr, kArbitraryRemotePort);

  EXPECT_CALL(*libnice, NiceAgentGetSelectedPair(_, _, _, _, _)).Times(1).WillOnce(
    DoAll(SetArgPointee<3>(local_candidate),
          SetArgPointee<4>(remote_candidate),
          Return(true)));

  erizo::CandidatePair candidate_pair = nice_connection->getSelectedPair();

  // local -> erizo*, remote -> client*.
  EXPECT_EQ(candidate_pair.erizoCandidateIp, kArbitraryLocalIp);
  EXPECT_EQ(candidate_pair.erizoCandidatePort, kArbitraryLocalPort);
  EXPECT_EQ(candidate_pair.clientCandidateIp, kArbitraryRemoteIp);
  EXPECT_EQ(candidate_pair.clientCandidatePort, kArbitraryRemotePort);
}
// Tests that an agent endpoint handler forms // correct queries against the authorizer. TEST_P(SlaveEndpointTest, AuthorizedRequest) { const string endpoint = GetParam(); StandaloneMasterDetector detector; MockAuthorizer mockAuthorizer; Future<Nothing> recover = FUTURE_DISPATCH(_, &Slave::__recover); Try<Owned<cluster::Slave>> agent = StartSlave(&detector, &mockAuthorizer); ASSERT_SOME(agent); AWAIT_READY(recover); // Ensure that the slave has finished recovery. Clock::pause(); Clock::settle(); Clock::resume(); Future<authorization::Request> request; EXPECT_CALL(mockAuthorizer, authorized(_)) .WillOnce(DoAll(FutureArg<0>(&request), Return(true))); Future<Response> response = http::get( agent.get()->pid, endpoint, None(), createBasicAuthHeaders(DEFAULT_CREDENTIAL)); AWAIT_READY(request); const string principal = DEFAULT_CREDENTIAL.principal(); EXPECT_EQ(principal, request->subject().value()); // TODO(bbannier): Once agent endpoint handlers use more than just // `GET_ENDPOINT_WITH_PATH` we should factor out the request method // and expected authorization action and parameterize // `SlaveEndpointTest` on that as well in addition to the endpoint. EXPECT_EQ(authorization::GET_ENDPOINT_WITH_PATH, request->action()); EXPECT_EQ("/" + endpoint, request->object().value()); AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response); }
// This test ensures that a framework that is removed while // authorization for registration is in progress is properly handled. TEST_F(MasterAuthorizationTest, FrameworkRemovedBeforeRegistration) { MockAuthorizer authorizer; Try<PID<Master> > master = StartMaster(&authorizer); ASSERT_SOME(master); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL); // Return a pending future from authorizer. Future<Nothing> future; Promise<bool> promise; EXPECT_CALL(authorizer, authorize(An<const mesos::ACL::ReceiveOffers&>())) .WillOnce(DoAll(FutureSatisfy(&future), Return(promise.future()))); driver.start(); // Wait until authorization is in progress. AWAIT_READY(future); // Stop the framework. // At this point the framework is disconnected but the master does // not take any action because the framework is not in its map yet. driver.stop(); driver.join(); // Settle the clock here to ensure master handles the framework // 'exited' event. Clock::pause(); Clock::settle(); Clock::resume(); Future<Nothing> frameworkRemoved = FUTURE_DISPATCH(_, &AllocatorProcess::frameworkRemoved); // Now complete authorization. promise.set(true); // When the master tries to link to a non-existent framework PID // it should realize the framework is gone and remove it. AWAIT_READY(frameworkRemoved); Shutdown(); }
// Verifies that with 10 slaves the scheduler receives at most 10
// offers and that each offer carries a single slave's resources
// (2 cpus, 1024 mem).
TEST(MasterTest, ResourceOfferWithMultipleSlaves)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  // 10 slaves, each with 2 cpus and 1 GB of memory.
  PID<Master> master = local::launch(10, 2, 1 * Gigabyte, false, false);

  MockScheduler sched;
  MesosSchedulerDriver driver(&sched, master);

  vector<SlaveOffer> offers;
  trigger resourceOfferCall;

  EXPECT_CALL(sched, getFrameworkName(&driver))
    .WillOnce(Return(""));

  EXPECT_CALL(sched, getExecutorInfo(&driver))
    .WillOnce(Return(DEFAULT_EXECUTOR_INFO));

  EXPECT_CALL(sched, registered(&driver, _))
    .Times(1);

  EXPECT_CALL(sched, resourceOffer(&driver, _, _))
    .WillOnce(DoAll(SaveArg<2>(&offers),
                    Trigger(&resourceOfferCall)))
    .WillRepeatedly(Return());

  EXPECT_CALL(sched, offerRescinded(&driver, _))
    .Times(AtMost(1));

  driver.start();

  WAIT_UNTIL(resourceOfferCall);

  // One offer per slave, at most.
  EXPECT_NE(0, offers.size());
  EXPECT_GE(10, offers.size());

  // Each offer reflects a single slave's resources.
  Resources resources(offers[0].resources());
  EXPECT_EQ(2, resources.get("cpus", Resource::Scalar()).value());
  EXPECT_EQ(1024, resources.get("mem", Resource::Scalar()).value());

  driver.stop();
  driver.join();

  local::shutdown();
}
// Issues a DFU_DNLOAD control request, arming the mock USB
// expectations appropriate for the requested outcome: a stall (error
// status), a bare ACK, or a full data stage (payload received, then
// an ACK) followed by the data-received event.
void USBHost::DFUDownload(DownloadOutcome outcome, uint16_t block_index,
                          const uint8_t *data, uint16_t size) {
  Mock::VerifyAndClearExpectations(m_usb_mock);

  if (outcome == DOWNLOAD_OUTCOME_STALL) {
    // The device rejects the request.
    EXPECT_CALL(*m_usb_mock,
                ControlStatus(m_usb_handle, USB_DEVICE_CONTROL_STATUS_ERROR))
        .WillOnce(Return(USB_DEVICE_CONTROL_TRANSFER_RESULT_SUCCESS));
  } else if (outcome == DOWNLOAD_OUTCOME_OK) {
    // The device simply acknowledges.
    EXPECT_CALL(*m_usb_mock,
                ControlStatus(m_usb_handle, USB_DEVICE_CONTROL_STATUS_OK))
        .WillOnce(Return(USB_DEVICE_CONTROL_TRANSFER_RESULT_SUCCESS));
  } else if (outcome == DOWNLOAD_OUTCOME_RECEIVE) {
    // The device accepts a data stage: expect the payload, then an ACK.
    EXPECT_CALL(*m_usb_mock, ControlReceive(m_usb_handle, _, size))
        .WillOnce(DoAll(WithArgs<1, 2>(CopyDataFrom(data, size)),
                        Return(USB_DEVICE_CONTROL_TRANSFER_RESULT_SUCCESS)));
    EXPECT_CALL(*m_usb_mock,
                ControlStatus(m_usb_handle, USB_DEVICE_CONTROL_STATUS_OK))
        .WillOnce(Return(USB_DEVICE_CONTROL_TRANSFER_RESULT_SUCCESS));
  }
  // Any other outcome arms no expectations (matches the original
  // switch's empty default).

  USB_SETUP_PACKET setup;
  setup.bmRequestType = 0x21;  // Host-to-device, class, interface.
  setup.bRequest = DFU_DNLOAD;
  setup.wValue = block_index;
  setup.wIndex = INTERFACE;
  setup.wLength = size;
  SetupRequest(&setup, sizeof(setup));

  if (outcome == DOWNLOAD_OUTCOME_RECEIVE) {
    // Deliver the data-stage completion event to the device stack.
    m_event_handler(USB_DEVICE_EVENT_CONTROL_TRANSFER_DATA_RECEIVED,
                    nullptr, 0);
  }

  Mock::VerifyAndClearExpectations(m_usb_mock);
}
// Issues a DFU_GETSTATUS control request and decodes the device's
// status (byte 0) and state (byte 4) from the
// GET_STATUS_RESPONSE_SIZE-byte response the mock captures.
void USBHost::GetDFUStatus(DFUState *state, DFUStatus *status) {
  Mock::VerifyAndClearExpectations(m_usb_mock);

  uint8_t response[GET_STATUS_RESPONSE_SIZE];
  // The device is expected to send the status payload back; capture
  // it into 'response' for decoding below.
  EXPECT_CALL(*m_usb_mock,
              ControlSend(m_usb_handle, _, GET_STATUS_RESPONSE_SIZE))
      .WillOnce(
          DoAll(WithArgs<1, 2>(CopyDataTo(response, GET_STATUS_RESPONSE_SIZE)),
                Return(USB_DEVICE_CONTROL_TRANSFER_RESULT_SUCCESS)));

  USB_SETUP_PACKET setup;
  setup.bmRequestType = 0xa1;  // Device-to-host, class, interface.
  setup.bRequest = DFU_GETSTATUS;
  setup.wValue = 0;
  setup.wIndex = INTERFACE;
  setup.wLength = GET_STATUS_RESPONSE_SIZE;
  SetupRequest(&setup, sizeof(setup));

  Mock::VerifyAndClearExpectations(m_usb_mock);

  *status = static_cast<DFUStatus>(response[0]);
  *state = static_cast<DFUState>(response[4]);
}
// Test that we can run the mesos-executor and specify an "override"
// command to use via the --override argument.
TEST_F(SlaveTest, MesosExecutorWithOverride)
{
  Try<PID<Master> > master = StartMaster();
  ASSERT_SOME(master);

  TestContainerizer containerizer;

  Try<PID<Slave> > slave = StartSlave(&containerizer);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _))
    .Times(1);

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  // Launch a task with the command executor.
  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
  task.mutable_resources()->MergeFrom(offers.get()[0].resources());

  CommandInfo command;
  command.set_value("sleep 10");

  task.mutable_command()->MergeFrom(command);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  // Expect the launch and just assume it was successful since we'll be
  // launching the executor ourselves manually below.
  Future<Nothing> launch;
  EXPECT_CALL(containerizer, launch(_, _, _, _, _, _, _))
    .WillOnce(DoAll(FutureSatisfy(&launch),
                    Return(true)));

  // Expect wait after launch is called but don't return anything
  // until after we've finished everything below.
  Future<Nothing> wait;
  process::Promise<containerizer::Termination> promise;
  EXPECT_CALL(containerizer, wait(_))
    .WillOnce(DoAll(FutureSatisfy(&wait),
                    Return(promise.future())));

  driver.launchTasks(offers.get()[0].id(), tasks);

  // Once we get the launch the mesos-executor with --override.
  AWAIT_READY(launch);

  // Set up fake environment for executor.
  map<string, string> environment;
  environment["MESOS_SLAVE_PID"] = stringify(slave.get());
  environment["MESOS_SLAVE_ID"] = stringify(offers.get()[0].slave_id());
  environment["MESOS_FRAMEWORK_ID"] = stringify(offers.get()[0].framework_id());
  environment["MESOS_EXECUTOR_ID"] = stringify(task.task_id());
  environment["MESOS_DIRECTORY"] = "";

  // Create temporary file to store validation string. If command is
  // successfully replaced, this file will end up containing the string
  // 'hello world\n'. Otherwise, the original task command i.e.
  // 'sleep' will be called and the test will fail.
  Try<std::string> file = os::mktemp();
  ASSERT_SOME(file);

  string executorCommand =
    path::join(tests::flags.build_dir, "src", "mesos-executor") +
    " --override -- /bin/sh -c 'echo hello world >" + file.get() + "'";

  // Expect two status updates, one for once the mesos-executor says
  // the task is running and one for after our overridden command
  // above finishes.
  Future<TaskStatus> status1, status2;
  EXPECT_CALL(sched, statusUpdate(_, _))
    .WillOnce(FutureArg<1>(&status1))
    .WillOnce(FutureArg<1>(&status2));

  Try<process::Subprocess> executor =
    process::subprocess(
        executorCommand,
        process::Subprocess::PIPE(),
        process::Subprocess::PIPE(),
        process::Subprocess::PIPE(),
        environment);

  ASSERT_SOME(executor);

  // Scheduler should receive the TASK_RUNNING update.
  AWAIT_READY(status1);
  ASSERT_EQ(TASK_RUNNING, status1.get().state());

  AWAIT_READY(status2);
  ASSERT_EQ(TASK_FINISHED, status2.get().state());

  AWAIT_READY(wait);

  // Now let the containerizer report termination so teardown proceeds.
  containerizer::Termination termination;
  termination.set_killed(false);
  termination.set_message("Killed executor");
  termination.set_status(0);

  promise.set(termination);

  driver.stop();
  driver.join();

  AWAIT_READY(executor.get().status());

  // Verify file contents.
  Try<std::string> validate = os::read(file.get());
  ASSERT_SOME(validate);

  EXPECT_EQ(validate.get(), "hello world\n");

  os::rm(file.get());

  Shutdown();
}
// This test ensures that a killTask() can happen between runTask()
// and _runTask() and then gets "handled properly". This means that
// the task never gets started, but also does not get lost. The end
// result is status TASK_KILLED. Essentially, killing the task is
// realized while preparing to start it. See MESOS-947.
// Temporarily disabled due to MESOS-1945.
TEST_F(SlaveTest, DISABLED_KillTaskBetweenRunTaskParts)
{
  Try<PID<Master> > master = StartMaster();
  ASSERT_SOME(master);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);
  TestContainerizer containerizer(&exec);

  StandaloneMasterDetector detector(master.get());

  // A MockSlave is used so runTask()/_runTask()/killTask() can be
  // intercepted and interleaved manually below.
  MockSlave slave(CreateSlaveFlags(), &detector, &containerizer);
  process::spawn(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _))
    .Times(1);

  Future<vector<Offer> > offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
  task.mutable_resources()->MergeFrom(offers.get()[0].resources());
  task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  // The executor must never be reached: the task is killed before
  // launch ever happens.
  EXPECT_CALL(exec, registered(_, _, _, _))
    .Times(0);

  EXPECT_CALL(exec, launchTask(_, _))
    .Times(0);

  EXPECT_CALL(exec, shutdown(_))
    .Times(0);

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillRepeatedly(FutureArg<1>(&status));

  EXPECT_CALL(slave, runTask(_, _, _, _, _))
    .WillOnce(Invoke(&slave, &MockSlave::unmocked_runTask));

  // Saved arguments from Slave::_runTask().
  Future<bool> future;
  FrameworkInfo frameworkInfo;
  FrameworkID frameworkId;

  // Skip what Slave::_runTask() normally does, save its arguments for
  // later, tie reaching the critical moment when to kill the task to
  // a future.
  Future<Nothing> _runTask;
  EXPECT_CALL(slave, _runTask(_, _, _, _, _))
    .WillOnce(DoAll(FutureSatisfy(&_runTask),
                    SaveArg<0>(&future),
                    SaveArg<1>(&frameworkInfo),
                    SaveArg<2>(&frameworkId)));

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY(_runTask);

  Future<Nothing> killTask;
  EXPECT_CALL(slave, killTask(_, _, _))
    .WillOnce(DoAll(Invoke(&slave, &MockSlave::unmocked_killTask),
                    FutureSatisfy(&killTask)));

  driver.killTask(task.task_id());

  // Since this is the only task ever for this framework, the
  // framework should get removed in Slave::_runTask().
  // Thus we can observe that this happens before Shutdown().
  Future<Nothing> removeFramework;
  EXPECT_CALL(slave, removeFramework(_))
    .WillOnce(DoAll(Invoke(&slave, &MockSlave::unmocked_removeFramework),
                    FutureSatisfy(&removeFramework)));

  AWAIT_READY(killTask);

  // Resume the suspended _runTask() with the saved arguments — the
  // kill has already been recorded, so the task must end up KILLED.
  slave.unmocked__runTask(
      future, frameworkInfo, frameworkId, master.get(), task);

  AWAIT_READY(removeFramework);

  AWAIT_READY(status);
  EXPECT_EQ(TASK_KILLED, status.get().state());

  driver.stop();
  driver.join();

  process::terminate(slave);
  process::wait(slave);

  Shutdown(); // Must shutdown before 'containerizer' gets deallocated.
}
// Test that the pre-launch docker hook executes before launching a
// docker container. The test hook creates a file "foo" in the sandbox
// directory. When the docker container is launched, the sandbox
// directory is mounted into it, so we validate the hook by checking
// whether the "foo" file exists inside the docker container.
TEST_F(HookTest, ROOT_DOCKER_VerifySlavePreLaunchDockerHook)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  // Ownership of 'mockDocker' is transferred to the Shared pointer.
  MockDocker* mockDocker =
    new MockDocker(tests::flags.docker, tests::flags.docker_socket);

  Shared<Docker> docker(mockDocker);

  slave::Flags flags = CreateSlaveFlags();

  Fetcher fetcher;

  Try<ContainerLogger*> logger =
    ContainerLogger::create(flags.container_logger);

  ASSERT_SOME(logger);

  MockDockerContainerizer containerizer(
      flags,
      &fetcher,
      Owned<ContainerLogger>(logger.get()),
      docker);

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), &containerizer, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);

  AWAIT_READY(offers);
  ASSERT_NE(0u, offers.get().size());

  const Offer& offer = offers.get()[0];

  // NOTE(review): 'slaveId' is not referenced below — looks like
  // leftover scaffolding; confirm before removing.
  SlaveID slaveId = offer.slave_id();

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(offer.resources());

  // The task succeeds only if the "foo" file created by the hook is
  // visible in the sandbox directory mounted into the container.
  CommandInfo command;
  command.set_value("test -f " + path::join(flags.sandbox_directory, "foo"));

  ContainerInfo containerInfo;
  containerInfo.set_type(ContainerInfo::DOCKER);

  // TODO(tnachen): Use local image to test if possible.
  ContainerInfo::DockerInfo dockerInfo;
  dockerInfo.set_image("alpine");
  containerInfo.mutable_docker()->CopyFrom(dockerInfo);

  task.mutable_command()->CopyFrom(command);
  task.mutable_container()->CopyFrom(containerInfo);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<ContainerID> containerId;
  EXPECT_CALL(containerizer, launch(_, _, _, _, _, _, _, _))
    .WillOnce(DoAll(FutureArg<0>(&containerId),
                    Invoke(&containerizer,
                           &MockDockerContainerizer::_launch)));

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished))
    .WillRepeatedly(DoDefault());

  driver.launchTasks(offers.get()[0].id(), tasks);

  // Generous timeouts: pulling the 'alpine' image may be slow.
  AWAIT_READY_FOR(containerId, Seconds(60));

  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(TASK_RUNNING, statusRunning.get().state());

  AWAIT_READY_FOR(statusFinished, Seconds(60));
  EXPECT_EQ(TASK_FINISHED, statusFinished.get().state());

  Future<containerizer::Termination> termination =
    containerizer.wait(containerId.get());

  driver.stop();
  driver.join();

  AWAIT_READY(termination);

  Future<list<Docker::Container>> containers =
    docker.get()->ps(true, slave::DOCKER_NAME_PREFIX);

  AWAIT_READY(containers);

  // Cleanup all mesos launched containers.
  foreach (const Docker::Container& container, containers.get()) {
    AWAIT_READY_FOR(docker.get()->rm(container.id, true), Seconds(30));
  }
}
// This test verifies that the slave run task label decorator can add
// and remove labels from a task during the launch sequence. A task
// with two labels ("foo":"bar" and "bar":"baz") is launched and will
// get modified by the slave hook to strip the "foo":"bar" pair and
// add a new "baz":"qux" pair.
TEST_F(HookTest, VerifySlaveRunTaskHook)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);
  TestContainerizer containerizer(&exec);

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), &containerizer);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers.get().size());

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id());
  task.mutable_resources()->CopyFrom(offers.get()[0].resources());
  task.mutable_executor()->CopyFrom(DEFAULT_EXECUTOR_INFO);

  // Add two labels: (1) will be removed by the hook to ensure that
  // runTaskHook can remove labels (2) will be preserved to ensure
  // that the framework can add labels to the task and have those be
  // available by the end of the launch task sequence when hooks are
  // used (to protect against hooks removing labels completely).
  Labels* labels = task.mutable_labels();
  labels->add_labels()->CopyFrom(createLabel("foo", "bar"));
  labels->add_labels()->CopyFrom(createLabel("bar", "baz"));

  EXPECT_CALL(exec, registered(_, _, _, _));

  // Capture the TaskInfo as seen by the executor, i.e. after all
  // master and slave hooks have run.
  Future<TaskInfo> taskInfo;
  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(DoAll(
        FutureArg<1>(&taskInfo),
        SendStatusUpdateFromTask(TASK_RUNNING)));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(taskInfo);

  // The master hook will hang an extra label off.
  const Labels& labels_ = taskInfo.get().labels();

  ASSERT_EQ(3, labels_.labels_size());

  // The slave run task hook will prepend a new "baz":"qux" label.
  EXPECT_EQ("baz", labels_.labels(0).key());
  EXPECT_EQ("qux", labels_.labels(0).value());

  // Master launch task hook will still hang off test label.
  EXPECT_EQ(testLabelKey, labels_.labels(1).key());
  EXPECT_EQ(testLabelValue, labels_.labels(1).value());

  // And lastly, we only expect the "foo":"bar" pair to be stripped by
  // the module. The last pair should be the original "bar":"baz"
  // pair set by the test.
  EXPECT_EQ("bar", labels_.labels(2).key());
  EXPECT_EQ("baz", labels_.labels(2).value());

  EXPECT_CALL(exec, shutdown(_))
    .Times(AtMost(1));

  driver.stop();
  driver.join();
}
// Checks that in the event of a master failure and the election of a
// new master, if a slave reregisters before a framework that has
// resources on reregisters, all used and unused resources are
// accounted for correctly.
TYPED_TEST(AllocatorZooKeeperTest, SlaveReregistersFirst)
{
  TypeParam allocator1;
  Try<PID<Master> > master = this->StartMaster(&allocator1);
  ASSERT_SOME(master);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);

  slave::Flags flags = this->CreateSlaveFlags();
  flags.resources = Option<string>("cpus:2;mem:1024");

  Try<PID<Slave> > slave = this->StartSlave(&exec, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, stringify(this->url.get()));

  EXPECT_CALL(sched, registered(&driver, _, _));

  // The framework should be offered all of the resources on the slave
  // since it is the only framework running.
  EXPECT_CALL(sched, resourceOffers(&driver, OfferEq(2, 1024)))
    .WillOnce(LaunchTasks(1, 1, 500, "*"))
    .WillRepeatedly(DeclineOffers());

  EXPECT_CALL(exec, registered(_, _, _, _));

  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.start();

  AWAIT_READY(status);

  EXPECT_EQ(TASK_RUNNING, status.get().state());

  // Stop the failing master from telling the slave to shut down when
  // it is killed.
  Future<ShutdownMessage> shutdownMessage =
    DROP_PROTOBUF(ShutdownMessage(), _, _);

  // Stop the framework from reregistering with the new master until the
  // slave has reregistered.
  DROP_PROTOBUFS(ReregisterFrameworkMessage(), _, _);

  // Shutting down the masters will cause the scheduler to get
  // disconnected.
  EXPECT_CALL(sched, disconnected(_));

  // Shutting down the masters will also cause the slave to shutdown
  // frameworks that are not checkpointing, thus causing the executor
  // to get shutdown.
  EXPECT_CALL(exec, shutdown(_))
    .Times(AtMost(1));

  this->ShutdownMasters();

  AWAIT_READY(shutdownMessage);

  MockAllocatorProcess<TypeParam> allocator2;

  EXPECT_CALL(allocator2, initialize(_, _, _));

  Try<PID<Master> > master2 = this->StartMaster(&allocator2);
  ASSERT_SOME(master2);

  Future<Nothing> slaveAdded;
  EXPECT_CALL(allocator2, slaveAdded(_, _, _))
    .WillOnce(DoAll(InvokeSlaveAdded(&allocator2),
                    FutureSatisfy(&slaveAdded)));

  EXPECT_CALL(sched, reregistered(&driver, _));

  // The slave reregisters with the new master first...
  AWAIT_READY(slaveAdded);

  // ...and only then is the framework allowed to reregister.
  EXPECT_CALL(allocator2, frameworkAdded(_, _, _));

  Future<vector<Offer> > resourceOffers2;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&resourceOffers2));

  // We kill the filter so that ReregisterFrameworkMessages can get
  // to the master now that the framework has been added, ensuring
  // that the framework reregisters after the slave.
  process::filter(NULL);

  AWAIT_READY(resourceOffers2);

  // Since the task is still running on the slave, the framework
  // should only be offered the resources not being used by the task.
  EXPECT_THAT(resourceOffers2.get(), OfferEq(1, 524));

  // Shut everything down.
  EXPECT_CALL(allocator2, resourcesRecovered(_, _, _))
    .WillRepeatedly(DoDefault());

  EXPECT_CALL(allocator2, frameworkDeactivated(_))
    .Times(AtMost(1));

  EXPECT_CALL(allocator2, frameworkRemoved(_))
    .Times(AtMost(1));

  driver.stop();
  driver.join();

  EXPECT_CALL(allocator2, slaveRemoved(_))
    .Times(AtMost(1));

  this->Shutdown();
}
// This test verifies that a reconciliation request that comes before // '_launchTasks()' is ignored. TEST_F(MasterAuthorizationTest, ReconcileTask) { MockAuthorizer authorizer; Try<PID<Master> > master = StartMaster(&authorizer); ASSERT_SOME(master); MockExecutor exec(DEFAULT_EXECUTOR_ID); Try<PID<Slave> > slave = StartSlave(&exec); ASSERT_SOME(slave); MockScheduler sched; MesosSchedulerDriver driver( &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL); EXPECT_CALL(sched, registered(&driver, _, _)) .Times(1); Future<vector<Offer> > offers; EXPECT_CALL(sched, resourceOffers(&driver, _)) .WillOnce(FutureArg<1>(&offers)) .WillRepeatedly(Return()); // Ignore subsequent offers. driver.start(); AWAIT_READY(offers); EXPECT_NE(0u, offers.get().size()); TaskInfo task = createTask(offers.get()[0], "", DEFAULT_EXECUTOR_ID); vector<TaskInfo> tasks; tasks.push_back(task); // Return a pending future from authorizer. Future<Nothing> future; Promise<bool> promise; EXPECT_CALL(authorizer, authorize(An<const mesos::ACL::RunTasks&>())) .WillOnce(DoAll(FutureSatisfy(&future), Return(promise.future()))); driver.launchTasks(offers.get()[0].id(), tasks); // Wait until authorization is in progress. AWAIT_READY(future); // Scheduler shouldn't get an update from reconciliation. EXPECT_CALL(sched, statusUpdate(&driver, _)) .Times(0); Future<ReconcileTasksMessage> reconcileTasksMessage = FUTURE_PROTOBUF(ReconcileTasksMessage(), _, _); vector<TaskStatus> statuses; TaskStatus status; status.mutable_task_id()->CopyFrom(task.task_id()); status.mutable_slave_id()->CopyFrom(offers.get()[0].slave_id()); status.set_state(TASK_STAGING); statuses.push_back(status); driver.reconcileTasks(statuses); AWAIT_READY(reconcileTasksMessage); // Make sure the framework doesn't receive any update. Clock::pause(); Clock::settle(); // Now stop the framework. driver.stop(); driver.join(); Shutdown(); // Must shutdown before 'containerizer' gets deallocated. }
TEST(HTTPConnectionTest, Pipeline)
{
  // We use two Processes here to ensure that libprocess performs
  // pipelining correctly when requests on a single connection
  // are going to different Processes.
  Http http1, http2;

  http::URL url1 = http::URL(
      "http",
      http1.process->self().address.ip,
      http1.process->self().address.port,
      http1.process->self().id + "/get");

  http::URL url2 = http::URL(
      "http",
      http2.process->self().address.ip,
      http2.process->self().address.port,
      http2.process->self().id + "/get");

  Future<http::Connection> connect = http::connect(url1);
  AWAIT_READY(connect);

  http::Connection connection = connect.get();

  // Send three pipelined requests. Requests 1 and 3 go to 'http1'
  // (hence two WillOnce clauses there), request 2 goes to 'http2'.
  Promise<http::Response> promise1, promise2, promise3;
  Future<http::Request> get1, get2, get3;

  EXPECT_CALL(*http1.process, get(_))
    .WillOnce(DoAll(FutureArg<0>(&get1),
                    Return(promise1.future())))
    .WillOnce(DoAll(FutureArg<0>(&get3),
                    Return(promise3.future())));

  EXPECT_CALL(*http2.process, get(_))
    .WillOnce(DoAll(FutureArg<0>(&get2),
                    Return(promise2.future())));

  http::Request request1, request2, request3;

  request1.method = "GET";
  request2.method = "GET";
  request3.method = "GET";

  request1.url = url1;
  request2.url = url2;
  request3.url = url1;

  request1.body = "1";
  request2.body = "2";
  request3.body = "3";

  request1.keepAlive = true;
  request2.keepAlive = true;
  request3.keepAlive = true;

  Future<http::Response> response1 = connection.send(request1);
  // 'true' requests a streaming response for request 2.
  Future<http::Response> response2 = connection.send(request2, true);
  Future<http::Response> response3 = connection.send(request3);

  // Ensure the requests are all received before any
  // responses have been sent.
  AWAIT_READY(get1);
  AWAIT_READY(get2);
  AWAIT_READY(get3);

  EXPECT_EQ("1", get1->body);
  EXPECT_EQ("2", get2->body);
  EXPECT_EQ("3", get3->body);

  // Complete the responses in the opposite order, and ensure
  // that the pipelining in libprocess sends the responses in
  // the same order as the requests were received.
  promise3.set(http::OK("3"));
  promise2.set(http::OK("2"));

  // Nothing can be delivered until response 1 completes.
  EXPECT_TRUE(response1.isPending());
  EXPECT_TRUE(response2.isPending());
  EXPECT_TRUE(response3.isPending());

  promise1.set(http::OK("1"));

  AWAIT_READY(response1);
  AWAIT_READY(response2);
  AWAIT_READY(response3);

  EXPECT_EQ("1", response1->body);

  // Response 2 was requested as streaming, so its body is read
  // through a pipe reader; an empty read signals end-of-stream.
  ASSERT_SOME(response2->reader);

  http::Pipe::Reader reader = response2->reader.get();

  AWAIT_EQ("2", reader.read());
  AWAIT_EQ("", reader.read());

  EXPECT_EQ("3", response3->body);

  // Disconnect.
  AWAIT_READY(connection.disconnect());
  AWAIT_READY(connection.disconnected());

  // After disconnection, sends should fail.
  AWAIT_FAILED(connection.send(request1));
}
// Verifies that a single persistent connection can serve a regular
// (non-streaming) request followed by a streaming one.
TEST(HTTPConnectionTest, Serial)
{
  Http http;

  http::URL url = http::URL(
      "http",
      http.process->self().address.ip,
      http.process->self().address.port,
      http.process->self().id + "/get");

  Future<http::Connection> connect = http::connect(url);
  AWAIT_READY(connect);

  http::Connection connection = connect.get();

  // First test a regular (non-streaming) request.
  Promise<http::Response> promise1;
  Future<http::Request> get1;

  EXPECT_CALL(*http.process, get(_))
    .WillOnce(DoAll(FutureArg<0>(&get1),
                    Return(promise1.future())));

  http::Request request1;
  request1.method = "GET";
  request1.url = url;
  request1.body = "1";
  request1.keepAlive = true;

  Future<http::Response> response1 = connection.send(request1);

  AWAIT_READY(get1);
  EXPECT_EQ("1", get1->body);

  promise1.set(http::OK("1"));

  AWAIT_EXPECT_RESPONSE_BODY_EQ("1", response1);

  // Now test a streaming response.
  Promise<http::Response> promise2;
  Future<http::Request> get2;

  EXPECT_CALL(*http.process, get(_))
    .WillOnce(DoAll(FutureArg<0>(&get2),
                    Return(promise2.future())));

  http::Request request2 = request1;
  request2.body = "2";

  // 'true' requests a streaming response (body delivered via a pipe).
  Future<http::Response> response2 = connection.send(request2, true);

  AWAIT_READY(get2);
  EXPECT_EQ("2", get2->body);

  promise2.set(http::OK("2"));

  AWAIT_READY(response2);

  // An empty read signals end-of-stream.
  ASSERT_SOME(response2->reader);

  http::Pipe::Reader reader = response2->reader.get();

  AWAIT_EQ("2", reader.read());
  AWAIT_EQ("", reader.read());

  // Disconnect.
  AWAIT_READY(connection.disconnect());
  AWAIT_READY(connection.disconnected());

  // After disconnection, sends should fail.
  AWAIT_FAILED(connection.send(request1));
}
// This test verifies that two tasks each launched on a different
// slave with same executor id but different executor info are
// allowed even when the first task is pending due to authorization.
TEST_F(MasterAuthorizationTest, PendingExecutorInfoDiffersOnDifferentSlaves)
{
  MockAuthorizer authorizer;
  Try<PID<Master> > master = StartMaster(&authorizer);
  ASSERT_SOME(master);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);

  Future<Nothing> registered;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureSatisfy(&registered));

  driver.start();

  AWAIT_READY(registered);

  Future<vector<Offer> > offers1;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers1));

  // Start the first slave.
  MockExecutor exec1(DEFAULT_EXECUTOR_ID);

  Try<PID<Slave> > slave1 = StartSlave(&exec1);
  ASSERT_SOME(slave1);

  AWAIT_READY(offers1);
  EXPECT_NE(0u, offers1.get().size());

  // Launch the first task with the default executor id.
  ExecutorInfo executor1;
  executor1 = DEFAULT_EXECUTOR_INFO;
  executor1.mutable_command()->set_value("exit 1");

  TaskInfo task1 = createTask(
      offers1.get()[0], executor1.command().value(), executor1.executor_id());

  vector<TaskInfo> tasks1;
  tasks1.push_back(task1);

  // Return a pending future from authorizer. The authorization of
  // 'task1' is deliberately left outstanding so that it is still
  // pending while 'task2' is launched.
  Future<Nothing> future;
  Promise<bool> promise;
  EXPECT_CALL(authorizer, authorize(An<const mesos::ACL::RunTasks&>()))
    .WillOnce(DoAll(FutureSatisfy(&future),
                    Return(promise.future())));

  driver.launchTasks(offers1.get()[0].id(), tasks1);

  // Wait until authorization is in progress.
  AWAIT_READY(future);

  Future<vector<Offer> > offers2;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers2))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  // Now start the second slave.
  MockExecutor exec2(DEFAULT_EXECUTOR_ID);

  Try<PID<Slave> > slave2 = StartSlave(&exec2);
  ASSERT_SOME(slave2);

  AWAIT_READY(offers2);
  EXPECT_NE(0u, offers2.get().size());

  // Now launch the second task with the same executor id but
  // a different executor command.
  ExecutorInfo executor2;
  executor2 = executor1;
  executor2.mutable_command()->set_value("exit 2");

  TaskInfo task2 = createTask(
      offers2.get()[0], executor2.command().value(), executor2.executor_id());

  vector<TaskInfo> tasks2;
  tasks2.push_back(task2);

  EXPECT_CALL(exec2, registered(_, _, _, _))
    .Times(1);

  EXPECT_CALL(exec2, launchTask(_, _))
    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));

  Future<TaskStatus> status2;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status2));

  // The second task is authorized immediately.
  EXPECT_CALL(authorizer, authorize(An<const mesos::ACL::RunTasks&>()))
    .WillOnce(Return(true));

  driver.launchTasks(offers2.get()[0].id(), tasks2);

  AWAIT_READY(status2);
  ASSERT_EQ(TASK_RUNNING, status2.get().state());

  EXPECT_CALL(exec1, registered(_, _, _, _))
    .Times(1);

  EXPECT_CALL(exec1, launchTask(_, _))
    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));

  Future<TaskStatus> status1;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status1));

  // Complete authorization of 'task1'.
  promise.set(true);

  AWAIT_READY(status1);
  ASSERT_EQ(TASK_RUNNING, status1.get().state());

  EXPECT_CALL(exec1, shutdown(_))
    .Times(AtMost(1));

  EXPECT_CALL(exec2, shutdown(_))
    .Times(AtMost(1));

  driver.stop();
  driver.join();

  Shutdown();
}
// Exercises libprocess HTTP endpoints: first a plain '/body' response
// over a raw socket (HTTP/1.0 keep-alive), then a chunked '/pipe'
// response fetched via http::get.
TEST(HTTP, Endpoints)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  HttpProcess process;

  spawn(process);

  // First hit '/body' (using explicit sockets and HTTP/1.0).
  int s = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);

  ASSERT_LE(0, s);

  sockaddr_in addr;
  memset(&addr, 0, sizeof(addr));
  // Use the address family AF_INET (not the protocol family PF_INET)
  // for 'sin_family'.
  addr.sin_family = AF_INET;
  addr.sin_port = htons(process.self().port);
  addr.sin_addr.s_addr = process.self().ip;

  ASSERT_EQ(0, connect(s, (sockaddr*) &addr, sizeof(addr)));

  std::ostringstream out;
  out << "GET /" << process.self().id << "/body"
      << " HTTP/1.0\r\n"
      << "Connection: Keep-Alive\r\n"
      << "\r\n";

  const std::string& data = out.str();

  EXPECT_CALL(process, body(_))
    .WillOnce(Return(http::OK()));

  ASSERT_SOME(os::write(s, data));

  // Read the full status line. NOTE: a single ::read() may legally
  // return fewer bytes than requested, so loop until we have
  // everything we expect. (A std::string buffer replaces the previous
  // variable-length array, which is not standard C++.)
  std::string response = "HTTP/1.1 200 OK";
  std::string actual(response.size(), '\0');
  size_t offset = 0;
  while (offset < response.size()) {
    ssize_t length = ::read(s, &actual[offset], response.size() - offset);
    ASSERT_LT(0, length);
    offset += length;
  }
  ASSERT_EQ(response, actual);

  ASSERT_EQ(0, close(s));

  // Now hit '/pipe' (by using http::get).
  int pipes[2];
  ASSERT_NE(-1, ::pipe(pipes));

  http::OK ok;
  ok.type = http::Response::PIPE;
  ok.pipe = pipes[0];

  Future<Nothing> pipe;
  EXPECT_CALL(process, pipe(_))
    .WillOnce(DoAll(FutureSatisfy(&pipe),
                    Return(ok)));

  Future<http::Response> future = http::get(process.self(), "pipe");

  AWAIT_READY(pipe);

  // Write the body into the pipe and close it to terminate the
  // chunked response.
  ASSERT_SOME(os::write(pipes[1], "Hello World\n"));
  ASSERT_SOME(os::close(pipes[1]));

  AWAIT_READY(future);
  ASSERT_EQ(http::statuses[200], future.get().status);
  ASSERT_EQ("chunked", future.get().headers["Transfer-Encoding"]);
  ASSERT_EQ("Hello World\n", future.get().body);

  terminate(process);
  wait(process);
}