Example #1
// Test that when multiple requests are queued waiting for security to be
// established, the RequestContext still flows correctly with each request.
void runRequestContextTest(bool failSecurity) {
  ScopedServerThread sst(getServer());
  TEventBase base;
  auto channel = getClientChannel(&base, *sst.getAddress(), failSecurity);
  TestServiceAsyncClient client(std::move(channel));
  Countdown c(2, [&base](){base.terminateLoopSoon();});

  // Send the first request with a unique RequestContext. This triggers the
  // security handshake; the rest of the requests queue behind it.
  folly::RequestContext::create();
  folly::RequestContext::get()->setContextData("first", nullptr);
  client.sendResponse([&base,&client,&c](ClientReceiveState&& state) {
    EXPECT_TRUE(folly::RequestContext::get()->hasContextData("first"));
    c.down();
  }, 10);

  // Send another request with a unique RequestContext. This request would
  // queue behind the first one inside HeaderClientChannel.
  folly::RequestContext::create();
  folly::RequestContext::get()->setContextData("second", nullptr);
  client.sendResponse([&base,&client,&c](ClientReceiveState&& state) {
    EXPECT_FALSE(folly::RequestContext::get()->hasContextData("first"));
    EXPECT_TRUE(folly::RequestContext::get()->hasContextData("second"));
    c.down();
  }, 10);

  // Only now start looping the event base, which guarantees that all the
  // requests above have already queued.
  base.loopForever();
}
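
The Countdown object above is a small test helper that isn't shown in the
snippet. A minimal sketch of what it presumably does, assuming it simply
invokes its callback after the given number of down() calls:

#include <functional>

// Hypothetical sketch of the Countdown helper (not from the original source):
// fires the callback once down() has been called n times.
class Countdown {
 public:
  Countdown(int n, std::function<void()> f) : n_(n), f_(std::move(f)) {}
  void down() {
    if (--n_ == 0) {
      f_();  // e.g. terminateLoopSoon(), ending the test's event loop
    }
  }
 private:
  int n_;
  std::function<void()> f_;
};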
Example #2
TEST(ThriftServer, CallbackOrderingTest) {
  apache::thrift::TestThriftServerFactory<TestInterface> factory;
  auto server = factory.create();
  auto serverHandler = std::make_shared<TestServerEventHandler>();

  TProcessorBase::addProcessorEventHandlerFactory(serverHandler);
  server->setServerEventHandler(serverHandler);

  ScopedServerThread sst(server);
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
    TAsyncSocket::newSocket(&base, *sst.getAddress()));

  TestServiceAsyncClient client(
    std::unique_ptr<HeaderClientChannel,
                    apache::thrift::async::TDelayedDestruction::Destructor>(
                      new HeaderClientChannel(socket)));

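  // Send a request the server never answers, then close the socket shortly
  // afterwards; serverHandler->check() below verifies the event-handler
  // callbacks fired in the expected order.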
  client.noResponse([](ClientReceiveState&& state){}, 10000);
  base.tryRunAfterDelay([&](){
    socket->closeNow();
  }, 1);
  base.tryRunAfterDelay([&](){
    base.terminateLoopSoon();
  }, 20);
  base.loopForever();
  serverHandler->check();
  TProcessorBase::removeProcessorEventHandlerFactory(serverHandler);
}
Example #3
TEST(RequestContext, SimpleTest) {
  TEventBase base;

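  // create() returns whether a context was already set: false the first
  // time, true once one exists.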
  EXPECT_FALSE(RequestContext::create());
  EXPECT_TRUE(RequestContext::create());
  EXPECT_TRUE(RequestContext::get() != nullptr);

  EXPECT_EQ(nullptr, RequestContext::get()->getContextData("test"));

  RequestContext::get()->setContextData(
    "test",
    std::unique_ptr<TestData>(new TestData(10)));
  base.runInEventBaseThread([&](){
      EXPECT_TRUE(RequestContext::get() != nullptr);
      auto data = dynamic_cast<TestData*>(
        RequestContext::get()->getContextData("test"))->data_;
      EXPECT_EQ(10, data);
      base.terminateLoopSoon();
    });
  auto th = std::thread([&](){
      base.loopForever();
  });
  th.join();
  EXPECT_TRUE(RequestContext::get() != nullptr);
  auto a = dynamic_cast<TestData*>(
    RequestContext::get()->getContextData("test"));
  auto data = a->data_;
  EXPECT_EQ(10, data);

  RequestContext::setContext(std::shared_ptr<RequestContext>());
  // There should always be a default context
  EXPECT_TRUE(nullptr != RequestContext::get());
}
Example #4
TEST(ThriftServer, IdleTimeoutAfterTest) {
  ScopedServerThread sst(getServer());

  TEventBase base;

  std::shared_ptr<TAsyncSocket> socket(
    TAsyncSocket::newSocket(&base, *sst.getAddress()));

  auto client_channel = HeaderClientChannel::newChannel(socket);
  auto client_channelp = client_channel.get();
  CloseChecker checker;

  client_channel->setCloseCallback(&checker);
  TestServiceAsyncClient client(std::move(client_channel));
  std::string ret;
  client.sync_sendResponse(ret, 20);

  EXPECT_FALSE(checker.getClosed());

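  // Loop past the server's idle timeout; the channel should be closed by
  // the time the loop exits.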
  base.tryRunAfterDelay([&base](){
      base.terminateLoopSoon();
    }, 200);
  base.loopForever();
  EXPECT_TRUE(checker.getClosed());
  client_channelp->setCloseCallback(nullptr);
}
Example #5
void TEventServer::stop() {
  // TODO: We really need a memory fence or some locking here to ensure that
  // the compiler doesn't optimize out eventBase.  In practice, most users will
  // only call stop() when the server is actually serving, so this shouldn't be
  // much of an issue.
  TEventBase* eventBase = serveEventBase_;
  if (eventBase) {
    eventBase->terminateLoopSoon();
  }
}
Example #6
void runTest(std::function<void(HeaderClientChannel* channel)> setup) {
  ScopedServerThread sst(getServer());
  TEventBase base;
  auto channel = getClientChannel(&base, *sst.getAddress());
  setup(channel.get());
  TestServiceAsyncClient client(std::move(channel));
  Countdown c(3, [&base](){base.terminateLoopSoon();});

  client.sendResponse([&base,&client,&c](ClientReceiveState&& state) {
    EXPECT_FALSE(state.isException());
    EXPECT_TRUE(state.isSecurityActive());
    std::string res;
    try {
      TestServiceAsyncClient::recv_sendResponse(res, state);
    } catch(const std::exception&) {
      EXPECT_TRUE(false);
    }
    EXPECT_EQ(res, "10");
    c.down();
  }, 10);

  // Fail the test if it is still running after 5 seconds.
  base.tryRunAfterDelay([] {EXPECT_TRUE(false);}, 5000);

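  // After a short delay, issue two more requests; security should already
  // be active on the channel by then.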
  base.tryRunAfterDelay([&client,&base,&c] {
    client.sendResponse([&base,&c](ClientReceiveState&& state) {
      EXPECT_FALSE(state.isException());
      EXPECT_TRUE(state.isSecurityActive());
      std::string res;
      try {
        TestServiceAsyncClient::recv_sendResponse(res, state);
      } catch(const std::exception&) {
        EXPECT_TRUE(false);
      }
      EXPECT_EQ(res, "10");
      c.down();
    }, 10);
    client.sendResponse([&base,&c](ClientReceiveState&& state) {
      EXPECT_FALSE(state.isException());
      EXPECT_TRUE(state.isSecurityActive());
      std::string res;
      try {
        TestServiceAsyncClient::recv_sendResponse(res, state);
      } catch(const std::exception&) {
        EXPECT_TRUE(false);
      }
      EXPECT_EQ(res, "10");
      c.down();
    }, 10);
  }, 1);

  base.loopForever();
}
Example #7
void async_tm_update(unique_ptr<HandlerCallback<int32_t>> callback,
                     int32_t currentIndex) override {
  // Release ownership; resultInThread() completes the call and frees the
  // callback.
  auto callbackp = callback.release();
  EXPECT_EQ(currentIndex, expectIndex_);
  expectIndex_++;
  TEventBase* eb = callbackp->getEventBase();
  callbackp->resultInThread(currentIndex);
  if (expectIndex_ == lastIndex_) {
    success_ = true;
    // Once the last call has arrived, stop the loop from its own thread.
    eb->runInEventBaseThread([eb] { eb->terminateLoopSoon(); });
  }
}
Example #8
TEST(ThriftServer, IdleTimeoutTest) {

  TEventBase base;

  auto port = Server::get(getServer)->getAddress().getPort();
  std::shared_ptr<TAsyncSocket> socket(
    TAsyncSocket::newSocket(&base, "127.0.0.1", port));

  auto client_channel = HeaderClientChannel::newChannel(socket);
  CloseChecker checker;
  client_channel->setCloseCallback(&checker);
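  // No request is ever sent, so the server's idle timeout should close the
  // connection before the 100ms delay expires.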
  base.runAfterDelay([&base](){
      base.terminateLoopSoon();
    }, 100);
  base.loopForever();
  EXPECT_TRUE(checker.getClosed());
}
Example #9
TEST(ThriftServer, IdleTimeoutTest) {
  ScopedServerThread sst(getServer());

  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
    TAsyncSocket::newSocket(&base, *sst.getAddress()));

  auto client_channel = HeaderClientChannel::newChannel(socket);
  CloseChecker checker;
  client_channel->setCloseCallback(&checker);
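  // The connection stays idle, so the server should close it before the
  // 100ms delay expires.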
  base.tryRunAfterDelay([&base](){
      base.terminateLoopSoon();
    }, 100);
  base.loopForever();
  EXPECT_TRUE(checker.getClosed());
  client_channel->setCloseCallback(nullptr);
}
Example #10
TEST(ThriftServer, ShutdownSocketSetTest) {
  apache::thrift::TestThriftServerFactory<TestInterface> factory;
  auto server = factory.create();
  ScopedServerThread sst(server);
  TEventBase base;
  ReadCallbackTest cb;

  std::shared_ptr<TAsyncSocket> socket2(
    TAsyncSocket::newSocket(&base, *sst.getAddress()));
  socket2->setReadCallback(&cb);

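  // Shut the server down while the socket is connected; the read callback
  // should observe EOF.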
  base.tryRunAfterDelay([&](){
      server->immediateShutdown(true);
    }, 10);
  base.tryRunAfterDelay([&](){
      base.terminateLoopSoon();
    }, 30);
  base.loopForever();
  EXPECT_EQ(cb.eof, true);
}
Example #11
void ThriftServer::stop() {
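  // Break the serve event base out of its loop, if the server is running;
  // serve() then returns.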
  TEventBase* eventBase = serveEventBase_;
  if (eventBase) {
    eventBase->terminateLoopSoon();
  }
}
Example #12
void QueueTest::maxReadAtOnce() {
  // Add 100 messages to the queue
  for (int n = 0; n < 100; ++n) {
    queue.putMessage(n);
  }

  TEventBase eventBase;

  // Record how many messages were processed each loop iteration.
  uint32_t messagesThisLoop = 0;
  std::vector<uint32_t> messagesPerLoop;
  std::function<void()> loopFinished = [&] {
    // Record the current number of messages read this loop
    messagesPerLoop.push_back(messagesThisLoop);
    // Reset messagesThisLoop to 0 for the next loop
    messagesThisLoop = 0;

    // To prevent use-after-free bugs when eventBase destructs,
    // prevent calling runInLoop any more after the test is finished.
    // 55 == number of times loop should run.
    if (messagesPerLoop.size() != 55) {
      // Reschedule ourself to run at the end of the next loop
      eventBase.runInLoop(loopFinished);
    }
  };
  // Schedule the first call to loopFinished
  eventBase.runInLoop(loopFinished);

  QueueConsumer consumer;
  // Read the first 50 messages 10 at a time.
  consumer.setMaxReadAtOnce(10);
  consumer.fn = [&](int value) {
    ++messagesThisLoop;
    // After 50 messages, drop to reading only 1 message at a time.
    if (value == 50) {
      consumer.setMaxReadAtOnce(1);
    }
    // Terminate the loop when we reach the end of the messages.
    if (value == 99) {
      eventBase.terminateLoopSoon();
    }
  };
  consumer.startConsuming(&eventBase, &queue);

  // Run the event loop until the consumer terminates it
  eventBase.loop();

  // The consumer should have read all 100 messages in order
  BOOST_CHECK_EQUAL(consumer.messages.size(), 100);
  for (int n = 0; n < 100; ++n) {
    BOOST_CHECK_EQUAL(consumer.messages.at(n), n);
  }

  // Currently TEventBase happens to still run the loop callbacks even after
  // terminateLoopSoon() is called.  However, we don't really want to depend
  // on this behavior.  In case this ever changes in the future, add
  // messagesThisLoop to messagesPerLoop here if the loop callback wasn't
  // invoked for the last loop iteration.
  if (messagesThisLoop > 0) {
    messagesPerLoop.push_back(messagesThisLoop);
    messagesThisLoop = 0;
  }

  // For the first 5 loops it should have read 10 messages each time.
  // After that it should have read 1 message per loop for the next 50 loops.
  BOOST_CHECK_EQUAL(messagesPerLoop.size(), 55);
  for (int n = 0; n < 5; ++n) {
    BOOST_CHECK_EQUAL(messagesPerLoop.at(n), 10);
  }
  for (int n = 5; n < 55; ++n) {
    BOOST_CHECK_EQUAL(messagesPerLoop.at(n), 1);
  }
}
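
All of these examples share one pattern: something schedules
terminateLoopSoon(), and the blocking loop()/loopForever() call returns once
it runs. A minimal standalone sketch of that pattern, using folly::EventBase
(the modern name for TEventBase):

#include <folly/io/async/EventBase.h>

int main() {
  folly::EventBase base;
  // Queue a callback into the loop; it ends the loop when its work is done.
  base.runInEventBaseThread([&base] {
    // ... do some work ...
    base.terminateLoopSoon();  // makes loopForever() return
  });
  base.loopForever();  // blocks until terminateLoopSoon() is called
  return 0;
}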