Example #1
void TNonblockingServer::registerEvents(event_base* user_event_base) {
  userEventBase_ = user_event_base;

  // init listen socket
  if (serverSocket_ == THRIFT_INVALID_SOCKET)
    createAndListenOnSocket();

  // set up the IO threads
  assert(ioThreads_.empty());
  if (!numIOThreads_) {
    numIOThreads_ = DEFAULT_IO_THREADS;
  }

  for (uint32_t id = 0; id < numIOThreads_; ++id) {
    // the first IO thread also does the listening on server socket
    THRIFT_SOCKET listenFd = (id == 0 ? serverSocket_ : THRIFT_INVALID_SOCKET);

    shared_ptr<TNonblockingIOThread> thread(
      new TNonblockingIOThread(this, id, listenFd, useHighPriorityIOThreads_));
    ioThreads_.push_back(thread);
  }

  // Notify handler of the preServe event
  if (eventHandler_) {
    eventHandler_->preServe();
  }

  // Start all of our helper IO threads. Note that the threads run forever,
  // only terminating if stop() is called.
  assert(ioThreads_.size() == numIOThreads_);
  assert(ioThreads_.size() > 0);

  GlobalOutput.printf("TNonblockingServer: Serving on port %d, %d io threads.",
               port_, ioThreads_.size());

  // Launch all the secondary IO threads in separate threads
  if (ioThreads_.size() > 1) {
    ioThreadFactory_.reset(new PlatformThreadFactory(
#if !defined(USE_BOOST_THREAD) && !defined(USE_STD_THREAD)
      PlatformThreadFactory::OTHER,  // scheduler
      PlatformThreadFactory::NORMAL, // priority
      1,                          // stack size (MB)
#endif
      false                       // detached
    ));

    assert(ioThreadFactory_.get());

    // intentionally starting at thread 1, not 0
    for (uint32_t i = 1; i < ioThreads_.size(); ++i) {
      shared_ptr<Thread> thread = ioThreadFactory_->newThread(ioThreads_[i]);
      ioThreads_[i]->setThread(thread);
      thread->start();
    }
  }

  // Register the events for the primary (listener) IO thread
  ioThreads_[0]->registerEvents();
}
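
registerEvents() is the embedding entry point: it stores the caller-owned event_base, creates the listen socket and IO threads, launches the secondary threads, and registers the listener's events on the supplied base, leaving the caller to drive the event loop. A minimal usage sketch follows; it is not part of the source above, and the MyServiceHandler/MyServiceProcessor names, the (processor, port) constructor, and the shutdown behaviour are assumptions based on typical Thrift-generated code.

#include <event2/event.h>

// Sketch: run TNonblockingServer inside a caller-owned libevent loop.
shared_ptr<MyServiceHandler> handler(new MyServiceHandler());        // assumed generated handler
shared_ptr<TProcessor> processor(new MyServiceProcessor(handler));   // assumed generated processor
TNonblockingServer server(processor, 9090);                          // assumed (processor, port) ctor

event_base* base = event_base_new();
server.registerEvents(base);    // listener events now live on our base
event_base_dispatch(base);      // caller drives the loop; expected to unwind after server.stop()
event_base_free(base);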
/**
 * Main workhorse function, starts up the server listening on a port and
 * loops over the libevent handler.
 */
void TNonblockingServer::serve() {
  // init listen socket
  createAndListenOnSocket();

  // set up the IO threads
  assert(ioThreads_.empty());
  if (!numIOThreads_) {
    numIOThreads_ = DEFAULT_IO_THREADS;
  }

  for (uint32_t id = 0; id < numIOThreads_; ++id) {
    // the first IO thread also does the listening on server socket
    int listenFd = (id == 0 ? serverSocket_ : -1);

    shared_ptr<TNonblockingIOThread> thread(
      new TNonblockingIOThread(this, id, listenFd, useHighPriorityIOThreads_));
    ioThreads_.push_back(thread);
  }

  // Notify handler of the preServe event
  if (eventHandler_ != NULL) {
    eventHandler_->preServe();
  }

  // Start all of our helper IO threads. Note that the threads run forever,
  // only terminating if stop() is called.
  assert(ioThreads_.size() == numIOThreads_);
  assert(ioThreads_.size() > 0);

  GlobalOutput.printf("TNonblockingServer: Serving on port %d, %d io threads.",
               port_, ioThreads_.size());

  // Launch all the secondary IO threads in separate threads
  if (ioThreads_.size() > 1) {
    ioThreadFactory_.reset(new PlatformThreadFactory(
#ifndef USE_BOOST_THREAD
      PlatformThreadFactory::OTHER,  // scheduler
      PlatformThreadFactory::NORMAL, // priority
      1,                          // stack size (MB)
#endif
      false                       // detached
    ));

    assert(ioThreadFactory_.get());

    // intentionally starting at thread 1, not 0
    for (uint32_t i = 1; i < ioThreads_.size(); ++i) {
      shared_ptr<Thread> thread = ioThreadFactory_->newThread(ioThreads_[i]);
      ioThreads_[i]->setThread(thread);
      thread->start();
    }
  }

  // Run the primary (listener) IO thread loop in our main thread; this will
  // only return when the server is shutting down.
  ioThreads_[0]->run();

  // Ensure all threads are finished before exiting serve()
  for (uint32_t i = 0; i < ioThreads_.size(); ++i) {
    ioThreads_[i]->join();
    GlobalOutput.printf("TNonblocking: join done for IO thread #%d", i);
  }
}
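
serve(), by contrast, is the self-contained entry point: it performs the same setup and then runs the primary IO thread's loop on the calling thread, returning only after stop() has been called and every IO thread has been joined. A minimal blocking-use sketch follows; the generated type names, the (processor, port) constructor, and the setNumIOThreads() setter are assumptions, not taken from the code above.

// Sketch: conventional blocking use of TNonblockingServer.
shared_ptr<MyServiceHandler> handler(new MyServiceHandler());        // assumed generated handler
shared_ptr<TProcessor> processor(new MyServiceProcessor(handler));   // assumed generated processor

TNonblockingServer server(processor, 9090);   // assumed (processor, port) ctor
server.setNumIOThreads(4);                    // assumed setter for numIOThreads_ (optional)
server.serve();                               // blocks here; listener loop runs on this thread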