Code Example #1
File: AsyncSocketTest.cpp  Project: GYGit/folly
TEST(AsyncSocketTest, REUSEPORT) {
  EventBase base;
  // Bind the first socket to an ephemeral port and start accepting.
  auto serverSocket = AsyncServerSocket::newSocket(&base);
  serverSocket->bind(0);
  serverSocket->listen(0);
  serverSocket->startAccepting();

  try {
    serverSocket->setReusePortEnabled(true);
  } catch (...) {
    LOG(INFO) << "Reuse port probably not supported";
    return;
  }

  // Find out which port the first socket was bound to.
  SocketAddress address;
  serverSocket->getAddress(&address);
  int port = address.getPort();

  // With SO_REUSEPORT enabled, a second socket can bind to the same port.
  auto serverSocket2 = AsyncServerSocket::newSocket(&base);
  serverSocket2->setReusePortEnabled(true);
  serverSocket2->bind(port);
  serverSocket2->listen(0);
  serverSocket2->startAccepting();
}
Code Example #2
File: Acceptor.cpp  Project: timorl/neurohex
	std::shared_ptr<Connection> Acceptor::getNextConnection() {
		std::unique_lock<std::mutex> lk(mtx);
		// If nothing is queued and no accept is in flight, request one.
		if ( readySockets.empty() && waitingSockets == 0 ) {
			startAccepting(1);
		}
		// Block until acceptHandler() queues a ready socket.
		cv.wait(lk, [this]{ return !readySockets.empty(); });
		SocketP rdySocket = readySockets.front();
		readySockets.pop();
		return std::make_shared<Connection>(rdySocket);
	}
Code Example #3
File: Acceptor.cpp  Project: timorl/neurohex
	void Acceptor::acceptHandler(const boost::system::error_code& error, SocketP sockPointer){
		std::unique_lock<std::mutex> lk(mtx);
		if (!error){
			// Queue the accepted socket and wake a consumer blocked in
			// getNextConnection().
			readySockets.push(sockPointer);
			waitingSockets--;
			lk.unlock();
			cv.notify_one();
		}

		// Re-arm the asynchronous accept if more connections are still expected.
		if ( waitingSockets > 0 ) {
			startAccepting(0);
		}
	}
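
Together, code examples #2 and #3 form a small producer/consumer pair around the ready-socket queue. A consumer thread might drive it roughly as in the sketch below; serveForever() and handleConnection() are hypothetical names added for illustration, not part of the neurohex project.

	// Hypothetical consumer loop; serveForever() and handleConnection() are
	// illustrative names, not taken from the project.
	void serveForever(Acceptor& acceptor) {
		while (true) {
			// Blocks until acceptHandler() has queued a socket and
			// notified the condition variable.
			std::shared_ptr<Connection> conn = acceptor.getNextConnection();
			handleConnection(conn);
		}
	}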
Code Example #4
    void Win32NamedPipeServerTransport::onServerStart(RcfServer & server)
    {
        AsioServerTransport::onServerStart(server);

        mpIoService = mTaskEntries[0].getThreadPool().getIoService();

        RCF_ASSERT(mAcceptorPtr.get() == NULL);

        if ( !mPipeName.empty() )
        {
            startAccepting();
        }

        RCF_LOG_2()(mPipeName) << "Win32NamedPipeServerTransport - listening on named pipe.";
    }
Code Example #5
File: AsyncServerSocket.cpp  Project: genorm/folly
void AsyncServerSocket::addAcceptCallback(AcceptCallback *callback,
                                           EventBase *eventBase,
                                           uint32_t maxAtOnce) {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // If this is the first accept callback and we are supposed to be accepting,
  // start accepting once the callback is installed.
  bool runStartAccepting = accepting_ && callbacks_.empty();

  callbacks_.emplace_back(callback, eventBase);

  SCOPE_SUCCESS {
    // If this is the first accept callback and we are supposed to be accepting,
    // start accepting.
    if (runStartAccepting) {
      startAccepting();
    }
  };

  if (!eventBase) {
    // Run in AsyncServerSocket's eventbase; notify that we are
    // starting to accept connections
    callback->acceptStarted();
    return;
  }

  // Start the remote acceptor.
  //
  // It would be nice if we could avoid starting the remote acceptor if
  // eventBase == eventBase_.  However, that would cause issues if
  // detachEventBase() and attachEventBase() were ever used to change the
  // primary EventBase for the server socket.  Therefore we require the caller
  // to specify a nullptr EventBase if they want to ensure that the callback is
  // always invoked in the primary EventBase, and to be able to invoke that
  // callback more efficiently without having to use a notification queue.
  RemoteAcceptor* acceptor = nullptr;
  try {
    acceptor = new RemoteAcceptor(callback, connectionEventCallback_);
    acceptor->start(eventBase, maxAtOnce, maxNumMsgsInQueue_);
  } catch (...) {
    callbacks_.pop_back();
    delete acceptor;
    throw;
  }
  callbacks_.back().consumer = acceptor;
}
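
For context on the caller side of addAcceptCallback(), the sketch below registers a minimal AcceptCallback and then calls startAccepting(). It is an assumption-laden illustration: the LoggingAcceptCallback class and the maxAtOnce value are made up here, and the exact connectionAccepted() signature (an int fd in folly versions of this vintage, a NetworkSocket in newer releases) may differ in your tree.

#include <folly/SocketAddress.h>
#include <folly/io/async/AsyncServerSocket.h>
#include <folly/io/async/EventBase.h>
#include <glog/logging.h>
#include <unistd.h>

using folly::AsyncServerSocket;
using folly::EventBase;
using folly::SocketAddress;

// Hypothetical callback for illustration; only the two pure-virtual
// methods below have to be overridden.
class LoggingAcceptCallback : public AsyncServerSocket::AcceptCallback {
 public:
  void connectionAccepted(int fd,
                          const SocketAddress& clientAddr) noexcept override {
    LOG(INFO) << "accepted fd " << fd << " from " << clientAddr.describe();
    // This sketch does not wrap the fd in a connection object, so close it.
    ::close(fd);
  }

  void acceptError(const std::exception& ex) noexcept override {
    LOG(ERROR) << "accept error: " << ex.what();
  }
};

int main() {
  EventBase evb;
  auto socket = AsyncServerSocket::newSocket(&evb);
  socket->bind(0);    // ephemeral port
  socket->listen(16); // listen backlog

  LoggingAcceptCallback cb;
  // A nullptr EventBase runs the callback in the socket's own EventBase,
  // as explained in the comments of code example #5.
  socket->addAcceptCallback(&cb, nullptr, 30 /* maxAtOnce, arbitrary */);

  socket->startAccepting();
  // loopForever() blocks here; a real server would remove the callback and
  // stop accepting before tearing down the socket and the EventBase.
  evb.loopForever();
  return 0;
}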
Code Example #6
File: AsyncMcServer.cpp  Project: cicerocomp/mcrouter
  void spawn(AsyncMcServer::LoopFn fn, size_t threadId) {
    worker_.setOnShutdownOperation(
      [&] () {
        server_.shutdown();
      });

    thread_ = std::thread{
      [fn, threadId, this] (){
        // Set workers' debug fifo
        if (!server_.opts_.debugFifoPath.empty()) {
          if (auto fifoManager = FifoManager::getInstance()) {
            worker_.setDebugFifo(fifoManager->fetchThreadLocal(
                  server_.opts_.debugFifoPath));
          }
        }

        if (accepting_) {
          startAccepting();

          if (spawnException_) {
            return;
          }
        }

        fn(threadId, evb_, worker_);

        // Detach the server sockets from the acceptor thread.
        // If we don't do this, the TAsyncSSLServerSocket destructor
        // will try to do it, and a segfault will result if the
        // socket destructor runs after the threads' destructors.
        if (accepting_) {
          socket_.reset();
          sslSocket_.reset();
        }
      }};
  }