Example #1
/**
 * Creates a new connection either by reusing an object off the stack or
 * by allocating a new one entirely
 */
TNonblockingServer::TConnection* TNonblockingServer::createConnection(
    THRIFT_SOCKET socket, const sockaddr* addr, socklen_t addrLen) {
  // Acquire the connection mutex; it protects the stack, the active list, and the counters
  Guard g(connMutex_);

  // pick an IO thread to handle this connection -- currently round robin
  assert(nextIOThread_ < ioThreads_.size());
  int selectedThreadIdx = nextIOThread_;
  nextIOThread_ = static_cast<uint32_t>((nextIOThread_ + 1) % ioThreads_.size());

  TNonblockingIOThread* ioThread = ioThreads_[selectedThreadIdx].get();

  // Check the connection stack to see if we can re-use
  TConnection* result = NULL;
  if (connectionStack_.empty()) {
    result = new TConnection(socket, ioThread, addr, addrLen);
    ++numTConnections_;
  } else {
    result = connectionStack_.top();
    connectionStack_.pop();
    result->init(socket, ioThread, addr, addrLen);
  }
  activeConnections_.push_back(result);
  return result;
}
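The complementary return path is not shown above: a finished connection would be erased from activeConnections_ and either cached on connectionStack_ for re-use or deleted outright. A minimal, hedged sketch of that path, assuming a pool size cap named connectionStackLimit_ (that member and the exact signature are assumptions, not taken from the snippet above; std::remove needs <algorithm>):

void TNonblockingServer::returnConnection(TConnection* connection) {
  Guard g(connMutex_);

  // Drop the connection from the active list.
  activeConnections_.erase(std::remove(activeConnections_.begin(),
                                       activeConnections_.end(),
                                       connection),
                           activeConnections_.end());

  // Cache it for re-use unless the pool is already at its (assumed) cap.
  if (connectionStackLimit_ && (connectionStack_.size() >= connectionStackLimit_)) {
    delete connection;
    --numTConnections_;
  } else {
    connectionStack_.push(connection);
  }
}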
Example #2
void TNonblockingServer::expireClose(boost::shared_ptr<Runnable> task) {
  TConnection* connection =
    static_cast<TConnection::Task*>(task.get())->getTConnection();
  assert(connection && connection->getServer() &&
         connection->getState() == APP_WAIT_TASK);
  connection->forceClose();
}
/**
 * The server socket became readable: accept the pending client connection
 * and assign a TConnection object to handle it.
 */
void TNonblockingServer::handleEvent(THRIFT_SOCKET fd, short which) {
  (void)which;
  // Make sure that libevent didn't mess up the socket handles
  assert(fd == serverSocket_);

  // Accept a new client socket
  stdcxx::shared_ptr<TSocket> clientSocket = serverTransport_->accept();
  if (clientSocket) {
    // If we're overloaded, take action here
    if (overloadAction_ != T_OVERLOAD_NO_ACTION && serverOverloaded()) {
      Guard g(connMutex_);
      nConnectionsDropped_++;
      nTotalConnectionsDropped_++;
      if (overloadAction_ == T_OVERLOAD_CLOSE_ON_ACCEPT) {
        clientSocket->close();
        return;
      } else if (overloadAction_ == T_OVERLOAD_DRAIN_TASK_QUEUE) {
        if (!drainPendingTask()) {
          // Nothing left to discard, so we drop connection instead.
          clientSocket->close();
          return;
        }
      }
    }

    // Create a new TConnection for this client socket.
    TConnection* clientConnection = createConnection(clientSocket);

    // Fail fast if we could not create a TConnection object
    if (clientConnection == NULL) {
      GlobalOutput.printf("thriftServerEventHandler: failed TConnection factory");
      clientSocket->close();
      return;
    }

    /*
     * Either notify the ioThread that is assigned this connection to
     * start processing, or if it is us, we'll just ask this
     * connection to do its initial state change here.
     *
     * (We need to avoid writing to our own notification pipe, to
     * avoid possible deadlocks if the pipe is full.)
     *
     * The IO thread #0 is the only one that handles these listen
     * events, so unless the connection has been assigned to thread #0
     * we know it's not on our thread.
     */
    if (clientConnection->getIOThreadNumber() == 0) {
      clientConnection->transition();
    } else {
      if (!clientConnection->notifyIOThread()) {
        GlobalOutput.perror("[ERROR] notifyIOThread failed on fresh connection, closing", errno);
        clientConnection->close();
      }
    }
  }
}
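notifyIOThread() above is only a thin wrapper; the interesting part is how the connection pointer crosses threads. A hedged sketch of the underlying notify step, assuming each IO thread owns a notification pipe whose write end is exposed by a getNotificationSendFD() accessor (both names are assumptions consistent with, but not shown in, the snippet above):

bool TNonblockingIOThread::notify(TNonblockingServer::TConnection* conn) {
  // Write the raw pointer to the IO thread's notification pipe; the IO thread's
  // libevent callback reads it back and calls conn->transition() on that thread.
  const char* pos = reinterpret_cast<const char*>(&conn);
  size_t left = sizeof(conn);
  while (left > 0) {
    long ret = send(getNotificationSendFD(), pos, static_cast<int>(left), 0);
    if (ret <= 0) {
      return false;  // the caller closes the connection on failure
    }
    left -= static_cast<size_t>(ret);
    pos += ret;
  }
  return true;
}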
Example #4
// Thread entry point for the asynchronous connect; the signature differs by platform.
#ifndef WIN32
void* UDT::ThreadTryConnect(void* s)
#else
unsigned int WINAPI UDT::ThreadTryConnect(LPVOID s)
#endif
{
	TConnection *pConn = (TConnection *)s;
	pConn->ThreadConnect();
#ifndef WIN32
	return NULL;
#else
	return 0;
#endif
}
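ThreadTryConnect() is only the entry point; something has to spawn it with the connection as the opaque thread argument. A hedged sketch of that launch step under the same WIN32 split (StartTryConnect is a hypothetical helper, not part of the code above; it needs <pthread.h> or <process.h> respectively):

// Hypothetical helper: run ThreadTryConnect() on its own thread, passing the
// connection object through the opaque thread argument.
bool UDT::StartTryConnect(TConnection *pConn)
{
#ifndef WIN32
	pthread_t tid;
	if (0 != pthread_create(&tid, NULL, UDT::ThreadTryConnect, pConn))
	{
		return false;
	}
	pthread_detach(tid);	// fire-and-forget, mirroring the WIN32 branch
	return true;
#else
	unsigned int tid = 0;
	HANDLE hThread = (HANDLE)_beginthreadex(NULL, 0, UDT::ThreadTryConnect, pConn, 0, &tid);
	if (NULL == hThread)
	{
		return false;
	}
	CloseHandle(hThread);	// the thread keeps running; we only drop our handle
	return true;
#endif
}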
bool TNonblockingServer::drainPendingTask() {
  if (threadManager_) {
    stdcxx::shared_ptr<Runnable> task = threadManager_->removeNextPending();
    if (task) {
      TConnection* connection = static_cast<TConnection::Task*>(task.get())->getTConnection();
      assert(connection && connection->getServer() && connection->getState() == APP_WAIT_TASK);
      connection->forceClose();
      return true;
    }
  }
  return false;
}
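drainPendingTask() and expireClose() (Example #2) only run if the ThreadManager has been told about them. A hedged sketch of that wiring, assuming Thrift's ThreadManager::setExpireCallback() hook; the setter shown here mirrors, but is not copied from, the real server:

void TNonblockingServer::setThreadManager(boost::shared_ptr<ThreadManager> threadManager) {
  threadManager_ = threadManager;
  if (threadManager) {
    // Expired tasks are handed back to expireClose(), which force-closes
    // the connection that was waiting on them.
    threadManager->setExpireCallback(
        boost::bind(&TNonblockingServer::expireClose, this, _1));
  }
}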
Example #6
void ConnMng::OnData( int iConn,int iLen, const char *pData )
{
	TConnection *pConn = NULL;
	{
		// Look up the connection without inserting a new entry for an unknown socket.
		JMutexAutoLock tmpGuard(m_Sock2ConnMutex);
		map<UDTSOCKET,UDT::TConnection *>::iterator it = m_mapSock2Conn.find(iConn);
		if (it!=m_mapSock2Conn.end())
		{
			pConn = (*it).second;
		}
	}
	if (NULL==pConn)
	{
		return;
	}
	pConn->OnData(pData,iLen);
}
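OnData() is only the dispatch end of the receive path; a polling loop has to pull bytes off the UDT socket and forward them here. A hedged sketch of one receive step (RecvLoopOnce and the 4 KB buffer are hypothetical, not taken from the code above):

// Hypothetical receive step: drain one chunk from the UDT socket and hand it
// to OnData(), which routes it to the owning TConnection.
void ConnMng::RecvLoopOnce(UDTSOCKET sock)
{
	char buf[4096];
	int iRecv = UDT::recv(sock, buf, (int)sizeof(buf), 0);
	if (UDT::ERROR == iRecv)
	{
		// Treat a receive error as a broken connection.
		OnConnectionMsg(sock, NL_CODE_BREAKDOWN, 0);
		return;
	}
	if (iRecv > 0)
	{
		OnData(sock, iRecv, buf);
	}
}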
Example #7
void ConnMng::OnConnectionMsg( int iConn,enNetworkCode code,int param )
{
	(void)param; // unused
	TConnection *pConn = NULL;
	{
		JMutexAutoLock tmpGuard(m_Sock2ConnMutex);
		map<UDTSOCKET,UDT::TConnection *>::iterator it = m_mapSock2Conn.find(iConn);
		if (it!=m_mapSock2Conn.end())
		{
			pConn = (*it).second;
		}
	}

	if (NL_CODE_NEWCONN==code)
	{
		if (NULL==pConn)
		{
			TConnection *pNewConn = m_pParentMng->ConnFactory(iConn);
			if (!pNewConn) // a NULL return means the connection is refused
			{
				ASSERT(FALSE);
				UDT::close(iConn);
				return;
			}
			// tianzuo, 2009-6-2: shouldn't passively accepted connections also get an OnConnected() notification?
			//pNewConn->OnConnected(true);
			return;
		}
		else
		{
			pConn->OnConnected(true);
			return;
		}
	}
	else if (NL_CODE_BREAKDOWN==code)
	{
		if (!pConn)
		{
			return;
		}
		pConn->OnDisConnected();
	}
}
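The NL_CODE_NEWCONN branch above depends on the new connection becoming visible to later OnData()/OnConnectionMsg() lookups, which is not shown. A hedged sketch of that bookkeeping as a pair of ConnMng helpers (RegisterConn/UnregisterConn are hypothetical names; presumably ConnFactory() or its caller would invoke the first one):

// Hypothetical helpers: maintain the socket-to-connection map under the same
// mutex that OnData()/OnConnectionMsg() take for their lookups.
void ConnMng::RegisterConn(int iConn, UDT::TConnection *pConn)
{
	JMutexAutoLock tmpGuard(m_Sock2ConnMutex);
	m_mapSock2Conn[iConn] = pConn;
}

void ConnMng::UnregisterConn(int iConn)
{
	JMutexAutoLock tmpGuard(m_Sock2ConnMutex);
	m_mapSock2Conn.erase(iConn);
}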
Example #8
/**
 * The server socket became readable: accept all waiting client connections
 * on fd and assign TConnection objects to handle those requests.
 */
void TNonblockingServer::handleEvent(THRIFT_SOCKET fd, short which) {
  (void) which;
  // Make sure that libevent didn't mess up the socket handles
  assert(fd == serverSocket_);

  // Storage for the peer address that accept() fills in
  sockaddr_storage addrStorage;
  sockaddr* addrp = (sockaddr*)&addrStorage;
  socklen_t addrLen = sizeof(addrStorage);

  // Going to accept a new client socket
  THRIFT_SOCKET clientSocket;

  // Accept as many new clients as possible. Even though libevent signaled only
  // one event, this saves us repeated trips back into the libevent engine.
  while ((clientSocket = ::accept(fd, addrp, &addrLen)) != -1) {
    // If we're overloaded, take action here
    if (overloadAction_ != T_OVERLOAD_NO_ACTION && serverOverloaded()) {
      Guard g(connMutex_);
      nConnectionsDropped_++;
      nTotalConnectionsDropped_++;
      if (overloadAction_ == T_OVERLOAD_CLOSE_ON_ACCEPT) {
        ::THRIFT_CLOSESOCKET(clientSocket);
        return;
      } else if (overloadAction_ == T_OVERLOAD_DRAIN_TASK_QUEUE) {
        if (!drainPendingTask()) {
          // Nothing left to discard, so we drop connection instead.
          ::THRIFT_CLOSESOCKET(clientSocket);
          return;
        }
      }
    }

    // Explicitly set this socket to NONBLOCK mode
    int flags;
    if ((flags = THRIFT_FCNTL(clientSocket, THRIFT_F_GETFL, 0)) < 0 ||
        THRIFT_FCNTL(clientSocket, THRIFT_F_SETFL, flags | THRIFT_O_NONBLOCK) < 0) {
      GlobalOutput.perror("thriftServerEventHandler: set THRIFT_O_NONBLOCK (THRIFT_FCNTL) ", THRIFT_GET_SOCKET_ERROR);
      ::THRIFT_CLOSESOCKET(clientSocket);
      return;
    }

    // Create a new TConnection for this client socket.
    TConnection* clientConnection =
      createConnection(clientSocket, addrp, addrLen);

    // Fail fast if we could not create a TConnection object
    if (clientConnection == NULL) {
      GlobalOutput.printf("thriftServerEventHandler: failed TConnection factory");
      ::THRIFT_CLOSESOCKET(clientSocket);
      return;
    }

    /*
     * Either notify the ioThread that is assigned this connection to
     * start processing, or if it is us, we'll just ask this
     * connection to do its initial state change here.
     *
     * (We need to avoid writing to our own notification pipe, to
     * avoid possible deadlocks if the pipe is full.)
     *
     * The IO thread #0 is the only one that handles these listen
     * events, so unless the connection has been assigned to thread #0
     * we know it's not on our thread.
     */
    if (clientConnection->getIOThreadNumber() == 0) {
      clientConnection->transition();
    } else {
      clientConnection->notifyIOThread();
    }

    // addrLen is written by the accept() call, so needs to be set before the next call.
    addrLen = sizeof(addrStorage);
  }


  // Done looping on accept(); make sure the loop ended only because the call
  // would have blocked. Any other error is a real problem.
  if (THRIFT_GET_SOCKET_ERROR != THRIFT_EAGAIN && THRIFT_GET_SOCKET_ERROR != THRIFT_EWOULDBLOCK) {
    GlobalOutput.perror("thriftServerEventHandler: accept() ", THRIFT_GET_SOCKET_ERROR);
  }
}
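handleEvent() never registers itself with libevent; the IO thread's setup code has to bind the listening socket to it through a static trampoline. A hedged sketch using the classic libevent event_set()/event_add() API (listenHandler, registerListenEvent and the member names are assumptions consistent with, but not shown in, the snippet above):

// Static trampoline with the signature libevent expects; it simply forwards
// to the server's handleEvent() shown above.
/* static */ void TNonblockingIOThread::listenHandler(evutil_socket_t fd, short which, void* v) {
  ((TNonblockingServer*)v)->handleEvent(fd, which);
}

// Bind the listening socket to this IO thread's event base so every readable
// event drives the accept loop in handleEvent().
void TNonblockingIOThread::registerListenEvent() {
  event_set(&serverEvent_, listenSocket_, EV_READ | EV_PERSIST,
            TNonblockingIOThread::listenHandler, server_);
  event_base_set(eventBase_, &serverEvent_);
  if (event_add(&serverEvent_, NULL) == -1) {
    GlobalOutput.perror("registerListenEvent: event_add() failed ", THRIFT_GET_SOCKET_ERROR);
  }
}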