void Server::OnRecv(IOEvent* event, DWORD dwNumberOfBytesTransfered)
{
    assert(event);

    TRACE("[%d] Enter OnRecv()", GetCurrentThreadId());

    BYTE* buff = event->GetClient()->GetRecvBuff();
    buff[dwNumberOfBytesTransfered] = '\0';
    TRACE("[%d] OnRecv : %s", GetCurrentThreadId(), buff);

    // Create a packet by copying the recv buffer.
    Packet* packet = Packet::Create(event->GetClient(), event->GetClient()->GetRecvBuff(), dwNumberOfBytesTransfered);

    // If the logic that consumes the packet were fast enough we could run it here,
    // but assume it is slow: it is better to request the next receive ASAP and
    // handle the received packet on another thread.
    if (!TrySubmitThreadpoolCallback(Server::WorkerProcessRecvPacket, packet, NULL))
    {
        ERROR_CODE(GetLastError(), "Could not start WorkerProcessRecvPacket. call it directly.");
        Echo(packet);
    }

    PostRecv(event->GetClient());

    TRACE("[%d] Leave OnRecv()", GetCurrentThreadId());
}
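Server::WorkerProcessRecvPacket is not shown above, but since it is handed straight to TrySubmitThreadpoolCallback it must have the PTP_SIMPLE_CALLBACK signature, with the submitted Packet* arriving as the Context argument. A minimal sketch, assuming Echo() is callable from a static member and that the worker simply mirrors the fallback path in OnRecv(); none of this is from the original source:

// Hypothetical sketch - not part of the original source.
void CALLBACK Server::WorkerProcessRecvPacket(PTP_CALLBACK_INSTANCE instance, PVOID context)
{
    UNREFERENCED_PARAMETER(instance);

    // Context is the Packet* that OnRecv() submitted.
    Packet* packet = static_cast<Packet*>(context);

    // Do the potentially slow per-packet work off the IO thread.
    Echo(packet);
}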
void OnStartBatch()
{
    // Sanity checks
    if (g_hEmailEvent != NULL)
    {
        AddMessage(TEXT("Previous batch is still running..."));
        return;
    }

    AddMessage(TEXT("[%u] ----Start a new batch----"), GetCurrentThreadId());

    // Create the synchronization events
    g_hEmailEvent = CreateEvent(NULL, FALSE, FALSE, TEXT("EmailEvent"));
    g_hPrintEvent = CreateEvent(NULL, FALSE, FALSE, TEXT("PrintEvent"));

    // Define the work items to be processed by the thread pool

    // 1. Keep track of the events that will be set when each action
    //    is finished
    PSYNCHRO_DATA pSynchData = new SYNCHRO_DATA();
    pSynchData->Count = 2;
    pSynchData->Handles = new HANDLE[2];
    pSynchData->Handles[0] = g_hEmailEvent;
    pSynchData->Handles[1] = g_hPrintEvent;
    TrySubmitThreadpoolCallback((PTP_SIMPLE_CALLBACK)EndOfBatchCallback, pSynchData, &g_callbackEnvironment);

    // 2. Start the email action
    PCALLBACK_DATA pData = new CALLBACK_DATA();
    pData->finishEvent = g_hEmailEvent;
    _tcscpy_s(pData->szAction, MAX_PATH, TEXT("Email"));
    TrySubmitThreadpoolCallback((PTP_SIMPLE_CALLBACK)ActionCallback, pData, &g_callbackEnvironment);

    // 3. Start the printing action
    pData = new CALLBACK_DATA();
    pData->finishEvent = g_hPrintEvent;
    _tcscpy_s(pData->szAction, MAX_PATH, TEXT("Printing"));
    TrySubmitThreadpoolCallback((PTP_SIMPLE_CALLBACK)ActionCallback, pData, &g_callbackEnvironment);
}
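The two callbacks that OnStartBatch() submits are not shown. Below is a minimal sketch of what they might look like, assuming CALLBACK_DATA and SYNCHRO_DATA are plain structs matching the fields used above; the Sleep-based work and the cleanup are illustrative assumptions, not from the original source:

// Hypothetical sketch - performs one action and signals its completion event.
void CALLBACK ActionCallback(PTP_CALLBACK_INSTANCE instance, PVOID context)
{
    UNREFERENCED_PARAMETER(instance);
    PCALLBACK_DATA pData = static_cast<PCALLBACK_DATA>(context);

    AddMessage(TEXT("[%u] %s action starts"), GetCurrentThreadId(), pData->szAction);
    Sleep(1000 * (rand() % 5));  // pretend to do some work
    AddMessage(TEXT("[%u] %s action is done"), GetCurrentThreadId(), pData->szAction);

    // Tell the end-of-batch work item that this action has finished.
    SetEvent(pData->finishEvent);
    delete pData;
}

// Hypothetical sketch - waits for all actions of the batch, then resets the batch state.
void CALLBACK EndOfBatchCallback(PTP_CALLBACK_INSTANCE instance, PVOID context)
{
    PSYNCHRO_DATA pSynchData = static_cast<PSYNCHRO_DATA>(context);

    // This callback blocks until the whole batch is done, so tell the pool
    // that it may run for a long time.
    CallbackMayRunLong(instance);
    WaitForMultipleObjects(pSynchData->Count, pSynchData->Handles, TRUE, INFINITE);

    AddMessage(TEXT("[%u] ----End of batch----"), GetCurrentThreadId());

    // Release the events so a new batch can be started.
    CloseHandle(g_hEmailEvent);
    g_hEmailEvent = NULL;
    CloseHandle(g_hPrintEvent);
    g_hPrintEvent = NULL;
    delete[] pSynchData->Handles;
    delete pSynchData;
}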
void Server::OnClose(IOEvent* event)
{
    assert(event);

    TRACE("Client's socket has been closed.");

    // If the game logic tied to this event were fast enough we could handle it here,
    // but assume it is slow.
    if (!m_ShuttingDown && !TrySubmitThreadpoolCallback(Server::WorkerRemoveClient, event->GetClient(), &m_ClientTPENV))
    {
        ERROR_CODE(GetLastError(), "can't start WorkerRemoveClient. call it directly.");
        RemoveClient(event->GetClient());
    }
}
void OnRun()
{
    // Seed the randomizer
    srand(GetTickCount());

    for (int current = 1; current <= 6; current++)
    {
        PTSTR pRequest = new TCHAR[MAX_PATH];
        StringCchPrintf(pRequest, MAX_PATH, TEXT("Request %u"), current);

        if (TrySubmitThreadpoolCallback((PTP_SIMPLE_CALLBACK)SimpleHandler, pRequest, &g_callbackEnvironment))
        {
            AddMessage(TEXT("[%u] Request %u is submitted"), GetCurrentThreadId(), current);
        }
        else
        {
            AddMessage(TEXT("[%u] Request %u can't be submitted"), GetCurrentThreadId(), current);
            delete[] pRequest;  // the callback will never run, so free the request here
        }
    }
}
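SimpleHandler is not shown above; it receives the request string as its Context. A minimal sketch, assuming the callback owns the string and frees it when done (the simulated Sleep is illustrative, not from the original source):

// Hypothetical sketch - handles one submitted request.
void CALLBACK SimpleHandler(PTP_CALLBACK_INSTANCE instance, PVOID context)
{
    UNREFERENCED_PARAMETER(instance);
    PTSTR pRequest = static_cast<PTSTR>(context);

    AddMessage(TEXT("[%u] %s is being handled"), GetCurrentThreadId(), pRequest);
    Sleep(1000 * (rand() % 4));  // pretend the request takes some time
    AddMessage(TEXT("[%u] %s has been handled"), GetCurrentThreadId(), pRequest);

    // The submitter allocated the string with new TCHAR[]; release it here.
    delete[] pRequest;
}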
void Server::OnAccept(IOEvent* event)
{
    assert(event);
    TRACE("[%d] Enter OnAccept()", GetCurrentThreadId());

    assert(event->GetType() == IOEvent::ACCEPT);

    // Check if we need to post more accept requests.
    InterlockedDecrement(&m_NumPostAccept);

    // Add the client on a different thread, because this function must return ASAP
    // so this IO worker thread can process the other IO notifications.
    // If adding a client were fast enough we could do it here, but assume it is slow.
    if (!m_ShuttingDown && !TrySubmitThreadpoolCallback(Server::WorkerAddClient, event->GetClient(), &m_ClientTPENV))
    {
        ERROR_CODE(GetLastError(), "Could not start WorkerAddClient.");
        AddClient(event->GetClient());
    }

    TRACE("[%d] Leave OnAccept()", GetCurrentThreadId());
}
/****************************************************************************
 * The main async help function.
 *
 * It either starts a thread or just calls the function directly for platforms
 * with no thread support. This relies on the fact that PostMessage() does
 * not actually call the windowproc before the function returns.
 */
static HANDLE run_query( HWND hWnd, UINT uMsg, LPARAM (*func)(struct async_query_header *),
                         struct async_query_header *query, void *sbuf, INT sbuflen )
{
    static LONG next_handle = 0xdead;
    ULONG handle;

    do
        handle = LOWORD( InterlockedIncrement( &next_handle ));
    while (!handle); /* avoid handle 0 */

    query->func    = func;
    query->hWnd    = hWnd;
    query->uMsg    = uMsg;
    query->handle  = UlongToHandle( handle );
    query->sbuf    = sbuf;
    query->sbuflen = sbuflen;

    if (!TrySubmitThreadpoolCallback( async_worker, query, NULL ))
    {
        SetLastError( WSAEWOULDBLOCK );
        HeapFree( GetProcessHeap(), 0, query );
        return 0;
    }
    return UlongToHandle( handle );
}
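async_worker is the other half of run_query() and is not shown here. Given the fields that run_query() fills in, it would look roughly like the sketch below; the exact behavior, and where the query block is freed after the message has been handled, is an assumption rather than something taken from the original source:

/* Hypothetical sketch of the thread-pool side of run_query(). */
static void CALLBACK async_worker( PTP_CALLBACK_INSTANCE instance, void *arg )
{
    struct async_query_header *query = (struct async_query_header *)arg;
    LPARAM lparam;

    /* run the actual query on the pool thread, then report completion to the
     * window that requested it; the handle identifies the request */
    lparam = query->func( query );
    PostMessageW( query->hWnd, query->uMsg, (WPARAM)query->handle, lparam );

    (void)instance;  /* unused */
}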