// Construct an IOCP-backed connection around an already-created socket.
// The socket is handed to the ISocketConnection base; per-instance state is
// set up by Init() using the supplied overlapped-I/O allocator, and the
// global live-connection counter is bumped atomically.
// NOTE(review): presumably a matching interlocked_decrement happens in the
// destructor/close path — confirm elsewhere in this file.
IOCPConnection::IOCPConnection( OverlappedAllocator* pAllocator, const Socket& sock ) : ISocketConnection(sock)
{
	// allocate/attach per-connection overlapped structures before the
	// connection becomes visible anywhere
	Init( pAllocator );
	// track the number of live connections (atomic: constructors may run
	// concurrently on multiple I/O threads)
	interlocked_increment( &g_connectionCount );
}
// Main loop for one work-queue worker thread (Windows _beginthreadex entry).
// param is the thread's work_thread_info. The thread sleeps on its wake event
// while the queue is empty, then marks itself active, drains work items via
// worker_thread_process(), optionally spins briefly on high-frequency queues
// to catch newly arriving items, and finally marks itself idle again.
// Terminates (returning 0) when queue->exiting is set.
static unsigned __stdcall worker_thread_entry(void *param)
{
	work_thread_info *thread = param;
	osd_work_queue *queue = thread->queue;

	// loop until we exit
	for ( ;; )
	{
		// block waiting for work or exit
		// NOTE(review): 'result' is written but never examined afterwards
		DWORD result = WAIT_OBJECT_0;

		// bail on exit, and only wait if there are no pending items in queue
		if (!queue->exiting && queue->list == NULL)
		{
			begin_timing(thread->waittime);
			result = WaitForSingleObject(thread->wakeevent, INFINITE);
			end_timing(thread->waittime);
		}

		// re-check exiting after the wait: the wake event is also used to
		// signal shutdown
		if (queue->exiting)
			break;

		// indicate that we are live
		interlocked_exchange32(&thread->active, TRUE);
		interlocked_increment(&queue->livethreads);

		// process work items
		for ( ;; )
		{
			osd_ticks_t stopspin;

			// process as much as we can
			worker_thread_process(queue, thread);

			// if we're a high frequency queue, spin for a while before giving up
			if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ)
			{
				// spin for a while looking for more work; this trades CPU for
				// latency on queues that expect items to arrive in bursts
				begin_timing(thread->spintime);
				stopspin = osd_ticks() + SPIN_LOOP_TIME;
				while (queue->list == NULL && osd_ticks() < stopspin)
					YieldProcessor();
				end_timing(thread->spintime);
			}

			// if nothing more, release the processor
			if (queue->list == NULL)
				break;
			add_to_stat(&queue->spinloops, 1);
		}

		// decrement the live thread count
		interlocked_exchange32(&thread->active, FALSE);
		interlocked_decrement(&queue->livethreads);
	}
	return 0;
}
/*
 * Acquire the scalable (ticket-style) lock.
 *
 * When USE_SCALABLE_LOCKS is enabled, each acquirer atomically takes the next
 * slot index (a "ticket") and spins, with exponential backoff, until the
 * previous holder hands the lock to that slot by setting its haslock flag.
 * The flag is consumed (cleared) on acquisition. Returns the slot index,
 * which the caller must pass back to the matching release function.
 *
 * Otherwise this degenerates to a plain critical section and returns 0.
 *
 * Fix over the original: the backoff doubling is now capped. Previously
 * 'backoff <<= 1' ran unbounded while spinning, so after ~31 doublings the
 * signed left shift reached the sign bit — undefined behavior in C — and
 * long waits produced absurdly large YieldProcessor() loops.
 */
INLINE INT32 scalable_lock_acquire(scalable_lock *lock)
{
#if USE_SCALABLE_LOCKS
	// take a ticket: next slot index, wrapping at the (power-of-two) table size
	INT32 myslot = (interlocked_increment(&lock->nextindex) - 1) & (WORK_MAX_THREADS - 1);
	INT32 backoff = 1;

	// spin until the previous holder hands the lock to our slot
	while (!lock->slot[myslot].haslock)
	{
		INT32 backcount;
		for (backcount = 0; backcount < backoff; backcount++)
			YieldProcessor();
		// exponential backoff, capped to bound wakeup latency and to avoid
		// shifting into the sign bit (signed-overflow UB)
		if (backoff < 0x8000)
			backoff <<= 1;
	}

	// consume the hand-off flag so the slot can be reused on the next wrap
	lock->slot[myslot].haslock = FALSE;
	return myslot;
#else
	EnterCriticalSection(&lock->section);
	return 0;
#endif
}