static void *worker_thread_entry(void *param)
{
    work_thread_info *thread = (work_thread_info *)param;
    osd_work_queue *queue = thread->queue;

    // loop until we exit
    for ( ;; )
    {
        // block waiting for work or exit
        // bail on exit, and only wait if there are no pending items in queue
        if (!queue->exiting && queue->list == NULL)
        {
            begin_timing(thread->waittime);
            osd_event_wait(thread->wakeevent, INFINITE);
            end_timing(thread->waittime);
        }
        if (queue->exiting)
            break;

        // indicate that we are live
        atomic_exchange32(&thread->active, TRUE);
        atomic_increment32(&queue->livethreads);

        // process work items
        for ( ;; )
        {
            osd_ticks_t stopspin;

            // process as much as we can
            worker_thread_process(queue, thread);

            // if we're a high frequency queue, spin for a while before giving up
            if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->list == NULL)
            {
                // spin for a while looking for more work
                begin_timing(thread->spintime);
                stopspin = osd_ticks() + SPIN_LOOP_TIME;
                do
                {
                    int spin = 10000;
                    while (--spin && queue->list == NULL)
                        osd_yield_processor();
                } while (queue->list == NULL && osd_ticks() < stopspin);
                end_timing(thread->spintime);
            }

            // if nothing more, release the processor
            if (queue->list == NULL)
                break;
            add_to_stat(&queue->spinloops, 1);
        }

        // decrement the live thread count
        atomic_exchange32(&thread->active, FALSE);
        atomic_decrement32(&queue->livethreads);
    }
    return NULL;
}
// It's the object's responsibility to delete pObj within the execute_task() method, if needed!
bool task_pool::queue_task(executable_task *pObj, uint64_t data, void *pData_ptr)
{
    VOGL_ASSERT(pObj);

    task tsk;
    tsk.m_pObj = pObj;
    tsk.m_data = data;
    tsk.m_pData_ptr = pData_ptr;
    tsk.m_flags = cTaskFlagObject;

    atomic_increment32(&m_total_submitted_tasks);

    if (!m_task_stack.try_push(tsk))
    {
        atomic_increment32(&m_total_completed_tasks);
        return false;
    }

    m_tasks_available.release(1);

    return true;
}
bool task_pool::queue_task(task_callback_func pFunc, uint64_t data, void *pData_ptr)
{
    VOGL_ASSERT(pFunc);

    task tsk;
    tsk.m_callback = pFunc;
    tsk.m_data = data;
    tsk.m_pData_ptr = pData_ptr;
    tsk.m_flags = 0;

    atomic_increment32(&m_total_submitted_tasks);

    if (!m_task_stack.try_push(tsk))
    {
        atomic_increment32(&m_total_completed_tasks);
        return false;
    }

    m_tasks_available.release(1);

    return true;
}
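For context, a minimal usage sketch of the callback variant of queue_task() shown above. The callback signature (uint64_t, void *) follows from how process_task() invokes m_callback below; everything else here (the function names, the loop, and how the pool is constructed and initialized) is an assumption for illustration, not taken from the snippets.

#include <cstdint>
#include <cstdio>

// Hypothetical callback; its signature matches how process_task() calls
// tsk.m_callback(tsk.m_data, tsk.m_pData_ptr).
static void example_task(uint64_t data, void *pData_ptr)
{
    (void)pData_ptr;
    printf("running task %llu\n", (unsigned long long)data);
}

// Assumed to receive an already-initialized pool; construction/init details
// are not shown in the snippets and are outside this sketch.
static void submit_example_tasks(task_pool &pool)
{
    for (uint64_t i = 0; i < 8; i++)
    {
        // queue_task() returns false when the internal task stack is full.
        if (!pool.queue_task(example_task, i, NULL))
            printf("task %llu could not be queued\n", (unsigned long long)i);
    }
}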
void task_pool::process_task(task &tsk)
{
    if (tsk.m_flags & cTaskFlagObject)
        tsk.m_pObj->execute_task(tsk.m_data, tsk.m_pData_ptr);
    else
        tsk.m_callback(tsk.m_data, tsk.m_pData_ptr);

    if (atomic_increment32(&m_total_completed_tasks) == m_total_submitted_tasks)
    {
        // Try to signal the semaphore (the max count is 1 so this may actually fail).
        m_all_tasks_completed.try_release();
    }
}
static void http_client_test_onreply(void *p, void *http, int code)
{
    if (p)
    {
        atomic_increment32((int32_t *)p);
    }

    if (0 == code)
    {
        const char *server = http_client_get_header(http, "Server");
        if (server)
            printf("http server: %s\n", server);
    }
    else
    {
        printf("http server reply error: %d\n", code);
    }
}
INLINE INT32 scalable_lock_acquire(scalable_lock *lock)
{
#if USE_SCALABLE_LOCKS
    INT32 myslot = (atomic_increment32(&lock->nextindex) - 1) & (WORK_MAX_THREADS - 1);
    INT32 backoff = 1;

    while (!lock->slot[myslot].haslock)
    {
        INT32 backcount;
        for (backcount = 0; backcount < backoff; backcount++)
            YieldProcessor();
        backoff <<= 1;
    }
    lock->slot[myslot].haslock = FALSE;
    return myslot;
#else
    EnterCriticalSection(&lock->section);
    return 0;
#endif
}
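The matching release for this slot-based lock is not included above. Here is a sketch of what it plausibly looks like, inferred from the acquire side rather than copied from the source: the releasing thread grants the lock to the next slot in the ring, which is exactly what the backoff loop in scalable_lock_acquire() is spinning on.

// Sketch only: hand the lock to the next slot in the ring so the thread
// spinning on lock->slot[next].haslock can proceed. Inferred from the
// acquire side above; the real implementation may differ in detail.
INLINE void scalable_lock_release(scalable_lock *lock, INT32 myslot)
{
#if USE_SCALABLE_LOCKS
    lock->slot[(myslot + 1) & (WORK_MAX_THREADS - 1)].haslock = TRUE;
#else
    LeaveCriticalSection(&lock->section);
#endif
}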
// It's the object's responsibility to delete pObj within the execute_task() method, if needed!
bool task_pool::queue_task(executable_task* pObj, uint64 data, void* pData_ptr)
{
    LZHAM_ASSERT(m_num_threads);
    LZHAM_ASSERT(pObj);

    task tsk;
    tsk.m_pObj = pObj;
    tsk.m_data = data;
    tsk.m_pData_ptr = pData_ptr;
    tsk.m_flags = cTaskFlagObject;

    if (!m_task_stack.try_push(tsk))
        return false;

    atomic_increment32(&m_num_outstanding_tasks);
    m_tasks_available.release(1);

    return true;
}
bool task_pool::queue_task(task_callback_func pFunc, uint64 data, void* pData_ptr)
{
    LZHAM_ASSERT(m_num_threads);
    LZHAM_ASSERT(pFunc);

    task tsk;
    tsk.m_callback = pFunc;
    tsk.m_data = data;
    tsk.m_pData_ptr = pData_ptr;
    tsk.m_flags = 0;

    if (!m_task_stack.try_push(tsk))
        return false;

    atomic_increment32(&m_num_outstanding_tasks);
    m_tasks_available.release(1);

    return true;
}
static void worker(void* param)
{
    int i = 0;
    int n = *(int*)param + 1;

    while (i++ < n)
    {
#if defined(OS_MAC)
        system_sleep(arc4random() % 30);
#else
        system_sleep(rand() % 30);
#endif

        if (210 == atomic_increment32(&total))
        {
            printf("[%d] I'm the KING\n", n);
            assert(i == n);
            break;
        }
    }

    printf("[%d] done\n", n);
}
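All of these snippets rely on atomic_increment32() returning the post-increment value (note the "- 1" in scalable_lock_acquire(), the "== m_total_submitted_tasks" check in process_task(), and the "210 ==" check above). A small illustration of that contract follows; the ticket-counter use case and names are invented for illustration, and the exact counter parameter type varies per project.

#include <stdint.h>

// atomic_increment32() atomically adds 1 and returns the *new* value, so
// every caller observes a distinct result even under contention.
static volatile int32_t g_next_ticket = 0;

static int32_t claim_ticket(void)
{
    // Two threads calling this concurrently can never receive the same ticket.
    return atomic_increment32(&g_next_ticket);
}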
static void *worker_thread_entry(void *param)
{
    work_thread_info *thread = (work_thread_info *)param;
    osd_work_queue *queue = thread->queue;

#if defined(SDLMAME_MACOSX)
    void *arp = NewAutoreleasePool();
#endif

    // loop until we exit
    for ( ;; )
    {
        // block waiting for work or exit
        // bail on exit, and only wait if there are no pending items in queue
        if (queue->exiting)
            break;

        if (!queue_has_list_items(queue))
        {
            begin_timing(thread->waittime);
            osd_event_wait(thread->wakeevent, OSD_EVENT_WAIT_INFINITE);
            end_timing(thread->waittime);
        }

        if (queue->exiting)
            break;

        // indicate that we are live
        atomic_exchange32(&thread->active, TRUE);
        atomic_increment32(&queue->livethreads);

        // process work items
        for ( ;; )
        {
            // process as much as we can
            worker_thread_process(queue, thread);

            // if we're a high frequency queue, spin for a while before giving up
            if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->list == NULL)
            {
                // spin for a while looking for more work
                begin_timing(thread->spintime);
                spin_while(&queue->list, (osd_work_item *)NULL, SPIN_LOOP_TIME);
                end_timing(thread->spintime);
            }

            // if nothing more, release the processor
            if (!queue_has_list_items(queue))
                break;
            add_to_stat(&queue->spinloops, 1);
        }

        // decrement the live thread count
        atomic_exchange32(&thread->active, FALSE);
        atomic_decrement32(&queue->livethreads);
    }

#if defined(SDLMAME_MACOSX)
    ReleaseAutoreleasePool(arp);
#endif

    return NULL;
}
void rtp_member_addref(struct rtp_member *member)
{
    atomic_increment32(&member->ref);
}
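An addref like this is normally paired with a release that decrements the same counter and frees the object when it reaches zero. Below is a generic sketch of that pairing, assuming a plain free() suffices; the function name is hypothetical and this is not the project's actual release code.

#include <stdlib.h>

// Generic counterpart sketch: drop one reference and free the member when
// the last reference goes away. Real code would also release any resources
// the member owns before freeing it.
void rtp_member_unref_sketch(struct rtp_member *member)
{
    if (0 == atomic_decrement32(&member->ref))
        free(member);
}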