static void *worker_thread_entry(void *param)
{
    work_thread_info *thread = (work_thread_info *)param;
    osd_work_queue *queue = thread->queue;

    // loop until we exit
    for ( ;; )
    {
        // block waiting for work or exit
        // bail on exit, and only wait if there are no pending items in queue
        if (!queue->exiting && queue->list == NULL)
        {
            begin_timing(thread->waittime);
            osd_event_wait(thread->wakeevent, INFINITE);
            end_timing(thread->waittime);
        }
        if (queue->exiting)
            break;

        // indicate that we are live
        atomic_exchange32(&thread->active, TRUE);
        atomic_increment32(&queue->livethreads);

        // process work items
        for ( ;; )
        {
            osd_ticks_t stopspin;

            // process as much as we can
            worker_thread_process(queue, thread);

            // if we're a high frequency queue, spin for a while before giving up
            if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->list == NULL)
            {
                // spin for a while looking for more work
                begin_timing(thread->spintime);
                stopspin = osd_ticks() + SPIN_LOOP_TIME;
                do
                {
                    int spin = 10000;
                    while (--spin && queue->list == NULL)
                        osd_yield_processor();
                } while (queue->list == NULL && osd_ticks() < stopspin);
                end_timing(thread->spintime);
            }

            // if nothing more, release the processor
            if (queue->list == NULL)
                break;
            add_to_stat(&queue->spinloops, 1);
        }

        // decrement the live thread count
        atomic_exchange32(&thread->active, FALSE);
        atomic_decrement32(&queue->livethreads);
    }

    return NULL;
}
void task_pool::process_task(task& tsk)
{
    // dispatch either to a task object's execute_task method
    // or to a plain function-pointer callback
    if (tsk.m_flags & cTaskFlagObject)
        tsk.m_pObj->execute_task(tsk.m_data, tsk.m_pData_ptr);
    else
        tsk.m_callback(tsk.m_data, tsk.m_pData_ptr);

    // the task has run to completion; retire it from the outstanding count
    atomic_decrement32(&m_num_outstanding_tasks);
}
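process_task is the consuming half of a counter-balancing pattern: each queued task bumps m_num_outstanding_tasks once, and the decrement above retires it. The enqueue side is not shown; below is a minimal sketch of what it could look like, assuming a hypothetical queue_task method, a task_callback_func typedef, a lock-free m_task_stack, and an m_tasks_available semaphore, none of which appear in the snippet above.

// Sketch only: hypothetical enqueue counterpart to process_task.
// The increment happens before the task becomes visible to workers,
// so a caller polling m_num_outstanding_tasks can never observe zero
// while a queued task has not yet run.
bool task_pool::queue_task(task_callback_func pFunc, uint64 data, void* pData_ptr)
{
    task tsk;
    tsk.m_callback = pFunc;
    tsk.m_data = data;
    tsk.m_pData_ptr = pData_ptr;
    tsk.m_flags = 0;                            // plain callback, not cTaskFlagObject

    atomic_increment32(&m_num_outstanding_tasks);

    if (!m_task_stack.try_push(tsk))            // assumed lock-free stack
    {
        atomic_decrement32(&m_num_outstanding_tasks);
        return false;
    }

    m_tasks_available.release(1);               // assumed semaphore wake-up
    return true;
}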
void rtp_member_release(struct rtp_member *member)
{
    // free the member only when the last reference is dropped
    if (0 == atomic_decrement32(&member->ref))
    {
        size_t i;

        // release any SDES item payloads that were allocated
        for (i = 0; i < sizeof(member->sdes) / sizeof(member->sdes[0]); i++)
        {
            if (member->sdes[i].data)
            {
                assert(member->sdes[i].pt == i);
                assert(member->sdes[i].len > 0);
                free(member->sdes[i].data);
            }
        }

        free(member);
    }
}
static void aio_client_release(aio_client_t* client)
{
    // destroy the client only when the last reference is dropped
    if (0 == atomic_decrement32(&client->ref))
    {
        // by this point the client must be fully shut down
        assert(AIO_NONE == client->state);
        assert(invalid_aio_socket == client->socket);
        assert(RW_NONE == client->data[RECV].state);
        assert(RW_NONE == client->data[SEND].state);

        // give the owner a chance to clean up its context
        if (client->handler.ondestroy)
            client->handler.ondestroy(client->param);

        spinlock_destroy(&client->locker);

#if defined(DEBUG) || defined(_DEBUG)
        // poison the memory in debug builds to catch use-after-free
        memset(client, 0xCC, sizeof(*client));
#endif
        free(client);
    }
}
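rtp_member_release and aio_client_release share the same ownership rule: whichever caller drops the count to zero frees the object, and the `0 ==` test implies atomic_decrement32 returns the new value. The acquire side is symmetric; a minimal sketch, assuming a hypothetical aio_client_retain and that atomic_increment32 likewise returns the new value:

// Sketch only: hypothetical acquire counterpart to aio_client_release.
// This is safe only if the caller already owns a reference; otherwise
// the count could reach zero (and the object be freed) concurrently.
static void aio_client_retain(aio_client_t* client)
{
    int32_t ref = atomic_increment32(&client->ref);
    assert(ref > 1); // ref == 1 would mean we just revived a dying object
    (void)ref;
}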
static void worker_thread_process(osd_work_queue *queue, work_thread_info *thread)
{
    int threadid = thread - queue->thread;

    begin_timing(thread->runtime);

    // loop until everything is processed
    while (true)
    {
        osd_work_item *item = NULL;
        bool end_loop = false;

        // use a critical section to synchronize the removal of items
        {
            INT32 lockslot = osd_scalable_lock_acquire(queue->lock);
            if (queue->list == NULL)
            {
                end_loop = true;
            }
            else
            {
                // pull the item from the queue
                item = (osd_work_item *)queue->list;
                if (item != NULL)
                {
                    queue->list = item->next;
                    if (queue->list == NULL)
                        queue->tailptr = (osd_work_item **)&queue->list;
                }
            }
            osd_scalable_lock_release(queue->lock, lockslot);
        }

        if (end_loop)
            break;

        // process non-NULL items
        if (item != NULL)
        {
            // call the callback and stash the result
            begin_timing(thread->actruntime);
            item->result = (*item->callback)(item->param, threadid);
            end_timing(thread->actruntime);

            // decrement the item count after we are done
            atomic_decrement32(&queue->items);
            atomic_exchange32(&item->done, TRUE);
            add_to_stat(&thread->itemsdone, 1);

            // if it's an auto-release item, release it
            if (item->flags & WORK_ITEM_FLAG_AUTO_RELEASE)
                osd_work_item_release(item);

            // set the result and signal the event
            else
            {
                INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock);
                if (item->event != NULL)
                {
                    osd_event_set(item->event);
                    add_to_stat(&item->queue->setevents, 1);
                }
                osd_scalable_lock_release(item->queue->lock, lockslot);
            }

            // if we removed an item and there's still work to do, bump the stats
            if (queue_has_list_items(queue))
                add_to_stat(&queue->extraitems, 1);
        }
    }

    // we don't need to set the doneevent for multi queues because they spin
    if (queue->waiting)
    {
        osd_event_set(queue->doneevent);
        add_to_stat(&queue->setevents, 1);
    }

    end_timing(thread->runtime);
}
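worker_thread_process signals item->event only for items queued without WORK_ITEM_FLAG_AUTO_RELEASE, which is what lets a producer block until a specific item finishes. A sketch of that producer side, assuming the usual osd_work_* entry points (osd_work_queue_alloc, osd_work_item_queue, osd_work_item_wait, osd_work_item_release) declared alongside these functions:

// Sketch only: driving the queue that worker_thread_process services.
static void *square_callback(void *param, int threadid)
{
    int *value = (int *)param;
    *value = *value * *value;   // compute in place
    return param;               // stored into item->result by the worker
}

static void queue_usage_sketch(void)
{
    osd_work_queue *queue = osd_work_queue_alloc(WORK_QUEUE_FLAG_MULTI);
    int value = 7;

    // no WORK_ITEM_FLAG_AUTO_RELEASE: the worker signals item->event
    // instead of releasing the item, so we can wait on it and read results
    osd_work_item *item = osd_work_item_queue(queue, square_callback, &value, 0);
    osd_work_item_wait(item, osd_ticks_per_second());
    osd_work_item_release(item);

    osd_work_queue_free(queue);
}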
static void *worker_thread_entry(void *param)
{
    work_thread_info *thread = (work_thread_info *)param;
    osd_work_queue *queue = thread->queue;

#if defined(SDLMAME_MACOSX)
    void *arp = NewAutoreleasePool();
#endif

    // loop until we exit
    for ( ;; )
    {
        // block waiting for work or exit
        // bail on exit, and only wait if there are no pending items in queue
        if (queue->exiting)
            break;

        if (!queue_has_list_items(queue))
        {
            begin_timing(thread->waittime);
            osd_event_wait(thread->wakeevent, OSD_EVENT_WAIT_INFINITE);
            end_timing(thread->waittime);
        }

        if (queue->exiting)
            break;

        // indicate that we are live
        atomic_exchange32(&thread->active, TRUE);
        atomic_increment32(&queue->livethreads);

        // process work items
        for ( ;; )
        {
            // process as much as we can
            worker_thread_process(queue, thread);

            // if we're a high frequency queue, spin for a while before giving up
            if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->list == NULL)
            {
                // spin for a while looking for more work
                begin_timing(thread->spintime);
                spin_while(&queue->list, (osd_work_item *)NULL, SPIN_LOOP_TIME);
                end_timing(thread->spintime);
            }

            // if nothing more, release the processor
            if (!queue_has_list_items(queue))
                break;
            add_to_stat(&queue->spinloops, 1);
        }

        // decrement the live thread count
        atomic_exchange32(&thread->active, FALSE);
        atomic_decrement32(&queue->livethreads);
    }

#if defined(SDLMAME_MACOSX)
    ReleaseAutoreleasePool(arp);
#endif

    return NULL;
}
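This newer revision of worker_thread_entry replaces the first listing's inline spin loop with a spin_while helper that is not shown here. A sketch of what such a helper could look like, reconstructed from the inline loop it replaced; the name, template form, and exact signature are assumptions inferred from the call site:

// Sketch only: poll *ptr until it differs from val or the timeout
// (in osd_ticks) expires, yielding the processor between checks;
// the same shape as the inline do/while loop in the first listing.
template<typename T>
static void spin_while(const volatile T * volatile ptr, const T val, const osd_ticks_t timeout)
{
    osd_ticks_t stopspin = osd_ticks() + timeout;

    do
    {
        int spin = 10000;
        while (--spin && *ptr == val)
            osd_yield_processor();
    } while (*ptr == val && osd_ticks() < stopspin);
}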