// Advance the netlist simulation by 'delta', dispatching queued events in
// time order. When a dedicated mainclock exists, its edges are generated
// inline here instead of going through the event queue.
ATTR_HOT void netlist_t::process_queue(const netlist_time &delta)
{
	m_stop = m_time + delta;

	if (m_mainclock != nullptr)
	{
		// Fast path: interleave queued events with implicit clock toggles.
		logic_net_t &clock_net = m_mainclock->m_Q.net().as_logic();
		const netlist_time step = m_mainclock->m_inc;
		netlist_time next_edge = clock_net.time();

		while (m_time < m_stop)
		{
			if (m_queue.is_not_empty())
			{
				// Fire every clock edge scheduled before the next queued event.
				while (m_queue.top().exec_time() > next_edge)
				{
					m_time = next_edge;
					next_edge += step;
					clock_net.toggle_new_Q();
					clock_net.update_devs();
				}
				// Dispatch the queued event itself.
				const queue_t::entry_t &event = m_queue.pop();
				m_time = event.exec_time();
				event.object()->update_devs();
			}
			else
			{
				// Queue drained: only the clock keeps ticking.
				m_time = next_edge;
				next_edge += step;
				clock_net.toggle_new_Q();
				clock_net.update_devs();
			}
			add_to_stat(m_perf_out_processed, 1);
		}
		// Record when the next clock edge is due.
		clock_net.set_time(next_edge);
	}
	else
	{
		// No dedicated main clock: plain time-ordered event dispatch.
		while ((m_time < m_stop) && (m_queue.is_not_empty()))
		{
			const queue_t::entry_t &event = m_queue.pop();
			m_time = event.exec_time();
			event.object()->update_devs();
			add_to_stat(m_perf_out_processed, 1);
		}
		// Ran dry before reaching the stop time: jump straight to it.
		if (m_queue.is_empty())
			m_time = m_stop;
	}
}
// Run the netlist for 'delta', honouring the optional dedicated mainclock.
// When FATAL_ERROR_AFTER_NS is configured (debug aid), aborts with
// error("Stopped") once the simulation time passes that mark.
ATTR_HOT ATTR_ALIGN void netlist_base_t::process_queue(const netlist_time delta)
{
	m_stop = m_time + delta;

	if (m_mainclock != NULL)
	{
		netlist_net_t &clock_net = m_mainclock->m_Q.net();
		const netlist_time step = m_mainclock->m_inc;

		while (m_time < m_stop)
		{
			if (m_queue.is_not_empty())
			{
				// Toggle the clock for every edge due before the next event.
				while (m_queue.peek().time() > clock_net.time())
				{
					m_time = clock_net.time();
					NETLIB_NAME(mainclock)::mc_update(clock_net, m_time + step);
				}
				const netlist_queue_t::entry_t &entry = m_queue.pop();
				m_time = entry.time();
				entry.object()->update_devs();
			}
			else
			{
				// Nothing queued - just keep the clock running.
				m_time = clock_net.time();
				NETLIB_NAME(mainclock)::mc_update(clock_net, m_time + step);
			}
			// Debug aid: stop hard once the configured time is exceeded.
			if (FATAL_ERROR_AFTER_NS && time() > NLTIME_FROM_NS(FATAL_ERROR_AFTER_NS))
				error("Stopped");
			add_to_stat(m_perf_out_processed, 1);
		}
	}
	else
	{
		// No mainclock: straight time-ordered event dispatch.
		while ((m_time < m_stop) && (m_queue.is_not_empty()))
		{
			const netlist_queue_t::entry_t &entry = m_queue.pop();
			m_time = entry.time();
			entry.object()->update_devs();
			add_to_stat(m_perf_out_processed, 1);
			if (FATAL_ERROR_AFTER_NS && time() > NLTIME_FROM_NS(FATAL_ERROR_AFTER_NS))
				error("Stopped");
		}
		if (m_queue.is_empty())
			m_time = m_stop;
	}
}
// Advance the netlist by 'delta'. Variant where the queue pop returns a
// pointer, so each entry is copied out before dispatch; mainclock edges
// are produced via the device's static mc_update helper.
ATTR_HOT ATTR_ALIGN void netlist_base_t::process_queue(const netlist_time &delta)
{
	m_stop = m_time + delta;

	if (m_mainclock != NULL)
	{
		netlist_logic_net_t &clock_net = m_mainclock->m_Q.net().as_logic();
		const netlist_time step = m_mainclock->m_inc;
		netlist_time next_edge = clock_net.time();

		while (m_time < m_stop)
		{
			if (m_queue.is_not_empty())
			{
				// Fire every clock edge scheduled before the next queued event.
				while (m_queue.peek()->exec_time() > next_edge)
				{
					m_time = next_edge;
					next_edge += step;
					NETLIB_NAME(mainclock)::mc_update(clock_net);
				}
				// Copy the entry out of the queue, then dispatch it.
				const netlist_queue_t::entry_t entry = *m_queue.pop();
				m_time = entry.exec_time();
				entry.object()->update_devs();
			}
			else
			{
				// Queue drained: only the clock keeps ticking.
				m_time = next_edge;
				next_edge += step;
				NETLIB_NAME(mainclock)::mc_update(clock_net);
			}
			add_to_stat(m_perf_out_processed, 1);
		}
		// Record when the next clock edge is due.
		clock_net.set_time(next_edge);
	}
	else
	{
		// No mainclock: straight time-ordered event dispatch.
		while ((m_time < m_stop) && (m_queue.is_not_empty()))
		{
			const netlist_queue_t::entry_t entry = *m_queue.pop();
			m_time = entry.exec_time();
			entry.object()->update_devs();
			add_to_stat(m_perf_out_processed, 1);
		}
		if (m_queue.is_empty())
			m_time = m_stop;
	}
}
// Entry point for a pool worker thread (pthread-style signature).
// Sleeps on its wake event until work arrives or the queue is shutting
// down, then drains items via worker_thread_process(); high-frequency
// queues spin briefly before going back to sleep to avoid wakeup latency.
// param: the thread's work_thread_info. Always returns NULL.
static void *worker_thread_entry(void *param)
{
	work_thread_info *thread = (work_thread_info *)param;
	osd_work_queue *queue = thread->queue;

	// loop until we exit
	for ( ;; )
	{
		// block waiting for work or exit
		// bail on exit, and only wait if there are no pending items in queue
		if (!queue->exiting && queue->list == NULL)
		{
			begin_timing(thread->waittime);
			osd_event_wait(thread->wakeevent, INFINITE);
			end_timing(thread->waittime);
		}
		if (queue->exiting)
			break;

		// indicate that we are live
		atomic_exchange32(&thread->active, TRUE);
		atomic_increment32(&queue->livethreads);

		// process work items
		for ( ;; )
		{
			osd_ticks_t stopspin;

			// process as much as we can
			worker_thread_process(queue, thread);

			// if we're a high frequency queue, spin for a while before giving up
			if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->list == NULL)
			{
				// spin for a while looking for more work
				begin_timing(thread->spintime);
				stopspin = osd_ticks() + SPIN_LOOP_TIME;
				do
				{
					// poll the list cheaply in batches of 10000 yields before
					// paying for another osd_ticks() timer read
					int spin = 10000;
					while (--spin && queue->list == NULL)
						osd_yield_processor();
				} while (queue->list == NULL && osd_ticks() < stopspin);
				end_timing(thread->spintime);
			}

			// if nothing more, release the processor
			if (queue->list == NULL)
				break;
			add_to_stat(&queue->spinloops, 1);
		}

		// decrement the live thread count
		atomic_exchange32(&thread->active, FALSE);
		atomic_decrement32(&queue->livethreads);
	}
	return NULL;
}
// Entry point for a pool worker thread (Win32 __stdcall signature).
// Waits on the thread's wake event, then drains the queue; high-frequency
// queues spin with YieldProcessor() for SPIN_LOOP_TIME before sleeping.
// param: the thread's work_thread_info. Always returns 0.
static unsigned __stdcall worker_thread_entry(void *param)
{
	work_thread_info *thread = param;
	osd_work_queue *queue = thread->queue;

	// loop until we exit
	for ( ;; )
	{
		// block waiting for work or exit
		// NOTE(review): 'result' is assigned but never examined afterwards
		DWORD result = WAIT_OBJECT_0;

		// bail on exit, and only wait if there are no pending items in queue
		if (!queue->exiting && queue->list == NULL)
		{
			begin_timing(thread->waittime);
			result = WaitForSingleObject(thread->wakeevent, INFINITE);
			end_timing(thread->waittime);
		}
		if (queue->exiting)
			break;

		// indicate that we are live
		interlocked_exchange32(&thread->active, TRUE);
		interlocked_increment(&queue->livethreads);

		// process work items
		for ( ;; )
		{
			osd_ticks_t stopspin;

			// process as much as we can
			worker_thread_process(queue, thread);

			// if we're a high frequency queue, spin for a while before giving up
			if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ)
			{
				// spin for a while looking for more work
				begin_timing(thread->spintime);
				stopspin = osd_ticks() + SPIN_LOOP_TIME;
				while (queue->list == NULL && osd_ticks() < stopspin)
					YieldProcessor();
				end_timing(thread->spintime);
			}

			// if nothing more, release the processor
			if (queue->list == NULL)
				break;
			add_to_stat(&queue->spinloops, 1);
		}

		// decrement the live thread count
		interlocked_exchange32(&thread->active, FALSE);
		interlocked_decrement(&queue->livethreads);
	}
	return 0;
}
// Entry point for a pool worker thread (modern variant: std::atomic queue
// state, osd_event member object). Waits for work, drains the queue, and
// on high-frequency queues spins via the spin_while() helper before
// sleeping again. param: the thread's work_thread_info. Returns nullptr.
static void *worker_thread_entry(void *param)
{
	work_thread_info *thread = (work_thread_info *)param;
	osd_work_queue &queue = thread->queue;

	// loop until we exit
	for ( ;; )
	{
		// block waiting for work or exit
		// bail on exit, and only wait if there are no pending items in queue
		if (queue.exiting)
			break;
		if (!queue_has_list_items(&queue))
		{
			begin_timing(thread->waittime);
			thread->wakeevent.wait( OSD_EVENT_WAIT_INFINITE);
			end_timing(thread->waittime);
		}
		// re-check: the wakeup may have been the shutdown signal
		if (queue.exiting)
			break;

		// indicate that we are live
		thread->active = TRUE;
		++queue.livethreads;

		// process work items
		for ( ;; )
		{
			// process as much as we can
			worker_thread_process(&queue, thread);

			// if we're a high frequency queue, spin for a while before giving up
			if (queue.flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue.list.load() == nullptr)
			{
				// spin for a while looking for more work
				begin_timing(thread->spintime);
				// busy-wait until queue.list becomes non-null or SPIN_LOOP_TIME elapses
				spin_while<std::atomic<osd_work_item *>, osd_work_item *>(&queue.list, (osd_work_item *)nullptr, SPIN_LOOP_TIME);
				end_timing(thread->spintime);
			}

			// if nothing more, release the processor
			if (!queue_has_list_items(&queue))
				break;
			add_to_stat(queue.spinloops, 1);
		}

		// decrement the live thread count
		thread->active = FALSE;
		--queue.livethreads;
	}
	return nullptr;
}
// Drain the queue from one worker thread: repeatedly pop the head item
// under the scalable lock, run its callback, publish the result/done flag,
// and either auto-release the item or signal its completion event.
// threadid (index of 'thread' within queue->thread[]) is passed to each
// callback so work can be sharded per thread.
static void worker_thread_process(osd_work_queue *queue, work_thread_info *thread)
{
	// derive our index from the position in the queue's thread array
	int threadid = thread - queue->thread;

	begin_timing(thread->runtime);

	// loop until everything is processed
	while (true)
	{
		osd_work_item *item = NULL;
		bool end_loop = false;

		// use a critical section to synchronize the removal of items
		{
			INT32 lockslot = osd_scalable_lock_acquire(queue->lock);
			if (queue->list == NULL)
			{
				end_loop = true;
			}
			else
			{
				// pull the item from the queue
				item = (osd_work_item *)queue->list;
				if (item != NULL)
				{
					queue->list = item->next;
					// list just became empty: reset tailptr to the list head
					if (queue->list == NULL)
						queue->tailptr = (osd_work_item **)&queue->list;
				}
			}
			osd_scalable_lock_release(queue->lock, lockslot);
		}
		if (end_loop)
			break;

		// process non-NULL items
		if (item != NULL)
		{
			// call the callback and stash the result
			begin_timing(thread->actruntime);
			item->result = (*item->callback)(item->param, threadid);
			end_timing(thread->actruntime);

			// decrement the item count after we are done
			atomic_decrement32(&queue->items);
			atomic_exchange32(&item->done, TRUE);
			add_to_stat(&thread->itemsdone, 1);

			// if it's an auto-release item, release it
			if (item->flags & WORK_ITEM_FLAG_AUTO_RELEASE)
				osd_work_item_release(item);

			// set the result and signal the event
			else
			{
				// the event check must happen under the lock so a concurrent
				// waiter creating the event does not miss the signal
				INT32 lockslot = osd_scalable_lock_acquire(item->queue->lock);
				if (item->event != NULL)
				{
					osd_event_set(item->event);
					add_to_stat(&item->queue->setevents, 1);
				}
				osd_scalable_lock_release(item->queue->lock, lockslot);
			}

			// if we removed an item and there's still work to do, bump the stats
			if (queue_has_list_items(queue))
				add_to_stat(&queue->extraitems, 1);
		}
	}

	// we don't need to set the doneevent for multi queues because they spin
	if (queue->waiting)
	{
		osd_event_set(queue->doneevent);
		add_to_stat(&queue->setevents, 1);
	}

	end_timing(thread->runtime);
}
// Entry point for a pool worker thread (SDL OSD variant). On macOS an
// autorelease pool is wrapped around the whole loop for Objective-C
// callbacks. Waits for work, drains the queue, and on high-frequency
// queues spins via spin_while() before sleeping. Returns NULL.
static void *worker_thread_entry(void *param)
{
	work_thread_info *thread = (work_thread_info *)param;
	osd_work_queue *queue = thread->queue;

#if defined(SDLMAME_MACOSX)
	// Cocoa callbacks need an autorelease pool on this thread
	void *arp = NewAutoreleasePool();
#endif

	// loop until we exit
	for ( ;; )
	{
		// block waiting for work or exit
		// bail on exit, and only wait if there are no pending items in queue
		if (queue->exiting)
			break;
		if (!queue_has_list_items(queue))
		{
			begin_timing(thread->waittime);
			osd_event_wait(thread->wakeevent, OSD_EVENT_WAIT_INFINITE);
			end_timing(thread->waittime);
		}
		// re-check: the wakeup may have been the shutdown signal
		if (queue->exiting)
			break;

		// indicate that we are live
		atomic_exchange32(&thread->active, TRUE);
		atomic_increment32(&queue->livethreads);

		// process work items
		for ( ;; )
		{
			// process as much as we can
			worker_thread_process(queue, thread);

			// if we're a high frequency queue, spin for a while before giving up
			if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->list == NULL)
			{
				// spin for a while looking for more work
				begin_timing(thread->spintime);
				// busy-wait until queue->list becomes non-NULL or SPIN_LOOP_TIME elapses
				spin_while(&queue->list, (osd_work_item *)NULL, SPIN_LOOP_TIME);
				end_timing(thread->spintime);
			}

			// if nothing more, release the processor
			if (!queue_has_list_items(queue))
				break;
			add_to_stat(&queue->spinloops, 1);
		}

		// decrement the live thread count
		atomic_exchange32(&thread->active, FALSE);
		atomic_decrement32(&queue->livethreads);
	}

#if defined(SDLMAME_MACOSX)
	ReleaseAutoreleasePool(arp);
#endif
	return NULL;
}
// Queue 'numitems' work items that share one callback; the parameter for
// item i is parambase + i*paramstep. Items are recycled from the queue's
// lock-free free list when possible, linked into a private list, then
// spliced onto the queue tail in one critical section. Idle threads are
// woken (one per queued item); with zero threads the work runs inline.
// Returns the last item queued, or NULL when WORK_ITEM_FLAG_AUTO_RELEASE
// is set (the items may already be freed by the time we return).
osd_work_item *osd_work_item_queue_multiple(osd_work_queue *queue, osd_work_callback callback, INT32 numitems, void *parambase, INT32 paramstep, UINT32 flags)
{
	osd_work_item *itemlist = NULL, *lastitem = NULL;
	osd_work_item **item_tailptr = &itemlist;
	// NOTE(review): this 'lockslot' is shadowed by the one declared inside
	// the allocation loop below; only the enqueue section uses this one
	INT32 lockslot;
	int itemnum;

	// loop over items, building up a local list of work
	for (itemnum = 0; itemnum < numitems; itemnum++)
	{
		osd_work_item *item;

		// first allocate a new work item; try the free list first
		INT32 lockslot = osd_scalable_lock_acquire(queue->lock);
		do
		{
			// CAS-pop the head of the free list; retry if another thread races us
			item = (osd_work_item *)queue->free;
		} while (item != NULL && compare_exchange_ptr((PVOID volatile *)&queue->free, item, item->next) != item);
		osd_scalable_lock_release(queue->lock, lockslot);

		// if nothing, allocate something new
		if (item == NULL)
		{
			// allocate the item
			item = (osd_work_item *)osd_malloc(sizeof(*item));
			if (item == NULL)
				return NULL;
			item->event = NULL;
			item->queue = queue;
			item->done = FALSE;
		}
		else
		{
			atomic_exchange32(&item->done, FALSE); // needs to be set this way to prevent data race/usage of uninitialized memory on Linux
		}

		// fill in the basics
		item->next = NULL;
		item->callback = callback;
		item->param = parambase;
		item->result = NULL;
		item->flags = flags;

		// advance to the next
		lastitem = item;
		*item_tailptr = item;
		item_tailptr = &item->next;
		parambase = (UINT8 *)parambase + paramstep;
	}

	// enqueue the whole thing within the critical section
	lockslot = osd_scalable_lock_acquire(queue->lock);
	*queue->tailptr = itemlist;
	queue->tailptr = item_tailptr;
	osd_scalable_lock_release(queue->lock, lockslot);

	// increment the number of items in the queue
	atomic_add32(&queue->items, numitems);
	add_to_stat(&queue->itemsqueued, numitems);

	// look for free threads to do the work
	if (queue->livethreads < queue->threads)
	{
		int threadnum;

		// iterate over all the threads
		for (threadnum = 0; threadnum < queue->threads; threadnum++)
		{
			work_thread_info *thread = &queue->thread[threadnum];

			// if this thread is not active, wake him up
			if (!thread->active)
			{
				osd_event_set(thread->wakeevent);
				add_to_stat(&queue->setevents, 1);

				// for non-shared, the first one we find is good enough
				// (stop once we've woken one thread per item queued)
				if (--numitems == 0)
					break;
			}
		}
	}

	// if no threads, run the queue now on this thread
	if (queue->threads == 0)
	{
		end_timing(queue->thread[0].waittime);
		worker_thread_process(queue, &queue->thread[0]);
		begin_timing(queue->thread[0].waittime);
	}

	// only return the item if it won't get released automatically
	return (flags & WORK_ITEM_FLAG_AUTO_RELEASE) ? NULL : lastitem;
}
// Drain the queue from one worker thread (modern variant: std::mutex,
// std::atomic list head, osd_event member objects). Pops the head item
// under the lock, runs its callback, marks it done, then auto-releases it
// or signals its completion event. thread->id is passed to each callback.
static void worker_thread_process(osd_work_queue *queue, work_thread_info *thread)
{
	int threadid = thread->id;

	begin_timing(thread->runtime);

	// loop until everything is processed
	while (true)
	{
		osd_work_item *item = nullptr;
		bool end_loop = false;

		// use a critical section to synchronize the removal of items
		{
			std::lock_guard<std::mutex> lock(queue->lock);

			if (queue->list.load() == nullptr)
			{
				end_loop = true;
			}
			else
			{
				// pull the item from the queue
				item = (osd_work_item *)queue->list;
				if (item != nullptr)
				{
					queue->list = item->next;
					// list just became empty: reset tailptr to the list head
					if (queue->list.load() == nullptr)
						queue->tailptr = (osd_work_item **)&queue->list;
				}
			}
		}
		if (end_loop)
			break;

		// process non-NULL items
		if (item != nullptr)
		{
			// call the callback and stash the result
			begin_timing(thread->actruntime);
			item->result = (*item->callback)(item->param, threadid);
			end_timing(thread->actruntime);

			// decrement the item count after we are done
			--queue->items;
			item->done = TRUE;
			add_to_stat(thread->itemsdone, 1);

			// if it's an auto-release item, release it
			if (item->flags & WORK_ITEM_FLAG_AUTO_RELEASE)
				osd_work_item_release(item);

			// set the result and signal the event
			else
			{
				// check/set under the lock so a waiter creating the event
				// concurrently cannot miss the signal
				std::lock_guard<std::mutex> lock(queue->lock);

				if (item->event != nullptr)
				{
					item->event->set();
					add_to_stat(item->queue.setevents, 1);
				}
			}

			// if we removed an item and there's still work to do, bump the stats
			if (queue_has_list_items(queue))
				add_to_stat(queue->extraitems, 1);
		}
	}

	// we don't need to set the doneevent for multi queues because they spin
	if (queue->waiting)
	{
		queue->doneevent.set();
		add_to_stat(queue->setevents, 1);
	}

	end_timing(thread->runtime);
}
// Queue 'numitems' work items sharing one callback (modern variant:
// std::mutex lock, std::atomic free list, new-allocated items). The
// parameter for item i is parambase + i*paramstep. Items come from the
// queue's free list when possible, are linked into a private list, then
// spliced onto the queue tail under the lock. Idle threads are woken, one
// per queued item; with zero threads the work runs inline on this thread.
// Returns the last item queued, or nullptr when WORK_ITEM_FLAG_AUTO_RELEASE
// is set (the items may already be freed by the time we return).
osd_work_item *osd_work_item_queue_multiple(osd_work_queue *queue, osd_work_callback callback, INT32 numitems, void *parambase, INT32 paramstep, UINT32 flags)
{
	osd_work_item *itemlist = nullptr, *lastitem = nullptr;
	osd_work_item **item_tailptr = &itemlist;
	int itemnum;

	// loop over items, building up a local list of work
	for (itemnum = 0; itemnum < numitems; itemnum++)
	{
		osd_work_item *item;

		// first allocate a new work item; try the free list first
		{
			std::lock_guard<std::mutex> lock(queue->lock);
			do
			{
				// CAS-pop the head of the free list; on failure 'item' is
				// reloaded with the current head and we retry
				item = (osd_work_item *)queue->free;
			} while (item != nullptr && !queue->free.compare_exchange_weak(item, item->next, std::memory_order_release, std::memory_order_relaxed));
		}

		// if nothing, allocate something new
		if (item == nullptr)
		{
			// allocate the item
			// NOTE(review): plain new throws std::bad_alloc rather than
			// returning nullptr, so the check below is effectively dead
			item = new osd_work_item(*queue);
			if (item == nullptr)
				return nullptr;
		}
		else
		{
			item->done = FALSE; // needs to be set this way to prevent data race/usage of uninitialized memory on Linux
		}

		// fill in the basics
		item->next = nullptr;
		item->callback = callback;
		item->param = parambase;
		item->result = nullptr;
		item->flags = flags;

		// advance to the next
		lastitem = item;
		*item_tailptr = item;
		item_tailptr = &item->next;
		parambase = (UINT8 *)parambase + paramstep;
	}

	// enqueue the whole thing within the critical section
	{
		std::lock_guard<std::mutex> lock(queue->lock);
		*queue->tailptr = itemlist;
		queue->tailptr = item_tailptr;
	}

	// increment the number of items in the queue
	queue->items += numitems;
	add_to_stat(queue->itemsqueued, numitems);

	// look for free threads to do the work
	if (queue->livethreads < queue->threads)
	{
		int threadnum;

		// iterate over all the threads
		for (threadnum = 0; threadnum < queue->threads; threadnum++)
		{
			work_thread_info *thread = queue->thread[threadnum];

			// if this thread is not active, wake him up
			if (!thread->active)
			{
				thread->wakeevent.set();
				add_to_stat(queue->setevents, 1);

				// for non-shared, the first one we find is good enough
				// (stop once we've woken one thread per item queued)
				if (--numitems == 0)
					break;
			}
		}
	}

	// if no threads, run the queue now on this thread
	if (queue->threads == 0)
	{
		end_timing(queue->thread[0]->waittime);
		worker_thread_process(queue, queue->thread[0]);
		begin_timing(queue->thread[0]->waittime);
	}

	// only return the item if it won't get released automatically
	return (flags & WORK_ITEM_FLAG_AUTO_RELEASE) ? nullptr : lastitem;
}