Example #1
static void *worker_thread_entry(void *param)
{
	work_thread_info *thread = (work_thread_info *)param;
	osd_work_queue *queue = thread->queue;

	// loop until we exit
	for ( ;; )
	{
		// block waiting for work or exit
		// bail on exit, and only wait if there are no pending items in queue
		if (!queue->exiting && queue->list == NULL)
		{
			begin_timing(thread->waittime);
			osd_event_wait(thread->wakeevent, INFINITE);
			end_timing(thread->waittime);
		}
		if (queue->exiting)
			break;

		// indicate that we are live
		atomic_exchange32(&thread->active, TRUE);
		atomic_increment32(&queue->livethreads);

		// process work items
		for ( ;; )
		{
			osd_ticks_t stopspin;

			// process as much as we can
			worker_thread_process(queue, thread);

			// if we're a high frequency queue, spin for a while before giving up
			if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->list == NULL)
			{
				// spin for a while looking for more work
				begin_timing(thread->spintime);
				stopspin = osd_ticks() + SPIN_LOOP_TIME;
				
				do {
					int spin = 10000;
					while (--spin && queue->list == NULL)
						osd_yield_processor();
				} while (queue->list == NULL && osd_ticks() < stopspin);
				end_timing(thread->spintime);
			}

			// if nothing more, release the processor
			if (queue->list == NULL)
				break;
			add_to_stat(&queue->spinloops, 1);
		}

		// decrement the live thread count
		atomic_exchange32(&thread->active, FALSE);
		atomic_decrement32(&queue->livethreads);
	}
	return NULL;
}
Example #2
static unsigned __stdcall worker_thread_entry(void *param)
{
	work_thread_info *thread = param;
	osd_work_queue *queue = thread->queue;

	// loop until we exit
	for ( ;; )
	{
		// block waiting for work or exit
		DWORD result = WAIT_OBJECT_0;

		// bail on exit, and only wait if there are no pending items in queue
		if (!queue->exiting && queue->list == NULL)
		{
			begin_timing(thread->waittime);
			result = WaitForSingleObject(thread->wakeevent, INFINITE);
			end_timing(thread->waittime);
		}
		if (queue->exiting)
			break;

		// indicate that we are live
		interlocked_exchange32(&thread->active, TRUE);
		interlocked_increment(&queue->livethreads);

		// process work items
		for ( ;; )
		{
			osd_ticks_t stopspin;

			// process as much as we can
			worker_thread_process(queue, thread);

			// if we're a high frequency queue, spin for a while before giving up
			if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ)
			{
				// spin for a while looking for more work
				begin_timing(thread->spintime);
				stopspin = osd_ticks() + SPIN_LOOP_TIME;
				while (queue->list == NULL && osd_ticks() < stopspin)
					YieldProcessor();
				end_timing(thread->spintime);
			}

			// if nothing more, release the processor
			if (queue->list == NULL)
				break;
			add_to_stat(&queue->spinloops, 1);
		}

		// decrement the live thread count
		interlocked_exchange32(&thread->active, FALSE);
		interlocked_decrement(&queue->livethreads);
	}
	return 0;
}
Example #3
static void *worker_thread_entry(void *param)
{
	work_thread_info *thread = (work_thread_info *)param;
	osd_work_queue &queue = thread->queue;

	// loop until we exit
	for ( ;; )
	{
		// block waiting for work or exit
		// bail on exit, and only wait if there are no pending items in queue
		if (queue.exiting)
			break;

		if (!queue_has_list_items(&queue))
		{
			begin_timing(thread->waittime);
			thread->wakeevent.wait(OSD_EVENT_WAIT_INFINITE);
			end_timing(thread->waittime);
		}

		if (queue.exiting)
			break;

		// indicate that we are live
		thread->active = TRUE;
		++queue.livethreads;

		// process work items
		for ( ;; )
		{
			// process as much as we can
			worker_thread_process(&queue, thread);

			// if we're a high frequency queue, spin for a while before giving up
			if (queue.flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue.list.load() == nullptr)
			{
				// spin for a while looking for more work
				begin_timing(thread->spintime);
				spin_while<std::atomic<osd_work_item *>, osd_work_item *>(&queue.list, (osd_work_item *)nullptr, SPIN_LOOP_TIME);
				end_timing(thread->spintime);
			}

			// if nothing more, release the processor
			if (!queue_has_list_items(&queue))
				break;
			add_to_stat(queue.spinloops, 1);
		}

		// decrement the live thread count
		thread->active = FALSE;
		--queue.livethreads;
	}

	return nullptr;
}
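
Example #3 (and Example #7 below) call a spin_while helper that is not included in these snippets. A plausible reconstruction, consistent with the open-coded batched spin in Examples #1 and #4 and with the call site spin_while(&queue.list, (osd_work_item *)nullptr, SPIN_LOOP_TIME), is sketched below; the actual OSD helper may differ in detail, so treat this as an assumption rather than the real implementation.

// Reconstruction of the spin_while helper used in Examples #3 and #7: poll
// *atom until it no longer equals val or until the timeout expires, reading
// the clock only once per batch of 10000 polls, the same trick as the
// open-coded loops in Examples #1 and #4. Sketch only, not the real code.
template <typename AtomType, typename MainType>
static void spin_while(const volatile AtomType *atom, const MainType val, const osd_ticks_t timeout)
{
	osd_ticks_t stopspin = osd_ticks() + timeout;

	do
	{
		int spin = 10000;
		while (--spin && *atom == val)
			osd_yield_processor();
	} while (*atom == val && osd_ticks() < stopspin);
}
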
Example #4
int osd_work_queue_wait(osd_work_queue *queue, osd_ticks_t timeout)
{
	// if no threads, no waiting
	if (queue->threads == 0)
		return TRUE;

	// if no items, we're done
	if (queue->items == 0)
		return TRUE;

	// if this is a multi queue, help out rather than doing nothing
	if (queue->flags & WORK_QUEUE_FLAG_MULTI)
	{
		work_thread_info *thread = &queue->thread[queue->threads];

		end_timing(thread->waittime);

		// process what we can as a worker thread
		worker_thread_process(queue, thread);

		// if we're a high frequency queue, spin until done
		if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->items != 0)
		{
			osd_ticks_t stopspin = osd_ticks() + timeout;

			// spin until we're done
			begin_timing(thread->spintime);

			do {
				int spin = 10000;
				while (--spin && queue->items != 0)
					osd_yield_processor();
			} while (queue->items != 0 && osd_ticks() < stopspin);
			end_timing(thread->spintime);

			begin_timing(thread->waittime);
			return (queue->items == 0);
		}
		begin_timing(thread->waittime);
	}

	// reset our done event and double-check the items before waiting
	osd_event_reset(queue->doneevent);
	atomic_exchange32(&queue->waiting, TRUE);
	if (queue->items != 0)
		osd_event_wait(queue->doneevent, timeout);
	atomic_exchange32(&queue->waiting, FALSE);

	// return TRUE if we actually hit 0
	return (queue->items == 0);
}
Example #5
int osd_work_queue_wait(osd_work_queue *queue, osd_ticks_t timeout)
{
	// if no threads, no waiting
	if (queue->threads == 0)
		return TRUE;

	// if no items, we're done
	if (queue->items == 0)
		return TRUE;

	// if this is a multi queue, help out rather than doing nothing
	if (queue->flags & WORK_QUEUE_FLAG_MULTI)
	{
		work_thread_info *thread = &queue->thread[queue->threads];
		osd_ticks_t stopspin = osd_ticks() + timeout;

		end_timing(thread->waittime);

		// process what we can as a worker thread
		worker_thread_process(queue, thread);

		// if we're a high frequency queue, spin until done
		if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ)
		{
			// spin until we're done
			begin_timing(thread->spintime);
			while (queue->items != 0 && osd_ticks() < stopspin)
				YieldProcessor();
			end_timing(thread->spintime);

			begin_timing(thread->waittime);
			return (queue->items == 0);
		}
		begin_timing(thread->waittime);
	}

	// reset our done event and double-check the items before waiting
	ResetEvent(queue->doneevent);
	interlocked_exchange32(&queue->waiting, TRUE);
	if (queue->items != 0)
		WaitForSingleObject(queue->doneevent, timeout * 1000 / osd_ticks_per_second());
	interlocked_exchange32(&queue->waiting, FALSE);

	// return TRUE if we actually hit 0
	return (queue->items == 0);
}
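
Note that osd_work_queue_wait takes its timeout in osd_ticks, while WaitForSingleObject in Example #5 expects milliseconds, hence the timeout * 1000 / osd_ticks_per_second() conversion. A tiny helper can make that intent explicit; ticks_to_milliseconds is an illustrative name, not part of the snippets.

// Convert an osd_ticks_t duration into the millisecond count expected by
// WaitForSingleObject (illustrative helper, not taken from the code above).
static DWORD ticks_to_milliseconds(osd_ticks_t duration)
{
	return (DWORD)(duration * 1000 / osd_ticks_per_second());
}
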
Example #6
int osd_work_queue_wait(osd_work_queue *queue, osd_ticks_t timeout)
{
	// if no threads, no waiting
	if (queue->threads == 0)
		return TRUE;

	// if no items, we're done
	if (queue->items == 0)
		return TRUE;

	// if this is a multi queue, help out rather than doing nothing
	if (queue->flags & WORK_QUEUE_FLAG_MULTI)
	{
		work_thread_info *thread = queue->thread[queue->threads];

		end_timing(thread->waittime);

		// process what we can as a worker thread
		worker_thread_process(queue, thread);

		// if we're a high frequency queue, spin until done
		if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->items != 0)
		{
			// spin until we're done
			begin_timing(thread->spintime);
			spin_while_not<std::atomic<int>,int>(&queue->items, 0, timeout);
			end_timing(thread->spintime);

			begin_timing(thread->waittime);
			return (queue->items == 0);
		}
		begin_timing(thread->waittime);
	}

	// reset our done event and double-check the items before waiting
	queue->doneevent.reset();
	queue->waiting = TRUE;
	if (queue->items != 0)
		queue->doneevent.wait(timeout);
	queue->waiting = FALSE;

	// return TRUE if we actually hit 0
	return (queue->items == 0);
}
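
Example #6 relies on a spin_while_not counterpart, again not shown in the snippets. Mirroring the spin_while sketch given after Example #3, it would poll until the value becomes equal to the target (here, until queue->items reaches zero) or the timeout expires; this is a reconstruction under the same assumptions.

// Counterpart of the spin_while sketch: poll until *atom becomes equal to
// val or the timeout expires. Example #6 uses it to wait for queue->items
// to drop to zero. Reconstruction only, not the real code.
template <typename AtomType, typename MainType>
static void spin_while_not(const volatile AtomType *atom, const MainType val, const osd_ticks_t timeout)
{
	osd_ticks_t stopspin = osd_ticks() + timeout;

	do
	{
		int spin = 10000;
		while (--spin && *atom != val)
			osd_yield_processor();
	} while (*atom != val && osd_ticks() < stopspin);
}
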
Example #7
static void *worker_thread_entry(void *param)
{
	work_thread_info *thread = (work_thread_info *)param;
	osd_work_queue *queue = thread->queue;

#if defined(SDLMAME_MACOSX)
	void *arp = NewAutoreleasePool();
#endif

	// loop until we exit
	for ( ;; )
	{
		// block waiting for work or exit
		// bail on exit, and only wait if there are no pending items in queue
		if (queue->exiting)
			break;

		if (!queue_has_list_items(queue))
		{
			begin_timing(thread->waittime);
			osd_event_wait(thread->wakeevent, OSD_EVENT_WAIT_INFINITE);
			end_timing(thread->waittime);
		}

		if (queue->exiting)
			break;

		// indicate that we are live
		atomic_exchange32(&thread->active, TRUE);
		atomic_increment32(&queue->livethreads);

		// process work items
		for ( ;; )
		{
			// process as much as we can
			worker_thread_process(queue, thread);

			// if we're a high frequency queue, spin for a while before giving up
			if (queue->flags & WORK_QUEUE_FLAG_HIGH_FREQ && queue->list == NULL)
			{
				// spin for a while looking for more work
				begin_timing(thread->spintime);
				spin_while(&queue->list, (osd_work_item *)NULL, SPIN_LOOP_TIME);
				end_timing(thread->spintime);
			}

			// if nothing more, release the processor
			if (!queue_has_list_items(queue))
				break;
			add_to_stat(&queue->spinloops, 1);
		}

		// decrement the live thread count
		atomic_exchange32(&thread->active, FALSE);
		atomic_decrement32(&queue->livethreads);
	}

#if defined(SDLMAME_MACOSX)
	ReleaseAutoreleasePool(arp);
#endif

	return NULL;
}
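
Examples #3 and #7 also use a queue_has_list_items helper that does not appear in these snippets. Judging from its call sites, it reports whether any work items are still linked on the queue's list; the sketch below shows one way it could look for the C++ variant (Example #9 shows that queue->lock is a std::mutex and queue->list is an atomic pointer), and is an assumption rather than the actual implementation.

// Sketch of queue_has_list_items as used in Examples #3 and #7: report
// whether any work is still linked on the queue's list, taking the queue
// lock so the check is consistent with concurrent enqueues. Assumed, not
// taken from the snippets above.
static bool queue_has_list_items(osd_work_queue *queue)
{
	std::lock_guard<std::mutex> lock(queue->lock);
	return queue->list.load(std::memory_order_relaxed) != nullptr;
}
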
Example #8
osd_work_item *osd_work_item_queue_multiple(osd_work_queue *queue, osd_work_callback callback, INT32 numitems, void *parambase, INT32 paramstep, UINT32 flags)
{
	osd_work_item *itemlist = NULL, *lastitem = NULL;
	osd_work_item **item_tailptr = &itemlist;
	INT32 lockslot;
	int itemnum;

	// loop over items, building up a local list of work
	for (itemnum = 0; itemnum < numitems; itemnum++)
	{
		osd_work_item *item;

		// first allocate a new work item; try the free list first
		INT32 lockslot = osd_scalable_lock_acquire(queue->lock);
		do
		{
			item = (osd_work_item *)queue->free;
		} while (item != NULL && compare_exchange_ptr((PVOID volatile *)&queue->free, item, item->next) != item);
		osd_scalable_lock_release(queue->lock, lockslot);

		// if nothing, allocate something new
		if (item == NULL)
		{
			// allocate the item
			item = (osd_work_item *)osd_malloc(sizeof(*item));
			if (item == NULL)
				return NULL;
			item->event = NULL;
			item->queue = queue;
			item->done = FALSE;
		}
		else
		{
			atomic_exchange32(&item->done, FALSE); // needs to be set this way to prevent data race/usage of uninitialized memory on Linux
		}

		// fill in the basics
		item->next = NULL;
		item->callback = callback;
		item->param = parambase;
		item->result = NULL;
		item->flags = flags;

		// advance to the next
		lastitem = item;
		*item_tailptr = item;
		item_tailptr = &item->next;
		parambase = (UINT8 *)parambase + paramstep;
	}

	// enqueue the whole thing within the critical section
	lockslot = osd_scalable_lock_acquire(queue->lock);
	*queue->tailptr = itemlist;
	queue->tailptr = item_tailptr;
	osd_scalable_lock_release(queue->lock, lockslot);

	// increment the number of items in the queue
	atomic_add32(&queue->items, numitems);
	add_to_stat(&queue->itemsqueued, numitems);

	// look for free threads to do the work
	if (queue->livethreads < queue->threads)
	{
		int threadnum;

		// iterate over all the threads
		for (threadnum = 0; threadnum < queue->threads; threadnum++)
		{
			work_thread_info *thread = &queue->thread[threadnum];

			// if this thread is not active, wake him up
			if (!thread->active)
			{
				osd_event_set(thread->wakeevent);
				add_to_stat(&queue->setevents, 1);

				// for non-shared, the first one we find is good enough
				if (--numitems == 0)
					break;
			}
		}
	}

	// if no threads, run the queue now on this thread
	if (queue->threads == 0)
	{
		end_timing(queue->thread[0].waittime);
		worker_thread_process(queue, &queue->thread[0]);
		begin_timing(queue->thread[0].waittime);
	}
	// only return the item if it won't get released automatically
	return (flags & WORK_ITEM_FLAG_AUTO_RELEASE) ? NULL : lastitem;
}
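
Example #8 only shows the pop side of the lock-free free list (reusing previously released items). For context, the matching push, as it might appear in a release path, is sketched below in the same compare_exchange_ptr style; this is a reconstruction for illustration, not code from these snippets, and a real release function would also have to deal with the item's event and done flag.

// Push a finished item back onto queue->free so a later call to
// osd_work_item_queue_multiple can reuse it. Reconstructed in the style of
// Example #8; illustrative only.
static void free_list_push(osd_work_queue *queue, osd_work_item *item)
{
	osd_work_item *next;
	do
	{
		next = (osd_work_item *)queue->free;
		item->next = next;
	} while (compare_exchange_ptr((PVOID volatile *)&queue->free, next, item) != next);
}
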
Example #9
osd_work_item *osd_work_item_queue_multiple(osd_work_queue *queue, osd_work_callback callback, INT32 numitems, void *parambase, INT32 paramstep, UINT32 flags)
{
	osd_work_item *itemlist = nullptr, *lastitem = nullptr;
	osd_work_item **item_tailptr = &itemlist;
	int itemnum;

	// loop over items, building up a local list of work
	for (itemnum = 0; itemnum < numitems; itemnum++)
	{
		osd_work_item *item;

		// first allocate a new work item; try the free list first
		{
			std::lock_guard<std::mutex> lock(queue->lock);
			do
			{
				item = (osd_work_item *)queue->free;
			} while (item != nullptr && !queue->free.compare_exchange_weak(item, item->next, std::memory_order_release, std::memory_order_relaxed));
		}

		// if nothing, allocate something new
		if (item == nullptr)
		{
			// allocate the item
			item = new osd_work_item(*queue);
			if (item == nullptr)
				return nullptr;
		}
		else
		{
			item->done = FALSE; // needs to be set this way to prevent data race/usage of uninitialized memory on Linux
		}

		// fill in the basics
		item->next = nullptr;
		item->callback = callback;
		item->param = parambase;
		item->result = nullptr;
		item->flags = flags;

		// advance to the next
		lastitem = item;
		*item_tailptr = item;
		item_tailptr = &item->next;
		parambase = (UINT8 *)parambase + paramstep;
	}

	// enqueue the whole thing within the critical section
	{
		std::lock_guard<std::mutex> lock(queue->lock);
		*queue->tailptr = itemlist;
		queue->tailptr = item_tailptr;
	}

	// increment the number of items in the queue
	queue->items += numitems;
	add_to_stat(queue->itemsqueued, numitems);

	// look for free threads to do the work
	if (queue->livethreads < queue->threads)
	{
		int threadnum;

		// iterate over all the threads
		for (threadnum = 0; threadnum < queue->threads; threadnum++)
		{
			work_thread_info *thread = queue->thread[threadnum];

			// if this thread is not active, wake him up
			if (!thread->active)
			{
				thread->wakeevent.set();
				add_to_stat(queue->setevents, 1);

				// for non-shared, the first one we find is good enough
				if (--numitems == 0)
					break;
			}
		}
	}

	// if no threads, run the queue now on this thread
	if (queue->threads == 0)
	{
		end_timing(queue->thread[0]->waittime);
		worker_thread_process(queue, queue->thread[0]);
		begin_timing(queue->thread[0]->waittime);
	}
	// only return the item if it won't get released automatically
	return (flags & WORK_ITEM_FLAG_AUTO_RELEASE) ? nullptr : lastitem;
}
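
Taken together, the snippets show the producer side of the API: queue a batch of items with osd_work_item_queue_multiple, then block (or, on a MULTI queue, help out) in osd_work_queue_wait until they complete. The sketch below illustrates that calling pattern; osd_work_queue_alloc/osd_work_queue_free and the void *callback(void *param, int threadid) signature are assumed from the surrounding OSD layer and do not appear in the snippets above.

// Illustrative caller, under the assumptions stated above: square each entry
// of an int array on the work queue and wait for the batch to finish.
static void *square_callback(void *param, int threadid)
{
	int *value = (int *)param;
	*value = *value * *value;    // the per-item work
	return nullptr;
}

static void run_batch(int *values, INT32 numitems)
{
	// assumed allocation call; flags as used throughout the snippets
	osd_work_queue *queue = osd_work_queue_alloc(WORK_QUEUE_FLAG_MULTI | WORK_QUEUE_FLAG_HIGH_FREQ);

	// WORK_ITEM_FLAG_AUTO_RELEASE makes the items free themselves, so the
	// returned item pointer (nullptr in that case) can be ignored
	osd_work_item_queue_multiple(queue, square_callback, numitems, values, sizeof(*values), WORK_ITEM_FLAG_AUTO_RELEASE);

	// wait up to one second's worth of ticks for every item to run
	osd_work_queue_wait(queue, osd_ticks_per_second());

	// assumed cleanup call
	osd_work_queue_free(queue);
}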