Example #1
File: iris-thread.c Project: crnt/iris
static void
iris_thread_worker_transient (IrisThread  *thread,
                              IrisQueue   *queue)
{
	IrisThreadWork *thread_work = NULL;
	GTimeVal        tv_timeout = {0,0};

	iris_debug (IRIS_DEBUG_THREAD);

	/* The transient-mode worker is responsible for helping finish off as
	 * many of the work items as quickly as possible.  It is not
	 * responsible for asking for more helpers, just for processing work
	 * items.  When done, it yields itself back to the scheduler manager.
	 */

	do {
		g_get_current_time (&tv_timeout);
		g_time_val_add (&tv_timeout, POP_WAIT_TIMEOUT);

		if ((thread_work = iris_queue_timed_pop (queue, &tv_timeout)) != NULL) {
			if (!VERIFY_THREAD_WORK (thread_work))
				continue;
			iris_thread_work_run (thread_work);
			iris_thread_work_free (thread_work);
		}
	} while (thread_work != NULL);

	/* Yield our thread back to the scheduler manager */
	iris_scheduler_manager_yield (thread);
}
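
The timed pop above builds an absolute deadline from the current wall-clock time. A minimal sketch of that idiom in isolation, assuming GLib's GTimeVal API as used by this fork (the POP_WAIT_TIMEOUT value below is a placeholder, not the project's actual setting):

#include <glib.h>
#include "iris-queue.h"                        /* assumed header for IrisQueue */

#define POP_WAIT_TIMEOUT (G_USEC_PER_SEC / 2)  /* hypothetical: 500 ms */

static gpointer
pop_with_deadline (IrisQueue *queue)
{
	GTimeVal tv_timeout;

	/* Absolute deadline: "now" plus the per-pop wait budget. */
	g_get_current_time (&tv_timeout);
	g_time_val_add (&tv_timeout, POP_WAIT_TIMEOUT);

	/* Returns NULL if nothing arrives before the deadline. */
	return iris_queue_timed_pop (queue, &tv_timeout);
}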
Example #2
File: iris-thread.c Project: antono/iris
static void
iris_thread_worker_transient (IrisThread  *thread,
                              IrisQueue   *queue)
{
	IrisThreadWork *thread_work = NULL;
	GTimeVal        tv_timeout = {0,0};
	gboolean        remove_work;

	iris_debug (IRIS_DEBUG_THREAD);

	/* The transient-mode worker is responsible for helping finish off as
	 * many of the work items as quickly as possible.  It is not
	 * responsible for asking for more helpers, just for processing work
	 * items.  When done, it yields itself back to the scheduler manager.
	 */

	do {
		g_get_current_time (&tv_timeout);
		g_time_val_add (&tv_timeout, POP_WAIT_TIMEOUT);

		thread_work = iris_queue_timed_pop_or_close (queue, &tv_timeout);
		if (thread_work != NULL) {
			if (!g_atomic_int_compare_and_exchange(&thread_work->taken, FALSE, TRUE)) {
				remove_work = g_atomic_int_get (&thread_work->remove);

				if (!remove_work)
					continue;
			} else
				remove_work = g_atomic_int_get (&thread_work->remove);

			if (!remove_work)
				iris_thread_work_run (thread_work);

			iris_thread_work_free (thread_work);
		}
	} while (thread_work != NULL);

	/* Remove the thread from the scheduler (unless the scheduler, being in
	 * finalization, has already removed us), then yield our thread back to
	 * the scheduler manager. */
	if (g_atomic_int_get (&thread->scheduler->in_finalize) == FALSE)
		iris_scheduler_remove_thread (thread->scheduler, thread);
	g_atomic_pointer_set (&thread->scheduler, NULL);

	iris_scheduler_manager_yield (thread);
}
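
The difference from Example #1 is the taken/remove handshake: a lock-free queue may hand the same item to two threads, so the first compare-and-exchange on 'taken' decides ownership, while 'remove' marks items cancelled via iris_scheduler_unqueue(). A sketch of just that decision (the enum and helper name are illustrative, not from the source):

typedef enum {
	WORK_RUN,   /* we own the item and it is still wanted: execute it */
	WORK_FREE,  /* the item was cancelled and we get to reap it       */
	WORK_SKIP   /* another thread owns it: pop the next item instead  */
} WorkAction;

static WorkAction
claim_work (IrisThreadWork *thread_work)
{
	if (!g_atomic_int_compare_and_exchange (&thread_work->taken, FALSE, TRUE)) {
		/* Lost the ownership race.  If the item was also marked for
		 * removal we lost to iris_scheduler_unqueue() and must free
		 * it; otherwise another worker is already running it. */
		return g_atomic_int_get (&thread_work->remove) ? WORK_FREE : WORK_SKIP;
	}

	/* We own the item; 'remove' is still honoured when set. */
	return g_atomic_int_get (&thread_work->remove) ? WORK_FREE : WORK_RUN;
}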
Example #3
static gboolean
iris_scheduler_foreach_rrobin_cb (IrisRRobin *rrobin,
                                  gpointer    data,
                                  gpointer    user_data)
{
	IrisQueue                    *queue   = data;
	IrisSchedulerForeachClosure  *closure = user_data;
	gboolean continue_flag = TRUE;
	gint i;

	/* Iterate the queue in a really hacky way. FIXME: be neater,
	 * make sure the order of work is preserved, and avoid calling
	 * iris_queue_get_length() on every iteration! */
	for (i=0; i<iris_queue_get_length(queue); i++) {
		IrisThreadWork *thread_work = iris_queue_try_pop (queue);
		/* By removing the work from the queue, we now know it can't be executed */

		if (!thread_work)
			break;

		continue_flag = closure->callback (closure->scheduler,
		                                   thread_work,
		                                   thread_work->callback,
		                                   thread_work->data,
		                                   closure->user_data);

		if (g_atomic_int_get (&thread_work->remove) == FALSE)
			iris_queue_push (queue, thread_work);
		else
			iris_thread_work_free (thread_work);

		if (!continue_flag)
			break;
	}

	return continue_flag;
}
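
The FIXME above is mostly about calling the length function on every loop iteration. A sketch of the same drain-and-requeue idiom with the length snapshotted once (the function and callback names are illustrative; note that re-pushing moves surviving items to the tail, so relative order is only restored after a complete pass):

static void
foreach_requeue (IrisQueue *queue,
                 gboolean (*visit) (IrisThreadWork *work, gpointer user_data),
                 gpointer   user_data)
{
	guint i, length = iris_queue_get_length (queue);   /* snapshot once */

	for (i = 0; i < length; i++) {
		IrisThreadWork *work = iris_queue_try_pop (queue);
		gboolean        keep_going;

		if (work == NULL)
			break;                         /* queue drained early */

		keep_going = visit (work, user_data);

		if (g_atomic_int_get (&work->remove))
			iris_thread_work_free (work);  /* cancelled: drop it  */
		else
			iris_queue_push (queue, work); /* requeue at the tail */

		if (!keep_going)
			break;
	}
}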
Example #4
File: iris-thread.c Project: crnt/iris
static void
iris_thread_worker_exclusive (IrisThread  *thread,
                              IrisQueue   *queue,
                              gboolean     leader)
{
	GTimeVal        tv_now      = {0,0};
	GTimeVal        tv_req      = {0,0};
	IrisThreadWork *thread_work = NULL;
	gint            per_quanta = 0;      /* Completed items within the
	                                      * last quantum. */
	guint           queued      = 0;     /* Items left in the queue at
	                                      * the last length check. */
	gboolean        has_resized = FALSE;

	iris_debug (IRIS_DEBUG_THREAD);

	g_get_current_time (&tv_now);
	g_get_current_time (&tv_req);
	queued = iris_queue_length (queue);

	/* Since our thread is in exclusive mode, we are responsible for
	 * asking the scheduler manager to add or remove threads based
	 * on the demand of our work queue.
	 *
	 * If the scheduler has maxed out the number of threads it is
	 * allowed, then we will not ask the scheduler to add more
	 * threads and rebalance.
	 */

get_next_item:

	if (G_LIKELY ((thread_work = iris_queue_pop (queue)) != NULL)) {
		if (!VERIFY_THREAD_WORK (thread_work))
			goto get_next_item;

		iris_thread_work_run (thread_work);
		iris_thread_work_free (thread_work);
		per_quanta++;
	}
	else {
#if 0
		g_warning ("Exclusive thread is done managing, received NULL");
#endif
		return;
	}

	if (G_UNLIKELY (!thread->scheduler->maxed && leader)) {
		g_get_current_time (&tv_now);

		if (G_UNLIKELY (timeout_elapsed (&tv_now, &tv_req))) {
			/* Check whether we have a lot more work to do, or
			 * whether we have hit an edge case where we process
			 * at about the same speed as the pusher but there is
			 * enough contention that we don't speed up.  This
			 * happens because some schedulers round-robin or
			 * steal, so even when the queue looks empty we know
			 * more work is coming and should consider adding
			 * another thread.
			 */
			queued = iris_queue_length (queue);
			if (queued == 0 && !has_resized) {
				queued = per_quanta * 2;
				has_resized = TRUE;
			}

			if (per_quanta < queued) {
				/* make sure we are not maxed before asking */
				if (!g_atomic_int_get (&thread->scheduler->maxed))
					iris_scheduler_manager_request (thread->scheduler,
									per_quanta,
									queued);
			}

			per_quanta = 0;
			tv_req = tv_now;
			g_time_val_add (&tv_req, QUANTUM_USECS);
		}
	}

	goto get_next_item;
}
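
The leader's once-per-quantum rebalancing test above can be read as a small pure decision. A sketch of it in isolation, assuming only what the example shows (the helper name is illustrative; the 'maxed' check and QUANTUM_USECS bookkeeping stay with the caller):

static gboolean
should_request_help (gint      per_quanta,  /* items finished this quantum */
                     guint     queued,      /* backlog at the deadline     */
                     gboolean *has_resized) /* one-shot empty-queue boost  */
{
	/* An empty queue is treated, once, as "twice our throughput" so
	 * that a pusher keeping exact pace with us can still trigger an
	 * extra thread even though the queue never grows. */
	if (queued == 0 && !*has_resized) {
		queued = per_quanta * 2;
		*has_resized = TRUE;
	}

	/* Ask for help only when the backlog outruns what we completed. */
	return per_quanta < (gint) queued;
}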
Example #5
File: iris-thread.c Project: antono/iris
static void
iris_thread_worker_exclusive (IrisThread  *thread,
                              IrisQueue   *queue,
                              gboolean     leader)
{
	GTimeVal        tv_now      = {0,0};
	GTimeVal        tv_req      = {0,0};
	IrisThreadWork *thread_work = NULL;
	gint            per_quanta = 0;      /* Completed items within the
	                                      * last quantum. */
	guint           queued      = 0;     /* Items left in the queue at
	                                      * the last length check. */
	gboolean        has_resized = FALSE;
	gboolean        remove_work;

	iris_debug (IRIS_DEBUG_THREAD);

	g_get_current_time (&tv_now);
	g_get_current_time (&tv_req);
	queued = iris_queue_get_length (queue);

	/* Since our thread is in exclusive mode, we are responsible for
	 * asking the scheduler manager to add or remove threads based
	 * on the demand of our work queue.
	 *
	 * If the scheduler has maxed out the number of threads it is
	 * allowed, then we will not ask the scheduler to add more
	 * threads and rebalance.
	 */

get_next_item:

	if (G_LIKELY ((thread_work = iris_queue_pop (queue)) != NULL)) {
		if (!g_atomic_int_compare_and_exchange(&thread_work->taken, FALSE, TRUE)) {
			remove_work = g_atomic_int_get (&thread_work->remove);

			if (!remove_work)
				/* We lost a race with another thread (remember, a
				 * lock-free queue may pop the same item twice).
				 */
				goto get_next_item;
			/* else: We lost a race with iris_scheduler_unqueue() */
		} else
			/* We won the race; 'remove' is still honoured when set. */
			remove_work = g_atomic_int_get (&thread_work->remove);

		if (!remove_work) {
			iris_thread_work_run (thread_work);
			per_quanta++;
		}

		iris_thread_work_free (thread_work);
	}
	else {
		/* Queue is closed, so scheduler is finalizing. The scheduler will be
		 * waiting until we set thread->scheduler to NULL.
		 */
		g_atomic_pointer_set (&thread->scheduler, NULL);
		iris_scheduler_manager_yield (thread);
		return;
	}

	if (remove_work)
		goto get_next_item;

	if (G_UNLIKELY (!thread->scheduler->maxed && leader)) {
		g_get_current_time (&tv_now);

		if (G_UNLIKELY (timeout_elapsed (&tv_now, &tv_req))) {
			/* Check whether we have a lot more work to do, or
			 * whether we have hit an edge case where we process
			 * at about the same speed as the pusher but there is
			 * enough contention that we don't speed up.  This
			 * happens because some schedulers round-robin or
			 * steal, so even when the queue looks empty we know
			 * more work is coming and should consider adding
			 * another thread.
			 */
			queued = iris_queue_get_length (queue);
			if (queued == 0 && !has_resized) {
				queued = per_quanta * 2;
				has_resized = TRUE;
			}

			if (per_quanta < queued) {
				/* make sure we are not maxed before asking */
				if (!g_atomic_int_get (&thread->scheduler->maxed))
					iris_scheduler_manager_request (thread->scheduler,
									per_quanta,
									queued);
			}

			per_quanta = 0;
			tv_req = tv_now;
			g_time_val_add (&tv_req, QUANTUM_USECS);
		}
	}

	goto get_next_item;
}
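
When iris_queue_pop() returns NULL here the queue has been closed by a finalizing scheduler, and clearing thread->scheduler is the worker's last touch of that scheduler. The waiting side is not shown in these examples; a sketch of how it could look (an assumption, not taken from the source):

/* Assumed waiter: the finalizing scheduler polls until the worker has
 * published its exit by storing NULL, after which the scheduler's memory
 * can safely be torn down. */
static void
wait_for_worker_exit (IrisThread *thread)
{
	while (g_atomic_pointer_get (&thread->scheduler) != NULL)
		g_usleep (100);   /* back off briefly between checks */
}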