static int ws_push_task(struct starpu_task *task)
{
	unsigned sched_ctx_id = task->sched_ctx;
	struct _starpu_work_stealing_data *ws = (struct _starpu_work_stealing_data*)starpu_sched_ctx_get_policy_data(sched_ctx_id);

	struct _starpu_deque_jobq *deque_queue;
	struct _starpu_job *j = _starpu_get_job_associated_to_task(task);
	int workerid = starpu_worker_get_id();

	unsigned worker = 0;
	struct starpu_worker_collection *workers = starpu_sched_ctx_get_worker_collection(sched_ctx_id);
	struct starpu_sched_ctx_iterator it;
	
	workers->init_iterator(workers, &it);
	/* !! It is silly to lock them all! */
	while(workers->has_next(workers, &it))
	{
		worker = workers->get_next(workers, &it);
		starpu_pthread_mutex_t *sched_mutex;
		starpu_pthread_cond_t *sched_cond;
		starpu_worker_get_sched_condition(worker, &sched_mutex, &sched_cond);
		STARPU_PTHREAD_MUTEX_LOCK(sched_mutex);
	}
	
	
	/* If the current thread is not a worker but
	 * the main thread (-1), we find the best worker to
	 * put the task on its queue */
	if (workerid == -1)
		workerid = select_worker(sched_ctx_id);

	deque_queue = ws->queue_array[workerid];

#ifdef HAVE_AYUDAME_H
	if (AYU_event)
	{
		intptr_t id = workerid;
		AYU_event(AYU_ADDTASKTOQUEUE, j->job_id, &id);
	}
#endif
	_starpu_job_list_push_back(&deque_queue->jobq, j);
	deque_queue->njobs++;
	starpu_push_task_end(task);

	/* Release the workers' locks and wake them up now that the task is
	 * queued; the iterator must be reinitialized since the locking loop
	 * above exhausted it */
	workers->init_iterator(workers, &it);
	while(workers->has_next(workers, &it))
	{
		worker = workers->get_next(workers, &it);
		starpu_pthread_mutex_t *sched_mutex;
		starpu_pthread_cond_t *sched_cond;
		starpu_worker_get_sched_condition(worker, &sched_mutex, &sched_cond);
#ifndef STARPU_NON_BLOCKING_DRIVERS
		STARPU_PTHREAD_COND_SIGNAL(sched_cond);
#endif
		STARPU_PTHREAD_MUTEX_UNLOCK(sched_mutex);
	}
		
	return 0;
}
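For context, a push hook such as ws_push_task is installed through a struct starpu_sched_policy descriptor. Below is a minimal sketch of the usual wiring; the companion hook names (initialize_ws_policy, deinitialize_ws_policy, ws_pop_task) are illustrative placeholders, not taken from this excerpt.

/* hypothetical companion hooks, defined elsewhere in the policy file */
static void initialize_ws_policy(unsigned sched_ctx_id);
static void deinitialize_ws_policy(unsigned sched_ctx_id);
static struct starpu_task *ws_pop_task(unsigned sched_ctx_id);

static struct starpu_sched_policy ws_policy =
{
	.init_sched = initialize_ws_policy,
	.deinit_sched = deinitialize_ws_policy,
	.push_task = ws_push_task,   /* the function above */
	.pop_task = ws_pop_task,
	.policy_name = "ws",
	.policy_description = "work stealing"
};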
Example #2
void starpu_tag_remove(starpu_tag_t id)
{
	struct _starpu_tag_table *entry;

#ifdef HAVE_AYUDAME_H
	if (AYU_event)
		AYU_event(AYU_REMOVETASK, id + AYUDAME_OFFSET, NULL);
#endif

	STARPU_PTHREAD_RWLOCK_WRLOCK(&tag_global_rwlock);

	HASH_FIND_UINT64_T(tag_htbl, &id, entry);
	if (entry) HASH_DEL(tag_htbl, entry);

	STARPU_PTHREAD_RWLOCK_UNLOCK(&tag_global_rwlock);

	if (entry)
	{
		_starpu_tag_free(entry->tag);
		free(entry);
	}
}
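As a usage sketch (the tag values and the no-op codelet are invented for illustration), the public tag API pairs with starpu_tag_remove() as follows: tag entries stay in the hash table until they are explicitly removed.

#include <starpu.h>

static void noop(void *buffers[], void *cl_arg)
{
	(void)buffers; (void)cl_arg;
}

static struct starpu_codelet noop_cl =
{
	.cpu_funcs = {noop},
	.nbuffers = 0,
};

#define TAG_INIT ((starpu_tag_t)1)
#define TAG_DONE ((starpu_tag_t)2)

int main(void)
{
	if (starpu_init(NULL) != 0)
		return 1;

	/* the task carrying TAG_DONE may not start before TAG_INIT fires */
	starpu_tag_declare_deps(TAG_DONE, 1, TAG_INIT);

	struct starpu_task *task = starpu_task_create();
	task->cl = &noop_cl;
	task->use_tag = 1;
	task->tag_id = TAG_DONE;
	starpu_task_submit(task);

	starpu_tag_notify_from_apps(TAG_INIT); /* release the dependency */
	starpu_tag_wait(TAG_DONE);             /* block until the task ends */

	/* tag entries persist until explicitly removed */
	starpu_tag_remove(TAG_INIT);
	starpu_tag_remove(TAG_DONE);

	starpu_shutdown();
	return 0;
}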
Example #3
/* task depends on the tasks in the task array */
void _starpu_task_declare_deps_array(struct starpu_task *task, unsigned ndeps, struct starpu_task *task_array[], int check)
{
	if (ndeps == 0)
		return;

	struct _starpu_job *job;

	job = _starpu_get_job_associated_to_task(task);

	STARPU_PTHREAD_MUTEX_LOCK(&job->sync_mutex);
	if (check)
		STARPU_ASSERT_MSG(
				!job->submitted || !task->destroy || task->detach
#ifdef STARPU_OPENMP
				|| job->continuation
#endif
				, "Task dependencies have to be set before submission (submitted %u destroy %d detach %d)", job->submitted, task->destroy, task->detach);
	else
		STARPU_ASSERT_MSG(job->terminated <= 1, "Task dependencies have to be set before termination (terminated %u)", job->terminated);

	struct _starpu_cg *cg = create_cg_task(ndeps, job);
	STARPU_PTHREAD_MUTEX_UNLOCK(&job->sync_mutex);

	unsigned i;
	for (i = 0; i < ndeps; i++)
	{
		struct starpu_task *dep_task = task_array[i];

		struct _starpu_job *dep_job;
		struct _starpu_cg *back_cg = NULL;

		dep_job = _starpu_get_job_associated_to_task(dep_task);

		STARPU_ASSERT_MSG(dep_job != job, "A task must not depend on itself.");
		STARPU_PTHREAD_MUTEX_LOCK(&dep_job->sync_mutex);
		if (check)
		{
			STARPU_ASSERT_MSG(!dep_job->submitted || !dep_job->task->destroy || dep_job->task->detach, "Unless the task is not to be destroyed automatically, its dependencies have to be set before submission");
			STARPU_ASSERT_MSG(dep_job->submitted != 2, "For resubmitted tasks, dependencies have to be set before the first re-submission");
			STARPU_ASSERT_MSG(!dep_job->submitted || !dep_job->task->regenerate, "For regenerated tasks, dependencies have to be set before the first submission");
		}
		else
			STARPU_ASSERT_MSG(dep_job->terminated <= 1, "Task dependencies have to be set before termination (terminated %u)", dep_job->terminated);
		if (dep_job->task->regenerate)
		{
			/* Make sure we don't regenerate the dependency before this task is finished */
			back_cg = create_cg_task(1, dep_job);
			/* Just do not take that dependency into account for the first submission */
			dep_job->job_successors.ndeps_completed++;
		}
		STARPU_PTHREAD_MUTEX_UNLOCK(&dep_job->sync_mutex);

		_STARPU_TRACE_TASK_DEPS(dep_job, job);
		_starpu_bound_task_dep(job, dep_job);
#ifdef HAVE_AYUDAME_H
		if (AYU_event && check)
		{
			uintptr_t AYU_data[3] = {dep_job->job_id, 0, 0};
			AYU_event(AYU_ADDDEPENDENCY, job->job_id, AYU_data);
		}
#endif

		_starpu_task_add_succ(dep_job, cg);
		if (dep_job->task->regenerate)
			_starpu_task_add_succ(job, back_cg);
	}
}
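The application-facing counterpart of this helper is starpu_task_declare_deps_array(). A minimal sketch, assuming taskA, taskB and taskC were created elsewhere with starpu_task_create():

#include <starpu.h>

void chain_example(struct starpu_task *taskA, struct starpu_task *taskB,
		   struct starpu_task *taskC)
{
	struct starpu_task *deps[2] = {taskA, taskB};

	/* taskC will not become ready before taskA and taskB terminate;
	 * as asserted above, this must happen before taskC is submitted */
	starpu_task_declare_deps_array(taskC, 2, deps);

	starpu_task_submit(taskA);
	starpu_task_submit(taskB);
	starpu_task_submit(taskC);
}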
int _starpu_repush_task(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	unsigned nworkers = 0;
	int ret;

	_STARPU_LOG_IN();

	unsigned can_push = _starpu_increment_nready_tasks_of_sched_ctx(task->sched_ctx, task->flops, task);
	task->status = STARPU_TASK_READY;

#ifdef HAVE_AYUDAME_H
	if (AYU_event)
	{
		intptr_t id = -1;
		AYU_event(AYU_ADDTASKTOQUEUE, j->job_id, &id);
	}
#endif
	/* if the context does not have any workers, save the task in a temporary list */
	if(!sched_ctx->is_initial_sched)
	{
		/* if no worker in the ctx is able to execute the task,
		   we consider the ctx empty */
		nworkers = _starpu_nworkers_able_to_execute_task(task, sched_ctx);

		if(nworkers == 0)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->empty_ctx_mutex);
			starpu_task_list_push_front(&sched_ctx->empty_ctx_tasks, task);
			STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);
#ifdef STARPU_USE_SC_HYPERVISOR
			if(sched_ctx->id != 0 && sched_ctx->perf_counters != NULL
			   && sched_ctx->perf_counters->notify_empty_ctx)
			{
				_STARPU_TRACE_HYPERVISOR_BEGIN();
				sched_ctx->perf_counters->notify_empty_ctx(sched_ctx->id, task);
				_STARPU_TRACE_HYPERVISOR_END();
			}
#endif
			return 0;
		}

	}

	if(!can_push)
		return 0;
	/* if no codelet is associated with the task (i.e. it is a control
	 * task), we directly execute its callback and enforce the
	 * corresponding dependencies */
	if (task->cl == NULL || task->cl->where == STARPU_NOWHERE)
	{
		if (task->prologue_callback_pop_func)
			task->prologue_callback_pop_func(task->prologue_callback_pop_arg);

		if (task->cl && task->cl->specific_nodes)
		{
			/* Nothing to do, but we are asked to fetch data on some memory nodes */
			_starpu_fetch_nowhere_task_input(j);
		}
		else
		{
			if (task->cl)
				__starpu_push_task_output(j);
			_starpu_handle_job_termination(j);
			_STARPU_LOG_OUT_TAG("handle_job_termination");
		}
		return 0;
	}

	ret = _starpu_push_task_to_workers(task);
	if (ret == -EAGAIN)
		/* pushed to empty context, that's fine */
		ret = 0;
	return ret;
}
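The task->cl == NULL branch above is what makes empty "control" tasks usable as synchronization points: no kernel runs, StarPU only enforces the dependencies and termination handling. A minimal sketch, assuming prior_tasks holds n already-created tasks:

#include <starpu.h>

void sync_point(struct starpu_task *prior_tasks[], unsigned n)
{
	struct starpu_task *sync = starpu_task_create();
	sync->cl = NULL;   /* control task: no codelet to execute */
	sync->detach = 0;  /* keep it joinable so we can wait on it */

	starpu_task_declare_deps_array(sync, n, prior_tasks);
	starpu_task_submit(sync);

	/* returns once every task in prior_tasks has terminated */
	starpu_task_wait(sync);
}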