Example #1
/* the generic interface that calls the proper underlying implementation */
int _starpu_push_task(starpu_job_t j, unsigned job_is_already_locked)
{
	struct starpu_task *task = j->task;

	task->status = STARPU_TASK_READY;

	/* in case there is no codelet associated to the task (that's a control
	 * task), we directly execute its callback and enforce the
	 * corresponding dependencies */
	if (task->cl == NULL)
	{
		_starpu_handle_job_termination(j, job_is_already_locked);
		return 0;
	}

	if (STARPU_UNLIKELY(task->execute_on_a_specific_worker))
	{
		unsigned workerid = task->workerid;
		struct starpu_worker_s *worker = _starpu_get_worker_struct(workerid);
		
		if (use_prefetch)
		{
			uint32_t memory_node = starpu_worker_get_memory_node(workerid); 
			_starpu_prefetch_task_input_on_node(task, memory_node);
		}

		return _starpu_push_local_task(worker, j);
	}
	else {
		STARPU_ASSERT(policy.push_task);

		return policy.push_task(task);
	}
}
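A minimal caller-side sketch of the "specific worker" branch above, assuming the public StarPU task API (starpu_task_create()/starpu_task_submit(); older trees may spell these differently); the codelet my_cl is hypothetical and only illustrates the fields read by _starpu_push_task():

	struct starpu_task *task = starpu_task_create();
	task->cl = &my_cl;                       /* hypothetical codelet defined elsewhere */
	task->execute_on_a_specific_worker = 1;  /* bypass policy.push_task() */
	task->workerid = 0;                      /* the task goes to worker 0's local queue */
	starpu_task_submit(task);                /* eventually reaches _starpu_push_task() */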
Example #2
static void gordon_callback_list_func(void *arg)
{
	struct gordon_task_wrapper_s *task_wrapper = arg; 
	struct starpu_job_list_s *wrapper_list; 

	/* we don't know which worker will execute that codelet, so we actually defer
	 * the execution of the StarPU codelet and the job termination until later */
	struct starpu_worker_s *worker = task_wrapper->worker;
	STARPU_ASSERT(worker);

	wrapper_list = task_wrapper->list;

	task_wrapper->terminated = 1;

//	_STARPU_DEBUG("gordon callback : push job j %p\n", task_wrapper->j);

	unsigned task_cnt = 0;

	/* XXX 0 was hardcoded */
	while (!starpu_job_list_empty(wrapper_list))
	{
		starpu_job_t j = starpu_job_list_pop_back(wrapper_list);

		struct gordon_ppu_job_s * gordon_task = &task_wrapper->gordon_job[task_cnt];
		struct starpu_perfmodel_t *model = j->task->cl->model;
		if (model && model->benchmarking)
		{
			double measured = (double)gordon_task->measured;
			unsigned cpuid = 0; /* XXX */

			_starpu_update_perfmodel_history(j, STARPU_GORDON_DEFAULT, cpuid, measured);
		}

		_starpu_push_task_output(j->task, 0);
		_starpu_handle_job_termination(j, 0);
		//starpu_wake_all_blocked_workers();

		task_cnt++;
	}

	/* the job list was allocated by the gordon driver itself */
	starpu_job_list_delete(wrapper_list);

	starpu_wake_all_blocked_workers();
	free(task_wrapper->gordon_job);
	free(task_wrapper);
}
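The model && model->benchmarking branch above is only taken when the codelet carries a performance model in benchmarking mode. A hedged sketch of such a model, reusing the struct starpu_perfmodel_t type visible in the example; the symbol string is hypothetical and field names may differ between StarPU versions:

	static struct starpu_perfmodel_t my_model =
	{
		.type = STARPU_HISTORY_BASED,   /* keep a history of measured execution times */
		.symbol = "my_gordon_kernel"    /* hypothetical symbol naming that history */
	};
	/* assigning &my_model to a codelet's model field makes the callback above
	 * record the measured SPU time via _starpu_update_perfmodel_history() */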
Example #3
File: cpu.c Project: alucas/StarPU
void *_starpu_cpu_worker(void *arg)
{
   struct starpu_worker_s *cpu_arg = arg;
   unsigned memnode = cpu_arg->memory_node;
   int workerid = cpu_arg->workerid;
   int devid = cpu_arg->devid;

#ifdef STARPU_USE_FXT
   _starpu_fxt_register_thread(cpu_arg->bindid);
#endif
   STARPU_TRACE_WORKER_INIT_START(STARPU_FUT_CPU_KEY, devid, memnode);

   _starpu_bind_thread_on_cpu(cpu_arg->config, cpu_arg->bindid);

   _STARPU_DEBUG("cpu worker %d is ready on logical cpu %d\n", devid, cpu_arg->bindid);

   _starpu_set_local_memory_node_key(&memnode);

   _starpu_set_local_worker_key(cpu_arg);

   snprintf(cpu_arg->name, 32, "CPU %d", devid);

   cpu_arg->status = STATUS_UNKNOWN;

   STARPU_TRACE_WORKER_INIT_END

   /* tell the main thread that we are ready */
   PTHREAD_MUTEX_LOCK(&cpu_arg->mutex);
   cpu_arg->worker_is_initialized = 1;
   PTHREAD_COND_SIGNAL(&cpu_arg->ready_cond);
   PTHREAD_MUTEX_UNLOCK(&cpu_arg->mutex);

   starpu_job_t j;
   int res;

   while (_starpu_machine_is_running())
   {
      STARPU_TRACE_START_PROGRESS(memnode);
      _starpu_datawizard_progress(memnode, 1);
      STARPU_TRACE_END_PROGRESS(memnode);

      _starpu_execute_registered_progression_hooks();

      PTHREAD_MUTEX_LOCK(cpu_arg->sched_mutex);

      /* perhaps there is some local task to be executed first */
      j = _starpu_pop_local_task(cpu_arg);

      /* otherwise ask a task to the scheduler */
      if (!j)
      {
         struct starpu_task *task = _starpu_pop_task();
         if (task)
            j = _starpu_get_job_associated_to_task(task);
      }

      if (j == NULL) 
      {
         if (_starpu_worker_can_block(memnode))
            _starpu_block_worker(workerid, cpu_arg->sched_cond, cpu_arg->sched_mutex);

         PTHREAD_MUTEX_UNLOCK(cpu_arg->sched_mutex);

         continue;
      }

      PTHREAD_MUTEX_UNLOCK(cpu_arg->sched_mutex);

      /* can a CPU perform that task? */
      if (!STARPU_CPU_MAY_PERFORM(j)) 
      {
         /* put it at the end of the queue ... XXX */
         _starpu_push_task(j, 0);
         continue;
      }

      _starpu_set_current_task(j->task);

      res = execute_job_on_cpu(j, cpu_arg);

      _starpu_set_current_task(NULL);

      if (res) {
         switch (res) {
            case -EAGAIN:
               _starpu_push_task(j, 0);
               continue;
            default: 
               assert(0);
         }
      }

      _starpu_handle_job_termination(j, 0);
   }

   STARPU_TRACE_WORKER_DEINIT_START

   /* In case there remains some memory that was automatically
    * allocated by StarPU, we release it now. Note that data
    * coherency is not maintained anymore at that point! */
   _starpu_free_all_automatically_allocated_buffers(memnode);

   STARPU_TRACE_WORKER_DEINIT_END(STARPU_FUT_CPU_KEY);

   pthread_exit(NULL);
}
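The STARPU_CPU_MAY_PERFORM(j) check above only accepts tasks whose codelet declares CPU support. A minimal codelet sketch for that case; the type and field names vary across StarPU releases (older trees use starpu_codelet with a singular cpu_func, current ones struct starpu_codelet with a cpu_funcs array), so treat the names below as illustrative:

   static void my_cpu_kernel(void *buffers[], void *cl_arg)
   {
      /* the actual CPU implementation of the task would go here */
      (void) buffers;
      (void) cl_arg;
   }

   static starpu_codelet my_cl =
   {
      .where = STARPU_CPU,       /* lets STARPU_CPU_MAY_PERFORM() accept the task */
      .cpu_func = my_cpu_kernel,
      .nbuffers = 0
   };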
Example #4
static void handle_terminated_job(starpu_job_t j)
{
	_starpu_push_task_output(j->task, 0);
	_starpu_handle_job_termination(j, 0);
	starpu_wake_all_blocked_workers();
}
int _starpu_repush_task(struct _starpu_job *j)
{
	struct starpu_task *task = j->task;
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(task->sched_ctx);
	unsigned nworkers = 0;
	int ret;

	_STARPU_LOG_IN();

	unsigned can_push = _starpu_increment_nready_tasks_of_sched_ctx(task->sched_ctx, task->flops, task);
	task->status = STARPU_TASK_READY;

#ifdef HAVE_AYUDAME_H
	if (AYU_event)
	{
		intptr_t id = -1;
		AYU_event(AYU_ADDTASKTOQUEUE, j->job_id, &id);
	}
#endif
	/* if the context does not have any workers, save the task in a temp list */
	if(!sched_ctx->is_initial_sched)
	{
		/* if none of the workers in the ctx is able to execute the task,
		   we consider the ctx empty */
		nworkers = _starpu_nworkers_able_to_execute_task(task, sched_ctx);

		if(nworkers == 0)
		{
			STARPU_PTHREAD_MUTEX_LOCK(&sched_ctx->empty_ctx_mutex);
			starpu_task_list_push_front(&sched_ctx->empty_ctx_tasks, task);
			STARPU_PTHREAD_MUTEX_UNLOCK(&sched_ctx->empty_ctx_mutex);
#ifdef STARPU_USE_SC_HYPERVISOR
			if(sched_ctx != NULL && sched_ctx->id != 0 && sched_ctx->perf_counters != NULL 
			   && sched_ctx->perf_counters->notify_empty_ctx)
			{
				_STARPU_TRACE_HYPERVISOR_BEGIN();
				sched_ctx->perf_counters->notify_empty_ctx(sched_ctx->id, task);
				_STARPU_TRACE_HYPERVISOR_END();
			}
#endif
			return 0;
		}

	}

	if(!can_push)
		return 0;
	/* in case there is no codelet associated to the task (that's a control
	 * task), we directly execute its callback and enforce the
	 * corresponding dependencies */
	if (task->cl == NULL || task->cl->where == STARPU_NOWHERE)
	{
		if (task->prologue_callback_pop_func)
			task->prologue_callback_pop_func(task->prologue_callback_pop_arg);

		if (task->cl && task->cl->specific_nodes)
		{
			/* Nothing to do, but we are asked to fetch data on some memory nodes */
			_starpu_fetch_nowhere_task_input(j);
		}
		else
		{
			if (task->cl)
				__starpu_push_task_output(j);
			_starpu_handle_job_termination(j);
			_STARPU_LOG_OUT_TAG("handle_job_termination");
		}
		return 0;
	}

	ret = _starpu_push_task_to_workers(task);
	if (ret == -EAGAIN)
		/* pushed to empty context, that's fine */
		ret = 0;
	return ret;
}
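For reference, a hedged caller-side sketch of the control-task path above (task->cl == NULL), which skips the scheduler entirely; starpu_task_create()/starpu_task_submit() and the callback_func/callback_arg fields are public StarPU API, while my_callback is a hypothetical user function:

	struct starpu_task *sync = starpu_task_create();
	sync->cl = NULL;                    /* no codelet: _starpu_repush_task() terminates it directly */
	sync->callback_func = my_callback;  /* hypothetical callback run at job termination */
	sync->callback_arg = NULL;
	starpu_task_submit(sync);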