Example #1
/* Round-robin scheduler: saves the stack pointer of the interrupted task,
   re-enqueues it, and switches *stack to the next runnable task's stack. */
void schedule(unsigned int *stack){
	if(active == TRUE){
	  task_t* cur_task = dequeue_task();
	  if(cur_task != NULL){
	    cur_pid = cur_task->pid;
	    dbg_bochs_print("@@@@@@@");
	    dbg_bochs_print(cur_task->name);
	    if(cur_task->status!=NEW){
	      cur_task->esp=*stack;
	    } else {
	      cur_task->status=READY;	      
	      ((task_register_t *)(cur_task->esp))->eip = cur_task->eip;	      	      
	    }
	    enqueue_task(cur_task->pid, cur_task);
	    cur_task=get_task();
	    if(cur_task->status==NEW){
	      cur_task->status=READY;
	    }
	    dbg_bochs_print(" -- ");
	    dbg_bochs_print(cur_task->name);
	    dbg_bochs_print("\n");      
	    //load_pdbr(cur_task->pdir);
	    *stack = cur_task->esp;
	  } else {
	    /* dequeue_task() returned NULL: there is no task to re-enqueue */
	  }
	}
	active = FALSE;
	return;
}
Example #2
/**
 * Create a new task 
 * @author Ivan Gualandri
 * @version 1.0
 * @param task_name The name of the task
 * @param start_function The entry point of the task.
 */
pid_t new_task(char *task_name, void (*start_function)()){
	asm("cli");	
	task_t *new_task;
	table_address_t local_table;
	unsigned int new_pid = request_pid();	
	new_task = (task_t*)kmalloc(sizeof(task_t)); 	
	strcpy(new_task->name, task_name);
	new_task->next = NULL;
	new_task->start_function = start_function;
	new_task->cur_quants=0;
	new_task->pid = new_pid;
	new_task->eip = (unsigned int)start_function;
	new_task->esp = (unsigned int)kmalloc(STACK_SIZE) + STACK_SIZE-100; /* esp sits below the top of the new stack, leaving room for the initial register frame */
	new_task->tty = tty_get_current();
	dbg_bochs_print(new_task->esp);
	new_task->status = NEW;
	new_task->registers = (task_register_t*)new_task->esp;
	new_tss(new_task->registers, start_function);
	local_table = map_kernel();
	new_task->pdir = local_table.page_dir;
	new_task->ptable = local_table.page_table;
	//new_task->pdir = 0;
	//new_task->ptable = 0;
	enqueue_task(new_task->pid, new_task);
	//(task_list.current)->cur_quants = MAX_TICKS;			
	asm("sti");
	return new_pid;
}
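Usage note (not from the original source): a minimal sketch of how this API might be called; the task body idle_loop and the call site start_idle are assumptions.

/* Hypothetical usage sketch for new_task(): idle_loop and start_idle are
   illustrative names, not part of the original code. */
void idle_loop(void)
{
	for(;;) ;			/* spin until the scheduler switches away */
}

void start_idle(void)
{
	pid_t pid = new_task("idle", idle_loop);
	(void)pid;			/* the returned pid could be stored or logged */
}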
Example #3
void pool_add_task(task_func func) {
  struct task *task;

  task = (struct task*)malloc(sizeof(struct task));
  task->func = func;

  enqueue_task(task);
}
Example #4
void scheduler_tick(int user_ticks, int sys_ticks)
{
	//...
	if (p->array != rq->active) {
		set_tsk_need_resched(p);
		goto out;
	}
	//...
	if (!--p->time_slice) {
		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
			enqueue_task(p, rq->expired);
		} else
			enqueue_task(p, rq->active);
	} else {
		/* Prevent a too long timeslice allowing a task to monopolize
		 * the CPU. We do this by splitting up the timeslice into smaller pieces.
		 * */
	}
}
Example #5
void enqueue_ray_task(long qid, Element *e, long mode, long process_id)
{
    Task *t ;

    /* Create task object */
    t = get_task(process_id) ;
    t->task_type = TASK_RAY ;
    t->task.ray.e     = e ;

    /* Put in the queue */
    enqueue_task( qid, t, mode ) ;
}
Example #6
void schedule() {
  struct task_struct *next_task = NULL;
  struct task_struct *prev_task = current_task;

  register unsigned long sp asm ("sp");
  printk(PR_SS_PROC, PR_LVL_DBG5, "%s, sp = %x\n", __func__, sp);

  need_reschedule = false;

  if (NULL == current_task) while(1);

  if (PROCESS_STATE_DEAD == current_task->sched_en.state) {
	int waiting_pid = current_task->sched_en.blocked_pid;
	if (-1 != waiting_pid) {
	  struct task_struct *waiting_task = find_task_by_pid(waiting_pid);
	  if (waiting_task) {
		dequeue_task(waiting_task);
		waiting_task->sched_en.state = PROCESS_STATE_READY;
		waiting_task->sched_en.blocking_pid = -1;
		enqueue_task(waiting_task, sched_enqueue_flag_timeout);
	  }
	}
	destroy_user_thread(current_task);
	current_task = NULL;
  } else
	scheduler->enqueue_task(current_task, sched_enqueue_flag_timeout);

  next_task = scheduler->pick_next_task();

  if (current_task)
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, current_task->pid = %d\n", __func__, current_task->pid);
  if (next_task)
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, next_task->pid = %d\n", __func__, next_task->pid);

  if (current_task == next_task) {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, current_task == next_task\n", __func__);
	return;
  } else if (NULL == next_task) {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, NULL == next_task\n", __func__);
	return;
  } else if (NULL == current_task) {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, NULL == current_task\n", __func__);
	current_task = next_task;
  } else {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, current_task != next_task\n", __func__);
	current_task = next_task;
  }

  printk(PR_SS_PROC, PR_LVL_DBG5, "%s, context_switch %d <--> %d start\n", __func__, prev_task->pid, next_task->pid);  
  context_switch(prev_task, next_task);
  printk(PR_SS_PROC, PR_LVL_DBG5, "%s, context_switch %d <--> %d finish\n", __func__, prev_task->pid, next_task->pid);
}
Example #7
	int ThreadScheduler::Wait(lua_State* L, long millis){
		long curTime = currentTimeMillis();

		Task tsk = Task();
		tsk.origin = L;
		tsk.at = curTime + millis;
		tsk.start = curTime;
		tsk.ref = -1;

		enqueue_task(tsk);

		return lua_yield(L, 2);
	}
Example #8
void enqueue_radavg_task(long qid, Element *e, long mode, long process_id)
{
    Task *t ;

    /* Create task object */
    t = get_task(process_id) ;
    t->task_type = TASK_RAD_AVERAGE ;
    t->task.rad.e     = e ;
    t->task.rad.mode  = mode ;

    /* Put in the queue */
    enqueue_task( qid, t, TASK_INSERT ) ;
}
Example #9
// create a task from the given function pointer and arguments
// and append it to the end of the task pool
// assumes threading_lock has already been acquired!
static inline
task_pool_p add_to_task_pool(chpl_fn_p fp,
                             void* a,
                             chpl_bool is_executeOn,
                             chpl_task_prvDataImpl_t chpl_data,
                             task_pool_p* p_task_list_head,
                             chpl_bool is_begin_stmt,
                             int lineno, int32_t filename) {
  task_pool_p ptask =
    (task_pool_p) chpl_mem_alloc(sizeof(task_pool_t),
                                        CHPL_RT_MD_TASK_POOL_DESC,
                                        0, 0);
  ptask->id           = get_next_task_id();
  ptask->fun          = fp;
  ptask->arg          = a;
  ptask->is_executeOn = is_executeOn;
  ptask->chpl_data    = chpl_data;
  ptask->filename     = filename;
  ptask->lineno       = lineno;
  ptask->p_list_head  = NULL;
  ptask->next         = NULL;

  enqueue_task(ptask, p_task_list_head);

  chpl_task_do_callbacks(chpl_task_cb_event_kind_create,
                         ptask->filename,
                         ptask->lineno,
                         ptask->id,
                         ptask->is_executeOn);

  if (do_taskReport) {
    chpl_thread_mutexLock(&taskTable_lock);
    chpldev_taskTable_add(ptask->id,
                          ptask->lineno, ptask->filename,
                          (uint64_t) (intptr_t) ptask);
    chpl_thread_mutexUnlock(&taskTable_lock);
  }

  //
  // If we now have more tasks than threads to run them on (taking
  // into account that the current parent of a structured parallel
  // construct can run at least one of that construct's children),
  // try to start another thread.
  //
  if (queued_task_cnt > idle_thread_cnt &&
      (p_task_list_head == NULL || ptask->list_next != NULL || is_begin_stmt)) {
    maybe_add_thread();
  }

  return ptask;
}
Example #10
	int ThreadScheduler::Delay(lua_State* L, int funcidx, long millis){
		lua_State* NL = lua_newthread(L);
		lua_pushvalue(L, funcidx);
		int r = lua_ref(L, LUA_REGISTRYINDEX);

		long curTime = currentTimeMillis();

		Task tsk = Task();
		tsk.origin = NL;
		tsk.at = curTime + millis;
		tsk.start = curTime;
		tsk.ref = r;

		enqueue_task(tsk);

		return 0;
	}
Example #11
/*
 * Add a task to the threadpool
 *
 */
int pool_add_task(pool_t *pool, void (*function)(void *), void *argument)
{

  int err = 0;

  // Insert the element into the task queue.
  pool_task_t *task = (pool_task_t*) malloc(sizeof(pool_task_t));
  task->function = function;
  task->argument = argument;

  pthread_mutex_lock(&pool->lock);
  
  enqueue_task(&pool->tasks, task);

  // Notify the threads of a change in the queue using the condition variabale.
  pthread_cond_broadcast(&pool->notify);
  pthread_mutex_unlock(&pool->lock);

  return err;
}
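For context, a sketch of the consuming side of such a pool is shown below; the worker name, the queue_is_empty helper, and the dequeue_task counterpart are assumptions inferred from the fields used above, not part of the original code.

/* Hypothetical worker thread: blocks on the condition variable broadcast by
   pool_add_task, then pops and runs one task at a time. */
static void *pool_worker(void *arg)
{
  pool_t *pool = (pool_t *)arg;

  for (;;) {
    pthread_mutex_lock(&pool->lock);
    while (queue_is_empty(&pool->tasks))             /* assumed helper */
      pthread_cond_wait(&pool->notify, &pool->lock);
    pool_task_t *task = dequeue_task(&pool->tasks);  /* assumed counterpart of enqueue_task */
    pthread_mutex_unlock(&pool->lock);

    task->function(task->argument);
    free(task);
  }
  return NULL;
}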
Example #12
/* If the task TASK is suspended change its state to running and move it to
   the end of the run queue. This function may be called from interrupts. */
void
wake_task(struct task *task)
{
    if((task->flags & (TASK_RUNNING | TASK_FROZEN | TASK_ZOMBIE)) == 0)
    {
	u_long flags;
	save_flags(flags);
	cli();
	remove_node(&task->node);
	task->flags |= TASK_RUNNING;
	enqueue_task(&running_tasks, task);
	if(task->pri > current_task->pri)
	{
	    if(intr_nest_count == 0)
		schedule();
	    else
		need_resched = kernel_module.need_resched = TRUE;
	}
	load_flags(flags);
    }
}
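A call site consistent with the comment above ("may be called from interrupts") could look like the sketch below; the handler name and the sleeping_task variable are assumptions.

/* Hypothetical device interrupt handler: calling wake_task() here is safe
   because wake_task() masks interrupts itself and defers schedule() while
   intr_nest_count != 0. */
static struct task *sleeping_task;	/* assumed: task blocked waiting on the device */

void device_irq_handler(void)
{
    if(sleeping_task != NULL)
	wake_task(sleeping_task);
}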
Example #13
void create_ff_refine_task(Element *e1, Element *e2, long level, long process_id)
{
    Task *t ;

    /* Check existing parallelism */
    if( taskq_too_long(&global->task_queue[ taskqueue_id[process_id] ], n_tasks_per_queue) )
        {
            /* Task queue is too long. Solve it immediately */
            ff_refine_elements( e1, e2, level, process_id ) ;
            return ;
        }

    /* Create a task */
    t = get_task(process_id) ;
    t->task_type = TASK_FF_REFINEMENT ;
    t->task.ref.e1              = e1 ;
    t->task.ref.e2              = e2 ;
    t->task.ref.level           = level ;

    /* Put in the queue */
    enqueue_task( taskqueue_id[process_id], t, TASK_INSERT ) ;
}
Example #14
/* Add the task TASK to the correct queue, running_tasks if it's
   runnable, suspended_tasks otherwise. */
void
append_task(struct task *task)
{
    u_long flags;
    save_flags(flags);
    cli();
    if(task->flags & TASK_RUNNING)
    {
	enqueue_task(&running_tasks, task);
	if(current_task->pri < task->pri)
	{
	    /* A higher priority task ready to run always gets priority. */
	    if(intr_nest_count == 0)
		schedule();
	    else
		need_resched = kernel_module.need_resched = TRUE;
	}
    }
    else
	append_node(&suspended_tasks, &task->node);
    load_flags(flags);
}
Example #15
int main(int argc, char **argv)
{
	(void)argc;
	(void)argv;

	if (init_events_init())
		return 1;

	start_logging();

	if (init_rand_seed())
		return 1;

	if (ensure_single_instance())
		return 1;

	if (conf_load())
		return 1;

	if (cl_init())
		return 1;

	if (start_task_queue())
		return 1;

	if (start_state_manager())
		return 1;

	if (start_debug_console())
		return 1;

	if (enqueue_task(main2, NULL))
		return 1;

	if (start_rpc_svc())
		return 1;

	return 0;
}
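For reference, a plausible prototype for the enqueue_task used here, inferred only from the call site enqueue_task(main2, NULL) and its use inside an if condition; it is an assumption, not taken from the original headers.

/* Assumed prototype: schedule func(arg) on the queue started by
   start_task_queue(); returns nonzero on failure, matching the call above. */
int enqueue_task(void (*func)(void *), void *arg);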
Example #16
/* The scheduler; if possible, switch to the next task in the run queue.

   Note that the only reason to *ever* call this function is when the current
   task has suspended itself and needs to actually stop executing. Otherwise
   just set the `need_resched' flag to TRUE and the scheduler will be called
   as soon as is safe.

   Never ever *ever* call this from an interrupt handler! It should be safe
   to be called from an exception handler though.

   Also note that there are no `sliding' priority levels; tasks with high
   priority levels can totally block lower-priority tasks.  */
void
schedule(void)
{
    u_long flags;
    save_flags(flags);
    cli();

#ifdef PARANOID
    if(intr_nest_count != 0)
	kprintf("schedule: Oops, being called with intr_nest_count=%d\n",
		intr_nest_count);
#endif

    /* First reclaim any dead processes.. */
    while(zombies)
    {
	struct task *zombie = (struct task *)zombie_tasks.head;
	remove_node(&zombie->node);
	reclaim_task(zombie);
	zombies--;
    }

    if((current_task->forbid_count > 0)
       && (current_task->flags & TASK_RUNNING))
    {
	/* Non pre-emptible task. */
	load_flags(flags);
	return;
    }

    need_resched = kernel_module.need_resched = FALSE;

    /* Now do the scheduling.. */
    if(current_task->flags & TASK_RUNNING)
    {
	/* Task is still runnable so put it onto the end of the run
	   queue (paying attention to priority levels). */
	remove_node(&current_task->node);
	enqueue_task(&running_tasks, current_task);
    }
    if(!list_empty_p(&running_tasks))
    {
	struct task *next = (struct task *)running_tasks.head;
	if(next->time_left <= 0)
	    next->time_left = next->quantum;
	if(current_task != next)
	{
	    current_task->cpu_time += timer_ticks - current_task->last_sched;
	    if(current_task->flags & TASK_ZOMBIE)
	    {
		append_node(&zombie_tasks, &current_task->node);
		zombies++;
	    }
	    next->sched_count++;
	    next->last_sched = timer_ticks;
	    current_task = next;
	    kernel_module.current_task = next;
	    switch_to_task(next);
#if 1
	    /* Currently we don't handle the math-copro *at all*; clearing
	       this flag simply stops us getting dna exceptions.. */
	    asm volatile ("clts");
#endif
	}
    }
    load_flags(flags);		/* restore the interrupt state saved on entry */
}
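Per the header comment of this example, interrupt handlers should only request a reschedule rather than call schedule() directly; below is a minimal sketch of that pattern (the timer_tick name and the exact bookkeeping are assumptions).

/* Hypothetical timer interrupt handler: never calls schedule() directly; it
   only sets need_resched once the current task's quantum is used up. */
void timer_tick(void)
{
    timer_ticks++;
    if(--current_task->time_left <= 0)
	need_resched = kernel_module.need_resched = TRUE;
}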
Example #17
void create_visibility_tasks(Element *e, void (*k)(), long process_id)
{
    long n_tasks ;
    long remainder ;			     /* Residue of MOD(total_undefs)*/
    long i_cnt ;
    Interaction *top, *tail ;
    Task *t ;
    long total_undefs = 0 ;
    long tasks_created = 0 ;

    /* Check number of hard problems */
    for( top = e->vis_undef_inter ; top ; top = top->next ) {
        if( top->visibility == VISIBILITY_UNDEF )
            total_undefs++ ;
	}

    if( total_undefs == 0 )
        {
            /* No process needs to be created. Call the continuation
               immediately */
            (*k)( e, process_id ) ;
            return ;
        }

    /* Check existing parallelism */
    if(   (total_undefs < N_visibility_per_task)
       || taskq_too_long(&global->task_queue[ taskqueue_id[process_id] ], n_tasks_per_queue) )
        {
            /* Task size is small, or the queue is too long.
               Solve it immediately. */
            visibility_task( e, e->vis_undef_inter,
                            e->n_vis_undef_inter, k, process_id ) ;

            return ;
        }

    /* Create multiple tasks. Hard problems (i.e. where visibility comp is
       really necessary) are divided into 'n_tasks' groups by residue
       number division (or Bresenham's DDA) */
    /* Note: once the first task is enqueued, the vis-undef list may be
       modified while other tasks are being created. So, any information
       that is necessary in the for-loop must be read from the element
       and saved locally */

    n_tasks = (total_undefs + N_visibility_per_task - 1)
        / N_visibility_per_task ;
    remainder = 0 ;
    i_cnt = 0 ;
    for( top = e->vis_undef_inter, tail = top ; tail ; tail = tail->next )
        {
            i_cnt++ ;

            if( tail->visibility != VISIBILITY_UNDEF )
                continue ;

            remainder += n_tasks ;

            if( remainder >= total_undefs )
                {
                    /* Create a task */

                    /* For the last task, append following (easy) interactions
                       if there is any */
                    tasks_created++ ;
                    if( tasks_created >= n_tasks )
                        for( ; tail->next ; tail = tail->next, i_cnt++ ) ;

                    /* Set task descriptor */
                    t = get_task(process_id) ;
                    t->task_type = TASK_VISIBILITY ;
                    t->task.vis.e       = e ;
                    t->task.vis.inter   = top ;
                    t->task.vis.n_inter = i_cnt ;
                    t->task.vis.k       = k ;

                    /* Enqueue */
                    enqueue_task( taskqueue_id[process_id], t, TASK_INSERT ) ;

                    /* Update pointer and the residue variable */
                    top = tail->next ;
                    remainder -= total_undefs ;
                    i_cnt = 0 ;
                }
        }
}
Example #18
int
erts_port_task_schedule(Eterm id,
			ErtsPortTaskHandle *pthp,
			ErtsPortTaskType type,
			ErlDrvEvent event,
			ErlDrvEventData event_data)
{
    ErtsRunQueue *runq;
    Port *pp;
    ErtsPortTask *ptp;
    int enq_port = 0;

    /*
     * NOTE:	We might not have the port lock here. We are only
     *		allowed to access the 'sched', 'tab_status',
     *          and 'id' fields of the port struct while
     *          tasks_lock is held.
     */

    if (pthp && erts_port_task_is_scheduled(pthp)) {
	ASSERT(0);
	erts_port_task_abort(id, pthp);
    }

    ptp = port_task_alloc();

    ASSERT(is_internal_port(id));
    pp = &erts_port[internal_port_index(id)];
    runq = erts_port_runq(pp);

    if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) {
	if (runq)
	    erts_smp_runq_unlock(runq);
	return -1;
    }

    ASSERT(!erts_port_task_is_scheduled(pthp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
#ifdef DEBUG
    /* If we have a taskq and not executing, we should be in port run q */  
    if (pp->sched.taskq && !pp->sched.exe_taskq) {
	ASSERT(pp->sched.prev || runq->ports.start == pp);
    }
#endif

    if (!pp->sched.taskq) {
	pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp);
	enq_port = !pp->sched.exe_taskq;
    }

#ifdef ERTS_SMP
    if (enq_port) {
	ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
	if (xrunq) {
	    /* Port emigrated ... */
	    erts_smp_atomic_set(&pp->run_queue, (long) xrunq);
	    erts_smp_runq_unlock(runq);
	    runq = xrunq;
	}
    }
#endif

    ASSERT(!(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));

    ASSERT(pp->sched.taskq);
    ASSERT(ptp);

    ptp->type = type;
    ptp->event = event;
    ptp->event_data = event_data;

    set_handle(ptp, pthp);

    switch (type) {
    case ERTS_PORT_TASK_FREE:
	erl_exit(ERTS_ABORT_EXIT,
		 "erts_port_task_schedule(): Cannot schedule free task\n");
	break;
    case ERTS_PORT_TASK_INPUT:
    case ERTS_PORT_TASK_OUTPUT:
    case ERTS_PORT_TASK_EVENT:
	erts_smp_atomic_inc(&erts_port_task_outstanding_io_tasks);
	/* Fall through... */
    default:
	enqueue_task(pp->sched.taskq, ptp);
	break;
    }

#if defined(HARD_DEBUG)
    if (pp->sched.exe_taskq || enq_port)
	ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp);
    else
	ERTS_PT_CHK_IN_PORTQ(runq, pp);
#elif defined(DEBUG)
    if (!enq_port && !pp->sched.exe_taskq) {
	/* We should be in port run q */
	ASSERT(pp->sched.prev || runq->ports.start == pp);
    }
#endif

    if (!enq_port) {
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    }
    else {
	enqueue_port(runq, pp);
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
	    
	if (erts_system_profile_flags.runnable_ports) {
	    profile_runnable_port(pp, am_active);
	}

	erts_smp_notify_inc_runq(runq);
    }
    erts_smp_runq_unlock(runq);
    return 0;
}
Example #19
int
erts_port_task_schedule(Eterm id,
			ErtsPortTaskHandle *pthp,
			ErtsPortTaskType type,
			ErlDrvEvent event,
			ErlDrvEventData event_data)
{
    ErtsRunQueue *runq;
    Port *pp;
    ErtsPortTask *ptp;
    int enq_port = 0;

    /*
     * NOTE:	We might not have the port lock here. We are only
     *		allowed to access the 'sched', 'tab_status',
     *          and 'id' fields of the port struct while
     *          tasks_lock is held.
     */

    if (pthp && erts_port_task_is_scheduled(pthp)) {
	ASSERT(0);
	erts_port_task_abort(id, pthp);
    }

    ptp = port_task_alloc();

    ASSERT(is_internal_port(id));
    pp = &erts_port[internal_port_index(id)];
    runq = erts_port_runq(pp);

    if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) {
	if (runq)
	    erts_smp_runq_unlock(runq);
	return -1;
    }

    ASSERT(!erts_port_task_is_scheduled(pthp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    if (!pp->sched.taskq) {
	pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp);
	enq_port = !pp->sched.exe_taskq;
    }

#ifdef ERTS_SMP
    if (enq_port) {
	ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
	if (xrunq) {
	    /* Port emigrated ... */
	    erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
	    erts_smp_runq_unlock(runq);
	    runq = xrunq;
	}
    }
#endif

    ASSERT(!enq_port || !(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));

    ASSERT(pp->sched.taskq);
    ASSERT(ptp);

    ptp->type = type;
    ptp->event = event;
    ptp->event_data = event_data;

    set_handle(ptp, pthp);

    switch (type) {
    case ERTS_PORT_TASK_FREE:
	erl_exit(ERTS_ABORT_EXIT,
		 "erts_port_task_schedule(): Cannot schedule free task\n");
	break;
    case ERTS_PORT_TASK_INPUT:
    case ERTS_PORT_TASK_OUTPUT:
    case ERTS_PORT_TASK_EVENT:
	erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
	/* Fall through... */
    default:
	enqueue_task(pp->sched.taskq, ptp);
	break;
    }

#ifndef ERTS_SMP
    /*
     * When (!enq_port && !pp->sched.exe_taskq) is true in the smp case,
     * the port might not be in the run queue. If this is the case, another
     * thread is in the process of enqueueing the port. This very seldom
     * occur, but do occur and is a valid scenario. Debug info showing this
     * enqueue in progress must be introduced before we can enable (modified
     * versions of these) assertions in the smp case again.
     */
#if defined(HARD_DEBUG)
    if (pp->sched.exe_taskq || enq_port)
	ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp);
    else
	ERTS_PT_CHK_IN_PORTQ(runq, pp);
#elif defined(DEBUG)
    if (!enq_port && !pp->sched.exe_taskq) {
	/* We should be in port run q */
	ASSERT(pp->sched.prev || runq->ports.start == pp);
    }
#endif
#endif

    if (!enq_port) {
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
	erts_smp_runq_unlock(runq);
    }
    else {
	enqueue_port(runq, pp);
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
	    
	if (erts_system_profile_flags.runnable_ports) {
	    profile_runnable_port(pp, am_active);
	}

	erts_smp_runq_unlock(runq);

	erts_smp_notify_inc_runq(runq);
    }
    return 0;
}
Example #20
// create a task from the given function pointer and arguments
// and append it to the end of the task pool
// assumes threading_lock has already been acquired!
static inline
task_pool_p add_to_task_pool(chpl_fn_int_t fid, chpl_fn_p fp,
                             chpl_task_bundle_t* a, size_t a_size,
                             chpl_bool serial_state,
                             chpl_bool countRunningTasks,
                             chpl_bool is_executeOn,
                             task_pool_p* p_task_list_head,
                             chpl_bool is_begin_stmt,
                             int lineno, int32_t filename) {


  size_t payload_size;
  task_pool_p ptask;
  chpl_task_prvDataImpl_t pv;

  memset(&pv, 0, sizeof(pv));

  assert(a_size >= sizeof(chpl_task_bundle_t));

  payload_size = a_size - sizeof(chpl_task_bundle_t);
  ptask = (task_pool_p) chpl_mem_alloc(sizeof(task_pool_t) + payload_size,
                                       CHPL_RT_MD_TASK_ARG_AND_POOL_DESC,
                                       lineno, filename);

  memcpy(&ptask->bundle, a, a_size);

  ptask->p_list_head            = NULL;
  ptask->list_next              = NULL;
  ptask->list_prev              = NULL;
  ptask->next                   = NULL;
  ptask->prev                   = NULL;
  ptask->chpl_data              = pv;
  ptask->bundle.serial_state    = serial_state;
  ptask->bundle.countRunning    = countRunningTasks;
  ptask->bundle.is_executeOn    = is_executeOn;
  ptask->bundle.lineno          = lineno;
  ptask->bundle.filename        = filename;
  ptask->bundle.requestedSubloc = c_sublocid_any_val;
  ptask->bundle.requested_fid   = fid;
  ptask->bundle.requested_fn    = fp;
  ptask->bundle.id              = get_next_task_id();

  enqueue_task(ptask, p_task_list_head);

  chpl_task_do_callbacks(chpl_task_cb_event_kind_create,
                         ptask->bundle.requested_fid,
                         ptask->bundle.filename,
                         ptask->bundle.lineno,
                         ptask->bundle.id,
                         ptask->bundle.is_executeOn);

  if (do_taskReport) {
    chpl_thread_mutexLock(&taskTable_lock);
    chpldev_taskTable_add(ptask->bundle.id,
                          ptask->bundle.lineno, ptask->bundle.filename,
                          (uint64_t) (intptr_t) ptask);
    chpl_thread_mutexUnlock(&taskTable_lock);
  }

  //
  // If we now have more tasks than threads to run them on (taking
  // into account that the current parent of a structured parallel
  // construct can run at least one of that construct's children),
  // try to start another thread.
  //
  if (queued_task_cnt > idle_thread_cnt &&
      (p_task_list_head == NULL || ptask->list_next != NULL || is_begin_stmt)) {
    maybe_add_thread();
  }

  return ptask;
}