Example #1
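Budget replenishment in an EDF reservation scheduler (RESCH-style API): the replenished task is promoted from the background queue back to the EDF queue and, if it is now the highest-priority active task, woken in place of the task it displaces.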
static void edf_reserve_replenish(resch_task_t *rt, unsigned long cputime)
{
	int cpu = rt->cpu_id;
	unsigned long flags;

	/* top up the reservation budget. */
	rt->budget += cputime;
	active_queue_lock(cpu, &flags);
	if (rt->prio == RESCH_PRIO_BACKGROUND && task_is_active(rt)) {
		/* promote the task from the background queue back to EDF. */
		edf_dequeue_task(rt, RESCH_PRIO_BACKGROUND, cpu);
		edf_enqueue_task(rt, RESCH_PRIO_EDF, cpu);
		if (rt == active_highest_prio_task(cpu) &&
		    rt->task->state == TASK_INTERRUPTIBLE) {
			/* rt is now the highest-priority task: put the task
			   it displaces to sleep before waking rt. */
			resch_task_t *p = active_next_prio_task(rt);
			if (p) {
				p->task->state = TASK_INTERRUPTIBLE;
			}
			active_queue_unlock(cpu, &flags);
			wake_up_process(rt->task);
		}
		else {
			active_queue_unlock(cpu, &flags);
		}
	}
	else {
		active_queue_unlock(cpu, &flags);
	}
}
Example #2
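The counterpart of the previous example: when a reservation's budget expires, the task is demoted to the background queue; if it is no longer the highest-priority task, the new highest-priority task is woken and the expired task is put to sleep.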
static void edf_reserve_expire(resch_task_t *rt)
{
	unsigned long flags;
	int cpu = rt->cpu_id;
	resch_task_t *next;

	active_queue_lock(cpu, &flags);
	if (!task_is_active(rt)) {
		active_queue_unlock(cpu, &flags);
		return;
	}

	/* demote the task to the background queue. */
	edf_dequeue_task(rt, rt->prio, cpu);
	edf_enqueue_task(rt, RESCH_PRIO_BACKGROUND, cpu);

	/* next is never NULL: rt itself is still queued. */
	next = active_highest_prio_task(cpu);
	if (rt == next) {
		active_queue_unlock(cpu, &flags);
	}
	else {
		/* hand the CPU over: wake the new highest-priority
		   task and put the expired task to sleep. */
		active_queue_unlock(cpu, &flags);
		wake_up_process(next->task);
		rt->task->state = TASK_INTERRUPTIBLE;
	}
}
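Both reservation examples gate their queue manipulation on task_is_active(). Its definition is not shown on this page; as a rough sketch, such a predicate typically reduces to a queue-membership flag maintained by the enqueue/dequeue paths. The type and field below are invented for illustration, not the actual RESCH definitions:

#include <stdbool.h>

/* Hypothetical stand-in for the real task descriptor; only the
 * queue-membership flag matters for this sketch. */
struct task_sketch {
	bool on_active_queue;	/* set on enqueue, cleared on dequeue */
};

/* A task counts as "active" while it sits on some active queue. */
static inline bool task_is_active_sketch(const struct task_sketch *t)
{
	return t->on_active_queue;
}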
Example #3
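A round-robin yield in a small cooperative kernel: pick the next entry in the active task list (or its head, if the current task is no longer active) and context-switch to it.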
void task_yield(void) {
  disable_irq();

  struct task_t *current_task = task_current();
  struct list_node_t *next_task_it;
  struct task_t *next_task;

  /* round-robin: advance past the current task if it is still
     active, otherwise restart from the head of the active list. */
  if ( task_is_active(current_task) ) {
    next_task_it = current_task_it->next;
  }
  else {
    next_task_it = list_first(active_tasks);
  }

  next_task = (struct task_t *)next_task_it->data;
  if ( next_task == 0 ) {
    /* originally checked after use; bail out before dereferencing. */
    print_buf("ERROR: next_task=0\n");
    enable_irq();  /* assumed counterpart to disable_irq() */
    return;
  }
  task_print("task_yield:", current_task, next_task);
  if ( current_task == next_task ) {
    enable_irq();  /* nothing to switch to; restore interrupts */
    return;
  }

  current_task_it = next_task_it;
  task_switch(&current_task->sp, next_task->sp);
}
Example #4
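A task-switch hook that mirrors which traced tasks are running onto I/O trace pins; a debug printout of switches away from h_udp_task is kept in the source but compiled out.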
void uos_on_task_switch(task_t *t)
{
    int i;
    unsigned m = TSK_PIN_TIMER << 1;

    /* debug trace of switches away from the UDP task; deliberately
       compiled out by the `if (0)`, kept here for reference. */
    if (0) {
        if (task_is_active(h_udp_task)) {
            if (h_udp_task != t) {
                debug_printf("%s(%d)\n", t->name, t->prio);
                debug_putchar(0, '$');
            }
            else {
                debug_putchar(0, '4');
            }
        }
    }

    //debug_printf("@%s\n", t->name);
    /* for each traced task slot, raise its trace pin if that task
       is the one being switched in, otherwise lower the pin. */
    for (i = 0; i < 8; i++, m = m << 1) {
        if (trace_tasks[i] != 0) {
            if ((unsigned)t == trace_tasks[i]) {
                TSK_PORT_IO.data |= m;
                //debug_putchar(0, '0'+i);
                //debug_printf("%x[%d]\n", (unsigned)t, i);
            }
            else {
                TSK_PORT_IO.data &= ~m;
            }
        }
    }
}
Example #5
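Cross-CPU migration in the same EDF scheduler: under a double queue lock, the task is dequeued from the source CPU, enqueued on the destination CPU, and its budget accounting is restarted there.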
/**
 * migrate @rt to the given CPU. 
 */
static void edf_migrate_task(resch_task_t *rt, int cpu_dst)
{
	unsigned long flags;
	int cpu_src = rt->cpu_id;

	if (cpu_src != cpu_dst) {
		active_queue_double_lock(cpu_src, cpu_dst, &flags);
		if (task_is_active(rt)) {
#ifdef RESCH_PREEMPT_TRACE
			/* trace preemption. */
			preempt_out(rt);
#endif
			/* move off the source CPU. */
			edf_dequeue_task(rt, RESCH_PRIO_EDF, cpu_src);

			/* move on the destination CPU. */
			rt->cpu_id = cpu_dst; 
			edf_enqueue_task(rt, RESCH_PRIO_EDF, cpu_dst);

#ifdef RESCH_PREEMPT_TRACE
			/* trace preemption. */
			preempt_in(rt);
#endif
			active_queue_double_unlock(cpu_src, cpu_dst, &flags);

			__migrate_task(rt, cpu_dst);

			/* restart accounting on the new CPU. */
			if (task_is_accounting(rt)) {
				edf_stop_account(rt);
				edf_start_account(rt);
			}
		}
		else {
			rt->cpu_id = cpu_dst;
			active_queue_double_unlock(cpu_src, cpu_dst, &flags);
			__migrate_task(rt, cpu_dst);
		}
	}
	else {
		__migrate_task(rt, cpu_dst);
	}
}
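Both migration variants on this page take the source and destination queue locks together via active_queue_double_lock(). One deadlock-free way to implement such a helper, not necessarily RESCH's, is to always acquire the lower-numbered CPU's lock first; a sketch with invented lock names:

#include <linux/spinlock.h>

/* Hypothetical sketch of the lock-ordering idea: two concurrent
 * migrations that each grab the lower-numbered CPU's lock first
 * can never wait on each other in a cycle. The spinlock array is
 * invented for illustration. */
static spinlock_t queue_lock[NR_CPUS];

static void double_lock_sketch(int cpu_src, int cpu_dst)
{
	int lo = (cpu_src < cpu_dst) ? cpu_src : cpu_dst;
	int hi = (cpu_src < cpu_dst) ? cpu_dst : cpu_src;

	spin_lock(&queue_lock[lo]);
	spin_lock(&queue_lock[hi]);
}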
Example #6
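A more elaborate variant of the migration routine above: it additionally wakes the next task left on the source CPU and applies the EDF rule on the destination CPU, putting whichever of the two tasks has the later deadline to sleep.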
/**
 * migrate @rt to the given CPU. 
 */
static void edf_migrate_task(resch_task_t *rt, int cpu_dst)
{
	unsigned long flags;
	int cpu_src = rt->cpu_id;

	if (cpu_src != cpu_dst) {
		active_queue_double_lock(cpu_src, cpu_dst, &flags);
		if (task_is_active(rt)) {
			resch_task_t *next_src = NULL, *curr_dst = NULL;
			/* save the next task on the source CPU. */
			if (rt == active_highest_prio_task(cpu_src)) {
				next_src = active_next_prio_task(rt);
			}
#ifdef NO_LINUX_LOAD_BALANCE
			/* trace preemption. */
			preempt_out(rt);
#endif
			/* move off the source CPU. */
			edf_dequeue_task(rt, rt->prio, cpu_src);

			/* save the current task on the destination CPU. */
			curr_dst = active_prio_task(cpu_dst, RESCH_PRIO_EDF_RUN);

			/* move on the destination CPU. */
			rt->cpu_id = cpu_dst; 
			edf_enqueue_task(rt, rt->prio, cpu_dst);

#ifdef NO_LINUX_LOAD_BALANCE
			/* trace preemption. */
			preempt_in(rt);
#endif
			active_queue_double_unlock(cpu_src, cpu_dst, &flags);

			/* the next task will never preempt the current task. */
			if (next_src) {
				wake_up_process(next_src->task);
			}

			__migrate_task(rt, cpu_dst);

			/* restart accounting on the new CPU. */
			if (task_is_accounting(rt)) {
				edf_stop_account(rt);
				edf_start_account(rt);
			}

			if (curr_dst) {
				if (rt->deadline_time < curr_dst->deadline_time) {
					curr_dst->task->state = TASK_INTERRUPTIBLE;
					set_tsk_need_resched(curr_dst->task);
				}
				else {
					rt->task->state = TASK_INTERRUPTIBLE;
					set_tsk_need_resched(rt->task);
				}
			}
		}
		else {
			rt->cpu_id = cpu_dst;
			active_queue_double_unlock(cpu_src, cpu_dst, &flags);
			__migrate_task(rt, cpu_dst);
		}
	}
	else {
		__migrate_task(rt, cpu_dst);
	}
}
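The final if/else in this example is the EDF policy itself: on the destination CPU, the task with the later absolute deadline yields. Reduced to a predicate (plain integers standing in for the deadline_time fields):

#include <stdbool.h>

/* EDF preemption test: a newly arrived task preempts the current
 * one only if its absolute deadline is strictly earlier. */
static inline bool edf_preempts(unsigned long new_deadline,
                                unsigned long curr_deadline)
{
	return new_deadline < curr_deadline;
}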