/*
 * Insert a task into the sorted run queue of the given CPU.
 *
 * t        - task to enqueue; ignored if already on a run queue or if it
 *            is the idle task
 * cpu      - index of the CPU whose run queue receives the task
 * runnable - nonzero if the task was already counted as runnable before
 *            this call (i.e. this is a re-queue, not a wakeup)
 *
 * On a wakeup (runnable == 0) this also updates the run queue's runnable
 * count and, under the preemptive scheduler, may flag the CPU for a
 * reschedule when the newcomer has a better (lower) prio than the task
 * currently running there.
 */
static void enqueue_task_prio(task_t* t, int cpu, int runnable)
{
    runqueue_t* rq = get_runqueue_cpu(cpu);

    /* Already queued tasks and the idle task are never (re)inserted. */
    if (t->on_rq || is_idle_task(t))
        return;

    if (t->flags & TF_INSERT_FRONT) {
        /* One-shot hint: consume the flag and insert ahead of entries
         * that compare equal, so a previously preempted task is not
         * pushed behind same-key newcomers. */
        t->flags &= ~TF_INSERT_FRONT;
        sorted_insert_slist_front(&rq->tasks, t, 1, compare_tasks_cpu_burst);
    } else {
        sorted_insert_slist(&rq->tasks, t, 1, compare_tasks_cpu_burst);
    }

    t->on_rq = TRUE;

    /* Wakeup path: the task was not runnable before, so account for it
     * and decide whether it should preempt the running task. */
    if (!runnable) {
        task_t* running = rq->cur_task;

        rq->nr_runnable++;
        t->last_cpu = cpu;

        /* NOTE(review): the queue is ordered by compare_tasks_cpu_burst
         * but preemption tests prio — presumably prio tracks the burst
         * ordering in this scheduler; confirm against the comparator. */
        if (preemptive_scheduler && !is_idle_task(running)
            && t->prio < running->prio) {
            rq->need_resched = TRUE;
            /* Mark the displaced task for front insertion on re-enqueue,
             * so a same-prio wakeup cannot treat it unfairly. */
            running->flags |= TF_INSERT_FRONT;
        }
    }
}
/*
 * Queue a scheduler event on the given CPU's event list, keeping the
 * list ordered by event timeout.
 *
 * event - event descriptor to insert
 * cpu   - index selecting the per-CPU list in sched_events[]
 */
static inline void insert_sched_event(sched_event_t* event, int cpu)
{
    sorted_insert_slist(&sched_events[cpu], event, 1,
                        compare_sched_event_timeouts);
}