static struct task_struct *pick_next_task_dummy(struct rq *rq) { struct dummy_rq *dummy_rq = &rq->dummy; struct sched_dummy_entity *next; int i = 0; /* The queues are browsed ordered by priority. It returns as soon as a nonempty queue is found. */ for (i = 0; i < NR_PRIO_LEVELS; ++i) { struct list_head *queue = &(dummy_rq->queues[i]); if (!list_empty(queue)) { next = list_first_entry(queue, struct sched_dummy_entity, run_list); /* Imagine the following case: a process of the highest priority handled by this sched class * is running while a process of higher priority preempts that process and is handled by another * sched class. Suppose this happens frequently. To make sure that the same process does not * run a fraction of a round robin quantum and that the other processes in the queue gets their * cycle we reset the round robin counter here only when a process has completed a round robin * tour. */ if (rq->curr->dummy_se.rr_tick_count >= get_timeslice()) { rq->curr->dummy_se.rr_tick_count = 0; } return dummy_task_of(next); } }
/*
 * enqueue_task_dummy - make @p runnable in the dummy class.
 *
 * The ageing counter is reset because the task just became runnable and
 * starts a fresh waiting period.  If the task had already consumed a full
 * round-robin quantum, its quantum counter is restarted so it receives a
 * complete timeslice on its next run.
 *
 * NOTE(review): the stray debug printk(KERN_CRIT "enqueue: %d\n", ...)
 * was removed -- enqueue runs on a hot path and KERN_CRIT trace output
 * floods the console on every wakeup.
 */
static void enqueue_task_dummy(struct rq *rq, struct task_struct *p, int flags)
{
	_enqueue_task_dummy(rq, p);
	p->dummy_se.age_count = 0;

	/* Restart the quantum if a full timeslice was already used up. */
	if (p->dummy_se.quantum >= get_timeslice())
		p->dummy_se.quantum = 0;

	inc_nr_running(rq);
}
/*
 * check_preempt_curr_dummy - decide whether waking task @p should preempt
 * the currently running task.
 *
 * Two cases are handled:
 *  - @p has higher priority (lower prio value) than the running task:
 *    the running task is rescheduled immediately;
 *  - @p has the same priority: the running task is preempted only once
 *    it has exhausted its round-robin quantum, in which case it is
 *    requeued at the tail of its priority level before rescheduling.
 */
static void check_preempt_curr_dummy(struct rq *rq, struct task_struct *p, int flags)
{
	if (rq->curr->prio > p->prio) {
		resched_task(rq->curr);
	} else if (rq->curr->prio == p->prio) {
		/*
		 * BUGFIX: test the *running* task's tick count, not the newly
		 * woken task's.  The policy is to preempt curr only after it
		 * has consumed its full quantum; p has just become runnable,
		 * so its own rr_tick_count is irrelevant here.
		 */
		if (rq->curr->dummy_se.rr_tick_count >= get_timeslice()) {
			dequeue_task_dummy(rq, rq->curr, 0);
			enqueue_task_dummy(rq, rq->curr, 0);
			resched_task(rq->curr);
		}
	}
}