/**
 * replenish the budget of the given task by @cputime.  If the task was
 * demoted to the background queue by edf_reserve_expire(), promote it
 * back to the EDF queue and, if it now has the earliest deadline, wake
 * it up in place of the previously running task.
 */
static void edf_reserve_replenish(resch_task_t *rt, unsigned long cputime)
{
        int cpu = rt->cpu_id;
        unsigned long flags;

        rt->budget += cputime;

        active_queue_lock(cpu, &flags);
        if (rt->prio == RESCH_PRIO_BACKGROUND && task_is_active(rt)) {
                /* promote the task back to the EDF queue. */
                edf_dequeue_task(rt, RESCH_PRIO_BACKGROUND, cpu);
                edf_enqueue_task(rt, RESCH_PRIO_EDF, cpu);
                if (rt == active_highest_prio_task(cpu) &&
                    rt->task->state == TASK_INTERRUPTIBLE) {
                        /* the promoted task has the earliest deadline and is
                           asleep: put the next task to sleep and wake the
                           promoted one. */
                        resch_task_t *p = active_next_prio_task(rt);
                        if (p) {
                                p->task->state = TASK_INTERRUPTIBLE;
                        }
                        active_queue_unlock(cpu, &flags);
                        wake_up_process(rt->task);
                } else {
                        active_queue_unlock(cpu, &flags);
                }
        } else {
                active_queue_unlock(cpu, &flags);
        }
}
/**
 * expire the budget of the given task: demote it from the EDF queue to
 * the background queue and dispatch the next earliest-deadline task.
 */
static void edf_reserve_expire(resch_task_t *rt)
{
        unsigned long flags;
        int cpu = rt->cpu_id;
        int prio;
        resch_task_t *next;

        active_queue_lock(cpu, &flags);
        if (!task_is_active(rt)) {
                active_queue_unlock(cpu, &flags);
                return;
        }
        prio = rt->prio;

        /* move to the background queue. */
        edf_dequeue_task(rt, prio, cpu);
        edf_enqueue_task(rt, RESCH_PRIO_BACKGROUND, cpu);

        /* next is never NULL: rt itself is still queued in the background
           queue. */
        next = active_highest_prio_task(cpu);
        if (rt == next) {
                active_queue_unlock(cpu, &flags);
        } else {
                active_queue_unlock(cpu, &flags);
                wake_up_process(next->task);
                rt->task->state = TASK_INTERRUPTIBLE;
        }
}
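/*
 * Illustrative sketch (not part of the original plugin): one plausible
 * driver for the two reservation hooks above.  An hrtimer fires when the
 * job is assumed to have consumed its budget, demotes the task via
 * edf_reserve_expire(), and re-arms for the replenishment point, where it
 * grants a fresh budget via edf_reserve_replenish().  For simplicity the
 * sketch charges wall-clock time, as if the task ran continuously; a real
 * enforcer would account actual CPU consumption.  struct reserve_timer,
 * its fields, and reserve_timer_fn() are hypothetical; only
 * edf_reserve_expire(), edf_reserve_replenish(), and resch_task_t come
 * from the code above.
 */
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>

struct reserve_timer {
        struct hrtimer timer;   /* assumed armed for budget_ns at job start. */
        resch_task_t *rt;
        u64 budget_ns;          /* budget granted per period. */
        u64 period_ns;          /* replenishment period. */
        int depleted;           /* waiting for the replenishment point? */
};

static enum hrtimer_restart reserve_timer_fn(struct hrtimer *timer)
{
        struct reserve_timer *rv =
                container_of(timer, struct reserve_timer, timer);

        if (!rv->depleted) {
                /* budget exhausted: move the task to the background queue. */
                edf_reserve_expire(rv->rt);
                rv->depleted = 1;
                /* sleep until the replenishment point. */
                hrtimer_forward_now(timer,
                        ns_to_ktime(rv->period_ns - rv->budget_ns));
        } else {
                /* replenishment point: grant a fresh budget (converted to
                   jiffies to match the cputime argument above). */
                edf_reserve_replenish(rv->rt, nsecs_to_jiffies(rv->budget_ns));
                rv->depleted = 0;
                hrtimer_forward_now(timer, ns_to_ktime(rv->budget_ns));
        }
        return HRTIMER_RESTART;
}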
static void request_migration(edf_wm_task_t *et, int cpu_dst)
{
        unsigned long flags;
        resch_task_t *hp;

        INIT_LIST_HEAD(&et->migration_list);

        /* insert the task into the waiting list for the migration thread. */
        spin_lock_irqsave(&kthread[cpu_dst].lock, flags);
        list_add_tail(&et->migration_list, &kthread[cpu_dst].list);
        spin_unlock(&kthread[cpu_dst].lock);
        /* note: interrupts stay disabled here on purpose.  The lock is
           dropped with a plain spin_unlock(), and local_irq_restore()
           below re-enables interrupts only once the task has been put to
           sleep for the hand-off. */

        /* wake up the migration thread running on the destination CPU. */
        wake_up_process(kthread[cpu_dst].task);
        et->rt->task->state = TASK_UNINTERRUPTIBLE;
        set_tsk_need_resched(et->rt->task);
        local_irq_restore(flags);

        /* force the task currently running on the destination CPU to
           reschedule. */
        active_queue_lock(cpu_dst, &flags);
        hp = active_highest_prio_task(cpu_dst);
        if (hp) {
                set_tsk_need_resched(hp->task);
        }
        active_queue_unlock(cpu_dst, &flags);
        smp_send_reschedule(cpu_dst);
}
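/*
 * Illustrative sketch (not part of the original plugin): the consumer side
 * implied by request_migration().  A per-CPU kernel thread sleeps until it
 * is woken above, drains kthread[cpu].list under the same spinlock, and
 * hands each queued task over to its new CPU.  migration_thread_fn() and
 * edf_wm_migrate_task() are hypothetical names; kthread[], its lock and
 * list, and the migration_list linkage come from the code above.
 */
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* hypothetical helper: bind et->rt to cpu_dst and wake it up again. */
static void edf_wm_migrate_task(edf_wm_task_t *et, int cpu_dst);

static int migration_thread_fn(void *data)
{
        int cpu = (long)data;
        unsigned long flags;
        edf_wm_task_t *et;

        while (!kthread_should_stop()) {
                /* set the state before checking the list so that a wakeup
                   from request_migration() is not lost. */
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock_irqsave(&kthread[cpu].lock, flags);
                while (!list_empty(&kthread[cpu].list)) {
                        et = list_first_entry(&kthread[cpu].list,
                                              edf_wm_task_t, migration_list);
                        list_del_init(&et->migration_list);
                        spin_unlock_irqrestore(&kthread[cpu].lock, flags);

                        edf_wm_migrate_task(et, cpu);

                        spin_lock_irqsave(&kthread[cpu].lock, flags);
                }
                spin_unlock_irqrestore(&kthread[cpu].lock, flags);

                schedule();     /* sleep until the next migration request. */
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}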
/**
 * trace hook: record the completion of the current job of @rt, together
 * with the task that takes the CPU next (pid 0 if the CPU goes idle).
 */
void task_complete_event(resch_task_t *rt)
{
        unsigned long flags;
        unsigned long timestamp;
        resch_task_t *next;
        resch_task_t temp;
        unsigned long tempa;

        tempa = linux_timestamp_microsec(0);
        if (rt == NULL) {
                printk(KERN_WARNING "(task_complete) rt == NULL???\n");
                return;
        }
        timestamp = jiffies; /* alternatively: linux_timestamp_microsec(0); */

        active_queue_lock(rt->cpu_id, &flags);
        next = active_next_prio_task(rt);
        active_queue_unlock(rt->cpu_id, &flags);

        if (next != NULL) {
                store_event(rt, next, NULL, NULL, 0, timestamp, FALSE, FALSE);
        } else {
                /* no successor: the idle task (pid 0) takes the CPU. */
                temp.pid = 0;
                store_event(rt, &temp, NULL, NULL, 0, timestamp, FALSE, FALSE);
        }
        /* account the instrumentation overhead of this hook itself. */
        mikaoverhead += linux_timestamp_microsec(0) - tempa;
}
/**
 * complete the current job of the given task.
 */
static void edf_job_complete(resch_task_t *rt)
{
        int cpu = rt->cpu_id;
        unsigned long flags;

        active_queue_lock(cpu, &flags);
        edf_dequeue_task(rt, RESCH_PRIO_EDF, cpu);
        active_queue_unlock(cpu, &flags);
}
/**
 * called when the given task starts a new job.
 */
static void edf_job_start(resch_task_t *rt)
{
        unsigned long flags;
        int cpu = rt->cpu_id;

        active_queue_lock(cpu, &flags);
        edf_enqueue_task(rt, RESCH_PRIO_EDF, cpu);
        active_queue_unlock(cpu, &flags);
}
/**
 * complete the current job of the given task.
 */
static void edf_job_complete(resch_task_t *rt)
{
        int cpu = rt->cpu_id;
        int prio = rt->prio;
        unsigned long flags;
        resch_task_t *next;

        active_queue_lock(cpu, &flags);
        edf_dequeue_task(rt, prio, cpu);
        /* reset the priority, in case the task had been demoted to the
           background queue. */
        rt->prio = RESCH_PRIO_EDF;
        next = active_highest_prio_task(cpu);
        active_queue_unlock(cpu, &flags);
        if (next) {
                wake_up_process(next->task);
        }
}
/**
 * called when the given task starts a new job.
 */
static void edf_job_start(resch_task_t *rt)
{
        unsigned long flags;
        int cpu = rt->cpu_id;
        resch_task_t *hp;

        active_queue_lock(cpu, &flags);
        edf_enqueue_task(rt, RESCH_PRIO_EDF, cpu);
        hp = active_highest_prio_task(cpu);
        if (rt == hp) {
                /* the new job has the earliest deadline: preempt the task
                   that was running. */
                resch_task_t *curr = active_next_prio_task(rt);
                if (curr) {
                        curr->task->state = TASK_INTERRUPTIBLE;
                        set_tsk_need_resched(curr->task);
                }
        } else {
                /* not the earliest deadline: sleep until dispatched. */
                rt->task->state = TASK_INTERRUPTIBLE;
        }
        active_queue_unlock(cpu, &flags);
}
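/*
 * Illustrative sketch (not part of the original plugin): the per-job
 * lifecycle implied by the two hooks above, as seen from the context of
 * the real-time task itself.  edf_job_start() either preempts the running
 * task or marks the caller TASK_INTERRUPTIBLE, in which case the caller
 * blocks until a predecessor's edf_job_complete() wakes it.  run_one_job()
 * and do_job_body() are hypothetical names; the hooks are defined above.
 */
static void do_job_body(resch_task_t *rt);      /* application workload. */

static void run_one_job(resch_task_t *rt)
{
        edf_job_start(rt);                      /* enqueue by deadline. */
        if (rt->task->state == TASK_INTERRUPTIBLE)
                schedule();                     /* wait to be dispatched. */
        do_job_body(rt);
        edf_job_complete(rt);                   /* dequeue, wake successor. */
}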
/**
 * trace hook: record the release of a new job of @rt, distinguishing
 * whether the release preempts the currently running task.
 */
void task_release_event(resch_task_t *rt)
{
        unsigned long flags;
        unsigned long timestamp;
        resch_task_t *curr;
        resch_task_t temp;
        unsigned long tempa;

        tempa = linux_timestamp_microsec(0);
        if (rt == NULL) {
                printk(KERN_WARNING "(task_release) rt == NULL???\n");
                return;
        }
        timestamp = jiffies; /* alternatively: linux_timestamp_microsec(0); */

        active_queue_lock(rt->cpu_id, &flags);
        curr = active_highest_prio_task(rt->cpu_id);
        active_queue_unlock(rt->cpu_id, &flags);

        if (curr != NULL) {
                if (rt->prio > curr->prio) {
                        /* the released task preempts: a context switch. */
                        store_event(curr, rt, NULL, NULL, 1, timestamp,
                                    FALSE, FALSE);
                } else {
                        /* no preemption: register the release only. */
                        store_event(rt, rt, NULL, NULL, 2, timestamp,
                                    FALSE, FALSE);
                }
        } else {
                /* the ready queue was empty (besides the new task): the
                   idle task (pid 0) was running. */
                temp.pid = 0;
                store_event(&temp, rt, NULL, NULL, 1, timestamp,
                            FALSE, FALSE);
        }
        /* account the instrumentation overhead of this hook itself. */
        mikaoverhead += linux_timestamp_microsec(0) - tempa;
}
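/*
 * Illustrative sketch (not part of the original module): a minimal ring
 * buffer whose signature is compatible with the store_event() calls in
 * task_complete_event() and task_release_event() above (previous task,
 * next task, two unused pointers, an event type where 0 = completion,
 * 1 = context switch, 2 = release without a switch, a timestamp, and two
 * flags).  The real store_event() is defined elsewhere; struct
 * trace_event, trace_buf, trace_head, and TRACE_BUF_SIZE are hypothetical.
 */
#include <linux/atomic.h>

#define TRACE_BUF_SIZE 4096

struct trace_event {
        int prev_pid;            /* task leaving the CPU (0 = idle). */
        int next_pid;            /* task taking the CPU (0 = idle). */
        int type;                /* 0/1/2, as passed by the callers above. */
        unsigned long timestamp; /* jiffies at the call sites above. */
};

static struct trace_event trace_buf[TRACE_BUF_SIZE];
static atomic_t trace_head = ATOMIC_INIT(0);

static void store_event(resch_task_t *prev, resch_task_t *next,
                        void *arg1, void *arg2, int type,
                        unsigned long timestamp, int flag1, int flag2)
{
        unsigned int idx =
                (unsigned int)atomic_inc_return(&trace_head) % TRACE_BUF_SIZE;

        trace_buf[idx].prev_pid = prev ? prev->pid : 0;
        trace_buf[idx].next_pid = next ? next->pid : 0;
        trace_buf[idx].type = type;
        trace_buf[idx].timestamp = timestamp;
        /* arg1/arg2 and the two flags are unused at the call sites above. */
}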