Example #1
/**
 * replenish the budget of the given task by @cputime.
 * if the task has been demoted to the background level, promote it
 * back to the EDF level; if it then becomes the highest-priority
 * task on its CPU, put the next task to sleep and wake it up.
 */
static void edf_reserve_replenish(resch_task_t *rt, unsigned long cputime)
{
	int cpu = rt->cpu_id;
	unsigned long flags;

	rt->budget += cputime;
	active_queue_lock(cpu, &flags);
	if (rt->prio == RESCH_PRIO_BACKGROUND && task_is_active(rt)) {
		/* promote the task back to the EDF priority level. */
		edf_dequeue_task(rt, RESCH_PRIO_BACKGROUND, cpu);
		edf_enqueue_task(rt, RESCH_PRIO_EDF, cpu);
		if (rt == active_highest_prio_task(cpu) &&
		    rt->task->state == TASK_INTERRUPTIBLE) {
			/* put the task that was running to sleep ... */
			resch_task_t *p = active_next_prio_task(rt);
			if (p) {
				p->task->state = TASK_INTERRUPTIBLE;
			}
			/* ... and wake the promoted task, dropping the
			   queue lock before the wakeup. */
			active_queue_unlock(cpu, &flags);
			wake_up_process(rt->task);
		}
		else {
			active_queue_unlock(cpu, &flags);
		}
	}
	else {
		active_queue_unlock(cpu, &flags);
	}
}
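The detail worth copying from this example is the locking discipline: the per-CPU queue lock is released before wake_up_process() is called, so the wakeup never runs under the queue lock. A minimal sketch of that pattern, assuming active_queue_lock()/active_queue_unlock() wrap an interrupt-safe spinlock (the real primitives are internal to RESCH); promote_then_wake() is a hypothetical helper name:

/* Sketch only: the unlock-then-wake discipline from Example #1.
 * promote_then_wake() is hypothetical; the lock protects the
 * per-CPU active queue, and wake_up_process() takes scheduler
 * locks of its own, so the queue lock is dropped first. */
static void promote_then_wake(resch_task_t *rt, int cpu)
{
	unsigned long flags;

	active_queue_lock(cpu, &flags);
	edf_dequeue_task(rt, RESCH_PRIO_BACKGROUND, cpu);
	edf_enqueue_task(rt, RESCH_PRIO_EDF, cpu);
	active_queue_unlock(cpu, &flags);	/* release the lock first, */
	wake_up_process(rt->task);		/* then wake the task.     */
}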
Example #2
/**
 * called when the given task completes a job.
 * records a completion event and accounts the overhead of the
 * event handling itself in @mikaoverhead.
 */
void task_complete_event(resch_task_t *rt)
{
	unsigned long flags;
	unsigned long timestamp;
	resch_task_t *next;
	resch_task_t temp;
	unsigned long overhead_start;

	/* timestamp the entry to measure this handler's own overhead. */
	overhead_start = linux_timestamp_microsec(0);

	if (rt == NULL) {
		printk(KERN_WARNING "(task_complete) rt == NULL\n");
		return;
	}

	timestamp = jiffies;

	/* look up the next-priority task under the queue lock. */
	active_queue_lock(rt->cpu_id, &flags);
	next = active_next_prio_task(rt);
	active_queue_unlock(rt->cpu_id, &flags);

	if (next != NULL) {
		store_event(rt, next, NULL, NULL, 0, timestamp, FALSE, FALSE);
	}
	else {
		/* no next task: record the event against a dummy task
		   with pid 0. */
		temp.pid = 0;
		store_event(rt, &temp, NULL, NULL, 0, timestamp, FALSE, FALSE);
	}

	/* accumulate the handler's overhead in microseconds. */
	mikaoverhead += linux_timestamp_microsec(0) - overhead_start;
}
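The overhead_start/mikaoverhead pair is a self-instrumentation idiom: the handler timestamps its own entry and exit and accumulates the difference. A minimal sketch of the same idiom, assuming linux_timestamp_microsec(0) returns the current time in microseconds (its exact semantics are RESCH-internal):

/* Sketch only: the self-accounting idiom from Example #2. */
unsigned long t0 = linux_timestamp_microsec(0);
/* ... the instrumented work goes here ... */
mikaoverhead += linux_timestamp_microsec(0) - t0;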
Example #3
/**
 * called when the given task starts a new job.
 */
static void edf_job_start(resch_task_t *rt)
{
	unsigned long flags;
	int cpu = rt->cpu_id;
	resch_task_t *hp;

	active_queue_lock(cpu, &flags);

	edf_enqueue_task(rt, RESCH_PRIO_EDF, cpu);
	hp = active_highest_prio_task(cpu);
	if (rt == hp) {
		/* the new job has the earliest deadline: put the task
		   it preempts to sleep and request a reschedule. */
		resch_task_t *curr = active_next_prio_task(rt);
		if (curr) {
			curr->task->state = TASK_INTERRUPTIBLE;
			set_tsk_need_resched(curr->task);
		}
	}
	else {
		/* a task with an earlier deadline is running: sleep
		   until this job is selected. */
		rt->task->state = TASK_INTERRUPTIBLE;
	}

	active_queue_unlock(cpu, &flags);
}
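Note how preemption is delegated to the Linux scheduler rather than performed directly: RESCH marks the losing task TASK_INTERRUPTIBLE and sets its need-resched flag, and the next scheduling point takes it off the CPU because it is no longer runnable. A minimal sketch of that hand-off (resch_preempt() is a hypothetical helper; set_tsk_need_resched() is the stock kernel API):

/* Sketch only: how Examples #1, #3 and #4 put a task to sleep.
 * Changing the task state alone does nothing while the task runs;
 * setting the need-resched flag forces a pass through schedule(),
 * which then dequeues the task since it is not TASK_RUNNING. */
static inline void resch_preempt(resch_task_t *victim)
{
	victim->task->state = TASK_INTERRUPTIBLE;
	set_tsk_need_resched(victim->task);
}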
Example #4
/**
 * migrate @rt to the given CPU.
 */
static void edf_migrate_task(resch_task_t *rt, int cpu_dst)
{
	unsigned long flags;
	int cpu_src = rt->cpu_id;

	if (cpu_src != cpu_dst) {
		active_queue_double_lock(cpu_src, cpu_dst, &flags);
		if (task_is_active(rt)) {
			resch_task_t *next_src = NULL, *curr_dst = NULL;
			/* save the next task on the source CPU. */
			if (rt == active_highest_prio_task(cpu_src)) {
				next_src = active_next_prio_task(rt);
			}
#ifdef NO_LINUX_LOAD_BALANCE
			/* trace preemption. */
			preempt_out(rt);
#endif
			/* move off the source CPU. */
			edf_dequeue_task(rt, rt->prio, cpu_src);

			/* save the current task on the destination CPU. */
			curr_dst = active_prio_task(cpu_dst, RESCH_PRIO_EDF_RUN);

			/* move onto the destination CPU. */
			rt->cpu_id = cpu_dst;
			edf_enqueue_task(rt, rt->prio, cpu_dst);

#ifdef NO_LINUX_LOAD_BALANCE
			/* trace preemption. */
			preempt_in(rt);
#endif
			active_queue_double_unlock(cpu_src, cpu_dst, &flags);

			/* wake up the task that followed @rt on the source
			   CPU: with @rt gone it is the highest-priority task
			   there, so it will never preempt the current task. */
			if (next_src) {
				wake_up_process(next_src->task);
			}

			__migrate_task(rt, cpu_dst);

			/* restart accounting on the new CPU. */
			if (task_is_accounting(rt)) {
				edf_stop_account(rt);
				edf_start_account(rt);
			}

			if (curr_dst) {
				/* EDF preemption test: the task with the
				   earlier absolute deadline runs; the other
				   is put to sleep. */
				if (rt->deadline_time < curr_dst->deadline_time) {
					curr_dst->task->state = TASK_INTERRUPTIBLE;
					set_tsk_need_resched(curr_dst->task);
				}
				else {
					rt->task->state = TASK_INTERRUPTIBLE;
					set_tsk_need_resched(rt->task);
				}
			}
		}
		else {
			rt->cpu_id = cpu_dst;
			active_queue_double_unlock(cpu_src, cpu_dst, &flags);
			__migrate_task(rt, cpu_dst);
		}
	}
	else {
		__migrate_task(rt, cpu_dst);
	}
}
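The deadline comparison at the end of Example #4 is the core of EDF: of two ready tasks, the one with the earlier absolute deadline runs. A hypothetical helper that names the rule, assuming deadline_time holds the task's absolute deadline:

/* Sketch only: the EDF preemption test used in Example #4.
 * edf_preempts() is hypothetical, not part of RESCH. */
static inline int edf_preempts(resch_task_t *a, resch_task_t *b)
{
	/* @a preempts @b iff @a's absolute deadline is earlier. */
	return a->deadline_time < b->deadline_time;
}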