struct rt_info* sched_hvdf(struct list_head *head, int flags)
{
	/*
	 * Highest Value Density First: scan the local task list, return a
	 * failed (aborted / deadline-blown) task immediately, otherwise pick
	 * the task with the lowest inverse value density — i.e. the highest
	 * value density.
	 */
	struct rt_info *it;
	struct rt_info *winner = local_task(head->next);

	list_for_each_entry(it, head, task_list[LOCAL_LIST]) {
		/* Failed tasks are handed back first so they can be dealt with. */
		if(check_task_failure(it, flags))
			return it;

		/* Refresh this task's local inverse value density. */
		livd(it, false, flags);

		if(it->local_ivd < winner->local_ivd)
			winner = it;
	}

	/* Honor priority inheritance when requested by the caller. */
	if(flags & SCHED_FLAG_PI)
		winner = get_pi_task(winner, head, flags);

	return winner;
}
/* Example 2 */
/*
 * DASA-ND: build a tentative schedule ordered by deadline, admitting tasks
 * in decreasing value-density order and rolling back any insertion that
 * makes the schedule infeasible. Returns a failed task immediately if one
 * is found; otherwise returns the head of the constructed schedule.
 */
struct rt_info* sched_dasa_nd(struct list_head *head, int flags)
{
	/* 'best' starts as the first local task and tracks the lowest
	 * inverse value density (= highest value density) seen so far. */
	struct rt_info *task, *best = local_task(head->next);
	struct rt_info *new_schedule=NULL, *prev_schedule = NULL;
	list_for_each_entry(task, head, task_list[LOCAL_LIST]){
		/* Failed (aborted / deadline-blown) tasks preempt scheduling. */
		if(check_task_failure(task, flags))
			return task;

		/* Reset this task's scheduling list links before reuse below. */
		initialize_lists(task);
		livd(task, false, flags); //updating value density values for all tasks in queue
		if(task->local_ivd < best->local_ivd)
			best = task;
		}
	/* NOTE(review): after list_for_each_entry terminates, 'task' is the
	 * container_of the list head, not a valid rt_info entry. Passing it to
	 * copy_list() looks unintended — presumably this should start from
	 * 'best' (or any real entry). Confirm against copy_list()'s contract. */
	copy_list(task, LOCAL_LIST, SCHED_LIST1);
	/* Sort the copied list by local value density (LVD). */
	quicksort(best, SCHED_LIST1, SORT_KEY_LVD, 0);
	//best points to task with highest value density
	new_schedule = task = best;
	while(1) {
		/* Remember the schedule head so a bad insertion can be undone. */
		prev_schedule = new_schedule;
		/* Tentatively insert in deadline order; insert_on_list() returns
		 * nonzero when 'task' becomes the new head of SCHED_LIST2. */
		if (insert_on_list(task, new_schedule, SCHED_LIST2, SORT_KEY_DEADLINE, 0))
			new_schedule = task;

		/* Roll back the insertion if it made the schedule infeasible. */
		if (!list_is_feasible(new_schedule, SCHED_LIST2)){
			list_remove(task, SCHED_LIST2);
			new_schedule = prev_schedule;
			}
		//moving to next task
		task = task_list_entry(task->task_list[SCHED_LIST1].next, SCHED_LIST1);
		if(task == best) //list iteration completed
			break;
		}

	/* Fall back to the highest-VD task if no feasible schedule was built. */
	return (new_schedule == NULL) ? best : new_schedule;
}
/* Example 3 */
/*
 * Global MUA: prune failed tasks from the global list, thread the survivors
 * onto a deadline-ordered LIST_TDEAD chain, distribute them across the
 * domain's CPUs, then build one feasible per-CPU schedule each.
 * Returns the head task of CPU 0's schedule, or NULL if no runnable task
 * remains after pruning.
 */
struct rt_info * sched_gmua(struct list_head *head, struct global_sched_domain *g)
{
	struct rt_info *it, *n, *best_tdead = NULL;
	struct rt_info *best_dead[NR_CPUS];
	int cpu_id, cpus = count_global_cpus(g);

	/* Clear per-CPU results so the NULL return on 'goto out' is defined. */
	for(cpu_id = 0; cpu_id < NR_CPUS; cpu_id++)
		best_dead[cpu_id] = NULL;

	/* _safe iteration: failed tasks are removed from the list mid-loop. */
	list_for_each_entry_safe(it, n, head, task_list[GLOBAL_LIST])  {
		if(check_task_failure(it, SCHED_FLAG_NONE)) {
			_remove_task_global(it, g);
			continue;
		}

		/* Refresh value density and reset scheduling-list links. */
		livd(it, false, SCHED_FLAG_NONE);
		initialize_lists(it);

		/* Keep 'best_tdead' pointing at the earliest-deadline head of
		 * the LIST_TDEAD chain; insert_on_list() reports a new head. */
		if(!best_tdead)
			best_tdead = it;
		else if(insert_on_list(it, best_tdead, LIST_TDEAD, SORT_KEY_DEADLINE, 0))
			best_tdead = it;
	}

	/* Nothing schedulable left — return NULL via best_dead[0]. */
	if(!best_tdead)
		goto out;

	initialize_cpu_state();

	/* Assign the zero in degree tasks to the processors */
	it = best_tdead;
	do {
		/* Pick the least-loaded CPU and charge it with this task. */
		cpu_id = find_processor(cpus);
		insert_cpu_task(it, cpu_id);
		update_cpu_exec_times(cpu_id, it, true);
		it = task_list_entry(it->task_list[LIST_TDEAD].next, LIST_TDEAD);
	} while(it != best_tdead);

	/* Build one feasible schedule per active CPU. */
	for(cpu_id = 0;  cpu_id < cpus; cpu_id++) {
		best_dead[cpu_id] = create_feasible_schedule(cpu_id);
	}

	build_list_array(best_dead, cpus);

out:
	return best_dead[0];
}
/* Example 4 */
struct rt_info* sched_hvdf(struct list_head *head, int flags)
{
	/*
	 * HVDF scheduler: hand back a failed task as soon as one is seen;
	 * otherwise select the entry whose inverse value density is the
	 * smallest (i.e. the highest value density). Priority inheritance
	 * is applied to the selection when SCHED_FLAG_PI is set.
	 */
	struct rt_info *curr, *pick;

	pick = local_task(head->next);

	list_for_each_entry(curr, head, task_list[LOCAL_LIST]) {
		if(check_task_failure(curr, flags))
			return curr;

		livd(curr, false, flags);	/* recompute curr->local_ivd */

		if(curr->local_ivd < pick->local_ivd)
			pick = curr;
	}

	return (flags & SCHED_FLAG_PI) ? get_pi_task(pick, head, flags) : pick;
}