struct rt_info* sched_dasa_nd(struct list_head *head, int flags) { struct rt_info *task, *best = local_task(head->next); struct rt_info *new_schedule=NULL, *prev_schedule = NULL; list_for_each_entry(task, head, task_list[LOCAL_LIST]){ if(check_task_failure(task, flags)) return task; initialize_lists(task); livd(task, false, flags); //updating value density values for all tasks in queue if(task->local_ivd < best->local_ivd) best = task; } copy_list(task, LOCAL_LIST, SCHED_LIST1); quicksort(best, SCHED_LIST1, SORT_KEY_LVD, 0); //best points to task with highest value density new_schedule = task = best; while(1) { prev_schedule = new_schedule; if (insert_on_list(task, new_schedule, SCHED_LIST2, SORT_KEY_DEADLINE, 0)) new_schedule = task; if (!list_is_feasible(new_schedule, SCHED_LIST2)){ list_remove(task, SCHED_LIST2); new_schedule = prev_schedule; } //moving to next task task = task_list_entry(task->task_list[SCHED_LIST1].next, SCHED_LIST1); if(task == best) //list iteration completed break; } return (new_schedule == NULL) ? best : new_schedule; }
/* Quicksort a doubly linked list */
/*
 * _quicksort() - recursively sort the tasks linked between @start and
 * @end on list index @i, ordering by @key (direction given by @before).
 *
 * @start and @end act as exclusive boundary sentinels: the pivot is the
 * first node after @start, and scanning stops when @end is reached.
 * Nodes that compare()-before the pivot are spliced to the front (after
 * @start); the rest stay behind the pivot.  Each half is then sorted
 * recursively only if it holds more than one node.
 *
 * NOTE(review): assumes the list segment is non-empty (at least a pivot
 * exists between @start and @end) -- callers appear to guarantee this.
 */
static void _quicksort(struct rt_info *start, struct rt_info *end, int i, int key, int before)
{
	/* pivot = first node after the @start sentinel */
	struct rt_info *pivot = task_list_entry(start->task_list[i].next, i);
	struct rt_info *it = task_list_entry(pivot->task_list[i].next, i);
	struct rt_info *next;
	int low = 0, high = 0;	/* node counts on each side of the pivot */

	while(it != end) {
		/* save the successor: list_move_after() relinks 'it' */
		next = task_list_entry(it->task_list[i].next, i);
		if(compare(it, pivot, key, before)) {
			/* belongs before the pivot: splice to the front */
			list_move_after(start, it, i);
			low++;
		} else
			high++;
		it = next;
	}

	/* recurse only into partitions with 2+ nodes */
	if(high > 1)
		_quicksort(pivot, end, i, key, before);
	if(low > 1)
		_quicksort(start, pivot, i, key, before);
}
/*
 * sched_gmua() - global multiprocessor utility-accrual scheduling pass.
 *
 * Scans the global task list, pruning failed tasks and refreshing each
 * survivor's value density, while building a tentative-deadline-ordered
 * list (LIST_TDEAD) headed by @best_tdead.  Tasks are then distributed
 * across the @cpus processors in deadline order, each CPU's workload is
 * trimmed to a feasible schedule, and the per-CPU results are combined
 * via build_list_array().
 *
 * @head: global run queue, linked through task_list[GLOBAL_LIST]
 * @g:    global scheduling domain (supplies the CPU count)
 *
 * Returns the schedule head for CPU 0, or NULL if no runnable task
 * survived the scan (best_dead[] is pre-cleared for that path).
 */
struct rt_info * sched_gmua(struct list_head *head, struct global_sched_domain *g)
{
	struct rt_info *it, *n, *best_tdead = NULL;
	struct rt_info *best_dead[NR_CPUS];
	int cpu_id, cpus = count_global_cpus(g);

	/* pre-clear so the early-out path returns NULL cleanly */
	for(cpu_id = 0; cpu_id < NR_CPUS; cpu_id++)
		best_dead[cpu_id] = NULL;

	/* _safe variant: failed tasks are removed while iterating */
	list_for_each_entry_safe(it, n, head, task_list[GLOBAL_LIST]) {
		if(check_task_failure(it, SCHED_FLAG_NONE)) {
			_remove_task_global(it, g);
			continue;
		}

		livd(it, false, SCHED_FLAG_NONE);
		initialize_lists(it);

		/* maintain LIST_TDEAD sorted by deadline; track its head */
		if(!best_tdead)
			best_tdead = it;
		else if(insert_on_list(it, best_tdead, LIST_TDEAD, SORT_KEY_DEADLINE, 0))
			best_tdead = it;
	}

	if(!best_tdead)
		goto out;

	initialize_cpu_state();

	/* Assign the zero in degree tasks to the processors */
	it = best_tdead;
	do {
		/* presumably picks the least-loaded CPU -- see find_processor() */
		cpu_id = find_processor(cpus);
		insert_cpu_task(it, cpu_id);
		update_cpu_exec_times(cpu_id, it, true);
		it = task_list_entry(it->task_list[LIST_TDEAD].next, LIST_TDEAD);
	} while(it != best_tdead);	/* circular list: stop on wrap-around */

	/* trim each CPU's assignment down to a feasible schedule */
	for(cpu_id = 0; cpu_id < cpus; cpu_id++) {
		best_dead[cpu_id] = create_feasible_schedule(cpu_id);
	}

	build_list_array(best_dead, cpus);

out:
	return best_dead[0];
}
/* Check the task list for the cpu and check for schedule feasibility */
/*
 * create_feasible_schedule() - trim a CPU's task list until it is
 * schedulable, evicting lowest-value tasks first.
 *
 * The CPU's tasks are linked on LIST_CPUTSK (deadline/assignment order,
 * headed by @best_dead) and are additionally sorted onto LIST_CPUIVD by
 * value density (headed by @best_ivd; @last_ivd is its tail, i.e. the
 * least valuable task).  The outer loop accumulates execution time along
 * LIST_CPUTSK; if any task's deadline falls before the accumulated
 * completion time, the current least-valuable task is removed and the
 * feasibility walk restarts.  This repeats until the walk completes
 * without a miss or only the best-value task remains.
 *
 * Returns the head of the feasible schedule (also cached in
 * cur_cpu->best_dead), or NULL if the CPU has no tasks.
 *
 * NOTE(review): assumes get_cpu_state() never returns NULL -- the 'out'
 * path dereferences cur_cpu unconditionally; confirm against callers.
 */
struct rt_info* create_feasible_schedule(int cpu_id)
{
	struct rt_info *it, *best_dead, *head, *best_ivd, *last_ivd;
	struct cpu_info *cur_cpu = NULL;
	struct timespec exec_ts;
	int removed;

	cur_cpu = get_cpu_state(cpu_id);
	head = cur_cpu->head;
	best_dead = head;

	/* no tasks on this CPU: publish NULL and return */
	if(!head)
		goto out;

	/* build LIST_CPUIVD: tasks sorted by value density */
	best_ivd = head;
	it = task_list_entry(head->task_list[LIST_CPUTSK].next, LIST_CPUTSK);
	do {
		if(insert_on_list(it, best_ivd, LIST_CPUIVD, SORT_KEY_GVD, 0))
			best_ivd = it;
		it = task_list_entry(it->task_list[LIST_CPUTSK].next, LIST_CPUTSK);
	} while(it != head);

	/* tail of the value-density list = least valuable task */
	last_ivd = task_list_entry(best_ivd->task_list[LIST_CPUIVD].prev, LIST_CPUIVD);

	do {
		removed = 0;
		it = best_dead;
		exec_ts = current_kernel_time();

		/* walk the schedule accumulating completion times */
		do {
			add_ts(&exec_ts, &(it->left), &exec_ts);
			if(earlier_deadline(&(it->deadline), &exec_ts)) {
				/* deadline miss: evict the least-valuable task */
				list_remove(last_ivd, LIST_CPUTSK);
				/* keep the schedule head valid if it was evicted */
				if(last_ivd == best_dead) {
					best_dead = task_list_entry(last_ivd->task_list[LIST_CPUTSK].next, LIST_CPUTSK);
				}
				/* next eviction candidate: previous on the IVD list */
				last_ivd = task_list_entry(last_ivd->task_list[LIST_CPUIVD].prev, LIST_CPUIVD);
				removed = 1;
			}
			it = task_list_entry(it->task_list[LIST_CPUTSK].next, LIST_CPUTSK);
		} while(removed == 0 && it != best_dead);	/* restart walk after an eviction */
	} while(last_ivd != best_ivd && removed == 1);	/* stop: feasible, or only best task left */

out:
	cur_cpu->best_dead = best_dead;
	return best_dead;
}