/*
 * Advance the scheduler's tick count and, for deferrable servers,
 * replenish the execution budget of any server whose period has expired.
 * Per-thread cycle accounting itself is done in time_elapsed().
 */
void timer_tick(int num_ticks)
{ /* see time_elapsed for time mgmt */
#ifdef DEFERRABLE
	{
		struct sched_thd *t;

		assert(num_ticks > 0);
		ticks += num_ticks;
		/* Walk every deferrable server on the global servers list. */
		for (t = FIRST_LIST(&servers, sched_next, sched_prev) ;
		     t != &servers ;
		     t = FIRST_LIST(t, sched_next, sched_prev)) {
			struct sched_accounting *sa = sched_get_accounting(t);
			unsigned long T_exp = sa->T_exp, T = sa->T;

			assert(T); /* servers on this list must have a period */
			if (T_exp <= ticks) {
				/* Align the next expiration to the server's
				 * period boundary relative to the current tick
				 * (note: ticks % T == 0 yields off == T, i.e.
				 * a full period from now). */
				unsigned long off = T - (ticks % T);
				//printc("(%ld+%ld/%ld @ %ld)\n", sa->C_used, (unsigned long)sa->pol_cycles, T, T_exp);
				sa->T_exp = ticks + off;
				/* Replenish: the budget consumed this period is reset. */
				sa->C_used = 0;
				// sa->pol_cycles = 0;
				if (sched_thd_suspended(t)) {
					/* Budget was exhausted last period:
					 * clear suspension and, if otherwise
					 * runnable, put it back on the
					 * runqueue at its priority. */
					t->flags &= ~THD_SUSPENDED;
					if (sched_thd_ready(t)) {
						fp_add_thd(t, sched_get_metric(t)->priority);
					}
				}
			}
		}
	}
#endif
}
/*
 * Charge processing_time cycles to thread t's accounting.
 *
 * Converts accumulated cycles into ticks, round-robins the thread to the
 * end of its priority's runqueue when it has consumed at least a full
 * quantum, and (for deferrable servers, sa->T != 0) tracks per-period
 * budget consumption, suspending the thread once its budget C is spent.
 * The budget is replenished and THD_SUSPENDED cleared in timer_tick().
 */
void time_elapsed(struct sched_thd *t, u32_t processing_time)
{
	struct sched_accounting *sa;

	assert(t);
	sa = sched_get_accounting(t);
	sa->pol_cycles += processing_time;
	sa->cycles     += processing_time;
	if (sa->cycles >= QUANTUM) {
		/* Fold whole quanta into the tick count.  The original loop
		 * used `while (cycles > QUANTUM)`, which left an exact
		 * multiple of QUANTUM unconverted until the next call; the
		 * divide/mod form counts every completed quantum now. */
		sa->ticks  += sa->cycles / QUANTUM;
		sa->cycles %= QUANTUM;
		/* round robin: demote to the tail of its priority's queue */
		if (sched_thd_ready(t) && !sched_thd_suspended(t)) {
			assert(!sched_thd_inactive_evt(t));
			assert(!sched_thd_blocked(t));
			fp_move_end_runnable(t);
		}
	}
	if (sa->pol_cycles > QUANTUM) {
		sa->pol_cycles -= QUANTUM;
		if (sa->T) {
			/* Deferrable server: one more quantum of budget used. */
			sa->C_used++;
			if (sa->C_used >= sa->C) {
				/* Budget exhausted for this period: park the
				 * thread until timer_tick() replenishes it. */
				sched_set_thd_urgency(t, NUM_PRIOS);
				if (sched_thd_ready(t)) fp_rem_thd(t);
				t->flags |= THD_SUSPENDED;
			}
		}
	}
}
/*
 * Insert a runnable, non-suspended thread into the scheduler at the given
 * priority: record the priority in its metric and urgency, then append it
 * to the tail of that priority's runqueue.
 */
static inline void
fp_add_thd(struct sched_thd *t, unsigned short int prio)
{
	assert(prio < NUM_PRIOS);
	assert(sched_thd_ready(t));
	assert(!sched_thd_suspended(t));

	sched_get_metric(t)->priority = prio;
	sched_set_thd_urgency(t, prio);
	fp_move_end_runnable(t);
}
/*
 * Move a thread to the tail of the runqueue for its current priority,
 * and mark that priority as populated in the runnable bitmask.
 */
static inline void
fp_move_end_runnable(struct sched_thd *t)
{
	unsigned short int prio = sched_get_metric(t)->priority;
	struct sched_thd *queue;

	assert(sched_thd_ready(t));
	assert(!sched_thd_suspended(t));

	queue = &priorities[prio].runnable;
	/* Unlink from wherever it currently sits, then append at the tail. */
	REM_LIST(t, prio_next, prio_prev);
	ADD_LIST(LAST_LIST(queue, prio_next, prio_prev), t, prio_next, prio_prev);
	mask_set(prio);
}
/*
 * Make thread t runnable at priority prio.  Updates the thread's recorded
 * priority and urgency before queueing it at the end of the corresponding
 * runqueue.
 */
static inline void
fp_add_thd(struct sched_thd *t, unsigned short int prio)
{
	assert(prio < NUM_PRIOS);
	assert(sched_thd_ready(t));
	assert(!sched_thd_suspended(t));

	sched_get_metric(t)->priority = prio;
	sched_set_thd_urgency(t, prio);
	fp_move_end_runnable(t);
}
void runqueue_print(void) { struct sched_thd *t; int i = 0; printc("Core %ld: Running threads (thd, prio, ticks):\n", cos_cpuid()); for (i = 0 ; i < NUM_PRIOS ; i++) { for (t = FIRST_LIST(&PERCPU_GET(fprr_state)->priorities[i].runnable, prio_next, prio_prev) ; t != &PERCPU_GET(fprr_state)->priorities[i].runnable ; t = FIRST_LIST(t, prio_next, prio_prev)) { struct sched_accounting *sa = sched_get_accounting(t); unsigned long diff = sa->ticks - sa->prev_ticks; //if (!(diff || sa->cycles)) continue; printc("\t%d, %d, %ld+%ld/%d\n", t->id, i, diff, (unsigned long)sa->cycles, QUANTUM); sa->prev_ticks = sa->ticks; sa->cycles = 0; } } #ifdef DEFERRABLE printc("Suspended threads (thd, prio, ticks):\n"); for (t = FIRST_LIST(&PERCPU_GET(fprr_state)->servers, sched_next, sched_prev) ; t != &PERCPU_GET(fprr_state)->servers ; t = FIRST_LIST(t, sched_next, sched_prev)) { struct sched_accounting *sa = sched_get_accounting(t); unsigned long diff = sa->ticks - sa->prev_ticks; if (!sched_thd_suspended(t)) continue; if (diff || sa->cycles) { printc("\t%d, %d, %ld+%ld/%d\n", t->id, sched_get_metric(t)->priority, diff, (unsigned long)sa->cycles, QUANTUM); sa->prev_ticks = sa->ticks; sa->cycles = 0; } } #endif printc("done printing runqueue.\n"); }
/*
 * Wake a thread: put it back on the runqueue at its recorded priority.
 * A suspended thread (budget exhausted) is left off the runqueue; it will
 * be re-added when its budget is replenished.
 */
void thread_wakeup(struct sched_thd *t)
{
	assert(t);
	assert(!sched_thd_member(t));
	if (sched_thd_suspended(t)) return;
	fp_add_thd(t, sched_get_metric(t)->priority);
}