/* Return the index-th thread (0-based) that has an invocation frame in
 * spd 'target', scanning the runqueues first and then (with DEFERRABLE)
 * the server list.  Returns 0 (NULL) when fewer than index+1 matching
 * threads exist.  NOTE(review): the 'spdid' (caller) argument is unused
 * here -- presumably kept for the component interface; confirm. */
struct sched_thd *
sched_get_thread_in_spd_from_runqueue(spdid_t spdid, spdid_t target, int index)
{
	struct sched_thd *t;
	int i, cnt = 0;

	/* copied from runqueue_print, a better way would use a visitor */
	for (i = 0 ; i < NUM_PRIOS ; i++) {
		for (t = FIRST_LIST(&PERCPU_GET(fprr_state)->priorities[i].runnable, prio_next, prio_prev) ;
		     t != &PERCPU_GET(fprr_state)->priorities[i].runnable ;
		     t = FIRST_LIST(t, prio_next, prio_prev)) {
			/* TODO: do we care to differentiate if the thread is
			 * currently in the spd, versus previously? */
			/* >= 0 means t has an invocation frame in 'target' */
			if (cos_thd_cntl(COS_THD_INV_SPD, t->id, target, 0) >= 0)
				if (cnt++ == index) return t;
		}
	}
#ifdef DEFERRABLE
	/* suspended servers are not on the runqueues; scan them too */
	for (t = FIRST_LIST(&PERCPU_GET(fprr_state)->servers, sched_next, sched_prev) ;
	     t != &PERCPU_GET(fprr_state)->servers ;
	     t = FIRST_LIST(t, sched_next, sched_prev)) {
		if (cos_thd_cntl(COS_THD_INV_SPD, t->id, target, 0) >= 0)
			if (cnt++ == index) return t;
	}
#endif
	return 0;
}
/* Parse the textual scheduling-parameter string p for thread t and
 * (re)insert t into the runqueue at the resulting priority.  Always
 * returns 0.  Recognized options (first character of p):
 *   'r<n>' -- priority relative to the current thread; also copies the
 *             parent's accounting info into t
 *   'a<n>' -- absolute priority n
 *   'i'    -- idle thread (lowest priority)
 *   't'    -- timer thread (highest priority)
 *   'd...' -- deferrable-server parameters (DEFERRABLE builds only)
 * Unknown options fall back to PRIO_LOW. */
static int
fp_thread_params(struct sched_thd *t, char *p)
{
	int prio, tmp;
	char curr = p[0];
	struct sched_thd *c;

	assert(t);

	switch (curr) {
	case 'r':
		/* priority relative to current thread */
		c = sched_get_current();
		assert(c);
		tmp = atoi(&p[1]);
		prio = sched_get_metric(c)->priority + tmp;
		/* inherit the parent's accounting (budget/period/etc.) */
		memcpy(sched_get_accounting(t), sched_get_accounting(c),
		       sizeof(struct sched_accounting));
#ifdef DEFERRABLE
		/* inherited a period T => t becomes a deferrable server */
		if (sched_get_accounting(t)->T)
			ADD_LIST(&PERCPU_GET(fprr_state)->servers, t, sched_next, sched_prev);
#endif
		/* clamp at the low end only; NOTE(review): a negative offset
		 * can still drive prio below the valid range -- presumably
		 * callers only pass sane offsets; confirm */
		if (prio > PRIO_LOWEST) prio = PRIO_LOWEST;
		break;
	case 'a':
		/* absolute priority */
		prio = atoi(&p[1]);
		break;
	case 'i':
		/* idle thread */
		prio = PRIO_LOWEST;
		break;
	case 't':
		/* timer thread */
		prio = PRIO_HIGHEST;
		break;
#ifdef DEFERRABLE
	case 'd':
	{
		/* deferrable server: ds_parse_params fills in C/T and
		 * returns the priority to use */
		prio = ds_parse_params(t, p);
		if (EMPTY_LIST(t, sched_next, sched_prev) && sched_get_accounting(t)->T) {
			ADD_LIST(&PERCPU_GET(fprr_state)->servers, t, sched_next, sched_prev);
		}
		fp_move_end_runnable(t);
		break;
	}
#endif
	default:
		printc("unknown priority option @ %s, setting to low\n", p);
		prio = PRIO_LOW;
	}

	/* re-queue t at its (possibly new) priority */
	if (sched_thd_ready(t)) fp_rem_thd(t);
	fp_add_thd(t, prio);

	return 0;
}
/* Set up this core's scheduler state: empty every priority runqueue,
 * clear the active-priority bitmap and, in DEFERRABLE builds, reset
 * the server list and the per-core tick counter. */
void
sched_initialization(void)
{
	int prio = 0;

	PERCPU_GET(fprr_state)->active = 0;
	while (prio < NUM_PRIOS) {
		sched_init_thd(&PERCPU_GET(fprr_state)->priorities[prio].runnable, 0, THD_FREE);
		prio++;
	}
#ifdef DEFERRABLE
	PERCPU_GET(fprr_state)->ticks = 0;
	sched_init_thd(&PERCPU_GET(fprr_state)->servers, 0, THD_FREE);
#endif
}
/* Insert a ready thread at the head of its priority's runqueue and
 * mark that priority level active in the bitmap. */
static inline void
fp_add_start_runnable(struct sched_thd *t)
{
	u16_t prio = sched_get_metric(t)->priority;
	struct sched_thd *queue;

	assert(sched_thd_ready(t));
	queue = &PERCPU_GET(fprr_state)->priorities[prio].runnable;
	ADD_LIST(queue, t, prio_next, prio_prev);
	mask_set(prio);
}
/* Rotate a ready, non-suspended thread to the tail of its priority's
 * runqueue (round-robin step) and mark that priority level active. */
static inline void
fp_move_end_runnable(struct sched_thd *t)
{
	unsigned short int prio = sched_get_metric(t)->priority;
	struct sched_thd *queue;

	assert(sched_thd_ready(t));
	assert(!sched_thd_suspended(t));
	queue = &PERCPU_GET(fprr_state)->priorities[prio].runnable;
	REM_LIST(t, prio_next, prio_prev);
	ADD_LIST(LAST_LIST(queue, prio_next, prio_prev), t, prio_next, prio_prev);
	mask_set(prio);
}
/* Timer-interrupt hook: advance this core's tick count by num_ticks
 * (see time_elapsed for time mgmt).  In DEFERRABLE builds, for each
 * server whose period has expired, replenish its budget and wake it if
 * it was suspended (i.e. had exhausted its budget). */
void
timer_tick(int num_ticks)
{
#ifdef DEFERRABLE
	{
		struct sched_thd *t;

		assert(num_ticks > 0);
		PERCPU_GET(fprr_state)->ticks += num_ticks;
		for (t = FIRST_LIST(&PERCPU_GET(fprr_state)->servers, sched_next, sched_prev) ;
		     t != &PERCPU_GET(fprr_state)->servers ;
		     t = FIRST_LIST(t, sched_next, sched_prev)) {
			struct sched_accounting *sa = sched_get_accounting(t);
			unsigned long T_exp = sa->T_exp, T = sa->T;

			/* only threads with a period are on the server list */
			assert(T);
			if (T_exp <= PERCPU_GET(fprr_state)->ticks) {
				/* align the next expiry to a multiple of T */
				unsigned long off = T - (PERCPU_GET(fprr_state)->ticks % T);
				//printc("(%ld+%ld/%ld @ %ld)\n", sa->C_used, (unsigned long)sa->pol_cycles, T, T_exp);
				sa->T_exp = PERCPU_GET(fprr_state)->ticks + off;
				/* replenish: the server may consume a full budget again */
				sa->C_used = 0;
				// sa->pol_cycles = 0;
				/* wake servers suspended for budget exhaustion */
				if (sched_thd_suspended(t)) {
					t->flags &= ~THD_SUSPENDED;
					if (sched_thd_ready(t)) {
						fp_add_thd(t, sched_get_metric(t)->priority);
					}
				}
			}
		}
	}
#endif
}
/* Debug dump of this core's scheduler state: every thread on the
 * runqueues and (with DEFERRABLE) every suspended server.  Resets each
 * printed thread's tick/cycle accounting, so successive calls report
 * per-interval numbers rather than totals. */
void
runqueue_print(void)
{
	struct sched_thd *t;
	int i = 0;

	printc("Core %ld: Running threads (thd, prio, ticks):\n", cos_cpuid());
	for (i = 0 ; i < NUM_PRIOS ; i++) {
		for (t = FIRST_LIST(&PERCPU_GET(fprr_state)->priorities[i].runnable, prio_next, prio_prev) ;
		     t != &PERCPU_GET(fprr_state)->priorities[i].runnable ;
		     t = FIRST_LIST(t, prio_next, prio_prev)) {
			struct sched_accounting *sa = sched_get_accounting(t);
			/* ticks accumulated since the previous dump */
			unsigned long diff = sa->ticks - sa->prev_ticks;

			//if (!(diff || sa->cycles)) continue;
			printc("\t%d, %d, %ld+%ld/%d\n", t->id, i, diff,
			       (unsigned long)sa->cycles, QUANTUM);
			/* consume this interval so the next dump starts fresh */
			sa->prev_ticks = sa->ticks;
			sa->cycles = 0;
		}
	}
#ifdef DEFERRABLE
	printc("Suspended threads (thd, prio, ticks):\n");
	for (t = FIRST_LIST(&PERCPU_GET(fprr_state)->servers, sched_next, sched_prev) ;
	     t != &PERCPU_GET(fprr_state)->servers ;
	     t = FIRST_LIST(t, sched_next, sched_prev)) {
		struct sched_accounting *sa = sched_get_accounting(t);
		unsigned long diff = sa->ticks - sa->prev_ticks;

		/* runnable servers were already shown by the loop above */
		if (!sched_thd_suspended(t)) continue;
		if (diff || sa->cycles) {
			printc("\t%d, %d, %ld+%ld/%d\n", t->id, sched_get_metric(t)->priority,
			       diff, (unsigned long)sa->cycles, QUANTUM);
			sa->prev_ticks = sa->ticks;
			sa->cycles = 0;
		}
	}
#endif
	printc("done printing runqueue.\n");
}
/* Return the highest active priority level, i.e. the index of the
 * least-significant set bit of the active-priority bitmap.  Must only
 * be called while at least one priority is active: with an empty mask,
 * 'v & -v' is 0 and the while loop below would spin forever shifting
 * zero.  Make that precondition explicit with an assert instead of a
 * silent hang. */
static inline unsigned short int
mask_high(void)
{
	u32_t v = PERCPU_GET(fprr_state)->active;
	unsigned short int r = 0;

	assert(v); /* empty mask would otherwise loop forever below */
	/* Assume 2s complement here.  Could instead do a check for
	 * while (v & 1)..., but that's another op in the main loop */
	v = v & -v;		/* only set least signif bit */
	while (v != 1) {
		v >>= 1;
		r++;
	}
	return r;
}
void cos_init(void) { static volatile int first = 1, second = 1, spin = 0, count = 0, i = 0; int tid = 0; int ret = 0; printc("Prio: %d\n", sched_priority(cos_get_thd_id())); printc("TID: %d\n", cos_get_thd_id()); if (first) { first = 0; union sched_param sp; sp.c.type = SCHEDP_PRIO; sp.c.value = 15; if (sched_create_thd(cos_spd_id(), sp.v, 0, 0) == 0) BUG(); printc("!!!!!!!!!!!!Thread #%d\n", (int) cos_get_thd_id()); return; } else if (second) { second = 0; printc("Calling cntl_thd\n"); if (parent_sched_child_cntl_thd(cos_spd_id())) BUG(); if (cos_sched_cntl(COS_SCHED_EVT_REGION, 0, (long)PERCPU_GET(cos_sched_notifications))) BUG(); printc("Called cntl_thd\n"); if ((tid = parent_sched_child_thd_crt(cos_spd_id(), cos_spd_id())) == -1) BUG(); printc("THID = %d\n", tid); if (parent_sched_child_timer_int(cos_spd_id(), 0, 1) != 0) BUG(); if ((ret = cos_switch_thread(tid, 0))) printc("Switch: %d\n", ret); if (parent_sched_child_timer_int(cos_spd_id(), 0, 1) != 0) BUG(); if ((ret = cos_switch_thread(tid, 0))) printc("Switch: %d\n", ret); while (1) { i++; if (i % 100000000 == 0) { printc("Stutter\n"); } } return; } else { while (1) { spin++; if (spin % 100000000 == 0) { printc("f\n"); } } return; } printc("I finished\n"); }
/* Pick the thread at the front of the highest active priority's
 * runqueue, sanity-checking its state before handing it back. */
static struct sched_thd *
fp_get_highest_prio(void)
{
	u16_t top = mask_high();
	struct sched_thd *queue, *thd;

	queue = &(PERCPU_GET(fprr_state)->priorities[top].runnable);
	thd   = FIRST_LIST(queue, prio_next, prio_prev);
	/* the active bitmap said this level is non-empty */
	assert(thd != queue);
	assert(sched_thd_ready(thd));
	assert(sched_get_metric(thd));
	assert(sched_get_metric(thd)->priority == top);
	assert(!sched_thd_free(thd));
	return thd;
}
/* Tear down this core's scheduler.  Every core clears its own
 * initialized flag; the init core additionally waits until ALL cores
 * have cleared theirs before releasing the component's memory, so no
 * core can still be running when the mappings disappear. */
void
sched_exit(void)
{
	int i;

	*PERCPU_GET(initialized_core) = 0;
	if (cos_cpuid() == INIT_CORE) {
		/* The init core waiting for all cores to exit.  If any
		 * core is still initialized, restart the scan from core 0.
		 * BUG FIX: the restart was 'i = 0', which the loop
		 * increment immediately turned into 1, so core 0 was
		 * never re-checked on later passes and could still be
		 * live when memory was released (benign only if
		 * INIT_CORE happens to be core 0). */
		for (i = 0; i < NUM_CPU ; i++)
			if (*PERCPU_GET_TARGET(initialized_core, i)) i = -1;
		/* Don't delete the memory until all cores exit */
		mman_release_all();
	}
	parent_sched_exit();
}
/* Component upcall entry point.  Only thread-creation upcalls are
 * expected: the init core zeroes every core's initialized flag and
 * brings up the memory manager, while every other core busy-waits for
 * that setup to finish; each core then publishes that it is
 * initialized.  Any other upcall type is a bug. */
void
cos_upcall_fn(upcall_type_t t, void *arg1, void *arg2, void *arg3)
{
	if (t != COS_UPCALL_THD_CREATE) {
		BUG();
		return;
	}

	if (cos_cpuid() == INIT_CORE) {
		int core;

		for (core = 0; core < NUM_CPU; core++)
			*PERCPU_GET_TARGET(initialized_core, core) = 0;
		mm_init();
	} else {
		/* Make sure that the initializing core does the
		 * initialization before any other core progresses */
		while (*PERCPU_GET_TARGET(initialized_core, INIT_CORE) == 0) ;
	}
	*PERCPU_GET(initialized_core) = 1;
}
static inline void mask_unset(unsigned short int p) { PERCPU_GET(fprr_state)->active &= ~(1 << p); }
static inline void mask_set(unsigned short int p) { PERCPU_GET(fprr_state)->active |= 1 << p; }
/* Apply a SCHEDP_NOOP-terminated array of scheduling parameters to
 * thread t, then (re)insert t into the runqueue at the resulting
 * priority.  Always returns 0, even for unrecognized options. */
int
thread_param_set(struct sched_thd *t, struct sched_param_s *ps)
{
	unsigned int prio = PRIO_LOWEST;
	struct sched_thd *c = sched_get_current();

	assert(t);
	while (ps->type != SCHEDP_NOOP) {
		switch (ps->type) {
		case SCHEDP_RPRIO:
		case SCHEDP_RLPRIO:
			/* The relative priority has been converted to
			 * absolute priority in relative_prio_convert(). */
			prio = ps->value;
			/* FIXME: When the IPI handling thread is
			 * creating a thread (requested by a remote
			 * core) , since we can't copy accounting info
			 * from the actual parent (which is on a
			 * different core), we zero the accounting
			 * info instead of touching remote
			 * data-structures. */
			if (sched_curr_is_IPI_handler())
				sched_clear_accounting(t);
			else
				memcpy(sched_get_accounting(t), sched_get_accounting(c),
				       sizeof(struct sched_accounting));
#ifdef DEFERRABLE
			/* inherited a period T => t becomes a deferrable server */
			if (sched_get_accounting(t)->T)
				ADD_LIST(&PERCPU_GET(fprr_state)->servers, t, sched_next, sched_prev);
#endif
			if (prio > PRIO_LOWEST) prio = PRIO_LOWEST;
			break;
		case SCHEDP_PRIO:
			/* absolute priority */
			prio = ps->value;
			break;
		case SCHEDP_IDLE:
			/* idle thread */
			prio = PRIO_LOWEST;
			break;
		case SCHEDP_INIT:
			/* init thread: low, but not lowest, priority */
			prio = PRIO_LOW;
			break;
		case SCHEDP_TIMER:
			/* timer thread */
			prio = PRIO_HIGHEST;
			break;
		case SCHEDP_IPI_HANDLER:
			prio = IPI_HANDLER_PRIO;
			break;
		case SCHEDP_CORE_ID:
			/* core placement was decided elsewhere; just verify */
			assert(ps->value == cos_cpuid());
			break;
#ifdef DEFERRABLE
		case SCHEDP_BUDGET:
			/* set the server budget C; keep the current priority */
			prio = sched_get_metric(t)->priority;
			sched_get_accounting(t)->C = ps->value;
			sched_get_accounting(t)->C_used = 0;
			fp_move_end_runnable(t);
			break;
		case SCHEDP_WINDOW:
			/* set the server period T; register t on the server
			 * list if it isn't there yet */
			prio = sched_get_metric(t)->priority;
			sched_get_accounting(t)->T = ps->value;
			sched_get_accounting(t)->T_exp = 0;
			if (EMPTY_LIST(t, sched_next, sched_prev) && sched_get_accounting(t)->T) {
				ADD_LIST(&PERCPU_GET(fprr_state)->servers, t, sched_next, sched_prev);
			}
			fp_move_end_runnable(t);
			break;
#endif
		default:
			printc("fprr: core %ld received unknown priority option\n", cos_cpuid());
			prio = PRIO_LOW;
		}
		ps++;
	}
	/* printc("fprr: cpu %d has new thd %d @ prio %d\n", cos_cpuid(), t->id, prio); */
	/* re-queue t at its (possibly new) priority */
	if (sched_thd_ready(t)) fp_rem_thd(t);
	fp_add_thd(t, prio);

	return 0;
}