/* Account for processing_time cycles consumed by thread t: convert
 * accumulated cycles into scheduler ticks, rotate the thread within
 * its priority level (round robin), and charge the consumption
 * against its deferrable-server budget. */
void time_elapsed(struct sched_thd *t, u32_t processing_time)
{
	struct sched_accounting *sa;

	assert(t);
	sa = sched_get_accounting(t);
	sa->pol_cycles += processing_time;
	sa->cycles     += processing_time;
	if (sa->cycles >= QUANTUM) {
		/* convert whole quanta of cycles into ticks */
		while (sa->cycles > QUANTUM) {
			sa->cycles -= QUANTUM;
			sa->ticks++;
		}
		/* round robin: move the thread to the tail of its priority's run-queue */
		if (sched_thd_ready(t) && !sched_thd_suspended(t)) {
			assert(!sched_thd_inactive_evt(t));
			assert(!sched_thd_blocked(t));
			fp_move_end_runnable(t);
		}
#ifdef DEFERRABLE
#endif
	}
	/* deferrable server: charge a tick against the budget (C) and
	 * suspend the thread once the budget is exhausted; timer_tick()
	 * replenishes it when the period (T) expires */
	if (sa->pol_cycles > QUANTUM) {
		sa->pol_cycles -= QUANTUM;
		if (sa->T) {
			sa->C_used++;
			if (sa->C_used >= sa->C) {
				sched_set_thd_urgency(t, NUM_PRIOS);
				if (sched_thd_ready(t)) fp_rem_thd(t);
				t->flags |= THD_SUSPENDED;
			}
		}
	}
}
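/*
 * A minimal usage sketch (not part of the original source): how a
 * scheduler's dispatch path might charge elapsed cycles to the
 * outgoing thread before switching.  get_cycles() and switch_thread()
 * are hypothetical placeholders for the platform's cycle counter and
 * the actual context-switch routine.
 */
static void charge_and_switch(struct sched_thd *prev, struct sched_thd *next)
{
	static u64_t last = 0;       /* cycle count at the previous switch */
	u64_t now = get_cycles();    /* hypothetical cycle-counter read */

	time_elapsed(prev, (u32_t)(now - last));
	last = now;
	switch_thread(next);         /* hypothetical dispatch */
}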
/* Per-tick housekeeping.  Under DEFERRABLE, advance the global tick
 * count and replenish the budget of any server whose period has
 * expired, waking threads that were suspended on budget exhaustion. */
void timer_tick(int num_ticks)
{
	/* see time_elapsed() for per-thread time management */
#ifdef DEFERRABLE
	{
		struct sched_thd *t;

		assert(num_ticks > 0);
		ticks += num_ticks;
		for (t = FIRST_LIST(&servers, sched_next, sched_prev) ;
		     t != &servers                                    ;
		     t = FIRST_LIST(t, sched_next, sched_prev))
		{
			struct sched_accounting *sa = sched_get_accounting(t);
			unsigned long T_exp = sa->T_exp, T = sa->T;
			assert(T);

			if (T_exp <= ticks) {
				/* period expired: realign the next expiration
				 * to the following multiple of T and refill
				 * the budget */
				unsigned long off = T - (ticks % T);

				//printc("(%ld+%ld/%ld @ %ld)\n", sa->C_used, (unsigned long)sa->pol_cycles, T, T_exp);
				sa->T_exp  = ticks + off;
				sa->C_used = 0;
//				sa->pol_cycles = 0;
				/* wake threads suspended on budget exhaustion */
				if (sched_thd_suspended(t)) {
					t->flags &= ~THD_SUSPENDED;
					if (sched_thd_ready(t)) {
						fp_add_thd(t, sched_get_metric(t)->priority);
					}
				}
			}
		}
	}
#endif
}
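/*
 * Worked example of the replenishment arithmetic above (values are
 * illustrative only): with a period of T = 10 ticks and ticks = 23
 * when the expiration is noticed, off = 10 - (23 % 10) = 7, so the
 * next expiration becomes T_exp = 23 + 7 = 30, i.e. the next
 * multiple of T.  If ticks is exactly a multiple of T (say 30), off
 * is a full period and T_exp becomes 40, so a server is never
 * replenished twice within the same period.
 */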
/* Add a ready thread to the head of its priority's run-queue and mark
 * that priority level as non-empty. */
static inline void fp_add_start_runnable(struct sched_thd *t)
{
	struct sched_thd *head;
	u16_t p = sched_get_metric(t)->priority;

	assert(sched_thd_ready(t));
	head = &priorities[p].runnable;
	ADD_LIST(head, t, prio_next, prio_prev);
	mask_set(p);
}
/* Parse a thread-parameter string p (the first character selects the
 * option) and place the thread on the run-queue at the resulting
 * priority. */
static int fp_thread_params(struct sched_thd *t, char *p)
{
	int prio, tmp;
	char curr = p[0];
	struct sched_thd *c;
	
	assert(t);
	switch (curr) {
	case 'r':
		/* priority relative to current thread */
		c = sched_get_current();
		assert(c);
		tmp = atoi(&p[1]);
		prio = sched_get_metric(c)->priority + tmp;
		memcpy(sched_get_accounting(t), sched_get_accounting(c), sizeof(struct sched_accounting));
#ifdef DEFERRABLE
		if (sched_get_accounting(t)->T) ADD_LIST(&servers, t, sched_next, sched_prev);
#endif

		if (prio > PRIO_LOWEST) prio = PRIO_LOWEST;
		break;
	case 'a':
		/* absolute priority */
		prio = atoi(&p[1]);
		break;
	case 'i':
		/* idle thread */
		prio = PRIO_LOWEST;
		break;
	case 't':
		/* timer thread */
		prio = PRIO_HIGHEST;
		break;
#ifdef DEFERRABLE
	case 'd':
	{
		prio = ds_parse_params(t, p);
		if (EMPTY_LIST(t, sched_next, sched_prev) && 
		    sched_get_accounting(t)->T) {
			ADD_LIST(&servers, t, sched_next, sched_prev);
		}
		fp_move_end_runnable(t);
		break;
	}
#endif
	default:
		printc("unknown priority option @ %s, setting to low\n", p);
		prio = PRIO_LOW;
	}
	if (sched_thd_ready(t)) fp_rem_thd(t);
	fp_add_thd(t, prio);

	return 0;
}
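/*
 * Usage sketch for the parameter strings parsed above (illustrative
 * only; new_thd stands for a thread that has already been created
 * and marked ready):
 *
 *   fp_thread_params(new_thd, "a5");   absolute priority 5
 *   fp_thread_params(new_thd, "r1");   caller's priority value + 1
 *   fp_thread_params(new_thd, "i");    idle thread, PRIO_LOWEST
 *   fp_thread_params(new_thd, "t");    timer thread, PRIO_HIGHEST
 *
 * Under DEFERRABLE, a "d..." string is handed to ds_parse_params()
 * to fill in the budget/period fields before the thread is queued.
 */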
/* Set the thread's priority and urgency, then queue it at the tail of
 * that priority's run-queue. */
static inline void fp_add_thd(struct sched_thd *t, unsigned short int prio)
{
	assert(prio < NUM_PRIOS);
	assert(sched_thd_ready(t));
	assert(!sched_thd_suspended(t));

	sched_get_metric(t)->priority = prio;
	sched_set_thd_urgency(t, prio);
	fp_move_end_runnable(t);

	return;
}
/* Remove the thread from whatever queue it is on and append it to the
 * tail of its priority's run-queue. */
static inline void fp_move_end_runnable(struct sched_thd *t)
{
	struct sched_thd *head;
	unsigned short int p = sched_get_metric(t)->priority;

	assert(sched_thd_ready(t));
	assert(!sched_thd_suspended(t));
	head = &priorities[p].runnable;
	REM_LIST(t, prio_next, prio_prev);
	ADD_LIST(LAST_LIST(head, prio_next, prio_prev), t, prio_next, prio_prev);
	mask_set(p);
}
/* Return the first thread on the highest-priority non-empty run-queue
 * (per-cpu fprr state). */
static struct sched_thd *fp_get_highest_prio(void)
{
	struct sched_thd *t, *head;
	u16_t p = mask_high();

	head = &(PERCPU_GET(fprr_state)->priorities[p].runnable);
	t = FIRST_LIST(head, prio_next, prio_prev);
	assert(t != head);
	assert(sched_thd_ready(t));
	assert(sched_get_metric(t));
	assert(sched_get_metric(t)->priority == p);
	assert(!sched_thd_free(t));
	return t;
}
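/*
 * mask_set()/mask_high() are not shown in these excerpts.  A minimal
 * sketch of one common implementation (an assumption, not the
 * original code), keeping one bit per priority level so the highest
 * non-empty run-queue is found in O(1); it assumes NUM_PRIOS fits in
 * a single 32-bit word and that a numerically lower priority value
 * means higher priority:
 */
static u32_t prio_mask; /* bit p set => priorities[p].runnable is non-empty */

static inline void mask_set(u16_t p)   { prio_mask |= (u32_t)1 << p; }
static inline void mask_unset(u16_t p) { prio_mask &= ~((u32_t)1 << p); }
static inline u16_t mask_high(void)
{
	assert(prio_mask);
	/* lowest set bit == highest-priority non-empty queue */
	return (u16_t)__builtin_ctz(prio_mask);
}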
/* Apply an array of scheduling parameters, terminated by SCHEDP_NOOP,
 * to thread t, and queue it at the resulting priority. */
int
thread_param_set(struct sched_thd *t, struct sched_param_s *ps)
{
	unsigned int prio = PRIO_LOWEST;
	struct sched_thd *c = sched_get_current();
	
	assert(t);
	while (ps->type != SCHEDP_NOOP) {
		switch (ps->type) {
		case SCHEDP_RPRIO:
		case SCHEDP_RLPRIO:
			/* The relative priority has been converted to absolute priority in relative_prio_convert(). */
			prio = ps->value;
			/* FIXME: When the IPI handling thread is
			 * creating a thread (requested by a remote
			 * core) , since we can't copy accounting info
			 * from the actual parent (which is on a
			 * different core), we zero the accounting
			 * info instead of touching remote
			 * data-structures. */
			if (sched_curr_is_IPI_handler())
				sched_clear_accounting(t);
			else
				memcpy(sched_get_accounting(t), sched_get_accounting(c), sizeof(struct sched_accounting));
#ifdef DEFERRABLE
			if (sched_get_accounting(t)->T) ADD_LIST(&PERCPU_GET(fprr_state)->servers, t, sched_next, sched_prev);
#endif
			if (prio > PRIO_LOWEST) prio = PRIO_LOWEST;
			break;
		case SCHEDP_PRIO:
			/* absolute priority */
			prio = ps->value;
			break;
		case SCHEDP_IDLE:
			/* idle thread */
			prio = PRIO_LOWEST;
			break;
		case SCHEDP_INIT:
			/* init thread */
			prio = PRIO_LOW;
			break;
		case SCHEDP_TIMER:
			/* timer thread */
			prio = PRIO_HIGHEST;
			break;
		case SCHEDP_IPI_HANDLER:
			prio = IPI_HANDLER_PRIO;
			break;
		case SCHEDP_CORE_ID:
			assert(ps->value == cos_cpuid());
			break;
#ifdef DEFERRABLE
		case SCHEDP_BUDGET:
			prio = sched_get_metric(t)->priority;
			sched_get_accounting(t)->C = ps->value;
			sched_get_accounting(t)->C_used = 0;
			fp_move_end_runnable(t);
			break;
		case SCHEDP_WINDOW:
			prio = sched_get_metric(t)->priority;
			sched_get_accounting(t)->T = ps->value;
			sched_get_accounting(t)->T_exp = 0;
			if (EMPTY_LIST(t, sched_next, sched_prev) && 
			    sched_get_accounting(t)->T) {
				ADD_LIST(&PERCPU_GET(fprr_state)->servers, t, sched_next, sched_prev);
			}
			fp_move_end_runnable(t);
			break;
#endif
		default:
			printc("fprr: core %ld received unknown priority option\n", cos_cpuid());
			prio = PRIO_LOW;
		}
		ps++;
	}
	/* printc("fprr: cpu %d has new thd %d @ prio %d\n", cos_cpuid(), t->id, prio); */
	if (sched_thd_ready(t)) fp_rem_thd(t);

	fp_add_thd(t, prio);
	
	return 0;
}
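/*
 * Usage sketch (illustrative only; the designated-initializer layout
 * of struct sched_param_s is assumed from the ps->type/ps->value
 * accesses above): give a thread an absolute priority of 5 with a
 * deferrable-server budget of 2 ticks per 10-tick window.
 */
static void example_set_params(struct sched_thd *t)
{
	struct sched_param_s ps[] = {
		{ .type = SCHEDP_PRIO,   .value = 5  },
#ifdef DEFERRABLE
		{ .type = SCHEDP_BUDGET, .value = 2  },
		{ .type = SCHEDP_WINDOW, .value = 10 },
#endif
		{ .type = SCHEDP_NOOP,   .value = 0  },  /* terminator */
	};

	thread_param_set(t, ps);
}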