Example 1
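/* exception-frame wrapper for the Unmap syscall: when the control word's
 * low six bits are nonzero, MR0 is loaded into the UTCB from %esi before
 * the call; MR0 is handed back in %esi afterward.
 */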
static void sys_unmap_wrap(struct x86_exregs *regs)
{
	L4_Word_t control = regs->eax;

	/* TODO: pass utcb to sys_unmap()? */
	void *utcb = thread_get_utcb(get_current_thread());
	if((control & 0x3f) > 0) L4_VREG(utcb, L4_TCR_MR(0)) = regs->esi;
	sys_unmap(control);

	regs->esi = L4_VREG(utcb, L4_TCR_MR(0));
}
Example 2
/* call latent interrupt handlers, resolve preemptions, return the winner.
 * caller must check for retval == current && I ∈ current.PreemptFlags, and
 * engage max_delay as appropriate.
 */
struct thread *irq_call_deferred(struct thread *next)
{
	assert(!x86_irq_is_enabled());
	assert(!kernel_irq_ok);

	/* initialize resolution state, feed it a primary event. */
	struct thread *current = get_current_thread();
	void *cur_utcb = current != NULL ? thread_get_utcb(current) : NULL;
	next = sched_resolve_next(current, cur_utcb, current, next);

	int n_defer_masks = (max_irq_handler + WORD_BITS) / WORD_BITS, n_done;
	do {
		if(!irq_defer_active) break;
		n_done = 0;
		for(int i=0; i < n_defer_masks; i++) {
			L4_Word_t m = defer_set_masks[i];
			while(m != 0) {
				int b = ffsl(m) - 1;
				assert(b >= 0);
				m &= ~(1ul << b);
				int vecn = i * WORD_BITS + b;
				int n_defers = deferred_vecs[vecn];
				assert(n_defers > 0);
				irq_handler_fn handler = choose_handler(vecn);
				deferred_vecs[vecn] = 0;
				defer_set_masks[i] &= ~(1ul << b);

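				/* run the handler with interrupts enabled and feed its
				 * return value back into scheduling resolution; a negated
				 * vector number tells it the IRQ was deferred more than
				 * once.
				 */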
				x86_irq_enable();
				next = sched_resolve_next(current, cur_utcb, next,
					(*handler)(n_defers > 1 ? -vecn : vecn));
				x86_irq_disable();
				n_done++;
			}
		}
	} while(n_done > 0);
	irq_defer_active = false;

	return next;
}
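The header comment spells out a contract for the caller: when irq_call_deferred() hands back the current thread and the pending-preemption ("I") flag is set in its PreemptFlags, the caller should engage max_delay. The following is only a minimal sketch of that check; the flag mask PREEMPT_PENDING, the helper engage_max_delay(), the wrapper name, and the overall flow are illustrative assumptions, while get_current_thread(), thread_get_utcb(), CHECK_FLAG(), L4_VREG() and return_from_irq() appear in the surrounding examples.

static void sketch_return_path(struct thread *candidate)
{
	struct thread *current = get_current_thread();
	struct thread *next = irq_call_deferred(candidate);
	if(next == current
		&& CHECK_FLAG(L4_VREG(thread_get_utcb(current), L4_TCR_COP_PREEMPT),
			PREEMPT_PENDING))	/* illustrative mask for the "I" flag */
	{
		engage_max_delay(current);	/* hypothetical helper */
	}
	return_from_irq(next);
}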
Example 3
/* the IAPC PIT 1000hz interrupt. */
void isr_irq0_bottom(struct x86_exregs *regs)
{
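	/* account for the tick: advance the global counter and the system
	 * clock, then acknowledge the interrupt at the PIC.
	 */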
	uint64_t now = ++global_timer_count;
	(*systemclock_p) += 1000;
	(*global_pic.send_eoi)(0);

	if(now < preempt_timer_count) return;
	TRACE("%s: preempt hit at now=%u\n", __func__, (unsigned)now);

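	/* disarm the preemption point; ~0ull compares greater than any tick
	 * count.
	 */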
	preempt_timer_count = ~0ull;
	assert(preempt_thread == NULL
		|| thread_is_valid(preempt_thread));

	if(irq_in_kernel(regs) && !kernel_irq_ok) {
		/* defer a magical call to on_preempt() as though it were any old
		 * interrupt.
		 */
		irq_defer(0x20);
		TRACE("%s: preempt deferred\n", __func__);
		return;
	}

	struct thread *current = get_current_thread();
	bool ctx_saved = false;
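	/* save the user frame up front when a delayed preemption is already
	 * pending, or when the preëmptor falls within @current's sensitive
	 * priority, no further delay is allowed, and both preempt-control TCR
	 * bits are set.
	 */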
	if(CHECK_FLAG(preempt_status, PS_DELAYED)
		|| (current != NULL && preempt_thread != NULL
			&& preempt_thread->pri <= current->sens_pri
			&& current->max_delay == 0
			&& CHECK_FLAG_ALL(L4_VREG(thread_get_utcb(current),
				L4_TCR_COP_PREEMPT), 0x60)))
	{
		assert(!CHECK_FLAG(current->flags, TF_SYSCALL));
		save_user_ex(regs);
		ctx_saved = true;
	}
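	/* resolve the preemption; on_preempt() returns the thread that should
	 * run next.
	 */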
	struct thread *next = on_preempt(0x20);
	if(current == NULL) {
		/* scheduled activation from idle. */
		if(next != NULL) {
			TRACE("%s: scheduled activation of next=%lu:%lu\n", __func__,
				TID_THREADNUM(next->id), TID_VERSION(next->id));
		} else {
			TRACE("%s: idle -> idle\n", __func__);
		}
		kernel_irq_ok = false;
	} else if(next != current) {
		/* async preëmption of @current. */
		assert(!CHECK_FLAG(current->flags, TF_SYSCALL));
		TRACE("%s: async preëmpt of %lu:%lu\n", __func__,
			TID_THREADNUM(current->id), TID_VERSION(current->id));
		if(!ctx_saved) save_user_ex(regs);
		if(!IS_IPC(current->status)) {
			current->status = TS_READY;
			current->wakeup_time = 0;
			sq_update_thread(current);
		}
	}

	return_from_irq(next);
	TRACE("%s: returning to userspace (preempt_timer_count'=%u)\n",
		__func__, (unsigned)preempt_timer_count);
	BUG_ON(next != current, "shouldn't get here!");
}