Example #1
asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p;
	void (*hook)(void);
	int ret;

	WARN_ON_ONCE(irqs_disabled_hw());

	/*
	 * We need to run the IRQ tail hook each time we intercept a
	 * syscall, because we know that important operations might be
	 * pending there (e.g. Xenomai deferred rescheduling).
	 */
	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
	hook();

	/*
	 * This routine either returns:
	 * 0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 * tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 * tail work has to be performed (for handling signals etc).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
		return 0;

	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);

	hard_local_irq_disable();

	/*
	 * This is the end of the syscall path, so we may
	 * safely assume a valid Linux task stack here.
	 */
	if (current->ipipe_flags & PF_EVTRET) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p) {
		ret = -1;
	} else {
		p = ipipe_root_cpudom_ptr();
		if (__ipipe_ipending_p(p))
			__ipipe_sync_pipeline();
	}

	hard_local_irq_enable();

	return -ret;
}
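
The tail-hook comment above is easier to follow with the registration side in view. Below is a minimal sketch of how a co-kernel such as Xenomai might install that hook; the handler name and body are invented, and declaring __ipipe_irq_tail_hook as an unsigned long is an assumption inferred from the cast in the example:

/*
 * Hypothetical tail-hook installation. my_tail_work() is invented;
 * the type of __ipipe_irq_tail_hook is assumed from the cast above.
 */
extern unsigned long __ipipe_irq_tail_hook;

static void my_tail_work(void)
{
	/* e.g. run a deferred co-kernel rescheduling pass */
}

static void install_tail_hook(void)
{
	__ipipe_irq_tail_hook = (unsigned long)my_tail_work;
}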
Example #2
/*
 * Offer an exception to the pipeline before Linux sees it: if a
 * domain monitors this vector and its event handler absorbs the
 * exception, return 1 so the caller skips the standard handling;
 * otherwise fix up the virtualized interrupt flag and return 0.
 */
int fastcall __ipipe_divert_exception(struct pt_regs *regs, int vector)
{
	if (__ipipe_event_monitored_p(vector) &&
	    __ipipe_dispatch_event(vector, regs) != 0)
		return 1;

	__fixup_if(regs);

	return 0;
}
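
Example #2 leans on __fixup_if() to keep the virtualized interrupt flag in the saved register frame consistent with the root domain's stall state (Example #5 explains why this matters across domain migration). The helper itself is not shown in these examples, so the following is a conceptual sketch only, borrowing the stall-bit test from Example #6 and assuming an x86 frame with a flags field:

/*
 * Conceptual sketch of an EFLAGS.IF fixup: mirror the root domain's
 * virtual interrupt state into the saved flags so the return path
 * restores what the domain expects. Not the real __fixup_if().
 */
static inline void fixup_if_sketch(struct pt_regs *regs)
{
	if (test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		regs->flags &= ~X86_EFLAGS_IF;	/* virtually stalled */
	else
		regs->flags |= X86_EFLAGS_IF;	/* virtually enabled */
}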
Example #3
int __ipipe_syscall_root(struct pt_regs *regs)
{
	unsigned long flags;
	int ret;

	/*
	 * This routine either returns:
	 * 0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 * tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 * tail work has to be performed (for handling signals etc).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_ax) ||
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
		return 0;

	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);

	local_irq_save_hw(flags);

	if (current->ipipe_flags & PF_EVTRET) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!ipipe_root_domain_p)
		return 1;

	/*
	 * If allowed, sync pending VIRQs before _TIF_NEED_RESCHED is
	 * tested.
	 */
	if (__ipipe_ipending_p(ipipe_root_cpudom_ptr()))
		__ipipe_sync_pipeline();

	if (!ret)
		local_irq_restore_hw(flags);

	return -ret;
}
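
The tri-state contract documented at the top of this routine (and of Examples #1 and #5) is consumed by the arch-level syscall entry stub. A sketch of that consumer follows; do_linux_syscall() and do_syscall_tail_work() are invented stand-ins for the real assembly path:

/* Hypothetical consumer of the tri-state return value. */
static void do_linux_syscall(struct pt_regs *regs);	/* invented */
static void do_syscall_tail_work(struct pt_regs *regs);	/* invented */

static void syscall_entry_sketch(struct pt_regs *regs)
{
	int ret = __ipipe_syscall_root(regs);

	if (ret == 0) {
		do_linux_syscall(regs);		/* pass on to Linux */
		do_syscall_tail_work(regs);	/* signals, resched, ... */
	} else if (ret < 0) {
		do_syscall_tail_work(regs);	/* skip Linux, keep tail work */
	}
	/* ret > 0: syscall fully absorbed, nothing else to do */
}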
Example #4
/*
 * Combined exception dispatcher: offer the fault to any monitoring
 * domain first; if no event handler absorbs it, run the standard
 * Linux handler from the dispatch table and fix up the virtualized
 * interrupt flag before returning to the faulting context.
 */
asmlinkage int __ipipe_handle_exception(int vector, struct pt_regs *regs, long error_code)
{
	if (!__ipipe_event_monitored_p(vector) ||
	    __ipipe_dispatch_event(vector, regs) == 0) {
		__ipipe_exptr handler = __ipipe_std_extable[vector];
		handler(regs, error_code);
		__fixup_if(regs);
		return 0;
	}

	return 1;
}
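
Both dispatchers above act only when __ipipe_event_monitored_p() reports a handler for the vector. In the legacy I-pipe interface a domain registers such handlers with ipipe_catch_event(); the sketch below assumes that interface and handler prototype (neither is shown in these examples), and my_fault_handler() is invented:

/*
 * Sketch: registering an event handler so that
 * __ipipe_event_monitored_p(vector) becomes true for a fault vector.
 */
static int my_fault_handler(unsigned event, struct ipipe_domain *from,
			    void *data)
{
	struct pt_regs *regs = data;	/* faulting context */

	(void)regs;	/* a real handler would inspect it */
	return 0;	/* 0: propagate to Linux; non-zero: absorb */
}

static void catch_fault_sketch(struct ipipe_domain *domain, int vector)
{
	ipipe_catch_event(domain, vector, my_fault_handler);
}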
Example #5
asmlinkage int __ipipe_syscall_root(struct pt_regs regs)
{
	ipipe_declare_cpuid;
	unsigned long flags;

	__fixup_if(&regs);

	/*
	 * This routine either returns:
	 * 0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 * tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 * tail work has to be performed (for handling signals etc).
	 */

	if (__ipipe_syscall_watched_p(current, regs.orig_eax) &&
	    __ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, &regs) > 0) {
		/* We might enter here over a non-root domain and exit
		 * over the root one as a result of the syscall
		 * (i.e. by recycling the register set of the current
		 * context across the migration), so we need to fixup
		 * the interrupt flag upon return too, so that
		 * __ipipe_unstall_iret_root() resets the correct
		 * stall bit on exit. */
		__fixup_if(&regs);

		if (ipipe_current_domain == ipipe_root_domain) {
			/* Sync pending VIRQs before _TIF_NEED_RESCHED
			 * is tested. */
			ipipe_lock_cpu(flags);
			if ((ipipe_root_domain->cpudata[cpuid].irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
				__ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
			ipipe_unlock_cpu(flags);
			return -1;
		}
		return 1;
	}

	return 0;
}
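
Example #5 bypasses Linux only when the dispatched handler returns a positive value. Here is a sketch of the co-kernel side of that contract; the handler prototype again follows the assumed legacy event interface, and is_my_syscall() is an invented predicate:

/* Sketch of a handler registered for IPIPE_EVENT_SYSCALL. */
static int is_my_syscall(struct pt_regs *regs);	/* invented predicate */

static int my_syscall_handler(unsigned event, struct ipipe_domain *from,
			      void *data)
{
	struct pt_regs *regs = data;

	if (is_my_syscall(regs))
		return 1;	/* absorbed: Linux never sees it */

	return 0;		/* pass on to Linux */
}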
Example #6
/*
 * __ipipe_handle_irq() -- I-pipe's generic IRQ handler. An
 * optimistic interrupt protection log is maintained here for each
 * domain. Hardware interrupts are off on entry.
 */
int __ipipe_handle_irq(struct pt_regs *regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	int irq, vector = regs->orig_ax;
	struct list_head *head, *pos;
	struct pt_regs *tick_regs;
	int m_ack;

	if (vector < 0) {
		irq = __get_cpu_var(vector_irq)[~vector];
		BUG_ON(irq < 0);
		m_ack = 0;
	} else { /* This is a self-triggered one. */
		irq = vector;
		m_ack = 1;
	}

	this_domain = ipipe_current_domain;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			__ipipe_dispatch_wired(next_domain, irq);
			goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto finalize_nosync;

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline.
	 */

	__ipipe_walk_pipeline(head);

finalize_nosync:

	/*
	 * Given our deferred dispatching model for regular IRQs, we
	 * only record CPU regs for the last timer interrupt, so that
	 * the timer handler charges CPU times properly. It is assumed
	 * that other interrupt handlers don't actually care for such
	 * information.
	 */

	if (irq == __ipipe_hrtimer_irq) {
		tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
		tick_regs->flags = regs->flags;
		tick_regs->cs = regs->cs;
		tick_regs->ip = regs->ip;
		tick_regs->bp = regs->bp;
#ifdef CONFIG_X86_64
		tick_regs->ss = regs->ss;
		tick_regs->sp = regs->sp;
#endif
		if (!__ipipe_root_domain_p)
			tick_regs->flags &= ~X86_EFLAGS_IF;
	}

	if (user_mode(regs) && (current->ipipe_flags & PF_EVTRET) != 0) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p ||
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		return 0;

	return 1;
}
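
The "optimistic interrupt protection log" named in the header comment reduces to per-domain pending bitmaps: __ipipe_set_irq_pending() records the IRQ, and the sync/walk routines replay it once the target domain may run. A simplified, self-contained sketch of that deferral scheme follows (structure and function names invented):

/*
 * Simplified model of the per-domain IRQ log: interrupts are logged
 * as pending bits under hard-irqs-off protection, then replayed by
 * the pipeline sync code. Not the actual I-pipe data layout.
 */
struct pending_log_sketch {
	unsigned long pending_map[BITS_TO_LONGS(NR_IRQS)];
};

static void log_irq_sketch(struct pending_log_sketch *log, int irq)
{
	__set_bit(irq, log->pending_map);	/* defer the dispatch */
}

static void replay_irqs_sketch(struct pending_log_sketch *log,
			       void (*dispatch)(int irq))
{
	int irq;

	for_each_set_bit(irq, log->pending_map, NR_IRQS) {
		__clear_bit(irq, log->pending_map);
		dispatch(irq);	/* run the domain's handler now */
	}
}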