Example #1
void __ipipe_restore_head(unsigned long x) /* hw interrupt off */
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context();

	if (x) {
#ifdef CONFIG_DEBUG_KERNEL
		static int warned;
		if (!warned &&
		    __test_and_set_bit(IPIPE_STALL_FLAG, &p->status)) {
		/*
		 * Already stalled, although ipipe_restore_head()
		 * should have detected this case? Warn once.
		 */
			hard_local_irq_enable();
			warned = 1;
			printk(KERN_WARNING
			       "I-pipe: ipipe_restore_head() optimization failed.\n");
			dump_stack();
			hard_local_irq_disable();
		}
#else /* !CONFIG_DEBUG_KERNEL */
		__set_bit(IPIPE_STALL_FLAG, &p->status);
#endif /* CONFIG_DEBUG_KERNEL */
	} else {
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
		if (unlikely(__ipipe_ipending_p(p)))
			__ipipe_sync_pipeline(ipipe_head_domain);
		hard_local_irq_enable();
	}
}
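
For context: this is the out-of-line slow path behind the ipipe_restore_head() inline mentioned in the warning above, which normally filters out redundant stall transitions. A minimal sketch of the caller-side pairing, assuming ipipe_test_and_stall_head() and ipipe_restore_head() from the I-pipe API (neither is shown in these examples):

/*
 * Sketch only: save the head domain's stall state around a critical
 * section, then restore it; the restore may end up in the slow path
 * shown above when the state actually changed.
 */
static void head_critical_section_sketch(void)
{
	unsigned long flags;

	flags = ipipe_test_and_stall_head(); /* previous stall state */
	/* ... section shielded from head-domain interrupts ... */
	ipipe_restore_head(flags);
}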
Example #2
asmlinkage void __ipipe_unstall_iret_root(struct pt_regs regs)
{
	ipipe_declare_cpuid;

	/* Emulate IRET's handling of the interrupt flag. */

	local_irq_disable_hw();

	ipipe_load_cpuid();

	/* Restore the software state as it used to be on kernel
	   entry. CAUTION: NMIs must *not* return through this
	   emulation. */

	if (!(regs.eflags & X86_EFLAGS_IF)) {
		__set_bit(IPIPE_STALL_FLAG,
			  &ipipe_root_domain->cpudata[cpuid].status);
		regs.eflags |= X86_EFLAGS_IF;
	} else {
		__clear_bit(IPIPE_STALL_FLAG,
			    &ipipe_root_domain->cpudata[cpuid].status);

		/* Only sync virtual IRQs here, so that we don't recurse
		   indefinitely in case of an external interrupt flood. */

		if ((ipipe_root_domain->cpudata[cpuid].
		     irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
			__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
	}
}
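
The pattern here is the heart of interrupt-flag virtualization: for the root domain, EFLAGS.IF is mirrored by IPIPE_STALL_FLAG, so "restoring IF" means toggling the stall bit while the hardware flag stays under I-pipe control. A reduced sketch of that mapping (hypothetical helper; the status word is passed in explicitly instead of going through the per-CPU data):

/*
 * Sketch: map the saved EFLAGS.IF bit onto the root domain's virtual
 * stall bit instead of the hardware flag.
 */
static void emulate_if_restore(struct pt_regs *regs, unsigned long *status)
{
	if (regs->eflags & X86_EFLAGS_IF)
		__clear_bit(IPIPE_STALL_FLAG, status); /* virtually enabled */
	else
		__set_bit(IPIPE_STALL_FLAG, status);   /* virtually disabled */
}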
Example #3
asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p;
	void (*hook)(void);
	int ret;

	WARN_ON_ONCE(irqs_disabled_hw());

	/*
	 * We need to run the IRQ tail hook each time we intercept a
	 * syscall, because we know that important operations might be
	 * pending there (e.g. Xenomai deferred rescheduling).
	 */
	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
	hook();

	/*
	 * This routine either returns:
	 * 0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 * tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 * tail work has to be performed (for handling signals etc).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
		return 0;

	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);

	hard_local_irq_disable();

	/*
	 * This is the end of the syscall path, so we may
	 * safely assume a valid Linux task stack here.
	 */
	if (current->ipipe_flags & PF_EVTRET) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p) {
		ret = -1;
	} else {
		p = ipipe_root_cpudom_ptr();
		if (__ipipe_ipending_p(p))
			__ipipe_sync_pipeline();
	}

	hard_local_irq_enable();

	return -ret;
}
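
Given the return-value contract spelled out in the comment, the assembly glue calling this function has to branch three ways. A hypothetical C rendition of that consumer (run_linux_syscall() and do_syscall_tail_work() are illustrative names, not part of the code above):

extern void run_linux_syscall(struct pt_regs *regs);    /* hypothetical */
extern void do_syscall_tail_work(struct pt_regs *regs); /* hypothetical */

static void syscall_entry_sketch(struct pt_regs *regs)
{
	int ret = __ipipe_syscall_root(regs);

	if (ret == 0) {
		run_linux_syscall(regs);    /* pass the syscall to Linux */
		return;
	}
	if (ret < 0)
		do_syscall_tail_work(regs); /* signals, rescheduling */
	/* ret > 0: skip both Linux and the tail work */
}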
Example #4
void ipipe_unstall_head(void)
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context();

	hard_local_irq_disable();

	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p)))
		__ipipe_sync_pipeline(ipipe_head_domain);

	hard_local_irq_enable();
}
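
ipipe_unstall_head() is the closing half of a stall/unstall bracket; the notable part is that it replays any interrupts logged while the head domain was stalled. A sketch of the bracket, assuming ipipe_stall_head() from the same API family (not shown in these examples):

/*
 * Sketch only: mask head-domain interrupt delivery across a short
 * section, then let ipipe_unstall_head() replay whatever was logged.
 */
static void head_bracket_sketch(void)
{
	ipipe_stall_head();   /* assumed counterpart, sets the stall bit */
	/* ... protected section ... */
	ipipe_unstall_head(); /* shown above: clears the bit, syncs IRQs */
}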
Example #5
int __ipipe_syscall_root(struct pt_regs *regs)
{
	unsigned long flags;
	int ret;

	/*
	 * This routine either returns:
	 * 0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 * tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 * tail work has to be performed (for handling signals etc).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_ax) ||
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
		return 0;

	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);

	local_irq_save_hw(flags);

	if (current->ipipe_flags & PF_EVTRET) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!ipipe_root_domain_p)
		return 1;

	/*
	 * If allowed, sync pending VIRQs before _TIF_NEED_RESCHED is
	 * tested.
	 */
	if (__ipipe_ipending_p(ipipe_root_cpudom_ptr()))
		__ipipe_sync_pipeline();

	if (!ret)
		local_irq_restore_hw(flags);

	return -ret;
}
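
Note the interrupt-state contract visible in this variant: hardware IRQs are restored only on the ret == 0 path, so every non-zero return leaves them disabled for the low-level return code to deal with. A hedged sketch of a caller honoring that contract (handle_linux_syscall() is illustrative, and the simplified re-enable stands in for the real return path):

extern void handle_linux_syscall(struct pt_regs *regs); /* hypothetical */

static void syscall_glue_sketch(struct pt_regs *regs)
{
	if (__ipipe_syscall_root(regs) == 0) {
		handle_linux_syscall(regs); /* hw IRQs already restored */
		return;
	}
	local_irq_enable_hw(); /* non-zero return: hw IRQs still off here */
}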
Example #6
asmlinkage void __ipipe_sync_root(void)
{
	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(irqs_disabled());

	flags = hard_local_irq_save();

	if (irq_tail_hook)
		irq_tail_hook();

	clear_thread_flag(TIF_IRQ_SYNC);

	p = ipipe_root_cpudom_ptr();
	if (__ipipe_ipending_p(p))
		__ipipe_sync_pipeline();

	hard_local_irq_restore(flags);
}
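
__ipipe_sync_root() pairs with the TIF_IRQ_SYNC flag it clears: some arch path sets the flag to defer synchronization, and this routine flushes the IRQ log later. A sketch of such a deferred call site (illustrative placement; the real one lives in the arch glue code):

static void resume_root_sketch(void)
{
	/* Flush IRQs whose synchronization was deferred earlier. */
	if (test_thread_flag(TIF_IRQ_SYNC))
		__ipipe_sync_root();
}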
Example #7
void __ipipe_halt_root(void)
{
	struct ipipe_percpu_domain_data *p;

	/* Emulate sti+hlt sequence over the root domain. */

	local_irq_disable_hw();

	p = ipipe_root_cpudom_ptr();

	trace_hardirqs_on();
	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p))) {
		__ipipe_sync_pipeline();
		local_irq_enable_hw();
	} else {
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
		ipipe_trace_end(0x8000000E);
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
		asm volatile("sti; hlt": : :"memory");
	}
}
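
__ipipe_halt_root() exists so the idle loop never sleeps past a virtually pending root interrupt: pending IRQs are played first, and otherwise the atomic "sti; hlt" pair keeps the wakeup race closed. A sketch of how an idle routine might use it (illustrative wiring; the real hook-up is arch-specific):

static void idle_sketch(void)
{
	while (!need_resched())
		__ipipe_halt_root(); /* emulated sti+hlt, shown above */
}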
Example #8
void __ipipe_dispatch_irq(unsigned int irq, int flags) /* hw interrupts off */
{
	struct ipipe_domain *ipd;
	struct irq_desc *desc;
	unsigned long control;

	/*
	 * Survival kit when reading this code:
	 *
	 * - we have two main situations, leading to three cases for
	 *   handling interrupts:
	 *
	 *   a) the root domain is alone, no registered head domain
	 *      => all interrupts are delivered via the fast dispatcher.
	 *   b) a head domain is registered
	 *      => head domain IRQs go through the fast dispatcher
	 *      => root domain IRQs go through the interrupt log
	 *
	 * - when no head domain is registered, ipipe_head_domain ==
	 *   ipipe_root_domain == &ipipe_root.
	 *
	 * - the caller tells us whether we should acknowledge this
	 *   IRQ. Even virtual IRQs may require acknowledge on some
	 *   platforms (e.g. arm/SMP).
	 *
	 * - the caller tells us whether we may try to run the IRQ log
	 *   syncer. Typically, demuxed IRQs won't be synced
	 *   immediately.
	 *
	 * - multiplex IRQs most likely have a valid acknowledge
	 *   handler and we may not be called with IPIPE_IRQF_NOACK
	 *   for them. The ack handler for the multiplex IRQ actually
	 *   decodes the demuxed interrupts.
	 */

#ifdef CONFIG_IPIPE_DEBUG
	if (unlikely(irq >= IPIPE_NR_IRQS) ||
	    (irq < NR_IRQS && irq_to_desc(irq) == NULL)) {
		printk(KERN_ERR "I-pipe: spurious interrupt %u\n", irq);
		return;
	}
#endif
	/*
	 * CAUTION: on some archs, virtual IRQs may have acknowledge
	 * handlers. Multiplex IRQs should have one too.
	 */
	desc = irq >= NR_IRQS ? NULL : irq_to_desc(irq);
	if (flags & IPIPE_IRQF_NOACK)
		IPIPE_WARN_ONCE(desc && ipipe_chained_irq_p(irq));
	else {
		ipd = ipipe_head_domain;
		control = ipd->irqs[irq].control;
		if ((control & IPIPE_HANDLE_MASK) == 0)
			ipd = ipipe_root_domain;
		if (ipd->irqs[irq].ackfn)
			ipd->irqs[irq].ackfn(irq, desc);
		if (desc && ipipe_chained_irq_p(irq)) {
			if ((flags & IPIPE_IRQF_NOSYNC) == 0)
				/* Run demuxed IRQ handlers. */
				goto sync;
			return;
		}
	}

	/*
	 * Sticky interrupts must be handled early and separately, so
	 * that we always process them on the current domain.
	 */
	ipd = __ipipe_current_domain;
	control = ipd->irqs[irq].control;
	if (control & IPIPE_STICKY_MASK)
		goto log;

	/*
	 * In case we have no registered head domain
	 * (i.e. ipipe_head_domain == &ipipe_root), we allow
	 * interrupts to go through the fast dispatcher, since we
	 * don't care for additional latency induced by interrupt
	 * disabling at CPU level. Otherwise, we must go through the
	 * interrupt log, and leave the dispatching work ultimately to
	 * __ipipe_sync_pipeline().
	 */
	ipd = ipipe_head_domain;
	control = ipd->irqs[irq].control;
	if (control & IPIPE_HANDLE_MASK) {
		if (unlikely(flags & IPIPE_IRQF_NOSYNC))
			__ipipe_set_irq_pending(ipd, irq);
		else
			__ipipe_dispatch_irq_fast(irq);
		return;
	}

	/*
	 * The root domain must handle all interrupts, so testing the
	 * HANDLE bit for it would be pointless.
	 */
	ipd = ipipe_root_domain;
log:
	__ipipe_set_irq_pending(ipd, irq);

	if (flags & IPIPE_IRQF_NOSYNC)
		return;

	/*
	 * Optimize if we preempted a registered high priority head
	 * domain: we don't need to synchronize the pipeline unless
	 * there is a pending interrupt for it.
	 */
	if (!__ipipe_root_p &&
	    !__ipipe_ipending_p(ipipe_this_cpu_head_context()))
		return;
sync:
	__ipipe_sync_pipeline(ipipe_head_domain);
}
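
To see the flag handling from the caller's side, here are two sketched entry points: a regular external IRQ that needs both an acknowledge and an immediate sync, and a demuxed child IRQ whose sync is deferred. Function names are illustrative; the IPIPE_IRQF_* flags are the ones the dispatcher tests above. Both run with hardware IRQs off, as the dispatcher requires.

static void arch_irq_entry_sketch(unsigned int irq)
{
	/* Acknowledge, then fast-dispatch or log and sync the IRQ. */
	__ipipe_dispatch_irq(irq, 0);
}

static void demux_child_sketch(unsigned int cascaded_irq)
{
	/*
	 * Child decoded by the parent's ack handler: don't sync yet,
	 * the parent's own dispatch runs the log afterwards
	 * (assumption based on the IPIPE_IRQF_NOSYNC handling above).
	 */
	__ipipe_dispatch_irq(cascaded_irq, IPIPE_IRQF_NOSYNC);
}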