Example #1
void __ipipe_do_sync_pipeline(struct ipipe_domain *top)
{
	struct ipipe_percpu_domain_data *p;
	struct ipipe_domain *ipd;

	/* We must enter over the root domain. */
	IPIPE_WARN_ONCE(__ipipe_current_domain != ipipe_root_domain);
	ipd = top;
next:
	p = ipipe_this_cpu_context(ipd);
	if (test_bit(IPIPE_STALL_FLAG, &p->status))
		return;

	if (__ipipe_ipending_p(p)) {
		if (ipd == ipipe_root_domain)
			__ipipe_sync_stage();
		else {
			/* Switching to head. */
			p->coflags &= ~__IPIPE_ALL_R;
			__ipipe_set_current_context(p);
			__ipipe_sync_stage();
			__ipipe_set_current_domain(ipipe_root_domain);
		}
	}

	if (ipd != ipipe_root_domain) {
		ipd = ipipe_root_domain;
		goto next;
	}
}
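
The walk above touches at most two stages: the top (head) stage first, then the root stage, replaying each stage's interrupt log and bailing out as soon as it meets a stalled stage. The following stand-alone C sketch models that control flow only; the structure, names and main() scenario are invented for illustration and are not part of the I-pipe API.

#include <stdio.h>
#include <stdbool.h>

struct stage {
	const char *name;
	bool stalled;          /* models IPIPE_STALL_FLAG */
	unsigned long pending; /* models the per-stage interrupt log */
};

static struct stage head = { "head", false, 0 };
static struct stage root = { "root", false, 0 };

static void sync_stage(struct stage *s)
{
	while (s->pending) {
		int irq = __builtin_ctzl(s->pending);
		s->pending &= ~(1UL << irq);
		printf("%s: replaying irq %d\n", s->name, irq);
	}
}

/* Walk from the top stage down to root, as the function above does. */
static void do_sync_pipeline(struct stage *top)
{
	struct stage *s = top;
next:
	if (s->stalled)
		return;               /* a stalled stage ends the walk */
	if (s->pending)
		sync_stage(s);
	if (s != &root) {
		s = &root;
		goto next;
	}
}

int main(void)
{
	head.pending = 1UL << 3;
	root.pending = 1UL << 5;
	root.stalled = true;          /* root stalled: its log stays untouched */
	do_sync_pipeline(&head);      /* replays irq 3 only */
	return 0;
}
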
Example #2
asmlinkage void __sched __ipipe_preempt_schedule_irq(void)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(!hard_irqs_disabled());
	local_irq_save(flags);
	hard_local_irq_enable();
	preempt_schedule_irq(); /* Ok, may reschedule now. */
	hard_local_irq_disable();

	/*
	 * Flush any pending interrupt that may have been logged
	 * between the time preempt_schedule_irq() stalled the root
	 * stage (right before returning to us) and now.
	 */
	p = ipipe_this_cpu_root_context();
	if (unlikely(__ipipe_ipending_p(p))) {
		add_preempt_count(PREEMPT_ACTIVE);
		trace_hardirqs_on();
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
		__ipipe_sync_stage();
		sub_preempt_count(PREEMPT_ACTIVE);
	}

	__ipipe_restore_root_nosync(flags);
}
Example #3
void __ipipe_restore_head(unsigned long x) /* hw interrupt off */
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context();

	if (x) {
#ifdef CONFIG_DEBUG_KERNEL
		static int warned;
		if (!warned &&
		    __test_and_set_bit(IPIPE_STALL_FLAG, &p->status)) {
			/*
			 * Already stalled, although ipipe_restore_head()
			 * should have detected it? Warn once.
			 */
			hard_local_irq_enable();
			warned = 1;
			printk(KERN_WARNING
				   "I-pipe: ipipe_restore_head() optimization failed.\n");
			dump_stack();
			hard_local_irq_disable();
		}
#else /* !CONFIG_DEBUG_KERNEL */
		__set_bit(IPIPE_STALL_FLAG, &p->status);
#endif /* CONFIG_DEBUG_KERNEL */
	} else {
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
		if (unlikely(__ipipe_ipending_p(p)))
			__ipipe_sync_pipeline(ipipe_head_domain);
		hard_local_irq_enable();
	}
}
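
What __ipipe_restore_head() implements is the restore of a previously saved virtual interrupt state for the head stage: a non-zero saved state re-stalls the stage, while a zero state unstalls it and replays whatever was logged in the meantime. A rough user-space model of those two branches, with invented names (not kernel code):

#include <stdio.h>
#include <stdbool.h>

static bool head_stalled;
static unsigned long head_pending;

static void sync_head(void)
{
	while (head_pending) {
		int irq = __builtin_ctzl(head_pending);
		head_pending &= ~(1UL << irq);
		printf("head: replaying irq %d\n", irq);
	}
}

static void restore_head(bool saved_stalled)
{
	if (saved_stalled) {
		head_stalled = true;    /* stay stalled, keep the log intact */
	} else {
		head_stalled = false;
		if (head_pending)       /* drain what piled up while stalled */
			sync_head();
	}
}

int main(void)
{
	head_stalled = true;
	head_pending = 1UL << 7;        /* logged while the head was stalled */
	restore_head(false);            /* unstall: irq 7 is replayed here */
	return 0;
}
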
Example #4
asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
    struct ipipe_percpu_domain_data *p;
    void (*hook)(void);
    int ret;

    WARN_ON_ONCE(irqs_disabled_hw());

    /*
     * We need to run the IRQ tail hook each time we intercept a
     * syscall, because we know that important operations might be
     * pending there (e.g. Xenomai deferred rescheduling).
     */
    hook = (__typeof__(hook))__ipipe_irq_tail_hook;
    hook();

    /*
     * This routine either returns:
     * 0 -- if the syscall is to be passed to Linux;
     * >0 -- if the syscall should not be passed to Linux, and no
     * tail work should be performed;
     * <0 -- if the syscall should not be passed to Linux but the
     * tail work has to be performed (for handling signals etc).
     */

    if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
            !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
        return 0;

    ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);

    hard_local_irq_disable();

    /*
     * This is the end of the syscall path, so we may
     * safely assume a valid Linux task stack here.
     */
    if (current->ipipe_flags & PF_EVTRET) {
        current->ipipe_flags &= ~PF_EVTRET;
        __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
    }

    if (!__ipipe_root_domain_p)
        ret = -1;
    else {
        p = ipipe_root_cpudom_ptr();
        if (__ipipe_ipending_p(p))
            __ipipe_sync_pipeline();
    }

    hard_local_irq_enable();

    return -ret;
}
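
The caller of this routine acts on the three-way convention spelled out in the comment (0, >0, <0). As a hedged sketch, assuming nothing beyond that comment, this is how a low-level syscall path could classify the value; the enum and function names below are made up for illustration.

#include <stdio.h>

enum syscall_action {
	PASS_TO_LINUX,          /* return value == 0 */
	SKIP_NO_TAIL,           /* return value  > 0 */
	SKIP_WITH_TAIL,         /* return value  < 0: run tail work first */
};

static enum syscall_action classify(int ret)
{
	if (ret == 0)
		return PASS_TO_LINUX;
	return ret > 0 ? SKIP_NO_TAIL : SKIP_WITH_TAIL;
}

int main(void)
{
	printf("%d %d %d\n", classify(0), classify(1), classify(-1));
	return 0;
}
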
Example #5
void ipipe_unstall_head(void)
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context();

	hard_local_irq_disable();

	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p)))
		__ipipe_sync_pipeline(ipipe_head_domain);

	hard_local_irq_enable();
}
Example #6
void __ipipe_dispatch_irq_fast(unsigned int irq) /* hw interrupts off */
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_leading_context(), *old;
	struct ipipe_domain *head = p->domain;

	if (unlikely(test_bit(IPIPE_STALL_FLAG, &p->status))) {
		__ipipe_set_irq_pending(head, irq);
		return;
	}

	old = __ipipe_current_context;
	/* Switch to the head domain. */
	__ipipe_set_current_context(p);

	p->irqall[irq]++;
	__set_bit(IPIPE_STALL_FLAG, &p->status);
	barrier();

	if (likely(head != ipipe_root_domain)) {
		head->irqs[irq].handler(irq, head->irqs[irq].cookie);
		__ipipe_run_irqtail(irq);
	} else {
		if (ipipe_virtual_irq_p(irq)) {
			irq_enter();
			head->irqs[irq].handler(irq, head->irqs[irq].cookie);
			irq_exit();
		} else
			head->irqs[irq].handler(irq, head->irqs[irq].cookie);

		root_stall_after_handler();
	}

	hard_local_irq_disable();
	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (__ipipe_current_context == p) {
		__ipipe_set_current_context(old);
		if (old == p) {
			if (__ipipe_ipending_p(p))
				__ipipe_sync_stage();
			return;
		}
	}

	/*
	 * We must be running over the root domain; synchronize the
	 * pipeline for high-priority IRQs.
	 */
	__ipipe_do_sync_pipeline(head);
}
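
The core of the fast path is the stall bracket: the stage is marked stalled while its handler runs, so any interrupt raised meanwhile is only logged, and the log is drained once the stall bit is cleared. A minimal user-space analogue of that bracket (illustrative only, no real I-pipe calls):

#include <stdio.h>
#include <stdbool.h>

static bool stalled;
static unsigned long pending;

static void handler(int irq)
{
	printf("handling irq %d\n", irq);
	if (irq == 3)
		pending |= 1UL << 9;    /* simulate an IRQ raised during handling */
}

static void dispatch_fast(int irq)
{
	if (stalled) {                  /* stage busy: just log and return */
		pending |= 1UL << irq;
		return;
	}
	stalled = true;                 /* bracket the handler with the stall bit */
	handler(irq);
	stalled = false;
	while (pending) {               /* replay whatever was logged meanwhile */
		int n = __builtin_ctzl(pending);
		pending &= ~(1UL << n);
		handler(n);
	}
}

int main(void)
{
	dispatch_fast(3);               /* handles irq 3, then replays irq 9 */
	return 0;
}
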
Example #7
void ipipe_unstall_root(void)
{
	struct ipipe_percpu_domain_data *p;

	hard_local_irq_disable();

	/* This helps catch bad usage from assembly call sites. */
	ipipe_root_only();

	p = ipipe_this_cpu_root_context();

	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p)))
		__ipipe_sync_stage();

	hard_local_irq_enable();
}
Example #8
int __ipipe_syscall_root(struct pt_regs *regs)
{
	unsigned long flags;
	int ret;

	/*
	 * This routine either returns:
	 * 0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 * tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 * tail work has to be performed (for handling signals etc).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_ax) ||
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
		return 0;

	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);

	local_irq_save_hw(flags);

	if (current->ipipe_flags & PF_EVTRET) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!ipipe_root_domain_p)
		return 1;

	/*
	 * If allowed, sync pending VIRQs before _TIF_NEED_RESCHED is
	 * tested.
	 */
	if (__ipipe_ipending_p(ipipe_root_cpudom_ptr()))
		__ipipe_sync_pipeline();

	if (!ret)
		local_irq_restore_hw(flags);

	return -ret;
}
Example #9
asmlinkage void __ipipe_sync_root(void)
{
    void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
    struct ipipe_percpu_domain_data *p;
    unsigned long flags;

    BUG_ON(irqs_disabled());

    flags = hard_local_irq_save();

    if (irq_tail_hook)
        irq_tail_hook();

    clear_thread_flag(TIF_IRQ_SYNC);

    p = ipipe_root_cpudom_ptr();
    if (__ipipe_ipending_p(p))
        __ipipe_sync_pipeline();

    hard_local_irq_restore(flags);
}
Example #10
void __ipipe_halt_root(void)
{
	struct ipipe_percpu_domain_data *p;

	/* Emulate sti+hlt sequence over the root domain. */

	local_irq_disable_hw();

	p = ipipe_root_cpudom_ptr();

	trace_hardirqs_on();
	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p))) {
		__ipipe_sync_pipeline();
		local_irq_enable_hw();
	} else {
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
		ipipe_trace_end(0x8000000E);
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
		asm volatile("sti; hlt": : :"memory");
	}
}
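
The point of checking the log before halting is the classic lost-wakeup race: sleeping while an event is already pending would miss it, so the pending case syncs and never reaches the sti; hlt sequence. A plain C illustration of that decision, with invented names (not kernel code):

#include <stdio.h>

static unsigned long pending;

static void sync_stage(void)
{
	while (pending) {
		int irq = __builtin_ctzl(pending);
		pending &= ~(1UL << irq);
		printf("replaying irq %d instead of halting\n", irq);
	}
}

static void halt_root(void)
{
	if (pending) {
		sync_stage();   /* an event is already logged: do not sleep */
		return;
	}
	printf("idle: this is where sti; hlt would run\n");
}

int main(void)
{
	pending = 1UL << 4;
	halt_root();            /* replays irq 4, never reaches the halt */
	return 0;
}
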
Example #11
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are off on entry.
 */
int __ipipe_handle_irq(struct pt_regs *regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	int irq, vector = regs->orig_ax;
	struct list_head *head, *pos;
	struct pt_regs *tick_regs;
	int m_ack;

	if (vector < 0) {
		irq = __get_cpu_var(vector_irq)[~vector];
		BUG_ON(irq < 0);
		m_ack = 0;
	} else { /* This is a self-triggered one. */
		irq = vector;
		m_ack = 1;
	}

	this_domain = ipipe_current_domain;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			__ipipe_dispatch_wired(next_domain, irq);
			goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto finalize_nosync;

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline.
	 */

	__ipipe_walk_pipeline(head);

finalize_nosync:

	/*
	 * Given our deferred dispatching model for regular IRQs, we
	 * only record CPU regs for the last timer interrupt, so that
	 * the timer handler charges CPU time properly. It is assumed
	 * that other interrupt handlers don't actually care for such
	 * information.
	 */

	if (irq == __ipipe_hrtimer_irq) {
		tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
		tick_regs->flags = regs->flags;
		tick_regs->cs = regs->cs;
		tick_regs->ip = regs->ip;
		tick_regs->bp = regs->bp;
#ifdef CONFIG_X86_64
		tick_regs->ss = regs->ss;
		tick_regs->sp = regs->sp;
#endif
		if (!__ipipe_root_domain_p)
			tick_regs->flags &= ~X86_EFLAGS_IF;
	}

	if (user_mode(regs) && (current->ipipe_flags & PF_EVTRET) != 0) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p ||
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		return 0;

	return 1;
}
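
The acknowledge logic in the walk above follows an ack-once rule: every domain that handles the IRQ gets it logged, but the device is acknowledged at most once, by the first handling domain that provides an acknowledge handler. A simplified stand-alone model of that rule (the names and the two-entry pipeline are invented for illustration):

#include <stdio.h>
#include <stdbool.h>

#define NDOMAINS 2

struct domain {
	const char *name;
	bool handles;           /* analogue of IPIPE_HANDLE_FLAG */
	bool has_ack;           /* domain provides an acknowledge handler */
	unsigned long pending;  /* per-domain interrupt log */
};

static struct domain pipeline[NDOMAINS] = {
	{ "head", true, true,  0 },
	{ "root", true, false, 0 },
};

static void log_irq(unsigned int irq, bool m_ack)
{
	int i;

	for (i = 0; i < NDOMAINS; i++) {
		struct domain *d = &pipeline[i];

		if (!d->handles)
			continue;
		d->pending |= 1UL << irq;
		if (!m_ack && d->has_ack) {
			printf("%s acks irq %u\n", d->name, irq);
			m_ack = true;   /* never acknowledge the same IRQ twice */
		}
	}
}

int main(void)
{
	log_irq(5, false);
	printf("head pending=%#lx root pending=%#lx\n",
	       pipeline[0].pending, pipeline[1].pending);
	return 0;
}
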
Example #12
void __ipipe_dispatch_irq(unsigned int irq, int flags) /* hw interrupts off */
{
	struct ipipe_domain *ipd;
	struct irq_desc *desc;
	unsigned long control;

	/*
	 * Survival kit when reading this code:
	 *
	 * - we have two main situations, leading to three cases for
	 *   handling interrupts:
	 *
	 *   a) the root domain is alone, no registered head domain
	 *      => all interrupts are delivered via the fast dispatcher.
	 *   b) a head domain is registered
	 *      => head domain IRQs go through the fast dispatcher
	 *      => root domain IRQs go through the interrupt log
	 *
	 * - when no head domain is registered, ipipe_head_domain ==
	 *   ipipe_root_domain == &ipipe_root.
	 *
	 * - the caller tells us whether we should acknowledge this
	 *   IRQ. Even virtual IRQs may require acknowledge on some
	 *   platforms (e.g. arm/SMP).
	 *
	 * - the caller tells us whether we may try to run the IRQ log
	 *   syncer. Typically, demuxed IRQs won't be synced
	 *   immediately.
	 *
	 * - multiplex IRQs most likely have a valid acknowledge
	 *   handler and we may not be called with IPIPE_IRQF_NOACK
	 *   for them. The ack handler for the multiplex IRQ actually
	 *   decodes the demuxed interrupts.
	 */

#ifdef CONFIG_IPIPE_DEBUG
	if (unlikely(irq >= IPIPE_NR_IRQS) ||
	    (irq < NR_IRQS && irq_to_desc(irq) == NULL)) {
		printk(KERN_ERR "I-pipe: spurious interrupt %u\n", irq);
		return;
	}
#endif
	/*
	 * CAUTION: on some archs, virtual IRQs may have acknowledge
	 * handlers. Multiplex IRQs should have one too.
	 */
	desc = irq >= NR_IRQS ? NULL : irq_to_desc(irq);
	if (flags & IPIPE_IRQF_NOACK)
		IPIPE_WARN_ONCE(desc && ipipe_chained_irq_p(irq));
	else {
		ipd = ipipe_head_domain;
		control = ipd->irqs[irq].control;
		if ((control & IPIPE_HANDLE_MASK) == 0)
			ipd = ipipe_root_domain;
		if (ipd->irqs[irq].ackfn)
			ipd->irqs[irq].ackfn(irq, desc);
		if (desc && ipipe_chained_irq_p(irq)) {
			if ((flags & IPIPE_IRQF_NOSYNC) == 0)
				/* Run demuxed IRQ handlers. */
				goto sync;
			return;
		}
	}

	/*
	 * Sticky interrupts must be handled early and separately, so
	 * that we always process them on the current domain.
	 */
	ipd = __ipipe_current_domain;
	control = ipd->irqs[irq].control;
	if (control & IPIPE_STICKY_MASK)
		goto log;

	/*
	 * In case we have no registered head domain
	 * (i.e. ipipe_head_domain == &ipipe_root), we allow
	 * interrupts to go through the fast dispatcher, since we
	 * don't care for additional latency induced by interrupt
	 * disabling at CPU level. Otherwise, we must go through the
	 * interrupt log, and leave the dispatching work ultimately to
	 * __ipipe_sync_pipeline().
	 */
	ipd = ipipe_head_domain;
	control = ipd->irqs[irq].control;
	if (control & IPIPE_HANDLE_MASK) {
		if (unlikely(flags & IPIPE_IRQF_NOSYNC))
			__ipipe_set_irq_pending(ipd, irq);
		else
			__ipipe_dispatch_irq_fast(irq);
		return;
	}

	/*
	 * The root domain must handle all interrupts, so testing the
	 * HANDLE bit for it would be pointless.
	 */
	ipd = ipipe_root_domain;
log:
	__ipipe_set_irq_pending(ipd, irq);

	if (flags & IPIPE_IRQF_NOSYNC)
		return;

	/*
	 * Optimize if we preempted a registered high priority head
	 * domain: we don't need to synchronize the pipeline unless
	 * there is a pending interrupt for it.
	 */
	if (!__ipipe_root_p &&
	    !__ipipe_ipending_p(ipipe_this_cpu_head_context()))
		return;
sync:
	__ipipe_sync_pipeline(ipipe_head_domain);
}
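
Stripped of the sticky and preemption special cases, the decision above reduces to: IRQs the head stage handles take the fast dispatcher unless syncing is deferred, everything else is logged against the root stage, and the pipeline is synced on exit unless the caller asked for no sync. A condensed user-space restatement of that branch structure (illustrative only, nothing here is real I-pipe API):

#include <stdio.h>
#include <stdbool.h>

#define IRQF_NOSYNC 0x1

static bool head_handles(unsigned int irq)
{
	return irq < 16;        /* stand-in for the IPIPE_HANDLE_MASK test */
}

static void dispatch_irq(unsigned int irq, int flags)
{
	if (head_handles(irq)) {
		if (flags & IRQF_NOSYNC)
			printf("irq %u: logged for head, sync deferred\n", irq);
		else
			printf("irq %u: fast-dispatched to head\n", irq);
		return;
	}

	printf("irq %u: logged for root\n", irq);
	if (!(flags & IRQF_NOSYNC))
		printf("irq %u: pipeline synced\n", irq);
}

int main(void)
{
	dispatch_irq(3, 0);
	dispatch_irq(3, IRQF_NOSYNC);
	dispatch_irq(42, 0);
	return 0;
}
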
Example #13
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
    struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
    struct ipipe_domain *this_domain, *next_domain;
    struct list_head *head, *pos;
    struct ipipe_irqdesc *idesc;
    int m_ack, s = -1;

    /*
     * Software-triggered IRQs do not need any ack.  The contents
     * of the register frame should only be used when processing
     * the timer interrupt, but not for handling any other
     * interrupt.
     */
    m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
    this_domain = __ipipe_current_domain;
    idesc = &this_domain->irqs[irq];

    if (unlikely(test_bit(IPIPE_STICKY_FLAG, &idesc->control)))
        head = &this_domain->p_link;
    else {
        head = __ipipe_pipeline.next;
        next_domain = list_entry(head, struct ipipe_domain, p_link);
        idesc = &next_domain->irqs[irq];
        if (likely(test_bit(IPIPE_WIRED_FLAG, &idesc->control))) {
            if (!m_ack && idesc->acknowledge != NULL)
                idesc->acknowledge(irq, irq_to_desc(irq));
            if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
                s = __test_and_set_bit(IPIPE_STALL_FLAG,
                                       &p->status);
            __ipipe_dispatch_wired(next_domain, irq);
            goto out;
        }
    }

    /* Ack the interrupt. */

    pos = head;
    while (pos != &__ipipe_pipeline) {
        next_domain = list_entry(pos, struct ipipe_domain, p_link);
        idesc = &next_domain->irqs[irq];
        if (test_bit(IPIPE_HANDLE_FLAG, &idesc->control)) {
            __ipipe_set_irq_pending(next_domain, irq);
            if (!m_ack && idesc->acknowledge != NULL) {
                idesc->acknowledge(irq, irq_to_desc(irq));
                m_ack = 1;
            }
        }
        if (!test_bit(IPIPE_PASS_FLAG, &idesc->control))
            break;
        pos = next_domain->p_link.next;
    }

    /*
     * Now walk the pipeline, yielding control to the highest
     * priority domain that has pending interrupt(s) or
     * immediately to the current domain if the interrupt has been
     * marked as 'sticky'. This search does not go beyond the
     * current domain in the pipeline. We also enforce the
     * additional root stage lock (blackfin-specific).
     */
    if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
        s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);

    /*
     * If the interrupt preempted the head domain, then do not
     * even try to walk the pipeline, unless an interrupt is
     * pending for it.
     */
    if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
            !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
        goto out;

    __ipipe_walk_pipeline(head);
out:
    if (!s)
        __clear_bit(IPIPE_STALL_FLAG, &p->status);
}
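
The s variable implements an ownership rule for the stall bit: __test_and_set_bit() returns the previous value, so s == 0 means this function set the bit itself and must clear it on the way out, while s == 1 (already stalled) or s == -1 (never touched) leaves the bit alone. A small stand-alone illustration of that bookkeeping, not kernel code:

#include <stdio.h>
#include <stdbool.h>

static bool stall_flag;

/* Returns the previous value, like __test_and_set_bit(). */
static int test_and_set(bool *flag)
{
	int old = *flag;

	*flag = true;
	return old;
}

static void guarded_section(bool defer_sync)
{
	int s = -1;                     /* -1: we never touched the flag */

	if (defer_sync)
		s = test_and_set(&stall_flag);

	/* ... work that must not trigger an immediate sync ... */

	if (!s)                         /* clear only if we set it ourselves */
		stall_flag = false;
}

int main(void)
{
	guarded_section(true);
	printf("flag after owned set/clear: %d\n", stall_flag);  /* 0 */

	stall_flag = true;              /* someone else already stalled */
	guarded_section(true);
	printf("flag left set for its owner: %d\n", stall_flag); /* 1 */
	return 0;
}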