Example #1
void __ipipe_dispatch_irq_fast(unsigned int irq) /* hw interrupts off */
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_leading_context(), *old;
	struct ipipe_domain *head = p->domain;

	if (unlikely(test_bit(IPIPE_STALL_FLAG, &p->status))) {
		__ipipe_set_irq_pending(head, irq);
		return;
	}

	old = __ipipe_current_context;
	/* Switch to the head domain. */
	__ipipe_set_current_context(p);

	p->irqall[irq]++;
	__set_bit(IPIPE_STALL_FLAG, &p->status);
	barrier();

	if (likely(head != ipipe_root_domain)) {
		head->irqs[irq].handler(irq, head->irqs[irq].cookie);
		__ipipe_run_irqtail(irq);
	} else {
		if (ipipe_virtual_irq_p(irq)) {
			irq_enter();
			head->irqs[irq].handler(irq, head->irqs[irq].cookie);
			irq_exit();
		} else
			head->irqs[irq].handler(irq, head->irqs[irq].cookie);

		root_stall_after_handler();
	}

	hard_local_irq_disable();
	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (__ipipe_current_context == p) {
		__ipipe_set_current_context(old);
		if (old == p) {
			if (__ipipe_ipending_p(p))
				__ipipe_sync_stage();
			return;
		}
	}

	/*
	 * We must be running over the root domain, synchronize
	 * the pipeline for high priority IRQs.
	 */
	__ipipe_do_sync_pipeline(head);
}
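
The fast path above calls the head domain's handler directly, with the head stage stalled and hardware interrupts off. For orientation, a client domain would typically attach such a handler along the lines of the sketch below. This assumes the ipipe_register_head()/ipipe_request_irq() interface of this I-pipe generation; my_head, my_rt_handler and my_cookie are hypothetical names, not part of the original source, and exact signatures vary between I-pipe versions.

static struct ipipe_domain my_head;	/* hypothetical client domain */

/* Invoked from __ipipe_dispatch_irq_fast(): the head stage is stalled
 * and hardware interrupts are disabled while this runs. */
static void my_rt_handler(unsigned int irq, void *cookie)
{
	/* acknowledge the device, wake a real-time consumer, ... */
}

static int my_head_setup(unsigned int irq, void *my_cookie)
{
	ipipe_register_head(&my_head, "my-head");
	return ipipe_request_irq(&my_head, irq, my_rt_handler,
				 my_cookie, NULL /* default ack */);
}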
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are off on entry.
 */
int __ipipe_handle_irq(struct pt_regs *regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	int irq, vector = regs->orig_ax;
	struct list_head *head, *pos;
	struct pt_regs *tick_regs;
	int m_ack;

	if (vector < 0) {
		irq = __get_cpu_var(vector_irq)[~vector];
		BUG_ON(irq < 0);
		m_ack = 0;
	} else { /* This is a self-triggered one. */
		irq = vector;
		m_ack = 1;
	}

	this_domain = ipipe_current_domain;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			__ipipe_dispatch_wired(next_domain, irq);
			goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto finalize_nosync;

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline.
	 */

	__ipipe_walk_pipeline(head);

finalize_nosync:

	/*
	 * Given our deferred dispatching model for regular IRQs, we
	 * only record CPU regs for the last timer interrupt, so that
	 * the timer handler charges CPU times properly. It is assumed
	 * that other interrupt handlers don't actually care for such
	 * information.
	 */

	if (irq == __ipipe_hrtimer_irq) {
		tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
		tick_regs->flags = regs->flags;
		tick_regs->cs = regs->cs;
		tick_regs->ip = regs->ip;
		tick_regs->bp = regs->bp;
#ifdef CONFIG_X86_64
		tick_regs->ss = regs->ss;
		tick_regs->sp = regs->sp;
#endif
		if (!__ipipe_root_domain_p)
			tick_regs->flags &= ~X86_EFLAGS_IF;
	}

	if (user_mode(regs) && (current->ipipe_flags & PF_EVTRET) != 0) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p ||
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		return 0;

	return 1;
}
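
Unlike the dispatcher above, this x86 variant only returns a hint to its caller: 1 when the root domain is current and unstalled, so the regular Linux IRQ exit work (softirqs, preemption, signal delivery) may run, and 0 when a higher priority domain is active or the root stage is stalled, in which case the low-level vector stub must return without doing any of that. A minimal sketch of that caller-side decision, with do_linux_irq_exit() and do_plain_return() as hypothetical stand-ins for the real entry code:

	if (__ipipe_handle_irq(regs))
		do_linux_irq_exit(regs);	/* hypothetical: usual softirq/preemption/signal path */
	else
		do_plain_return(regs);		/* hypothetical: bare register restore and return */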
Example #3
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain, *next_domain;
	struct list_head *head, *pos;
	int m_ack, s = -1;

	/*
	 * Software-triggered IRQs do not need any ack.  The contents
	 * of the register frame should only be used when processing
	 * the timer interrupt, but not for handling any other
	 * interrupt.
	 */
	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
	this_domain = __ipipe_current_domain;

	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
				s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
			__ipipe_dispatch_wired(next_domain, irq);
			goto out;
		}
	}

	/* Ack the interrupt. */

	pos = head;
	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline. We also enforce the
	 * additional root stage lock (blackfin-specific).
	 */
	if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
		s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    ipipe_head_cpudom_var(irqpend_himask) == 0)
		goto out;

	__ipipe_walk_pipeline(head);
out:
	if (!s)
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
}
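
Both __ipipe_handle_irq() variants take the early exit only for IRQs the leading domain has marked as wired (the IPIPE_WIRED_FLAG tests above). With the legacy API of this I-pipe generation, a client domain would typically request that treatment through ipipe_virtualize_irq(); the sketch below illustrates the usual mode mask, assuming hypothetical my_domain, my_handler and my_cookie, and is not code from the patch itself.

static struct ipipe_domain my_domain;	/* hypothetical client domain */

static void my_handler(unsigned irq, void *cookie)
{
	/* reached through __ipipe_dispatch_wired(), bypassing the log */
}

static int my_wire_irq(unsigned irq, void *my_cookie)
{
	/* HANDLE makes the domain a target for the IRQ; WIRED makes the
	 * dispatch immediate instead of deferred to the pipeline walk. */
	return ipipe_virtualize_irq(&my_domain, irq, my_handler, my_cookie,
				    NULL /* default acknowledge */,
				    IPIPE_HANDLE_MASK | IPIPE_WIRED_MASK);
}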
Example #4
void __ipipe_dispatch_irq(unsigned int irq, int flags) /* hw interrupts off */
{
	struct ipipe_domain *ipd;
	struct irq_desc *desc;
	unsigned long control;

	/*
	 * Survival kit when reading this code:
	 *
	 * - we have two main situations, leading to three cases for
	 *   handling interrupts:
	 *
	 *   a) the root domain is alone, no registered head domain
	 *      => all interrupts are delivered via the fast dispatcher.
	 *   b) a head domain is registered
	 *      => head domain IRQs go through the fast dispatcher
	 *      => root domain IRQs go through the interrupt log
	 *
	 * - when no head domain is registered, ipipe_head_domain ==
	 *   ipipe_root_domain == &ipipe_root.
	 *
	 * - the caller tells us whether we should acknowledge this
	 *   IRQ. Even virtual IRQs may require acknowledge on some
	 *   platforms (e.g. arm/SMP).
	 *
	 * - the caller tells us whether we may try to run the IRQ log
	 *   syncer. Typically, demuxed IRQs won't be synced
	 *   immediately.
	 *
	 * - multiplex IRQs most likely have a valid acknowledge
	 *   handler and we may not be called with IPIPE_IRQF_NOACK
	 *   for them. The ack handler for the multiplex IRQ actually
	 *   decodes the demuxed interrupts.
	 */

#ifdef CONFIG_IPIPE_DEBUG
	if (unlikely(irq >= IPIPE_NR_IRQS) ||
	    (irq < NR_IRQS && irq_to_desc(irq) == NULL)) {
		printk(KERN_ERR "I-pipe: spurious interrupt %u\n", irq);
		return;
	}
#endif
	/*
	 * CAUTION: on some archs, virtual IRQs may have acknowledge
	 * handlers. Multiplex IRQs should have one too.
	 */
	desc = irq >= NR_IRQS ? NULL : irq_to_desc(irq);
	if (flags & IPIPE_IRQF_NOACK)
		IPIPE_WARN_ONCE(desc && ipipe_chained_irq_p(irq));
	else {
		ipd = ipipe_head_domain;
		control = ipd->irqs[irq].control;
		if ((control & IPIPE_HANDLE_MASK) == 0)
			ipd = ipipe_root_domain;
		if (ipd->irqs[irq].ackfn)
			ipd->irqs[irq].ackfn(irq, desc);
		if (desc && ipipe_chained_irq_p(irq)) {
			if ((flags & IPIPE_IRQF_NOSYNC) == 0)
				/* Run demuxed IRQ handlers. */
				goto sync;
			return;
		}
	}

	/*
	 * Sticky interrupts must be handled early and separately, so
	 * that we always process them on the current domain.
	 */
	ipd = __ipipe_current_domain;
	control = ipd->irqs[irq].control;
	if (control & IPIPE_STICKY_MASK)
		goto log;

	/*
	 * In case we have no registered head domain
	 * (i.e. ipipe_head_domain == &ipipe_root), we allow
	 * interrupts to go through the fast dispatcher, since we
	 * don't care for additional latency induced by interrupt
	 * disabling at CPU level. Otherwise, we must go through the
	 * interrupt log, and leave the dispatching work ultimately to
	 * __ipipe_sync_pipeline().
	 */
	ipd = ipipe_head_domain;
	control = ipd->irqs[irq].control;
	if (control & IPIPE_HANDLE_MASK) {
		if (unlikely(flags & IPIPE_IRQF_NOSYNC))
			__ipipe_set_irq_pending(ipd, irq);
		else
			__ipipe_dispatch_irq_fast(irq);
		return;
	}

	/*
	 * The root domain must handle all interrupts, so testing the
	 * HANDLE bit for it would be pointless.
	 */
	ipd = ipipe_root_domain;
log:
	__ipipe_set_irq_pending(ipd, irq);

	if (flags & IPIPE_IRQF_NOSYNC)
		return;

	/*
	 * Optimize if we preempted a registered high priority head
	 * domain: we don't need to synchronize the pipeline unless
	 * there is a pending interrupt for it.
	 */
	if (!__ipipe_root_p &&
	    !__ipipe_ipending_p(ipipe_this_cpu_head_context()))
		return;
sync:
	__ipipe_sync_pipeline(ipipe_head_domain);
}
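
The flags argument selects between the paths above. Two common callers illustrate the intent: software-raised (e.g. virtual) IRQs need no hardware acknowledge, so they pass IPIPE_IRQF_NOACK, while the children of a chained (multiplex) IRQ are only logged with IPIPE_IRQF_NOSYNC so the parent's flow handler can finish before the pipeline is synchronized. The I-pipe core of this generation provides helpers to that effect (ipipe_raise_irq(), ipipe_handle_demuxed_irq()); the bodies below are a reconstructed sketch of that flag usage, not verbatim copies.

/* Raise an IRQ from software, typically a virtual IRQ: no hardware
 * acknowledge is needed, hence IPIPE_IRQF_NOACK. */
void ipipe_raise_irq(unsigned int irq)
{
	unsigned long flags = hard_local_irq_save();

	__ipipe_dispatch_irq(irq, IPIPE_IRQF_NOACK);
	hard_local_irq_restore(flags);
}

/* Dispatch a child of a multiplex IRQ from its demux handler: log it,
 * but defer syncing until the parent interrupt unwinds. */
static inline void ipipe_handle_demuxed_irq(unsigned int cascade_irq)
{
	__ipipe_dispatch_irq(cascade_irq, IPIPE_IRQF_NOSYNC);
}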