Example #1
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain.
 * Hardware interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain, *next_domain;
	struct list_head *head, *pos;
	int m_ack, s = -1;

	/*
	 * Software-triggered IRQs do not need any ack.  The contents
	 * of the register frame should only be used when processing
	 * the timer interrupt, never when handling any other
	 * interrupt.
	 */
	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
	this_domain = __ipipe_current_domain;

	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
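		/*
		 * Wired IRQs are dispatched immediately to the leading
		 * domain, bypassing the pipeline log.
		 */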
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
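			/*
			 * Blackfin-specific root stage lock: if IRQ
			 * synchronization is deferred, stall the root
			 * stage around the wired dispatch; 's' records
			 * whether the stall bit was already set so the
			 * exit path can restore it.
			 */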
			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
				s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
			__ipipe_dispatch_wired(next_domain, irq);
			goto out;
		}
	}

	/* Ack the interrupt. */

	pos = head;
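	/*
	 * From 'head' down, mark the IRQ as pending in the log of every
	 * domain handling it; the first domain providing an acknowledge
	 * handler acks it. The walk stops at the first domain that does
	 * not pass this IRQ further down the pipeline.
	 */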
	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline. We also enforce the
	 * additional root stage lock (blackfin-specific).
	 */
	if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
		s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    ipipe_head_cpudom_var(irqpend_himask) == 0)
		goto out;

	__ipipe_walk_pipeline(head);
out:
	if (!s)
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
}
Example #2
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain.
 * Hardware interrupts are off on entry.
 */
int __ipipe_handle_irq(struct pt_regs *regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	int irq, vector = regs->orig_ax;
	struct list_head *head, *pos;
	struct pt_regs *tick_regs;
	int m_ack;
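
	/*
	 * Hardware-raised vectors are passed in one's complement form
	 * (vector < 0) and translated through the per-CPU vector_irq
	 * map; a non-negative value denotes a self-triggered IRQ, which
	 * needs no acknowledge.
	 */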

	if (vector < 0) {
		irq = __get_cpu_var(vector_irq)[~vector];
		BUG_ON(irq < 0);
		m_ack = 0;
	} else { /* This is a self-triggered one. */
		irq = vector;
		m_ack = 1;
	}

	this_domain = ipipe_current_domain;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			__ipipe_dispatch_wired(next_domain, irq);
			goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto finalize_nosync;

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline.
	 */

	__ipipe_walk_pipeline(head);

finalize_nosync:

	/*
	 * Given our deferred dispatching model for regular IRQs, we
	 * only record CPU regs for the last timer interrupt, so that
	 * the timer handler charges CPU times properly. It is assumed
	 * that other interrupt handlers don't actually care about such
	 * information.
	 */

	if (irq == __ipipe_hrtimer_irq) {
		tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
		tick_regs->flags = regs->flags;
		tick_regs->cs = regs->cs;
		tick_regs->ip = regs->ip;
		tick_regs->bp = regs->bp;
#ifdef CONFIG_X86_64
		tick_regs->ss = regs->ss;
		tick_regs->sp = regs->sp;
#endif
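		/*
		 * When the root domain is not current, clear IF in the
		 * recorded flags, so the root timer handler sees the
		 * preempted context with interrupts disabled.
		 */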
		if (!__ipipe_root_domain_p)
			tick_regs->flags &= ~X86_EFLAGS_IF;
	}

	if (user_mode(regs) && (current->ipipe_flags & PF_EVTRET) != 0) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}
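
	/*
	 * Return non-zero only when the root domain is current and
	 * unstalled, i.e. when the caller may immediately run the
	 * regular IRQ exit path.
	 */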

	if (!__ipipe_root_domain_p ||
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		return 0;

	return 1;
}
Example #3
int __ipipe_handle_irq(struct pt_regs regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	unsigned irq = regs.orig_eax;
	struct list_head *head, *pos;
	ipipe_declare_cpuid;
	int m_ack, s_ack;

	ipipe_load_cpuid();
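
	/* A negative orig_eax denotes a hardware-raised vector, whose
	   low 8 bits carry the IRQ number; a non-negative value denotes
	   a self-triggered IRQ, which needs no acknowledge. */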

	if (regs.orig_eax < 0) {
		irq &= 0xff;
		m_ack = 0;
	} else
		m_ack = 1;

	this_domain = ipipe_percpu_domain[cpuid];

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
				next_domain->irqs[irq].acknowledge(irq);
			if (likely(__ipipe_dispatch_wired(next_domain, irq)))
				goto finalize;
			else
				goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */
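
	/* s_ack tracks slave acknowledges for shared IRQs; it starts
	   from the master state, so a software-triggered IRQ skips the
	   slave acks as well. */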

	s_ack = m_ack;
	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);

		/* For each domain handling the incoming IRQ, mark it as
		   pending in its log. */

		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			/* Domains that handle this IRQ are polled for
			   acknowledging it by decreasing priority order. The
			   interrupt must be made pending _first_ in the domain's
			   status flags before the PIC is unlocked. */

			next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
			next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
			__ipipe_set_irq_bit(next_domain, cpuid, irq);

			/* Always get the first master acknowledge available. Once
			   we've got it, allow slave acknowledge handlers to run
			   (until one of them stops us). */

			if (!m_ack)
				m_ack = next_domain->irqs[irq].acknowledge(irq);
			else if (test_bit(IPIPE_SHARED_FLAG,
					  &next_domain->irqs[irq].control) && !s_ack)
				s_ack = next_domain->irqs[irq].acknowledge(irq);
		}

		/* If the domain does not want the IRQ to be passed down the
		   interrupt pipe, exit the loop now. */

		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;

		pos = next_domain->p_link.next;
	}

	if (irq == __ipipe_tick_irq &&
	    __ipipe_pipeline_head_p(ipipe_root_domain) &&
	    ipipe_root_domain->cpudata[cpuid].irq_counters[irq].pending_hits > 1)
		/*
		 * Emulate a loss of clock ticks if Linux is owning
		 * the time source. The drift will be compensated by
		 * the timer support code.
		 */
		ipipe_root_domain->cpudata[cpuid].irq_counters[irq].pending_hits = 1;

finalize:
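
	/* Record the interrupted register state for the tick IRQ, so
	   that the root timer handler can charge CPU time properly. */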

	if (irq == __ipipe_tick_irq) {
		__ipipe_tick_regs[cpuid].eflags = regs.eflags;
		__ipipe_tick_regs[cpuid].eip = regs.eip;
		__ipipe_tick_regs[cpuid].xcs = regs.xcs;
#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
		/* Linux profiling code needs this. */
		__ipipe_tick_regs[cpuid].ebp = regs.ebp;
#endif	/* CONFIG_SMP && CONFIG_FRAME_POINTER */
	}

	/* Now walk the pipeline, yielding control to the highest
	   priority domain that has pending interrupt(s) or
	   immediately to the current domain if the interrupt has been
	   marked as 'sticky'. This search does not go beyond the
	   current domain in the pipeline. */

	__ipipe_walk_pipeline(head, cpuid);

finalize_nosync:

	ipipe_load_cpuid();

	if (ipipe_percpu_domain[cpuid] != ipipe_root_domain ||
	    test_bit(IPIPE_STALL_FLAG,
		     &ipipe_root_domain->cpudata[cpuid].status))
		return 0;

#ifdef CONFIG_SMP
	/* Prevent a spurious rescheduling from being triggered on
	   preemptible kernels along the way out through
	   ret_from_intr. */
	if (regs.orig_eax < 0)
		__set_bit(IPIPE_STALL_FLAG, &ipipe_root_domain->cpudata[cpuid].status);
#endif	/* CONFIG_SMP */

	return 1;
}