/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are off on entry.
 */
int __ipipe_handle_irq(struct pt_regs *regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	int irq, vector = regs->orig_ax;
	struct list_head *head, *pos;
	struct pt_regs *tick_regs;
	int m_ack;

	if (vector < 0) {
		irq = __get_cpu_var(vector_irq)[~vector];
		BUG_ON(irq < 0);
		m_ack = 0;
	} else { /* This is a self-triggered one. */
		irq = vector;
		m_ack = 1;
	}

	this_domain = ipipe_current_domain;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG,
				    &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			__ipipe_dispatch_wired(next_domain, irq);
			goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG,
			     &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG,
			      &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto finalize_nosync;

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline.
	 */
	__ipipe_walk_pipeline(head);

finalize_nosync:

	/*
	 * Given our deferred dispatching model for regular IRQs, we
	 * only record CPU regs for the last timer interrupt, so that
	 * the timer handler charges CPU times properly. It is assumed
	 * that other interrupt handlers don't actually care for such
	 * information.
	 */
	if (irq == __ipipe_hrtimer_irq) {
		tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
		tick_regs->flags = regs->flags;
		tick_regs->cs = regs->cs;
		tick_regs->ip = regs->ip;
		tick_regs->bp = regs->bp;
#ifdef CONFIG_X86_64
		tick_regs->ss = regs->ss;
		tick_regs->sp = regs->sp;
#endif
		if (!__ipipe_root_domain_p)
			tick_regs->flags &= ~X86_EFLAGS_IF;
	}

	if (user_mode(regs) &&
	    (current->ipipe_flags & PF_EVTRET) != 0) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p ||
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		return 0;

	return 1;
}
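Before moving on to the Blackfin variant of the same handler, it is worth making the "optimistic interrupt protection log" from the header comment concrete. The key idea is that __ipipe_set_irq_pending() does not run any handler: it only marks the IRQ in a per-domain log, and a later synchronization pass (triggered by the pipeline walk once the domain is unstalled) replays the marked IRQs. The following is a minimal, self-contained sketch of that mechanism; the names pend_log, log_irq_pending and sync_pending are hypothetical simplifications, not the actual I-pipe data layout (which keeps one such log per domain and per CPU).

/*
 * Sketch only: a flat bitmap standing in for the per-domain,
 * per-CPU pending-IRQ log maintained by the real pipeline code.
 */
#include <limits.h>
#include <stdio.h>

#define NR_IRQS_SKETCH		256
#define BITS_PER_LONG_SKETCH	(sizeof(unsigned long) * CHAR_BIT)
#define PEND_WORDS		(NR_IRQS_SKETCH / BITS_PER_LONG_SKETCH)

struct pend_log {
	unsigned long pending[PEND_WORDS];
};

/* Log an IRQ instead of running its handler immediately. */
static void log_irq_pending(struct pend_log *log, unsigned int irq)
{
	log->pending[irq / BITS_PER_LONG_SKETCH] |=
		1UL << (irq % BITS_PER_LONG_SKETCH);
}

/* Replay every logged IRQ once the domain accepts interrupts again. */
static void sync_pending(struct pend_log *log,
			 void (*handler)(unsigned int irq))
{
	unsigned int word, bit;

	for (word = 0; word < PEND_WORDS; word++) {
		while (log->pending[word] != 0) {
			bit = __builtin_ctzl(log->pending[word]);
			log->pending[word] &= ~(1UL << bit);
			handler(word * BITS_PER_LONG_SKETCH + bit);
		}
	}
}

static void demo_handler(unsigned int irq)
{
	printf("replaying irq %u\n", irq);
}

int main(void)
{
	struct pend_log log = { { 0 } };

	log_irq_pending(&log, 3);	  /* hardware fired while "stalled" */
	log_irq_pending(&log, 130);
	sync_pending(&log, demo_handler); /* domain unstalled: replay */
	return 0;
}

This deferred-replay scheme is what makes the protection "optimistic": interrupts are logged cheaply on every entry, and the cost of dispatching is paid only when a domain is actually willing to take them.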
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are masked on entry.
 */
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain, *next_domain;
	struct list_head *head, *pos;
	struct ipipe_irqdesc *idesc;
	int m_ack, s = -1;

	/*
	 * Software-triggered IRQs do not need any ack. The contents
	 * of the register frame should only be used when processing
	 * the timer interrupt, but not for handling any other
	 * interrupt.
	 */
	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
	this_domain = __ipipe_current_domain;
	idesc = &this_domain->irqs[irq];

	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &idesc->control)))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		idesc = &next_domain->irqs[irq];
		if (likely(test_bit(IPIPE_WIRED_FLAG, &idesc->control))) {
			if (!m_ack && idesc->acknowledge != NULL)
				idesc->acknowledge(irq, irq_to_desc(irq));
			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
				s = __test_and_set_bit(IPIPE_STALL_FLAG,
						       &p->status);
			__ipipe_dispatch_wired(next_domain, irq);
			goto out;
		}
	}

	/* Ack the interrupt. */

	pos = head;
	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		idesc = &next_domain->irqs[irq];
		if (test_bit(IPIPE_HANDLE_FLAG, &idesc->control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && idesc->acknowledge != NULL) {
				idesc->acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &idesc->control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline. We also enforce the
	 * additional root stage lock (blackfin-specific).
	 */
	if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
		s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto out;

	__ipipe_walk_pipeline(head);
out:
	if (!s)
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
}
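The Blackfin-specific detail worth pausing on is the stall save/restore idiom built around s and IPIPE_SYNCDEFER_FLAG. The sentinel s == -1 means "this function never touched the stall bit"; __test_and_set_bit() returns the bit's previous value, so at the out: label the stall bit is cleared only when s == 0, i.e. only when this invocation is the one that set it. If the root stage was already stalled (s == 1), or no stall was taken here at all (s == -1), the bit is left alone. Below is a minimal, self-contained sketch of that idiom under assumed names (test_and_set_stall, guarded_section, STALL_BIT are all hypothetical, and the real code uses atomic bitops on per-CPU state rather than a plain global).

/*
 * Sketch only: the conditional stall/restore pattern, reduced to a
 * single global status word for illustration.
 */
#include <stdio.h>

#define STALL_BIT 0x1UL

static unsigned long status;

/* Returns the previous state of the stall bit, then sets it. */
static int test_and_set_stall(void)
{
	int was_set = (status & STALL_BIT) != 0;

	status |= STALL_BIT;
	return was_set;
}

static void guarded_section(int defer_sync)
{
	int s = -1;	/* -1: stall bit untouched by us */

	if (defer_sync)
		s = test_and_set_stall();

	/* ... dispatch work while the stage is (possibly) stalled ... */

	if (!s)		/* only undo a stall we created ourselves */
		status &= ~STALL_BIT;
}

int main(void)
{
	status |= STALL_BIT;	/* pre-stalled by an outer context */
	guarded_section(1);	/* must not clear the outer stall */
	printf("stall bit: %lu (expected 1)\n", status & STALL_BIT);

	status = 0;
	guarded_section(1);	/* stalls and unstalls itself */
	printf("stall bit: %lu (expected 0)\n", status & STALL_BIT);
	return 0;
}

The point of the three-valued sentinel is re-entrancy: a nested invocation never releases a stall owned by an outer context, which is exactly what the out: epilogue in the Blackfin handler guarantees.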