static int __ipipe_common_info_show(struct seq_file *p, void *data) { struct ipipe_domain *ipd = (struct ipipe_domain *)p->private; char handling, lockbit, virtuality; unsigned long ctlbits; unsigned int irq; seq_printf(p, " +--- Handled\n"); seq_printf(p, " |+-- Locked\n"); seq_printf(p, " ||+- Virtual\n"); seq_printf(p, "[IRQ] |||\n"); mutex_lock(&ipd->mutex); for (irq = 0; irq < IPIPE_NR_IRQS; irq++) { ctlbits = ipd->irqs[irq].control; /* * There might be a hole between the last external IRQ * and the first virtual one; skip it. */ if (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq)) continue; if (ipipe_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)) /* Non-allocated virtual IRQ; skip it. */ continue; if (ctlbits & IPIPE_HANDLE_MASK) handling = 'H'; else handling = '.'; if (ctlbits & IPIPE_LOCK_MASK) lockbit = 'L'; else lockbit = '.'; if (ipipe_virtual_irq_p(irq)) virtuality = 'V'; else virtuality = '.'; seq_printf(p, " %3u: %c%c%c\n", irq, handling, lockbit, virtuality); } mutex_unlock(&ipd->mutex); return 0; }
/*
 * __ipipe_do_sync_stage() -- Flush the pending IRQs for the current
 * domain (and processor). This routine flushes the interrupt log (see
 * "Optimistic interrupt protection" from D. Stodolsky et al. for more
 * on the deferred interrupt scheme). Every interrupt that occurred
 * while the pipeline was stalled gets played.
 *
 * WARNING: CPU migration may occur over this routine.
 */
void __ipipe_do_sync_stage(void)
{
	struct ipipe_percpu_domain_data *p;
	struct ipipe_domain *ipd;
	int irq;

	p = __ipipe_current_context;
	ipd = p->domain;

	/* Stall the stage before replaying its interrupt log. */
	__set_bit(IPIPE_STALL_FLAG, &p->status);
	smp_wmb();

	if (ipd == ipipe_root_domain)
		trace_hardirqs_off();

	for (;;) {
		irq = __ipipe_next_irq(p);
		if (irq < 0)
			break;
		/*
		 * Make sure the compiler does not reorder wrongly, so
		 * that all updates to maps are done before the
		 * handler gets called.
		 */
		barrier();

		/* Locked IRQs stay logged; do not play them now. */
		if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))
			continue;

		/*
		 * Only the head domain runs its handlers with hard
		 * interrupts off; lower stages re-enable them first.
		 */
		if (ipd != ipipe_head_domain)
			hard_local_irq_enable();

		if (likely(ipd != ipipe_root_domain)) {
			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
			__ipipe_run_irqtail(irq);
			hard_local_irq_disable();
		} else if (ipipe_virtual_irq_p(irq)) {
			/* Root stage, virtual IRQ: bracket the handler
			   with the regular kernel IRQ accounting. */
			irq_enter();
			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
			irq_exit();
			root_stall_after_handler();
			hard_local_irq_disable();
			while (__ipipe_check_root_resched())
				__ipipe_preempt_schedule_irq();
		} else {
			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
			root_stall_after_handler();
			hard_local_irq_disable();
		}

		/* The handler may have migrated us to another CPU:
		   reload the per-CPU context pointer. */
		p = __ipipe_current_context;
	}

	if (ipd == ipipe_root_domain)
		trace_hardirqs_on();

	__clear_bit(IPIPE_STALL_FLAG, &p->status);
}
/*
 * Fast-path dispatch of an IRQ directly to the head (leading) domain,
 * bypassing the generic pipeline walk. Called with hardware
 * interrupts off.
 */
void __ipipe_dispatch_irq_fast(unsigned int irq) /* hw interrupts off */
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_leading_context(), *old;
	struct ipipe_domain *head = p->domain;

	/* Head stage stalled: just log the event for later replay. */
	if (unlikely(test_bit(IPIPE_STALL_FLAG, &p->status))) {
		__ipipe_set_irq_pending(head, irq);
		return;
	}

	old = __ipipe_current_context;
	/* Switch to the head domain. */
	__ipipe_set_current_context(p);

	p->irqall[irq]++;
	__set_bit(IPIPE_STALL_FLAG, &p->status);
	barrier();

	if (likely(head != ipipe_root_domain)) {
		head->irqs[irq].handler(irq, head->irqs[irq].cookie);
		__ipipe_run_irqtail(irq);
	} else {
		/* Head is the root domain: account virtual IRQs like
		   regular kernel interrupts. */
		if (ipipe_virtual_irq_p(irq)) {
			irq_enter();
			head->irqs[irq].handler(irq, head->irqs[irq].cookie);
			irq_exit();
		} else
			head->irqs[irq].handler(irq, head->irqs[irq].cookie);
		root_stall_after_handler();
	}

	hard_local_irq_disable();

	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	/* If the handler did not switch the current context away from
	   the head stage, restore the context we preempted. */
	if (__ipipe_current_context == p) {
		__ipipe_set_current_context(old);
		if (old == p) {
			/* We were already running over the head stage:
			   replay any IRQs logged meanwhile. */
			if (__ipipe_ipending_p(p))
				__ipipe_sync_stage();
			return;
		}
	}

	/*
	 * We must be running over the root domain, synchronize
	 * the pipeline for high priority IRQs.
	 */
	__ipipe_do_sync_pipeline(head);
}
/* * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline * just like if it has been actually received from a hw source. Also * works for virtual interrupts. */ int ipipe_trigger_irq(unsigned irq) { unsigned long flags; #ifdef CONFIG_IPIPE_DEBUG if (irq >= IPIPE_NR_IRQS || (ipipe_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map))) return -EINVAL; #endif flags = hard_local_irq_save(); __ipipe_handle_irq(irq, NULL); hard_local_irq_restore(flags); return 1; }
/*
 * Install @handler for @irq in domain @ipd. The acknowledge routine
 * defaults to the root domain's one when @ackfn is NULL. Returns 0 on
 * success, -EINVAL on a bad IRQ number or missing handler, -EBUSY if
 * the domain already handles this interrupt.
 */
int ipipe_request_irq(struct ipipe_domain *ipd,
		      unsigned int irq,
		      ipipe_irq_handler_t handler,
		      void *cookie,
		      ipipe_irq_ackfn_t ackfn)
{
	unsigned long flags;
	int ret;

	ipipe_root_only();

	if (handler == NULL)
		return -EINVAL;

	/* Only external IRQs and allocated virtual IRQs are valid. */
	if (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq))
		return -EINVAL;

	spin_lock_irqsave(&__ipipe_lock, flags);

	if (ipd->irqs[irq].handler) {
		ret = -EBUSY;
	} else {
		ret = 0;
		ipd->irqs[irq].handler = handler;
		ipd->irqs[irq].cookie = cookie;
		/* Fall back to the root domain's ack routine. */
		ipd->irqs[irq].ackfn =
			ackfn ? ackfn : ipipe_root_domain->irqs[irq].ackfn;
		ipd->irqs[irq].control = IPIPE_HANDLE_MASK;
		if (irq < NR_IRQS)
			__ipipe_enable_irqdesc(ipd, irq);
	}

	spin_unlock_irqrestore(&__ipipe_lock, flags);

	return ret;
}
/*
 * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
 * just like if it has been actually received from a hw source. Also
 * works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned int irq)
{
	struct pt_regs regs;
	unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
	if (irq >= IPIPE_NR_IRQS)
		return -EINVAL;
	if (ipipe_virtual_irq_p(irq)) {
		/* Virtual IRQs must have been allocated first. */
		if (!test_bit(irq - IPIPE_VIRQ_BASE,
			      &__ipipe_virtual_irq_map))
			return -EINVAL;
	} else if (irq_to_desc(irq) == NULL)
		/* External IRQs need a registered descriptor. */
		return -EINVAL;
#endif
	local_irq_save_hw(flags);
	/* Forge a minimal register frame so the entry code can process
	   this IRQ as if it came from hardware. */
	regs.flags = flags;
	regs.orig_ax = irq;	/* Positive value - IRQ won't be acked */
	regs.cs = __KERNEL_CS;
	__ipipe_handle_irq(&regs);
	local_irq_restore_hw(flags);

	return 1;
}