void do_irq_mask(unsigned long mask, struct irq_region *region, struct pt_regs *regs) { unsigned long bit; int irq; int cpu = smp_processor_id(); #ifdef DEBUG_IRQ if (mask != (1L << MAX_CPU_IRQ)) printk("do_irq_mask %08lx %p %p\n", mask, region, regs); #endif for(bit=(1L<<MAX_CPU_IRQ), irq = 0; mask && bit; bit>>=1, irq++) { int irq_num; if(!(bit&mask)) continue; irq_num = region->data.irqbase + irq; ++kstat.irqs[cpu][IRQ_FROM_REGION(CPU_IRQ_REGION) | irq]; if (IRQ_REGION(irq_num) != CPU_IRQ_REGION) ++kstat.irqs[cpu][irq_num]; mask_irq(irq_num); do_irq(®ion->action[irq], irq_num, regs); unmask_irq(irq_num); } }
/*
 * show_interrupts - seq_file "show" routine backing /proc/interrupts.
 * @p: seq_file to print into.
 * @v: iterator cookie (unused here).
 *
 * Emits one header row of CPU columns, then one row per registered irq
 * action in every region: irq number, per-CPU counts, region name, and the
 * chain of handler names sharing the line.  Returns 0.
 *
 * Fix: the SMP-only "LOC:" section used "#if CONFIG_SMP", which is
 * inconsistent with the "#ifndef CONFIG_SMP" used above and breaks if the
 * macro is defined empty; changed to "#ifdef CONFIG_SMP".
 */
int show_interrupts(struct seq_file *p, void *v)
{
#ifdef CONFIG_PROC_FS
	int i, j;
	int regnr, irq_no;
	struct irq_region *region;
	struct irqaction *action, *mainaction;

	/* header row: one column per online CPU */
	seq_puts(p, " ");
	for (j = 0; j < smp_num_cpus; j++)
		seq_printf(p, "CPU%d ", j);
	seq_putc(p, '\n');

	for (regnr = 0; regnr < NR_IRQ_REGS; regnr++) {
		region = irq_region[regnr];
		if (!region || !region->action)
			continue;

		mainaction = region->action;

		for (i = 0; i <= MAX_CPU_IRQ; i++) {
			action = mainaction++;
			/* slots with no name were never registered */
			if (!action || !action->name)
				continue;

			irq_no = IRQ_FROM_REGION(regnr) + i;

			seq_printf(p, "%3d: ", irq_no);
#ifndef CONFIG_SMP
			seq_printf(p, "%10u ", kstat_irqs(irq_no));
#else
			for (j = 0; j < smp_num_cpus; j++)
				seq_printf(p, "%10u ", kstat.irqs[cpu_logical_map(j)][irq_no]);
#endif
			seq_printf(p, " %14s", region->data.name ? region->data.name : "N/A");

			/* primary handler plus any shared handlers on the line */
			seq_printf(p, " %s", action->name);
			for (action = action->next; action; action = action->next)
				seq_printf(p, ", %s", action->name);

			seq_putc(p, '\n');
		}
	}
	seq_putc(p, '\n');
#ifdef CONFIG_SMP
	/* NOTE(review): apic_timer_irqs looks copied from x86 — verify it is
	 * actually maintained on this architecture */
	seq_puts(p, "LOC: ");
	for (j = 0; j < smp_num_cpus; j++)
		seq_printf(p, "%10u ", apic_timer_irqs[cpu_logical_map(j)]);
	seq_putc(p, '\n');
#endif
#endif /* CONFIG_PROC_FS */
	return 0;
}
struct irq_region *alloc_irq_region( int count, struct irq_region_ops *ops, unsigned long flags, const char *name, void *dev) { struct irq_region *region; int index; index = alloc_irqregion(); if((IRQ_REGION(count-1))) return NULL; if (count < IRQ_PER_REGION) { DBG_IRQ("alloc_irq_region() using minimum of %d irq lines for %s (%d)\n", IRQ_PER_REGION, name, count); count = IRQ_PER_REGION; } if(flags & IRQ_REG_MASK) if(!(ops->mask_irq && ops->unmask_irq)) return NULL; if(flags & IRQ_REG_DIS) if(!(ops->disable_irq && ops->enable_irq)) return NULL; if((irq_region[index])) return NULL; region = kmalloc(sizeof *region, GFP_ATOMIC); if(!region) return NULL; region->action = kmalloc(sizeof *region->action * count, GFP_ATOMIC); if(!region->action) { kfree(region); return NULL; } memset(region->action, 0, sizeof *region->action * count); region->ops = *ops; region->data.irqbase = IRQ_FROM_REGION(index); region->data.flags = flags; region->data.name = name; region->data.dev = dev; irq_region[index] = region; return irq_region[index]; }
/*
 * txn_alloc_irq - find a free interrupt line in the CPU region.
 *
 * Scans the CPU region's action table for a slot with no handler
 * installed and returns its global irq number, or -1 if every line is
 * already in use.  Slot 0 is skipped: that is the interval timer.
 */
int txn_alloc_irq(void)
{
	int i;

	/* never return irq 0 cause that's the interval timer */
	for (i = 1; i <= MAX_CPU_IRQ; i++) {
		if (!cpu_irq_region.action[i].handler)
			return IRQ_FROM_REGION(CPU_IRQ_REGION) + i;
	}

	/* unlikely, but be prepared */
	return -1;
}
/*
 * get_irq_list - legacy /proc/interrupts formatter.
 * @buf: destination buffer (caller-sized; writes are unbounded here, as
 *       was usual for this era of the interface).
 *
 * Produces the same table as show_interrupts() via sprintf and returns the
 * number of bytes written, or 0 when CONFIG_PROC_FS is disabled.
 *
 * Fix: the SMP-only "LOC:" section used "#if CONFIG_SMP", inconsistent
 * with the "#ifndef CONFIG_SMP" above and broken if the macro is defined
 * empty; changed to "#ifdef CONFIG_SMP".
 */
int get_irq_list(char *buf)
{
#ifdef CONFIG_PROC_FS
	char *p = buf;
	int i, j;
	int regnr, irq_no;
	struct irq_region *region;
	struct irqaction *action, *mainaction;

	/* header row: one column per online CPU */
	p += sprintf(p, " ");
	for (j = 0; j < smp_num_cpus; j++)
		p += sprintf(p, "CPU%d ", j);
	*p++ = '\n';

	for (regnr = 0; regnr < NR_IRQ_REGS; regnr++) {
		region = irq_region[regnr];
		if (!region || !region->action)
			continue;

		mainaction = region->action;

		for (i = 0; i <= MAX_CPU_IRQ; i++) {
			action = mainaction++;
			/* slots with no name were never registered */
			if (!action || !action->name)
				continue;

			irq_no = IRQ_FROM_REGION(regnr) + i;

			p += sprintf(p, "%3d: ", irq_no);
#ifndef CONFIG_SMP
			p += sprintf(p, "%10u ", kstat_irqs(irq_no));
#else
			for (j = 0; j < smp_num_cpus; j++)
				p += sprintf(p, "%10u ", kstat.irqs[cpu_logical_map(j)][irq_no]);
#endif
			p += sprintf(p, " %14s", region->data.name ? region->data.name : "N/A");

			/* primary handler plus any shared handlers on the line */
			p += sprintf(p, " %s", action->name);
			for (action = action->next; action; action = action->next)
				p += sprintf(p, ", %s", action->name);

			*p++ = '\n';
		}
	}
	p += sprintf(p, "\n");
#ifdef CONFIG_SMP
	/* NOTE(review): apic_timer_irqs looks copied from x86 — verify it is
	 * actually maintained on this architecture */
	p += sprintf(p, "LOC: ");
	for (j = 0; j < smp_num_cpus; j++)
		p += sprintf(p, "%10u ", apic_timer_irqs[cpu_logical_map(j)]);
	p += sprintf(p, "\n");
#endif
	return p - buf;
#else /* CONFIG_PROC_FS */
	return 0;
#endif /* CONFIG_PROC_FS */
}
/*
 * enable_cpu_irq - unmask one CPU-local interrupt line.
 * @unused: region callback cookie (ignored).
 * @irq:    region-local irq number.
 *
 * Writes the irq's EIEM bit pattern to control register 23, then sets the
 * bit in the EIEM.  NOTE(review): ctl 23 is presumably the EIRR, so this
 * looks like it clears any stale pending bit before re-enabling — confirm
 * against the PA-RISC architecture manual.
 */
static void enable_cpu_irq(void *unused, int irq)
{
	unsigned long mask = EIEM_MASK(irq);

	mtctl(mask, 23);
	SET_EIEM_BIT(irq);
}

/* Statically registered actions for the CPU region: the interval timer and
 * (on SMP) the inter-processor interrupt.  All other slots start empty and
 * are handed out by txn_alloc_irq(). */
static struct irqaction cpu_irq_actions[IRQ_PER_REGION] = {
	[IRQ_OFFSET(TIMER_IRQ)] { timer_interrupt, 0, 0, "timer", NULL, NULL },
	[IRQ_OFFSET(IPI_IRQ)] { ipi_interrupt, 0, 0, "IPI", NULL, NULL },
};

/* The CPU's own interrupt region: ops, descriptive data, and the static
 * action table above.  Supports both masking and disabling (IRQ_REG_MASK
 * and IRQ_REG_DIS), via the disable/enable_cpu_irq callbacks. */
struct irq_region cpu_irq_region = {
	{ disable_cpu_irq, enable_cpu_irq, NULL, NULL },
	{ &cpu_data[0], "PA-PIC", IRQ_REG_MASK|IRQ_REG_DIS, IRQ_FROM_REGION(CPU_IRQ_REGION)},
	cpu_irq_actions
};

/* Global region table; slot 0 is deliberately left NULL so a stray access
 * through it faults immediately. */
struct irq_region *irq_region[NR_IRQ_REGS] = {
	[ 0 ] NULL, /* abuse will data page fault (aka code 15) */
	[ CPU_IRQ_REGION ] &cpu_irq_region,
};

/* we special-case the real IRQs here, which feels right given the relatively
 * high cost of indirect calls. If anyone is bored enough to benchmark this
 * and find out whether I am right, feel free to. prumpf */

static inline void mask_irq(int irq)