/*
 * init_megamod_pic - probe and initialize the C64x+ Megamodule PIC
 * @np: device tree node describing the interrupt controller
 *
 * Allocates the PIC state, creates a linear irq domain covering all
 * combiner event inputs, maps the controller registers, and wires each
 * cascaded combiner output to a core priority interrupt as described by
 * the device tree.
 *
 * Returns the initialized PIC on success, or NULL on failure.  All
 * partially acquired resources (irq domain, allocation) are released on
 * the error paths.
 */
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		/* must unwind the irq domain too, not just the allocation */
		goto error_domain;
	}

	/* Initialize MUX map: all core priority slots start unmapped */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {
		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		/*
		 * We count on the core priority interrupts (4 - 15) being
		 * direct mapped. Check that device tree provided something
		 * in that range.
		 */
		if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d virq %d out of range!\n",
			       np->full_name, i, irq);
			continue;
		}

		/* record the mapping */
		mapping[irq - 4] = i;

		pr_debug("%s: combiner-%d cascading to virq %d\n",
			 np->full_name, i, irq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_handler_data(irq, &cascade_data[i]);
		irq_set_chained_handler(irq, megamod_irq_cascade);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_domain:
	irq_domain_remove(pic->irqhost);
error_free:
	kfree(pic);

	return NULL;
}
/*
 * init_megamod_pic - probe and initialize the C64x+ Megamodule PIC
 * @np: device tree node describing the interrupt controller
 *
 * Allocates the PIC state, creates a linear irq host covering all
 * combiner event inputs, maps the controller registers, and wires each
 * cascaded combiner output to a core priority interrupt as described by
 * the device tree.  Returns the initialized PIC, or NULL on failure.
 *
 * NOTE(review): the error paths free only the kzalloc'd structure; the
 * irq host allocated by irq_alloc_host() is not unwound — confirm
 * whether that is acceptable for this boot-time-only path.
 */
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
				      NR_COMBINERS * 32, &megamod_host_ops,
				      IRQ_UNMAPPED);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		goto error_free;
	}

	/* Initialize MUX map: all core priority slots start unmapped */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {
		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		/*
		 * We count on the core priority interrupts (4 - 15) being
		 * direct mapped. Check that device tree provided something
		 * in that range.
		 */
		if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d virq %d out of range!\n",
			       np->full_name, i, irq);
			continue;
		}

		/* record the mapping */
		mapping[irq - 4] = i;

		pr_debug("%s: combiner-%d cascading to virq %d\n",
			 np->full_name, i, irq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_handler_data(irq, &cascade_data[i]);
		irq_set_chained_handler(irq, megamod_irq_cascade);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}