/*
 * Initialisation of PIC, this should be called in BSP
 */
void __init gef_pic_init(struct device_node *np)
{
	unsigned long flags;

	/* Map the devices registers into memory */
	gef_pic_irq_reg_base = of_iomap(np, 0);
	/* Fix: bail out if the mapping failed instead of poking NULL+offset */
	if (gef_pic_irq_reg_base == NULL) {
		printk(KERN_ERR "SBC610: failed to map PIC registers\n");
		return;
	}

	spin_lock_irqsave(&gef_pic_lock, flags);

	/* Initialise everything as masked. */
	out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_INTR_MASK, 0);
	out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_INTR_MASK, 0);

	out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_MCP_MASK, 0);
	out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_MCP_MASK, 0);

	spin_unlock_irqrestore(&gef_pic_lock, flags);

	/* Map controller */
	gef_pic_cascade_irq = irq_of_parse_and_map(np, 0);
	if (gef_pic_cascade_irq == NO_IRQ) {
		/* Fix: message was missing its terminating newline */
		printk(KERN_ERR "SBC610: failed to map cascade interrupt\n");
		return;
	}

	/* Setup an irq_host structure */
	gef_pic_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
					  GEF_PIC_NUM_IRQS,
					  &gef_pic_host_ops, NO_IRQ);
	if (gef_pic_irq_host == NULL)
		return;

	/* Chain with parent controller */
	set_irq_chained_handler(gef_pic_cascade_irq, gef_pic_cascade);
}
/*
 * Map the Xilinx interrupt controller, put it into a known quiescent
 * state and register an irq_host for it.  Returns the new host or
 * panics on allocation failure (this runs at early init).
 */
struct irq_host * __init xilinx_intc_init(struct device_node *np)
{
	void *base;
	struct irq_host *host;

	/* Find and map the intc registers */
	base = of_iomap(np, 0);
	if (!base) {
		pr_err("xilinx_intc: could not map registers\n");
		return NULL;
	}

	/* Quiesce the controller before anything can fire. */
	out_be32(base + XINTC_IER, 0);		/* disable all irqs */
	out_be32(base + XINTC_IAR, ~(u32) 0);	/* Acknowledge pending irqs */
	out_be32(base + XINTC_MER, 0x3UL);	/* Turn on the Master Enable. */

	/* Allocate and initialize an irq_host structure. */
	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, XILINX_INTC_MAXIRQS,
			      &xilinx_intc_ops, -1);
	if (!host)
		panic(__FILE__ ": Cannot allocate IRQ host\n");

	host->host_data = base;
	return host;
}
/* Allocate the tree-mapped XICS host and install it as the default. */
static void __init xics_init_host(void)
{
	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0,
				   &xics_host_ops, XICS_IRQ_SPURIOUS);
	BUG_ON(!xics_host);
	irq_set_default_host(xics_host);
}
/*
 * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c
 * It must be called before the bus walk.
 */
void __init iSeries_init_IRQ(void)
{
	/* Register PCI event handler and open an event path */
	struct irq_host *host;
	int ret;

	/*
	 * The Hypervisor only allows us up to 256 interrupt
	 * sources (the irq number is passed in a u8).
	 */
	irq_set_virq_count(256);

	/* Create irq host. No need for a revmap since HV will give us
	 * back our virtual irq number
	 *
	 * Fix: irq_alloc_host() takes the device node as its first
	 * argument (NULL here, no associated OF node); it was missing,
	 * which shifted every other argument by one.
	 */
	host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
			      &iseries_irq_host_ops, 0);
	BUG_ON(host == NULL);
	irq_set_default_host(host);

	ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
					&pci_event_handler);
	if (ret == 0) {
		ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
		if (ret != 0)
			printk(KERN_ERR "iseries_init_IRQ: open event path "
			       "failed with rc 0x%x\n", ret);
	} else
		printk(KERN_ERR "iseries_init_IRQ: register handler "
		       "failed with rc 0x%x\n", ret);
}
/**
 * mpc52xx_init_irq - Initialize and register with the virq subsystem
 *
 * Hook for setting up IRQs on an mpc5200 system. A pointer to this function
 * is to be put into the machine definition structure.
 *
 * This function searches the device tree for an MPC5200 interrupt controller,
 * initializes it, and registers it with the virq subsystem.
 */
void __init mpc52xx_init_irq(void)
{
	u32 intr_ctrl;
	struct device_node *picnode;
	struct device_node *np;

	/* Remap the necessary zones */
	picnode = of_find_matching_node(NULL, mpc52xx_pic_ids);

	intr = of_iomap(picnode, 0);
	if (!intr)
		panic(__FILE__ ": find_and_map failed on 'mpc5200-pic'. "
		      "Check node !");

	np = of_find_matching_node(NULL, mpc52xx_sdma_ids);
	sdma = of_iomap(np, 0);
	of_node_put(np);
	if (!sdma)
		panic(__FILE__ ": find_and_map failed on 'mpc5200-bestcomm'. "
		      "Check node !");

	pr_debug("MPC5200 IRQ controller mapped to 0x%p\n", intr);

	/* Disable all interrupt sources. */
	out_be32(&sdma->IntPend, 0xffffffff);	/* 1 means clear pending */
	out_be32(&sdma->IntMask, 0xffffffff);	/* 1 means disabled */
	out_be32(&intr->per_mask, 0x7ffffc00);	/* 1 means disabled */
	out_be32(&intr->main_mask, 0x00010fff);	/* 1 means disabled */
	intr_ctrl = in_be32(&intr->ctrl);
	intr_ctrl &= 0x00ff0000;	/* Keeps IRQ[0-3] config */
	intr_ctrl |=	0x0f000000 |	/* clear IRQ 0-3 */
			0x00001000 |	/* MEE master external enable */
			0x00000000 |	/* 0 means disable IRQ 0-3 */
			0x00000001;	/* CEb route critical normally */
	out_be32(&intr->ctrl, intr_ctrl);

	/* Zero a bunch of the priority settings. */
	out_be32(&intr->per_pri1, 0);
	out_be32(&intr->per_pri2, 0);
	out_be32(&intr->per_pri3, 0);
	out_be32(&intr->main_pri1, 0);
	out_be32(&intr->main_pri2, 0);

	/*
	 * As last step, add an irq host to translate the real
	 * hw irq information provided by the ofw to linux virq
	 *
	 * NOTE(review): picnode's reference (from of_find_matching_node)
	 * is intentionally not dropped here -- presumably because the irq
	 * host keeps using the node; confirm before adding of_node_put().
	 */
	mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR,
					 MPC52xx_IRQ_HIGHTESTHWIRQ,
					 &mpc52xx_irqhost_ops, -1);

	if (!mpc52xx_irqhost)
		panic(__FILE__ ": Cannot allocate the IRQ host\n");

	irq_set_default_host(mpc52xx_irqhost);

	pr_info("MPC52xx PIC is up and running!\n");
}
/*
 * Pick the LPAR or direct-access XICS chip ops depending on firmware,
 * then allocate the tree-mapped host and make it the system default.
 */
static void __init xics_init_host(void)
{
	xics_irq_chip = firmware_has_feature(FW_FEATURE_LPAR) ?
			&xics_pic_lpar : &xics_pic_direct;

	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0,
				   &xics_host_ops, XICS_IRQ_SPURIOUS);
	BUG_ON(!xics_host);
	irq_set_default_host(xics_host);
}
/* Register the legacy-mapped irq_host for Tsi108 PCI interrupts. */
void __init tsi108_pci_int_init(struct device_node *node)
{
	DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");

	pci_irq_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY,
				      0, &pci_irq_host_ops, 0);
	if (!pci_irq_host) {
		printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n");
		return;
	}

	init_pci_source();
}
/*
 * Find the ePAPR hypervisor PIC node, allocate the ehv_pic state and its
 * linear irq host, and install it as the default interrupt host.
 */
void __init ehv_pic_init(void)
{
	struct device_node *np, *np2;
	struct ehv_pic *ehv_pic;
	int coreint_flag = 1;

	np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
	if (!np) {
		pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
		return;
	}

	/* No external-proxy property means no coreint delivery */
	if (!of_find_property(np, "has-external-proxy", NULL))
		coreint_flag = 0;

	ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
	if (!ehv_pic) {
		of_node_put(np);
		return;
	}

	ehv_pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
		NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0);

	if (!ehv_pic->irqhost) {
		of_node_put(np);
		kfree(ehv_pic);	/* Fix: ehv_pic was leaked on this path */
		return;
	}

	np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
	if (np2) {
		mpic_percpu_base_vaddr = of_iomap(np2, 0);
		if (!mpic_percpu_base_vaddr)
			pr_err("ehv_pic_init: of_iomap failed\n");

		of_node_put(np2);
	}

	ehv_pic->irqhost->host_data = ehv_pic;
	ehv_pic->hc_irq = ehv_pic_irq_chip;
	ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
	ehv_pic->coreint_flag = coreint_flag;

	global_ehv_pic = ehv_pic;
	irq_set_default_host(global_ehv_pic->irqhost);
}
/*
 * Setup Media5200 IRQ mapping
 */
static void __init media5200_init_irq(void)
{
	struct device_node *fpga_np;
	int cascade_virq;

	/* First setup the regular MPC5200 interrupt controller */
	mpc52xx_init_irq();

	/* Now find the FPGA IRQ */
	fpga_np = of_find_compatible_node(NULL, NULL, "fsl,media5200-fpga");
	if (!fpga_np)
		goto out;
	pr_debug("%s: found fpga node: %s\n", __func__, fpga_np->full_name);

	media5200_irq.regs = of_iomap(fpga_np, 0);
	if (!media5200_irq.regs)
		goto out;
	pr_debug("%s: mapped to %p\n", __func__, media5200_irq.regs);

	cascade_virq = irq_of_parse_and_map(fpga_np, 0);
	if (!cascade_virq)
		goto out;
	pr_debug("%s: cascaded on virq=%i\n", __func__, cascade_virq);

	/* Disable all FPGA IRQs */
	out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, 0);

	spin_lock_init(&media5200_irq.lock);

	media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_HOST_MAP_LINEAR,
					       MEDIA5200_NUM_IRQS,
					       &media5200_irq_ops, -1);
	if (!media5200_irq.irqhost)
		goto out;
	pr_debug("%s: allocated irqhost\n", __func__);

	media5200_irq.irqhost->host_data = &media5200_irq;

	set_irq_data(cascade_virq, &media5200_irq);
	set_irq_chained_handler(cascade_virq, media5200_irq_cascade);

	return;

 out:
	/* Fix: drop the node reference taken by of_find_compatible_node()
	 * on the failure paths; of_node_put(NULL) is a safe no-op. */
	of_node_put(fpga_np);
	pr_err("Could not find Media5200 FPGA; PCI interrupts will not work\n");
}
/*
 * Hook the MPC5121 ADS CPLD interrupt controller into the cascade and
 * register its linear irq host.
 */
void __init mpc5121_ads_cpld_pic_init(void)
{
	struct device_node *np;
	unsigned int cascade_irq;

	pr_debug("cpld_ic_init\n");

	np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld-pic");
	if (!np) {
		printk(KERN_ERR "CPLD PIC init: can not find cpld-pic node\n");
		return;
	}

	if (!cpld_regs)
		goto end;

	cascade_irq = irq_of_parse_and_map(np, 0);
	if (cascade_irq == NO_IRQ)
		goto end;

	/*
	 * statically route touch screen pendown through 1
	 * and ignore it here
	 * route all others through our cascade irq
	 */
	out_8(&cpld_regs->route, 0xfd);
	out_8(&cpld_regs->pci_mask, 0xff);
	/* unmask pci ints in misc mask */
	out_8(&cpld_regs->misc_mask, ~(MISC_IGNORE));

	cpld_pic_node = of_node_get(np);

	cpld_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 16,
				       &cpld_pic_host_ops, 16);
	if (!cpld_pic_host) {
		printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n");
		goto end;
	}

	set_irq_chained_handler(cascade_irq, cpld_pic_cascade);
end:
	/* balances of_find_compatible_node(); cpld_pic_node holds its own ref */
	of_node_put(np);
}
/*
 * Map the Flipper processor-interface registers via the parent node,
 * quiesce the controller and allocate its linear irq host.
 * Returns the host, or NULL on any failure.
 */
struct irq_host * __init flipper_pic_init(struct device_node *np)
{
	struct device_node *pi;
	struct irq_host *irq_host = NULL;
	struct resource res;
	void __iomem *io_base;
	int retval;

	pi = of_get_parent(np);
	if (!pi) {
		pr_err("no parent found\n");
		goto out;
	}
	if (!of_device_is_compatible(pi, "nintendo,flipper-pi")) {
		pr_err("unexpected parent compatible\n");
		goto out_put;
	}

	retval = of_address_to_resource(pi, 0, &res);
	if (retval) {
		pr_err("no io memory range found\n");
		goto out_put;
	}
	io_base = ioremap(res.start, resource_size(&res));
	/* Fix: ioremap() can fail; the result was used unchecked */
	if (!io_base) {
		pr_err("ioremap failed\n");
		goto out_put;
	}

	pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);

	__flipper_quiesce(io_base);

	irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS,
				  &flipper_irq_host_ops, -1);
	if (!irq_host) {
		pr_err("failed to allocate irq_host\n");
		iounmap(io_base);	/* Fix: mapping was leaked here */
		goto out_put;
	}

	irq_host->host_data = io_base;

out_put:
	/* Fix: drop the parent reference taken by of_get_parent() */
	of_node_put(pi);
out:
	return irq_host;
}
/*
 * Reset the CPM2 interrupt controller to a known state (everything
 * masked and acked, default priorities) and register its irq host.
 */
void cpm2_pic_init(struct device_node *node)
{
	int i;

	cpm2_intctl = cpm2_map(im_intctl);

	/* Clear the CPM IRQ controller, in case it has any bits set
	 * from the bootloader
	 */

	/* Mask out everything */
	out_be32(&cpm2_intctl->ic_simrh, 0x00000000);
	out_be32(&cpm2_intctl->ic_simrl, 0x00000000);

	wmb();

	/* Ack everything */
	out_be32(&cpm2_intctl->ic_sipnrh, 0xffffffff);
	out_be32(&cpm2_intctl->ic_sipnrl, 0xffffffff);
	wmb();

	/* Dummy read of the vector */
	i = in_be32(&cpm2_intctl->ic_sivec);
	rmb();

	/* Initialize the default interrupt mapping priorities,
	 * in case the boot rom changed something on us.
	 */
	out_be16(&cpm2_intctl->ic_sicr, 0);
	out_be32(&cpm2_intctl->ic_scprrh, 0x05309770);
	out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);

	/* create a linear host covering all 64 CPM2 sources
	 * (the old comment said "legacy", but IRQ_HOST_MAP_LINEAR is used)
	 */
	cpm2_pic_host = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 64,
				       &cpm2_pic_host_ops, 64);
	if (cpm2_pic_host == NULL) {
		printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
		return;
	}
}
/*
 * Map the MV64x60 GPP and main interrupt controller registers, create
 * the linear irq host and program the cached mask values into hardware.
 */
void __init mv64x60_init_irq(void)
{
	struct device_node *np;
	phys_addr_t paddr;
	unsigned int size;
	const unsigned int *reg;
	unsigned long flags;

	/* NOTE(review): the OF node lookups and "reg" properties are used
	 * unchecked below -- assumes they always exist on supported
	 * boards; verify before reusing this on other platforms. */
	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);
	of_node_put(np);

	/* Main PIC node: reference deliberately kept (no of_node_put) --
	 * presumably because the irq host keeps it; TODO confirm. */
	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_irq_reg_base = ioremap(paddr, reg[1]);

	mv64x60_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
					  MV64x60_NUM_IRQS,
					  &mv64x60_host_ops, MV64x60_NUM_IRQS);

	/* Load the cached masks into hardware and clear latched causes,
	 * all under the driver lock. */
	spin_lock_irqsave(&mv64x60_lock, flags);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);

	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
}
/**
 * i8259_init - Initialize the legacy controller
 * @node: device node of the legacy PIC (can be NULL, but then, it will match
 *        all interrupts, so beware)
 * @intack_addr: PCI interrupt acknowledge (real) address which will return
 *        the active irq from the 8259
 */
void i8259_init(struct device_node *node, unsigned long intack_addr)
{
	unsigned long flags;

	/* initialize the controller */
	raw_spin_lock_irqsave(&i8259_lock, flags);

	/* Mask all first */
	outb(0xff, 0xA1);
	outb(0xff, 0x21);

	/* init master interrupt controller */
	outb(0x11, 0x20); /* Start init sequence */
	outb(0x00, 0x21); /* Vector base */
	outb(0x04, 0x21); /* edge triggered, Cascade (slave) on IRQ2 */
	outb(0x01, 0x21); /* Select 8086 mode */

	/* init slave interrupt controller */
	outb(0x11, 0xA0); /* Start init sequence */
	outb(0x08, 0xA1); /* Vector base */
	outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
	outb(0x01, 0xA1); /* Select 8086 mode */

	/* That thing is slow */
	udelay(100);

	/* always read ISR */
	outb(0x0B, 0x20);
	outb(0x0B, 0xA0);

	/* Unmask the internal cascade */
	cached_21 &= ~(1 << 2);

	/* Set interrupt masks */
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);

	raw_spin_unlock_irqrestore(&i8259_lock, flags);

	/* create a legacy host */
	i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, 0,
				    &i8259_host_ops, 0);
	if (i8259_host == NULL) {
		printk(KERN_ERR "i8259: failed to allocate irq host !\n");
		return;
	}

	/* reserve our resources */
	/* XXX should we continue doing that ? it seems to cause problems
	 * with further requesting of PCI IO resources for that range...
	 * need to look into it.
	 */
	request_resource(&ioport_resource, &pic1_iores);
	request_resource(&ioport_resource, &pic2_iores);
	request_resource(&ioport_resource, &pic_edgectrl_iores);

	if (intack_addr != 0)
		pci_intack = ioremap(intack_addr, 1);

	printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}
/*
 * Map the Performa interrupt controller registers (from the
 * "performa-intr" node's reg property, taken as addr/size pairs),
 * disable all sources, configure OF interrupt-map parsing and register
 * the linear irq host.  Panics if the node is missing.
 */
void __init pmac_pfm_init(void)
{
	// int i;
	struct device_node *irqctrler;
	const u32 *reg;
	unsigned long addr, size;
	unsigned char x;
	unsigned int flags = 0;

	irqctrler = of_find_node_by_name(NULL, "performa-intr");
	if (irqctrler) {
		/* NOTE(review): "reg" length is not validated; assumes the
		 * property holds at least 20 cells -- confirm against the
		 * device tree. */
		reg = of_get_property(irqctrler, "reg", NULL);
		addr = reg[0]; size = reg[1];
		pfm_irq.icr1 = (u8 __iomem *)ioremap(addr, size);
		addr = reg[2]; size = reg[3];
		pfm_irq.icr2 = (u8 __iomem *)ioremap(addr, size);
		addr = reg[4]; size = reg[5];
		pfm_irq.via1_flag = (u8 __iomem *)ioremap(addr, size);
		addr = reg[6]; size = reg[7];
		pfm_irq.via1_ena = (u8 __iomem *)ioremap(addr, size);
		addr = reg[8]; size = reg[9];
		pfm_irq.via1_pcr = (u8 __iomem *)ioremap(addr, size);
		addr = reg[10]; size = reg[11];
		pfm_irq.slt1_flag = (u8 __iomem *)ioremap(addr, size);
		addr = reg[12]; size = reg[13];
		pfm_irq.via2_flag = (u8 __iomem *)ioremap(addr, size);
		addr = reg[14]; size = reg[15];
		pfm_irq.via2_ena = (u8 __iomem *)ioremap(addr, size);
		addr = reg[16]; size = reg[17];
		pfm_irq.slt2_flag = (u8 __iomem *)ioremap(addr, size);
		addr = reg[18]; size = reg[19];
		pfm_irq.f108_flag = (u8 __iomem *)ioremap(addr, size);
		/* f108 ack register: same page, word-aligned address */
		addr &= ~3;
		pfm_irq.f108_ack = (u16 __iomem *)ioremap(addr, size);
	} else {
		if (ppc_md.progress)
			ppc_md.progress("Failed to obtain Performa irq addresses/interrupts\n", 0);
		panic("Failed to obtain Performa irq addresses/interrupts\n");
	}

	/*
	 * SCC channel A control reg and ESP SCSI status reg. We ask device
	 * if it caused the irq. There must be a better way..
	 */
	serial = (u8 __iomem *)ioremap(0x50f0c002,0x10);
	scsi = (u8 __iomem *)ioremap(0x50f10040, 0x10);

	/* disable all interrupts */
	out_be32((u32 __iomem *)pfm_irq.icr1, 1);
	x = in_8(pfm_irq.icr1);		/* read back to flush/ack */
	out_8(pfm_irq.icr1, 1);
	x = in_8(pfm_irq.icr1);
	out_8(pfm_irq.icr2, 7);
	x = in_8(pfm_irq.icr2);
	out_8(pfm_irq.via1_pcr, 0);
	out_8(pfm_irq.via1_ena, 0x7f);
	out_8(pfm_irq.via1_flag, 0x7f);
	out_8(pfm_irq.via2_flag, 0x7f);
	out_8(pfm_irq.f108_flag, 3);
	out_8(pfm_irq.f108_flag, 0xfc);

	/* clear the software enable shadows (globals, not the iomem ptrs) */
	f108_ena = via1_ena = via2_ena = slt1_ena = slt2_ena = 0;

	/* register root irq handler */
	//irq_desc[max_irqs].handler = &pfm_root;
	//request_irq(max_irqs, pfm_root_action, SA_INTERRUPT, "Performa", 0);
	/*	for (i = 0; i < max_irqs ; i++ )
		irq_desc[i].handler = &pmac_pfm;
	if( setup_irq(max_irqs-1, &pfm_root_action) != 0 ) {
		if (ppc_md.progress)
			ppc_md.progress( "pmac_pfm_init(): Failed register root handler!\n", 0 );
		printk( KERN_ERR "%s(): Failed register root handler!\n", __func__ );
	}
	*/

	/* We configure the OF parsing based on our
	 * platform type and wether we were booted by BootX.
	 */
	flags |= OF_IMAP_OLDWORLD_MAC;
	if (of_get_property(of_chosen, "linux,bootx", NULL) != NULL)
		flags |= OF_IMAP_NO_PHANDLE;
	of_irq_map_init(flags);

	spin_lock_init(&pfm_lock);
	ppc_md.get_irq = pfm_get_irq;

	pmac_pfm_host = irq_alloc_host(irqctrler, IRQ_HOST_MAP_LINEAR,
				       max_irqs, &pmac_pfm_host_ops, max_irqs);
	BUG_ON(pmac_pfm_host == NULL);
	irq_set_default_host(pmac_pfm_host);

	of_node_put(irqctrler);

	printk("System has %d possible interrupts\n", max_irqs);
	//#ifdef CONFIG_XMON
	//	/* fake irq line - 5 */
	//	request_irq(5, xmon_irq, 0, "NMI - XMON", 0);
	//	icr_ena |= 0x20;
	//#endif	/* CONFIG_XMON */
}
/*
 * Allocate and initialize the C64x+ Megamodule PIC: create its linear
 * irq host, map its registers, wire each combined-interrupt output onto
 * the core priority interrupt the device tree assigned it, and program
 * the MUX registers.  Returns the pic, or NULL on failure.
 */
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
				      NR_COMBINERS * 32, &megamod_host_ops,
				      IRQ_UNMAPPED);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		goto error_free;
	}

	/* Initialize MUX map: every output starts unmapped */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {
		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;
		/*
		 * We count on the core priority interrupts (4 - 15) being
		 * direct mapped. Check that device tree provided something
		 * in that range.
		 */
		if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d virq %d out of range!\n",
			       np->full_name, i, irq);
			continue;
		}

		/* record the mapping */
		mapping[irq - 4] = i;

		pr_debug("%s: combiner-%d cascading to virq %d\n",
			 np->full_name, i, irq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_handler_data(irq, &cascade_data[i]);
		irq_set_chained_handler(irq, megamod_irq_cascade);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}
/*
 * Register one MPC8xxx GPIO bank: allocate its chip state, hook up the
 * gpiolib callbacks, add the memory-mapped gpiochip, and (if the bank
 * has an interrupt) create its linear irq host and chain the cascade.
 */
static void __init mpc8xxx_add_controller(struct device_node *np)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct gpio_chip *gc;
	const struct of_device_id *id;
	unsigned hwirq;
	int ret;

	mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL);
	if (!mpc8xxx_gc) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock_init(&mpc8xxx_gc->lock);

	mm_gc = &mpc8xxx_gc->mm_gc;
	gc = &mm_gc->gc;

	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
	gc->ngpio = MPC8XXX_GPIO_PINS;
	gc->direction_input = mpc8xxx_gpio_dir_in;
	gc->direction_output = mpc8xxx_gpio_dir_out;
	/* mpc8572 uses its own read routine; other parts share the default */
	if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
		gc->get = mpc8572_gpio_get;
	else
		gc->get = mpc8xxx_gpio_get;
	gc->set = mpc8xxx_gpio_set;
	gc->to_irq = mpc8xxx_gpio_to_irq;

	ret = of_mm_gpiochip_add(np, mm_gc);
	if (ret)
		goto err;

	/* No interrupt property means a GPIO-only bank; that is fine. */
	hwirq = irq_of_parse_and_map(np, 0);
	if (hwirq == NO_IRQ)
		goto skip_irq;

	mpc8xxx_gc->irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
					 MPC8XXX_GPIO_PINS,
					 &mpc8xxx_gpio_irq_ops,
					 MPC8XXX_GPIO_PINS);
	if (!mpc8xxx_gc->irq)
		goto skip_irq;

	id = of_match_node(mpc8xxx_gpio_ids, np);
	if (id)
		mpc8xxx_gc->of_dev_id_data = id->data;

	mpc8xxx_gc->irq->host_data = mpc8xxx_gc;

	/* ack and mask all irqs */
	out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
	out_be32(mm_gc->regs + GPIO_IMR, 0);

	irq_set_handler_data(hwirq, mpc8xxx_gc);
	irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);

skip_irq:
	return;

err:
	pr_err("%s: registration failed with status %d\n",
	       np->full_name, ret);
	kfree(mpc8xxx_gc);

	return;
}
void m82xx_pci_init_irq(void) { int irq; cpm2_map_t *immap; struct device_node *np; struct resource r; const u32 *regs; unsigned int size; const u32 *irq_map; int i; unsigned int irq_max, irq_min; if ((np = of_find_node_by_type(NULL, "soc")) == NULL) { printk(KERN_INFO "No SOC node in device tree\n"); return; } memset(&r, 0, sizeof(r)); if (of_address_to_resource(np, 0, &r)) { printk(KERN_INFO "No SOC reg property in device tree\n"); return; } immap = ioremap(r.start, sizeof(*immap)); of_node_put(np); /* install the demultiplexer for the PCI cascade interrupt */ np = of_find_node_by_type(NULL, "pci"); if (!np) { printk(KERN_INFO "No pci node on device tree\n"); iounmap(immap); return; } irq_map = of_get_property(np, "interrupt-map", &size); if ((!irq_map) || (size <= 7)) { printk(KERN_INFO "No interrupt-map property of pci node\n"); iounmap(immap); return; } size /= sizeof(irq_map[0]); for (i = 0, irq_max = 0, irq_min = 512; i < size; i += 7, irq_map += 7) { if (irq_map[5] < irq_min) irq_min = irq_map[5]; if (irq_map[5] > irq_max) irq_max = irq_map[5]; } pci_int_base = irq_min; irq = irq_of_parse_and_map(np, 0); set_irq_chained_handler(irq, m82xx_pci_irq_demux); of_node_put(np); np = of_find_node_by_type(NULL, "pci-pic"); if (!np) { printk(KERN_INFO "No pci pic node on device tree\n"); iounmap(immap); return; } pci_pic_node = of_node_get(np); /* PCI interrupt controller registers: status and mask */ regs = of_get_property(np, "reg", &size); if ((!regs) || (size <= 2)) { printk(KERN_INFO "No reg property in pci pic node\n"); iounmap(immap); return; } pci_regs.pci_int_stat_reg = ioremap(regs[0], sizeof(*pci_regs.pci_int_stat_reg)); pci_regs.pci_int_mask_reg = ioremap(regs[1], sizeof(*pci_regs.pci_int_mask_reg)); of_node_put(np); /* configure chip select for PCI interrupt controller */ immap->im_memctl.memc_br3 = regs[0] | 0x00001801; immap->im_memctl.memc_or3 = 0xffff8010; /* make PCI IRQ level sensitive */ immap->im_intctl.ic_siexr &= ~(1 << (14 - (irq - 
SIU_INT_IRQ1))); /* mask all PCI interrupts */ *pci_regs.pci_int_mask_reg |= 0xfff00000; iounmap(immap); pci_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, irq_max - irq_min + 1, &pci_pic_host_ops, irq_max + 1); return; }
unsigned int cpm_pic_init(void) { struct device_node *np = NULL; struct resource res; unsigned int sirq = NO_IRQ, hwirq, eirq; int ret; pr_debug("cpm_pic_init\n"); np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic"); if (np == NULL) np = of_find_compatible_node(NULL, "cpm-pic", "CPM"); if (np == NULL) { printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n"); return sirq; } ret = of_address_to_resource(np, 0, &res); if (ret) goto end; cpic_reg = ioremap(res.start, resource_size(&res)); if (cpic_reg == NULL) goto end; sirq = irq_of_parse_and_map(np, 0); if (sirq == NO_IRQ) goto end; /* Initialize the CPM interrupt controller. */ hwirq = (unsigned int)virq_to_hw(sirq); out_be32(&cpic_reg->cpic_cicr, (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | ((hwirq/2) << 13) | CICR_HP_MASK); out_be32(&cpic_reg->cpic_cimr, 0); cpm_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64, &cpm_pic_host_ops, 64); if (cpm_pic_host == NULL) { printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); sirq = NO_IRQ; goto end; } /* Install our own error handler. */ np = of_find_compatible_node(NULL, NULL, "fsl,cpm1"); if (np == NULL) np = of_find_node_by_type(NULL, "cpm"); if (np == NULL) { printk(KERN_ERR "CPM PIC init: can not find cpm node\n"); goto end; } eirq = irq_of_parse_and_map(np, 0); if (eirq == NO_IRQ) goto end; if (setup_irq(eirq, &cpm_error_irqaction)) printk(KERN_ERR "Could not allocate CPM error IRQ!"); setbits32(&cpic_reg->cpic_cicr, CICR_IEN); end: of_node_put(np); return sirq; }
/*
 * Set up the PQ2ADS PCI interrupt controller: map its registers, mask
 * all sources, allocate the linear irq host and chain the demultiplexer
 * on the cascade interrupt.  Returns 0 on success or a negative errno.
 */
int __init pq2ads_pci_init_irq(void)
{
	struct pq2ads_pci_pic *priv;
	struct irq_host *host;
	struct device_node *np;
	int ret = -ENODEV;
	int irq;

	np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic");
	if (!np) {
		/* Fix: dropped the pointless of_node_put(NULL) here */
		printk(KERN_ERR "No pci pic node in device tree.\n");
		goto out;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (irq == NO_IRQ) {
		printk(KERN_ERR "No interrupt in pci pic node.\n");
		of_node_put(np);
		goto out;
	}

	priv = alloc_bootmem(sizeof(struct pq2ads_pci_pic));
	if (!priv) {
		of_node_put(np);
		ret = -ENOMEM;
		goto out_unmap_irq;
	}

	/* PCI interrupt controller registers: status and mask */
	priv->regs = of_iomap(np, 0);
	if (!priv->regs) {
		printk(KERN_ERR "Cannot map PCI PIC registers.\n");
		goto out_free_bootmem;
	}

	/* mask all PCI interrupts */
	out_be32(&priv->regs->mask, ~0);
	mb();

	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, NUM_IRQS,
			      &pci_pic_host_ops, NUM_IRQS);
	if (!host) {
		ret = -ENOMEM;
		goto out_unmap_regs;
	}

	/* Fix: host_data was assigned twice; once is enough */
	host->host_data = priv;
	priv->host = host;

	set_irq_data(irq, priv);
	set_irq_chained_handler(irq, pq2ads_pci_irq_demux);

	of_node_put(np);
	return 0;

out_unmap_regs:
	iounmap(priv->regs);
out_free_bootmem:
	free_bootmem((unsigned long)priv, sizeof(struct pq2ads_pci_pic));
	of_node_put(np);
out_unmap_irq:
	irq_dispose_mapping(irq);
out:
	return ret;
}