/*
 * xilinx_intc_init - map, quiesce and register the Xilinx interrupt
 * controller described by @np.
 *
 * Returns the new linear irq_domain, or NULL if the registers could not
 * be mapped.  Panics if the domain itself cannot be allocated.
 *
 * Fix: the register pointer is MMIO and must carry the __iomem address
 * space annotation (sparse); plain `void *` hides that.
 */
struct irq_domain * __init xilinx_intc_init(struct device_node *np)
{
	struct irq_domain *irq;
	void __iomem *regs;

	/* Find and map the intc registers */
	regs = of_iomap(np, 0);
	if (!regs) {
		pr_err("xilinx_intc: could not map registers\n");
		return NULL;
	}

	/* Setup interrupt controller */
	out_be32(regs + XINTC_IER, 0);		/* disable all irqs */
	out_be32(regs + XINTC_IAR, ~(u32) 0);	/* Acknowledge pending irqs */
	out_be32(regs + XINTC_MER, 0x3UL);	/* Turn on the Master Enable. */

	/* Allocate and initialize an irq_domain structure. */
	irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops,
				    regs);
	if (!irq)
		panic(__FILE__ ": Cannot allocate IRQ host\n");

	return irq;
}
/*
 * mv64x60_init_irq - map the mv64x60 GPP and PIC register blocks, create
 * the irq domain, and program mask/cause registers from the cached state.
 *
 * NOTE(review): of_find_compatible_node(), of_get_property() and ioremap()
 * results are used unchecked here -- this assumes the device tree always
 * provides both nodes with valid "reg" properties; confirm on new boards.
 */
void __init mv64x60_init_irq(void)
{
	struct device_node *np;
	phys_addr_t paddr;
	unsigned int size;
	const unsigned int *reg;
	unsigned long flags;

	/* Map the GPP (general purpose port) interrupt registers */
	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	/* reg[1] is used as the mapping length */
	mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);
	of_node_put(np);

	/* Map the main interrupt controller registers */
	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_irq_reg_base = ioremap(paddr, reg[1]);

	/* Linear domain covering all mv64x60 interrupt sources */
	mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
					&mv64x60_host_ops, NULL);

	spin_lock_irqsave(&mv64x60_lock, flags);
	/* Apply the cached mask state to the hardware ... */
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);
	/* ... and clear any latched interrupt causes */
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
}
/*
 * Set up the combiner irq domain and chain every combiner group to its
 * parent interrupt.  One combiner_chip_data entry is allocated per group.
 */
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	unsigned int nr_irq = max_nr * IRQ_IN_COMBINER;
	int idx;

	combiner_data = kcalloc(max_nr, sizeof(*combiner_data), GFP_KERNEL);
	if (!combiner_data) {
		pr_warn("%s: could not allocate combiner data\n", __func__);
		return;
	}

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
						    &combiner_irq_domain_ops,
						    combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		return;
	}

	for (idx = 0; idx < max_nr; idx++) {
		int parent_irq = irq_of_parse_and_map(np, idx);
		/* four groups share each 0x10-byte register block */
		void __iomem *regs = combiner_base + (idx >> 2) * 0x10;

		combiner_init_one(&combiner_data[idx], idx, regs, parent_irq);
		combiner_cascade_irq(&combiner_data[idx], parent_irq);
	}
}
/*
 * wcd9xxx_irq_of_init - allocate driver data and register the wcd9xxx
 * interrupt-controller irq domain for @node.
 *
 * Returns 0 on success, -ENOMEM on allocation or domain failure.
 */
int __init wcd9xxx_irq_of_init(struct device_node *node,
			       struct device_node *parent)
{
	struct wcd9xxx_irq_drv_data *data;

	pr_debug("%s: node %s, node parent %s\n", __func__,
		 node->name, node->parent->name);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * wcd9xxx_intc supports N to N irq mapping with a single-cell
	 * binding carrying irq numbers (offsets) only, so the generic
	 * irq_domain_simple_ops (simple map + xlate_onetwocell) suffice.
	 */
	data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
					     &irq_domain_simple_ops, data);
	if (data->domain)
		return 0;

	kfree(data);
	return -ENOMEM;
}
/*
 * armctrl_of_init - map the BCM2835 interrupt controller, create its irq
 * domain, and pre-map every bank/line as a level irq.  Panics on mapping
 * or domain failure; BUG()s if an individual mapping cannot be created.
 */
static int __init armctrl_of_init(struct device_node *node,
				  struct device_node *parent)
{
	void __iomem *base;
	int b;

	base = of_iomap(node, 0);
	if (!base)
		panic("%s: unable to map IC registers\n", node->full_name);

	intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
					    &armctrl_ops, NULL);
	if (!intc.domain)
		panic("%s: unable to create IRQ domain\n", node->full_name);

	for (b = 0; b < NR_BANKS; b++) {
		int i;

		/* Per-bank register pointers used by the flow handlers */
		intc.pending[b] = base + reg_pending[b];
		intc.enable[b] = base + reg_enable[b];
		intc.disable[b] = base + reg_disable[b];

		for (i = 0; i < bank_irqs[b]; i++) {
			int irq = irq_create_mapping(intc.domain,
						     MAKE_HWIRQ(b, i));

			BUG_ON(irq <= 0);
			irq_set_chip_and_handler(irq, &armctrl_chip,
						 handle_level_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
	}

	set_handle_irq(bcm2835_handle_irq);
	return 0;
}
/*
 * nps400_of_init - register the NPS400 internal interrupt controller as
 * the root (default) irq domain.  Rejects any DT that gives it a parent.
 */
static int __init nps400_of_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *root;

	/* This controller must sit at the root of the interrupt tree */
	if (parent) {
		pr_err("DeviceTree incore ic not a root irq controller\n");
		return -EINVAL;
	}

	root = irq_domain_add_linear(node, NPS_NR_CPU_IRQS,
				     &nps400_irq_ops, NULL);
	if (!root) {
		pr_err("nps400 root irq domain not avail\n");
		return -ENOMEM;
	}

	/*
	 * Needed for primary domain lookup to succeed.
	 * This is a primary irqchip, and can never have a parent.
	 */
	irq_set_default_host(root);

#ifdef CONFIG_SMP
	irq_create_mapping(root, NPS_IPI_IRQ);
#endif

	return 0;
}
/*
 * Initialisation of PIC, this should be called in BSP
 *
 * Maps the PIC registers, masks everything, hooks the cascade interrupt
 * and creates the irq domain.
 *
 * Fix: the of_iomap() result was previously used unchecked -- a failed
 * mapping would have dereferenced NULL in the out_be32() calls below.
 */
void __init gef_pic_init(struct device_node *np)
{
	unsigned long flags;

	/* Map the devices registers into memory */
	gef_pic_irq_reg_base = of_iomap(np, 0);
	if (!gef_pic_irq_reg_base) {
		printk(KERN_ERR "SBC610: failed to map PIC registers");
		return;
	}

	raw_spin_lock_irqsave(&gef_pic_lock, flags);

	/* Initialise everything as masked. */
	out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_INTR_MASK, 0);
	out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_INTR_MASK, 0);
	out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_MCP_MASK, 0);
	out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_MCP_MASK, 0);

	raw_spin_unlock_irqrestore(&gef_pic_lock, flags);

	/* Map controller */
	gef_pic_cascade_irq = irq_of_parse_and_map(np, 0);
	if (gef_pic_cascade_irq == NO_IRQ) {
		printk(KERN_ERR "SBC610: failed to map cascade interrupt");
		return;
	}

	/* Setup an irq_domain structure */
	gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS,
					  &gef_pic_host_ops, NULL);
	if (gef_pic_irq_host == NULL)
		return;

	/* Chain with parent controller */
	irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade);
}
/*
 * init_onchip_IRQ - register the ARCv2 core interrupt controller as the
 * root irq domain and pre-map the IPI/softirq lines.
 */
static int __init init_onchip_IRQ(struct device_node *intc,
				  struct device_node *parent)
{
	struct irq_domain *root_domain;
	struct bcr_irq_arcv2 irq_bcr;
	unsigned int nr_cpu_irqs;

	if (parent)
		panic("DeviceTree incore intc not a root irq controller\n");

	/* Total lines = BCR-reported irqs plus the exception vectors */
	READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
	nr_cpu_irqs = irq_bcr.irqs + NR_EXCEPTIONS;

	root_domain = irq_domain_add_linear(intc, nr_cpu_irqs,
					    &arcv2_irq_ops, NULL);
	if (!root_domain)
		panic("root irq domain not avail\n");

	/*
	 * Needed for primary domain lookup to succeed
	 * This is a primary irqchip, and can never have a parent
	 */
	irq_set_default_host(root_domain);

#ifdef CONFIG_SMP
	irq_create_mapping(root_domain, IPI_IRQ);
#endif
	irq_create_mapping(root_domain, SOFTIRQ_IRQ);

	return 0;
}
/*
 * idu_of_init - set up the ARC MCIP IDU as a cascaded irq domain behind
 * the core interrupt controller.
 *
 * Fix: the irq_domain_add_linear() result was previously used unchecked;
 * on allocation failure a NULL domain would have been installed as the
 * chained-handler data.
 */
static int __init idu_of_init(struct device_node *intc,
			      struct device_node *parent)
{
	struct irq_domain *domain;
	/* Read IDU BCR to confirm nr_irqs */
	int nr_irqs = of_irq_count(intc);
	int i, irq;

	if (!idu_detected)
		panic("IDU not detected, but DeviceTree using it");

	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);
	if (!domain)
		panic("MCIP: Cannot allocate IDU irq domain\n");

	/* Parent interrupts (core-intc) are already mapped */
	for (i = 0; i < nr_irqs; i++) {
		/*
		 * Return parent uplink IRQs (towards core intc) 24,25,.....
		 * this step has been done before already
		 * however we need it to get the parent virq and set IDU
		 * handler as first level isr
		 */
		irq = irq_of_parse_and_map(intc, i);
		if (!i)
			idu_first_irq = irq;

		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
/*
 * mpc8xxx_probe - register an mpc8xxx GPIO controller and, when the DT
 * provides an interrupt line, create a linear irq domain chained behind
 * the parent interrupt.
 *
 * Note: a missing interrupt line or a failed domain creation is treated
 * as non-fatal (returns 0) -- the chip then works as plain GPIO only.
 */
static int mpc8xxx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mpc8xxx_gpio_chip *mpc8xxx_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct gpio_chip *gc;
	const struct of_device_id *id;
	int ret;

	mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
	if (!mpc8xxx_gc)
		return -ENOMEM;

	platform_set_drvdata(pdev, mpc8xxx_gc);

	spin_lock_init(&mpc8xxx_gc->lock);

	mm_gc = &mpc8xxx_gc->mm_gc;
	gc = &mm_gc->gc;

	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
	gc->ngpio = MPC8XXX_GPIO_PINS;
	gc->direction_input = mpc8xxx_gpio_dir_in;
	/* mpc5121 uses a different direction_output; mpc8572 a different get */
	gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
		mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
	gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
		mpc8572_gpio_get : mpc8xxx_gpio_get;
	gc->set = mpc8xxx_gpio_set;
	gc->set_multiple = mpc8xxx_gpio_set_multiple;
	gc->to_irq = mpc8xxx_gpio_to_irq;

	ret = of_mm_gpiochip_add(np, mm_gc);
	if (ret)
		return ret;

	mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0);
	/* No interrupt line in the DT: GPIO-only operation */
	if (mpc8xxx_gc->irqn == NO_IRQ)
		return 0;

	mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
					&mpc8xxx_gpio_irq_ops, mpc8xxx_gc);
	/* Domain failure is likewise non-fatal for GPIO use */
	if (!mpc8xxx_gc->irq)
		return 0;

	id = of_match_node(mpc8xxx_gpio_ids, np);
	if (id)
		mpc8xxx_gc->of_dev_id_data = id->data;

	/* ack and mask all irqs */
	out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
	out_be32(mm_gc->regs + GPIO_IMR, 0);

	irq_set_handler_data(mpc8xxx_gc->irqn, mpc8xxx_gc);
	irq_set_chained_handler(mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade);

	return 0;
}
/**
 * mpc52xx_init_irq - Initialize and register with the virq subsystem
 *
 * Hook for setting up IRQs on an mpc5200 system. A pointer to this function
 * is to be put into the machine definition structure.
 *
 * This function searches the device tree for an MPC5200 interrupt controller,
 * initializes it, and registers it with the virq subsystem.
 */
void __init mpc52xx_init_irq(void)
{
	u32 intr_ctrl;
	struct device_node *picnode;
	struct device_node *np;

	/* Remap the necessary zones */
	picnode = of_find_matching_node(NULL, mpc52xx_pic_ids);
	intr = of_iomap(picnode, 0);
	if (!intr)
		panic(__FILE__ ": find_and_map failed on 'mpc5200-pic'. "
		      "Check node !");

	np = of_find_matching_node(NULL, mpc52xx_sdma_ids);
	sdma = of_iomap(np, 0);
	of_node_put(np);
	if (!sdma)
		panic(__FILE__ ": find_and_map failed on 'mpc5200-bestcomm'. "
		      "Check node !");

	pr_debug("MPC5200 IRQ controller mapped to 0x%p\n", intr);

	/* Disable all interrupt sources. */
	out_be32(&sdma->IntPend, 0xffffffff);	/* 1 means clear pending */
	out_be32(&sdma->IntMask, 0xffffffff);	/* 1 means disabled */
	out_be32(&intr->per_mask, 0x7ffffc00);	/* 1 means disabled */
	out_be32(&intr->main_mask, 0x00010fff);	/* 1 means disabled */
	intr_ctrl = in_be32(&intr->ctrl);
	intr_ctrl &= 0x00ff0000;	/* Keeps IRQ[0-3] config */
	intr_ctrl |= 0x0f000000 |	/* clear IRQ 0-3 */
		     0x00001000 |	/* MEE master external enable */
		     0x00000000 |	/* 0 means disable IRQ 0-3 */
		     0x00000001;	/* CEb route critical normally */
	out_be32(&intr->ctrl, intr_ctrl);

	/* Zero a bunch of the priority settings. */
	out_be32(&intr->per_pri1, 0);
	out_be32(&intr->per_pri2, 0);
	out_be32(&intr->per_pri3, 0);
	out_be32(&intr->main_pri1, 0);
	out_be32(&intr->main_pri2, 0);

	/*
	 * As last step, add an irq host to translate the real
	 * hw irq information provided by the ofw to linux virq
	 */
	mpc52xx_irqhost = irq_domain_add_linear(picnode,
						MPC52xx_IRQ_HIGHTESTHWIRQ,
						&mpc52xx_irqhost_ops, NULL);
	if (!mpc52xx_irqhost)
		panic(__FILE__ ": Cannot allocate the IRQ host\n");

	irq_set_default_host(mpc52xx_irqhost);

	pr_info("MPC52xx PIC is up and running!\n");
}
/*
 * nvic_of_init - map the Cortex-M NVIC, create its irq domain/generic
 * chips, then disable all lines and zero their priorities.
 *
 * Fix: the nvic_base iomapping was previously leaked on both error
 * paths; it is now released with iounmap() before returning.
 */
static int __init nvic_of_init(struct device_node *node,
			       struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	unsigned int irqs, i, ret, numbanks;
	void __iomem *nvic_base;

	/* ICTR reports the number of 32-line register banks minus one */
	numbanks = (readl_relaxed(V7M_SCS_ICTR) &
		    V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;

	nvic_base = of_iomap(node, 0);
	if (!nvic_base) {
		pr_warn("unable to map nvic registers\n");
		return -ENOMEM;
	}

	irqs = numbanks * 32;
	if (irqs > NVIC_MAX_IRQ)
		irqs = NVIC_MAX_IRQ;

	nvic_irq_domain = irq_domain_add_linear(node, irqs,
						&irq_generic_chip_ops, NULL);
	if (!nvic_irq_domain) {
		pr_warn("Failed to allocate irq domain\n");
		iounmap(nvic_base);	/* fix: don't leak the mapping */
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1,
					     "nvic_irq", handle_fasteoi_irq,
					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		irq_domain_remove(nvic_irq_domain);
		iounmap(nvic_base);	/* fix: don't leak the mapping */
		return ret;
	}

	for (i = 0; i < numbanks; ++i) {
		struct irq_chip_generic *gc;

		gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i);
		gc->reg_base = nvic_base + 4 * i;
		gc->chip_types[0].regs.enable = NVIC_ISER;
		gc->chip_types[0].regs.disable = NVIC_ICER;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
		gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
		gc->chip_types[0].chip.irq_eoi = nvic_eoi;

		/* disable interrupts */
		writel_relaxed(~0, gc->reg_base + NVIC_ICER);
	}

	/* Set priority on all interrupts */
	for (i = 0; i < irqs; i += 4)
		writel_relaxed(0, nvic_base + NVIC_IPR + i);

	return 0;
}
/*
 * xtensa_pic_init - create the root irq domain for the Xtensa core PIC
 * and install it as the default host.
 *
 * Fix: the domain pointer was previously passed to irq_set_default_host()
 * without a NULL check; allocation failure now returns -ENOMEM instead.
 */
static int __init xtensa_pic_init(struct device_node *np,
				  struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain;

	root_domain = irq_domain_add_linear(np, NR_IRQS,
					    &xtensa_irq_domain_ops,
					    &xtensa_irq_chip);
	if (!root_domain)
		return -ENOMEM;

	irq_set_default_host(root_domain);
	return 0;
}
/*
 * icoll_add_domain - create the linear irq domain covering @num icoll
 * interrupt lines; panics if the domain cannot be allocated.
 */
static void __init icoll_add_domain(struct device_node *np, int num)
{
	icoll_domain = irq_domain_add_linear(np, num,
					     &icoll_irq_domain_ops, NULL);

	if (!icoll_domain)
		panic("%s: unable to create irq domain", np->full_name);
}
/*
 * moxart_of_intc_init - map the MOXA ART interrupt controller, build its
 * irq domain and generic chip, then program mode/level from the DT
 * "interrupt-mask" property.
 *
 * Fix: the intc.base iomapping was previously leaked on the domain and
 * generic-chip error paths; it is now released with iounmap().
 */
static int __init moxart_of_intc_init(struct device_node *node,
				      struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;
	struct irq_chip_generic *gc;

	intc.base = of_iomap(node, 0);
	if (!intc.base) {
		pr_err("%s: unable to map IC registers\n", node->full_name);
		return -EINVAL;
	}

	intc.domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
					    intc.base);
	if (!intc.domain) {
		pr_err("%s: unable to create IRQ domain\n", node->full_name);
		iounmap(intc.base);	/* fix: don't leak the mapping */
		return -EINVAL;
	}

	ret = irq_alloc_domain_generic_chips(intc.domain, 32, 1,
					     "MOXARTINTC", handle_edge_irq,
					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: could not allocate generic chip\n",
		       node->full_name);
		irq_domain_remove(intc.domain);
		iounmap(intc.base);	/* fix: don't leak the mapping */
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "interrupt-mask",
				   &intc.interrupt_mask);
	if (ret)
		pr_err("%s: could not read interrupt-mask DT property\n",
		       node->full_name);

	gc = irq_get_domain_generic_chip(intc.domain, 0);
	gc->reg_base = intc.base;
	gc->chip_types[0].regs.mask = IRQ_MASK_REG;
	gc->chip_types[0].regs.ack = IRQ_CLEAR_REG;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	/* mask everything, clear any pending state, apply mode/level */
	writel(0, intc.base + IRQ_MASK_REG);
	writel(0xffffffff, intc.base + IRQ_CLEAR_REG);
	writel(intc.interrupt_mask, intc.base + IRQ_MODE_REG);
	writel(intc.interrupt_mask, intc.base + IRQ_LEVEL_REG);

	set_handle_irq(handle_irq);
	return 0;
}
/*
 * init_IRQ - locate, map and initialise the Xilinx xps-intc interrupt
 * controller from the device tree, then register its irq domain as the
 * default host.
 *
 * NOTE(review): when CONFIG_SELFMOD_INTC is disabled, intc_baseaddr is
 * not declared in this function -- presumably a file-scope global
 * provides it; confirm against the rest of this file.
 * NOTE(review): storing an ioremap() pointer in an integer assumes a
 * 32-bit address space -- verify on 64-bit configs.
 */
void __init init_IRQ(void)
{
	u32 nr_irq, intr_mask;
	struct device_node *intc = NULL;
#ifdef CONFIG_SELFMOD_INTC
	unsigned int intc_baseaddr = 0;
	/* Handler entry points patched in by the self-modifying intc code */
	static int arr_func[] = {
				(int)&get_irq,
				(int)&intc_enable_or_unmask,
				(int)&intc_disable_or_mask,
				(int)&intc_mask_ack,
				(int)&intc_ack,
				(int)&intc_end,
				0
	};
#endif
	intc = of_find_compatible_node(NULL, NULL, "xlnx,xps-intc-1.00.a");
	BUG_ON(!intc);

	intc_baseaddr = be32_to_cpup(of_get_property(intc, "reg", NULL));
	intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE);
	nr_irq = be32_to_cpup(of_get_property(intc,
						"xlnx,num-intr-inputs", NULL));

	/* One bit per input: set = edge-triggered, clear = level */
	intr_mask =
		be32_to_cpup(of_get_property(intc, "xlnx,kind-of-intr", NULL));
	if (intr_mask > (u32)((1ULL << nr_irq) - 1))
		pr_info(" ERROR: Mismatch in kind-of-intr param\n");

#ifdef CONFIG_SELFMOD_INTC
	selfmod_function((int *) arr_func, intc_baseaddr);
#endif
	pr_info("%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
		intc->name, intc_baseaddr, nr_irq, intr_mask);

	/*
	 * Disable all external interrupts until they are
	 * explicity requested.
	 */
	out_be32(intc_baseaddr + IER, 0);

	/* Acknowledge any pending interrupts just in case. */
	out_be32(intc_baseaddr + IAR, 0xffffffff);

	/* Turn on the Master Enable. */
	out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);

	/* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
	 * lazy and Michal can clean it up to something nicer when he tests
	 * and commits this patch. ~~gcl */
	root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
							(void *)intr_mask);

	irq_set_default_host(root_domain);
}
/** * amdgpu_irq_add_domain - create a linear irq domain * * @adev: amdgpu device pointer * * Create an irq domain for GPU interrupt sources * that may be driven by another driver (e.g., ACP). */ int amdgpu_irq_add_domain(struct amdgpu_device *adev) { adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, &amdgpu_hw_irqdomain_ops, adev); if (!adev->irq.domain) { DRM_ERROR("GPU irq add domain failed\n"); return -ENODEV; } return 0; }
/*
 * vt8500_irq_init - register one VT8500 interrupt controller instance.
 * Up to VT8500_INTC_MAX controllers are supported; a controller with its
 * own "interrupts" entries is a slave cascaded through its parent.
 *
 * Fix: the irq domain was previously created before the of_iomap()
 * result was checked, leaking a registered domain when the mapping
 * failed.  Map and verify the registers first, then add the domain.
 */
static int __init vt8500_irq_init(struct device_node *node,
				  struct device_node *parent)
{
	int irq, i;
	struct device_node *np = node;

	if (active_cnt == VT8500_INTC_MAX) {
		pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n",
								__func__);
		goto out;
	}

	intc[active_cnt].base = of_iomap(np, 0);
	if (!intc[active_cnt].base) {
		pr_err("%s: Unable to map IO memory\n", __func__);
		goto out;
	}

	intc[active_cnt].domain = irq_domain_add_linear(node, 64,
			&vt8500_irq_domain_ops, &intc[active_cnt]);
	if (!intc[active_cnt].domain) {
		pr_err("%s: Unable to add irq domain!\n", __func__);
		goto out;
	}

	set_handle_irq(vt8500_handle_irq);

	vt8500_init_irq_hw(intc[active_cnt].base);

	pr_info("vt8500-irq: Added interrupt controller\n");

	active_cnt++;

	/* check if this is a slaved controller */
	if (of_irq_count(np) != 0) {
		/* check that we have the correct number of interrupts */
		if (of_irq_count(np) != 8) {
			pr_err("%s: Incorrect IRQ map for slaved controller\n",
					__func__);
			return -EINVAL;
		}

		for (i = 0; i < 8; i++) {
			irq = irq_of_parse_and_map(np, i);
			enable_irq(irq);
		}

		pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
	}
out:
	return 0;
}
/*
 * mpc52xx_init_irq - map the MPC5200 PIC and bestcomm/SDMA registers,
 * mask and clear all interrupt sources, then register the irq domain as
 * the default host.  Panics if either register block cannot be mapped
 * or the domain cannot be allocated.
 */
void __init mpc52xx_init_irq(void)
{
	u32 intr_ctrl;
	struct device_node *picnode;
	struct device_node *np;

	/* Remap the necessary zones */
	picnode = of_find_matching_node(NULL, mpc52xx_pic_ids);
	intr = of_iomap(picnode, 0);
	if (!intr)
		panic(__FILE__ ": find_and_map failed on 'mpc5200-pic'. "
		      "Check node !");

	np = of_find_matching_node(NULL, mpc52xx_sdma_ids);
	sdma = of_iomap(np, 0);
	of_node_put(np);
	if (!sdma)
		panic(__FILE__ ": find_and_map failed on 'mpc5200-bestcomm'. "
		      "Check node !");

	pr_debug("MPC5200 IRQ controller mapped to 0x%p\n", intr);

	/* Disable all interrupt sources */
	out_be32(&sdma->IntPend, 0xffffffff);	/* 1 means clear pending */
	out_be32(&sdma->IntMask, 0xffffffff);	/* 1 means disabled */
	out_be32(&intr->per_mask, 0x7ffffc00);	/* 1 means disabled */
	out_be32(&intr->main_mask, 0x00010fff);	/* 1 means disabled */
	intr_ctrl = in_be32(&intr->ctrl);
	intr_ctrl &= 0x00ff0000;	/* keeps IRQ[0-3] config */
	intr_ctrl |= 0x0f000000 |	/* clear IRQ 0-3 */
		     0x00001000 |	/* MEE master external enable */
		     0x00000000 |	/* 0 means disable IRQ 0-3 */
		     0x00000001;	/* CEb route critical normally */
	out_be32(&intr->ctrl, intr_ctrl);

	/* Zero the priority settings */
	out_be32(&intr->per_pri1, 0);
	out_be32(&intr->per_pri2, 0);
	out_be32(&intr->per_pri3, 0);
	out_be32(&intr->main_pri1, 0);
	out_be32(&intr->main_pri2, 0);

	/* Add the irq host translating hw irq numbers to linux virqs */
	mpc52xx_irqhost = irq_domain_add_linear(picnode,
						MPC52xx_IRQ_HIGHTESTHWIRQ,
						&mpc52xx_irqhost_ops, NULL);
	if (!mpc52xx_irqhost)
		panic(__FILE__ ": Cannot allocate the IRQ host\n");

	irq_set_default_host(mpc52xx_irqhost);

	pr_info("MPC52xx PIC is up and running!\n");
}
/*
 * mtvic_init - create the 32-line MTVIC irq domain and optionally make
 * it the default host.
 *
 * @def: when non-zero, install the domain as the default irq host.
 *
 * Fix: virq_host->host_data was previously dereferenced without
 * checking that irq_domain_add_linear() succeeded.
 */
void __init mtvic_init(int def)
{
	static unsigned virqs;

	virq_host = irq_domain_add_linear(NULL, 32, &mtvic_ops, NULL);
	if (!virq_host)
		return;

	virq_host->host_data = &virqs;

	if (def)
		irq_set_default_host(virq_host);
}
static int wsa_irq_probe(struct platform_device *pdev) { int irq; struct wsa_resource *wsa_res = NULL; int ret = -EINVAL; irq = platform_get_irq_byname(pdev, "wsa-int"); if (irq < 0) { dev_err(&pdev->dev, "%s: Couldn't find wsa-int node(%d)\n", __func__, irq); return -EINVAL; } else { pr_debug("%s: node %s\n", __func__, pdev->name); wsa_res = kzalloc(sizeof(*wsa_res), GFP_KERNEL); if (!wsa_res) { pr_err("%s: could not allocate memory\n", __func__); return -ENOMEM; } /* * wsa interrupt controller supports N to N irq mapping with * single cell binding with irq numbers(offsets) only. * Use irq_domain_simple_ops that has irq_domain_simple_map and * irq_domain_xlate_onetwocell. */ wsa_res->dev = &pdev->dev; wsa_res->domain = irq_domain_add_linear(wsa_res->dev->of_node, WSA_MAX_NUM_IRQS, &irq_domain_simple_ops, wsa_res); if (!wsa_res->domain) { dev_err(&pdev->dev, "%s: domain is NULL\n", __func__); ret = -ENOMEM; goto err; } wsa_res->dev = &pdev->dev; dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq); wsa_res->irq = irq; wsa_res->num_irq_regs = 1; wsa_res->num_irqs = WSA_NUM_IRQS; ret = wsa_irq_init(wsa_res); if (ret < 0) { dev_err(&pdev->dev, "%s: failed to do irq init %d\n", __func__, ret); goto err; } } return ret; err: kfree(wsa_res); return ret; }
/*
 * add xgold irq linear irq domain
 * irq desc will be allocated dynamically when requesting the interrupt
 */
int __init xgold_irq_domain_add_linear(struct device_node *np,
				       struct xgold_irq_chip_data *data,
				       struct irq_domain_ops *ops)
{
	/* Quiesce the controller before exposing the domain */
	xgold_irq_mask_ack_all(data);

	data->domain = irq_domain_add_linear(np, data->nr_int, ops, data);
	if (WARN_ON(!data->domain)) {
		pr_err("%s: irq domain init failed. exit...\n", __func__);
		return -1;
	}

	pr_info("%s: new irq domain: %s - %d irqs\n",
		XGOLD_IRQ, data->name, data->nr_int);

	return 0;
}
/*
 * This sets up the IRQ domain for the PIC built in to the OpenRISC
 * 1000 CPU. This is the "root" domain as these are the interrupts
 * that directly trigger an exception in the CPU.
 *
 * Fix: the irq_domain_add_linear() result was previously stored and
 * used unchecked; allocation failure now returns -ENOMEM.
 */
static int __init or1k_pic_init(struct device_node *node,
				struct or1k_pic_dev *pic)
{
	/* Disable all interrupts until explicitly requested */
	mtspr(SPR_PICMR, (0UL));

	root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops,
					    pic);
	if (!root_domain)
		return -ENOMEM;

	set_handle_irq(or1k_pic_handle_irq);

	return 0;
}
int max77693_irq_init(struct max77693_dev *max77693) { struct irq_domain *domain; int i; int ret; mutex_init(&max77693->irqlock); /* Mask individual interrupt sources */ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) { struct regmap *map; /* MUIC IRQ 0:MASK 1:NOT MASK */ /* Other IRQ 1:MASK 0:NOT MASK */ if (i >= MUIC_INT1 && i <= MUIC_INT3) { max77693->irq_masks_cur[i] = 0x00; max77693->irq_masks_cache[i] = 0x00; } else { max77693->irq_masks_cur[i] = 0xff; max77693->irq_masks_cache[i] = 0xff; } map = max77693_get_regmap(max77693, i); if (IS_ERR_OR_NULL(map)) continue; if (max77693_mask_reg[i] == MAX77693_REG_INVALID) continue; if (i >= MUIC_INT1 && i <= MUIC_INT3) max77693_write_reg(map, max77693_mask_reg[i], 0x00); else max77693_write_reg(map, max77693_mask_reg[i], 0xff); } domain = irq_domain_add_linear(NULL, MAX77693_IRQ_NR, &max77693_irq_domain_ops, max77693); if (!domain) { dev_err(max77693->dev, "could not create irq domain\n"); return -ENODEV; } max77693->irq_domain = domain; ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "max77693-irq", max77693); if (ret) dev_err(max77693->dev, "Failed to request IRQ %d: %d\n", max77693->irq, ret); return 0; }
/*
 * init_IRQ - register the Nios II CPU's built-in interrupt controller
 * as the default irq host.  Both a missing DT node and a failed domain
 * allocation are fatal (BUG).
 */
void __init init_IRQ(void)
{
	struct irq_domain *domain;
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "ALTR,nios2-1.0");
	BUG_ON(!node);

	domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops,
				       NULL);
	BUG_ON(!domain);

	irq_set_default_host(domain);
	of_node_put(node);
}
/*
 * orion_irq_init - discover the number of Orion irq chips from the DT
 * "reg" entries, build one irq domain spanning all of them, and map,
 * mask and wire up each chip's registers.  All failures panic, as this
 * is the platform's root interrupt controller.
 */
static int __init orion_irq_init(struct device_node *np,
				 struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int n, ret, base, num_chips = 0;
	struct resource r;

	/* count number of irq chips by valid reg addresses */
	while (of_address_to_resource(np, num_chips, &r) == 0)
		num_chips++;

	orion_irq_domain = irq_domain_add_linear(np,
				num_chips * ORION_IRQS_PER_CHIP,
				&irq_generic_chip_ops, NULL);
	if (!orion_irq_domain)
		panic("%s: unable to add irq domain\n", np->name);

	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
				ORION_IRQS_PER_CHIP, 1, np->name,
				handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		panic("%s: unable to alloc irq domain gc\n", np->name);

	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);

		of_address_to_resource(np, n, &r);
		if (!request_mem_region(r.start, resource_size(&r), np->name))
			panic("%s: unable to request mem region %d",
			      np->name, n);

		gc->reg_base = ioremap(r.start, resource_size(&r));
		if (!gc->reg_base)
			panic("%s: unable to map resource %d", np->name, n);

		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

		/* mask all interrupts */
		writel(0, gc->reg_base + ORION_IRQ_MASK);
	}

	set_handle_irq(orion_handle_irq);
	return 0;
}
/*
 * digicolor_of_init - map the Conexant Digicolor IC, route channel 1 to
 * regular IRQs via the UC syscon, and build the 64-line irq domain with
 * two 32-bit generic chips.
 *
 * Fix: the reg_base iomapping (and, for the gc failure, the registered
 * domain) was previously leaked on every error path after of_iomap().
 */
static int __init digicolor_of_init(struct device_node *node,
				    struct device_node *parent)
{
	static void __iomem *reg_base;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct regmap *ucregs;
	int ret;

	reg_base = of_iomap(node, 0);
	if (!reg_base) {
		pr_err("%s: unable to map IC registers\n", node->full_name);
		return -ENXIO;
	}

	/* disable all interrupts */
	writel(0, reg_base + IC_INT0ENABLE_LO);
	writel(0, reg_base + IC_INT0ENABLE_XLO);

	ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
	if (IS_ERR(ucregs)) {
		pr_err("%s: unable to map UC registers\n", node->full_name);
		ret = PTR_ERR(ucregs);
		goto err_unmap;
	}
	/* channel 1, regular IRQs */
	regmap_write(ucregs, UC_IRQ_CONTROL, 1);

	digicolor_irq_domain =
		irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
	if (!digicolor_irq_domain) {
		pr_err("%s: unable to create IRQ domain\n", node->full_name);
		ret = -ENOMEM;
		goto err_unmap;
	}

	ret = irq_alloc_domain_generic_chips(digicolor_irq_domain, 32, 1,
					     "digicolor_irq", handle_level_irq,
					     clr, 0, 0);
	if (ret) {
		pr_err("%s: unable to allocate IRQ gc\n", node->full_name);
		goto err_domain;
	}

	digicolor_set_gc(reg_base, 0, IC_INT0ENABLE_LO, IC_FLAG_CLEAR_LO);
	digicolor_set_gc(reg_base, 32, IC_INT0ENABLE_XLO, IC_FLAG_CLEAR_XLO);

	set_handle_irq(digicolor_handle_irq);

	return 0;

err_domain:
	irq_domain_remove(digicolor_irq_domain);
	digicolor_irq_domain = NULL;
err_unmap:
	iounmap(reg_base);
	return ret;
}
/*
 * h8300h_intc_of_init - map the H8/300H interrupt controller, drop all
 * interrupt priorities to the lowest level, and register the irq domain
 * as the default host.  Mapping or domain failure is fatal (BUG).
 */
static int __init h8300h_intc_of_init(struct device_node *intc,
				      struct device_node *parent)
{
	struct irq_domain *domain;

	intc_baseaddr = of_iomap(intc, 0);
	BUG_ON(!intc_baseaddr);

	/* All interrupt priority low */
	ctrl_outb(0x00, IPR + 0);
	ctrl_outb(0x00, IPR + 1);

	domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL);
	BUG_ON(!domain);
	irq_set_default_host(domain);

	return 0;
}
/*
 * at91_aic_of_init - map the AT91 Advanced Interrupt Controller, create
 * its irq domain as the default host and initialise the hardware.
 *
 * Fix: the of_iomap() result was previously used unchecked; a failed
 * mapping now panics (matching the domain-failure handling) instead of
 * dereferencing NULL in at91_aic_hw_init().
 */
int __init at91_aic_of_init(struct device_node *node,
			    struct device_node *parent)
{
	at91_aic_base = of_iomap(node, 0);
	if (!at91_aic_base)
		panic("Unable to map AIC registers (DT)\n");

	at91_aic_np = node;

	at91_aic_domain = irq_domain_add_linear(at91_aic_np, NR_AIC_IRQS,
						&at91_aic_irq_ops, NULL);
	if (!at91_aic_domain)
		panic("Unable to add AIC irq domain (DT)\n");

	irq_set_default_host(at91_aic_domain);

	at91_aic_hw_init(NR_AIC_IRQS);

	return 0;
}
/*
 * mdss_irq_domain_init - create the 32-line MDSS hw irq domain and
 * record it (with a cleared enabled mask) in the irq controller state.
 *
 * Returns 0 on success, -ENXIO if the domain could not be created.
 */
static int mdss_irq_domain_init(struct msm_mdss *mdss)
{
	struct device *dev = mdss->dev->dev;
	struct irq_domain *d;

	d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
				  mdss);
	if (!d) {
		dev_err(dev, "mdss irq domain add failed\n");
		return -ENXIO;
	}

	mdss->irqcontroller.enabled_mask = 0;
	mdss->irqcontroller.domain = d;

	return 0;
}