/*
 * For every entry in beat_event_list[], construct a hypervisor event
 * receive port, map it to a Linux virq, install the entry's handler,
 * and publish the port in the firmware repository under the
 * "1,ex" / "button" / <typecode> path.
 *
 * Returns 0 on success or a negative errno on the first failure.
 * NOTE(review): on failure only the current entry's port is torn down;
 * ports/IRQs registered by earlier iterations stay in place — presumably
 * acceptable for boot-time init, confirm before reusing elsewhere.
 */
static int __init beat_register_event(void)
{
	u64 path[4], data[2];
	int rc, i;
	unsigned int virq;

	for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) {
		struct beat_event_list *ev = &beat_event_list[i];

		/* data[0] receives the outlet/port id from the hypervisor */
		if (beat_construct_event_receive_port(data) != 0) {
			printk(KERN_ERR "Beat: "
			       "cannot construct event receive port for %s\n",
			       ev->typecode);
			return -EINVAL;
		}

		virq = irq_create_mapping(NULL, data[0]);
		if (virq == NO_IRQ) {
			printk(KERN_ERR "Beat: failed to get virtual IRQ"
			       " for event receive port for %s\n",
			       ev->typecode);
			beat_destruct_event_receive_port(data[0]);
			return -EIO;
		}
		ev->virq = virq;

		rc = request_irq(virq, ev->handler, IRQF_DISABLED,
				 ev->typecode, NULL);
		if (rc != 0) {
			printk(KERN_ERR "Beat: failed to request virtual IRQ"
			       " for event receive port for %s\n",
			       ev->typecode);
			beat_destruct_event_receive_port(data[0]);
			return rc;
		}

		/*
		 * Repository key: path[0]/path[1] are the literal strings
		 * "1,ex" and "button" packed into u64s; path[2] carries up
		 * to 8 bytes of the typecode (path[3]=0 guarantees the key
		 * is terminated even if typecode fills all 8 bytes).
		 */
		path[0] = 0x1000000065780000ul;	/* 1,ex */
		path[1] = 0x627574746f6e0000ul;	/* button */
		path[2] = 0;
		strncpy((char *)&path[2], ev->typecode, 8);
		path[3] = 0;
		data[1] = 0;

		beat_create_repository_node(path, data);
	}
	return 0;
}
/*
 * Probe and initialise the Ralink interrupt controller from devicetree:
 * optionally load a register map override, map the controller's MMIO
 * window, mask all interrupts, route them to MIPS HW0, register the
 * legacy irq domain, and install the chained handler on the parent IRQ.
 *
 * Fatal setup failures panic(), since the system cannot operate without
 * its primary interrupt controller.  Always returns 0.
 */
static int __init intc_of_init(struct device_node *node,
			       struct device_node *parent)
{
	struct resource res;
	struct irq_domain *domain;
	int irq;

	/* of_property_read_u32_array() returns 0 on success */
	if (!of_property_read_u32_array(node, "ralink,intc-registers",
					rt_intc_regs, 6))
		pr_info("intc: using register map from devicetree\n");

	irq = irq_of_parse_and_map(node, 0);
	if (!irq)
		panic("Failed to get INTC IRQ");

	if (of_address_to_resource(node, 0, &res))
		panic("Failed to get intc memory range");

	/*
	 * FIX: request_mem_region() returns a struct resource pointer
	 * (NULL on failure), not a negative errno — the original
	 * "< 0" comparison against a pointer could never be true.
	 */
	if (!request_mem_region(res.start, resource_size(&res), res.name))
		pr_err("Failed to request intc memory");

	rt_intc_membase = ioremap_nocache(res.start, resource_size(&res));
	if (!rt_intc_membase)
		panic("Failed to remap intc memory");

	/* disable all interrupts */
	rt_intc_w32(~0, INTC_REG_DISABLE);

	/* route all INTC interrupts to MIPS HW0 interrupt */
	rt_intc_w32(0, INTC_REG_TYPE);

	domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
				       RALINK_INTC_IRQ_BASE, 0,
				       &irq_domain_ops, NULL);
	if (!domain)
		panic("Failed to add irqdomain");

	rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);

	irq_set_chained_handler_and_data(irq, ralink_intc_irq_handler, domain);

	/* tell the kernel which irq is used for performance monitoring */
	rt_perfcount_irq = irq_create_mapping(domain, 9);

	return 0;
}
/*
 * Scan the device tree for "hvterm-protocol" vty nodes and initialise
 * one hvsi_ports[] slot per usable node (work items, wait queues, lock,
 * vterm number, and virq mapping).  Registers the HVSI console driver
 * if at least one port was found.  Always returns 0.
 */
static int __init hvsi_console_init(void)
{
	struct device_node *vty;

	hvsi_wait = poll_for_state; /* no irqs yet; must poll */

	/* search device tree for vty nodes */
	for (vty = of_find_compatible_node(NULL, "serial", "hvterm-protocol");
	     vty != NULL;
	     vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
		struct hvsi_struct *hp;
		const uint32_t *vtermno, *irq;

		/* skip nodes missing either required property */
		vtermno = of_get_property(vty, "reg", NULL);
		irq = of_get_property(vty, "interrupts", NULL);
		if (!vtermno || !irq)
			continue;

		/* drop the reference held by the iterator before breaking */
		if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
			of_node_put(vty);
			break;
		}

		hp = &hvsi_ports[hvsi_count];
		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
		INIT_WORK(&hp->handshaker, hvsi_handshaker);
		init_waitqueue_head(&hp->emptyq);
		init_waitqueue_head(&hp->stateq);
		spin_lock_init(&hp->lock);
		hp->index = hvsi_count;
		hp->inbuf_end = hp->inbuf;
		hp->state = HVSI_CLOSED;
		hp->vtermno = *vtermno;
		hp->virq = irq_create_mapping(NULL, irq[0]);
		if (hp->virq == NO_IRQ) {
			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
				__func__, irq[0]);
			continue;
		}

		hvsi_count++;
	}

	if (hvsi_count)
		register_console(&hvsi_con_driver);

	return 0;
}
static int lp8788_set_irqs(struct platform_device *pdev, struct lp8788_charger *pchg, const char *name) { struct resource *r; struct irq_domain *irqdm = pchg->lp->irqdm; int irq_start; int irq_end; int virq; int nr_irq; int i; int ret; /* no error even if no irq resource */ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, name); if (!r) return 0; irq_start = r->start; irq_end = r->end; for (i = irq_start; i <= irq_end; i++) { nr_irq = pchg->num_irqs; virq = irq_create_mapping(irqdm, i); pchg->irqs[nr_irq].virq = virq; pchg->irqs[nr_irq].which = i; pchg->num_irqs++; ret = request_threaded_irq(virq, NULL, lp8788_charger_irq_thread, 0, name, pchg); if (ret) break; } if (i <= irq_end) goto err_free_irq; return 0; err_free_irq: for (i = 0; i < pchg->num_irqs; i++) free_irq(pchg->irqs[i].virq, pchg); return ret; }
/*
 * Initialise the BCM2835/2836 ARM interrupt controller: map its
 * registers, create a linear irq domain covering all banks, pre-map and
 * configure every bank interrupt as a level IRQ, then either chain to
 * the 2836 parent interrupt or install the 2835 top-level handler.
 *
 * All failures panic() — there is no recovery without the root
 * interrupt controller.  Always returns 0.
 */
static int __init armctrl_of_init(struct device_node *node,
				  struct device_node *parent,
				  bool is_2836)
{
	void __iomem *base;
	int irq, b, i;

	base = of_iomap(node, 0);
	if (!base)
		panic("%s: unable to map IC registers\n", node->full_name);

	/* MAKE_HWIRQ(NR_BANKS, 0) is the total hwirq space size */
	intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
					    &armctrl_ops, NULL);
	if (!intc.domain)
		panic("%s: unable to create IRQ domain\n", node->full_name);

	for (b = 0; b < NR_BANKS; b++) {
		/* cache the per-bank register addresses */
		intc.pending[b] = base + reg_pending[b];
		intc.enable[b] = base + reg_enable[b];
		intc.disable[b] = base + reg_disable[b];

		for (i = 0; i < bank_irqs[b]; i++) {
			irq = irq_create_mapping(intc.domain, MAKE_HWIRQ(b, i));
			BUG_ON(irq <= 0);
			irq_set_chip_and_handler(irq, &armctrl_chip,
						 handle_level_irq);
			irq_set_probe(irq);
		}
	}

	if (is_2836) {
		/* on 2836 this controller cascades off a parent irqchip */
		int parent_irq = irq_of_parse_and_map(node, 0);

		if (!parent_irq) {
			panic("%s: unable to get parent interrupt.\n",
			      node->full_name);
		}
		irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
	} else {
		/* on 2835 this IS the root controller */
		set_handle_irq(bcm2835_handle_irq);
	}

	return 0;
}
/*
 * OPAL platform init: locate the /ibm,opal node, create platform
 * devices for its serial console children, then map and request every
 * interrupt listed in the "opal-interrupts" property.
 *
 * Returns 0 on success, -ENODEV if the OPAL node is absent, -ENOMEM if
 * the irq table cannot be allocated.
 */
static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	const u32 *irqs;
	int rc, i, irqlen;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* OPALv2 keeps consoles in a dedicated child node */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);

	/* Register serial ports */
	for_each_child_of_node(consoles, np) {
		if (strcmp(np->name, "serial"))
			continue;
		of_platform_device_create(np, NULL, NULL);
	}
	of_node_put(consoles);

	/* Find all OPAL interrupts and request them */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	/*
	 * FIX: of_get_property() leaves irqlen untouched when the
	 * property is missing, so the original read an uninitialized
	 * value below.  Treat a missing property as zero interrupts.
	 */
	if (!irqs)
		irqlen = 0;
	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);

	opal_irq_count = irqlen / 4;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	/* FIX: the original dereferenced opal_irqs without checking */
	if (opal_irq_count && !opal_irqs)
		return -ENOMEM;
	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
		unsigned int hwirq = be32_to_cpup(irqs);
		unsigned int irq = irq_create_mapping(NULL, hwirq);
		if (irq == NO_IRQ) {
			pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
			continue;
		}
		rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
		if (rc)
			pr_warning("opal: Error %d requesting irq %d"
				   " (0x%x)\n", rc, irq, hwirq);
		opal_irqs[i] = irq;
	}
	return 0;
}
/*
 * Register one Versatile FPGA interrupt controller instance: fill in
 * its irq_chip callbacks, optionally chain it behind @parent_irq
 * (pass -1 for none), create an irq domain, and pre-map every hwirq
 * whose bit is set in @valid.
 *
 * Instances are stored in the fixed fpga_irq_devices[] table; the
 * function returns silently (after an error message) once the table
 * is full.
 */
void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
			  int parent_irq, u32 valid, struct device_node *node)
{
	struct fpga_irq_data *f;
	int i;

	if (fpga_irq_id >= ARRAY_SIZE(fpga_irq_devices)) {
		pr_err("%s: too few FPGA IRQ controllers, increase CONFIG_VERSATILE_FPGA_IRQ_NR\n", __func__);
		return;
	}
	f = &fpga_irq_devices[fpga_irq_id];
	f->base = base;
	f->chip.name = name;
	/* ack is implemented as mask on this hardware */
	f->chip.irq_ack = fpga_irq_mask;
	f->chip.irq_mask = fpga_irq_mask;
	f->chip.irq_unmask = fpga_irq_unmask;
	f->valid = valid;

	if (parent_irq != -1) {
		irq_set_handler_data(parent_irq, f);
		irq_set_chained_handler(parent_irq, fpga_irq_handle);
	}

	/* This will also allocate irq descriptors */
	f->domain = irq_domain_add_simple(node, fls(valid), irq_start,
					  &fpga_irqdomain_ops, f);

	/* This will allocate all valid descriptors in the linear case */
	for (i = 0; i < fls(valid); i++)
		if (valid & BIT(i)) {
			/* irq_start == 0 means linear domain: map now */
			if (!irq_start)
				irq_create_mapping(f->domain, i);
			f->used_irqs++;
		}

	pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
		fpga_irq_id, name, base, f->used_irqs);
	if (parent_irq != -1)
		pr_cont(", parent IRQ: %d\n", parent_irq);
	else
		pr_cont("\n");

	fpga_irq_id++;
}
/*
 * Create the irq domain for SSB chipcommon GPIOs, map one virq per
 * GPIO line, hook the shared GPIO interrupt handler, and enable GPIO
 * interrupts in the chipcommon IRQ mask.
 *
 * Only applies to SSB-hosted buses; returns 0 immediately otherwise.
 * On failure all mappings and the domain are torn down and a negative
 * errno is returned.
 */
static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus)
{
	struct ssb_chipcommon *chipco = &bus->chipco;
	struct gpio_chip *chip = &bus->gpio;
	int gpio, hwirq, err;

	if (bus->bustype != SSB_BUSTYPE_SSB)
		return 0;

	bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
						&irq_domain_simple_ops,
						chipco);
	if (!bus->irq_domain) {
		err = -ENODEV;
		goto err_irq_domain;
	}
	/* pre-map every GPIO so gpio_to_irq() translation is cheap */
	for (gpio = 0; gpio < chip->ngpio; gpio++) {
		int irq = irq_create_mapping(bus->irq_domain, gpio);

		irq_set_chip_data(irq, bus);
		irq_set_chip_and_handler(irq, &ssb_gpio_irq_chipco_chip,
					 handle_simple_irq);
	}

	/* +2: chipcommon GPIO line offset within the MIPS irq space */
	hwirq = ssb_mips_irq(bus->chipco.dev) + 2;
	err = request_irq(hwirq, ssb_gpio_irq_chipco_handler, IRQF_SHARED,
			  "gpio", bus);
	if (err)
		goto err_req_irq;

	/* unmask nothing yet; individual lines enable themselves later */
	ssb_chipco_gpio_intmask(&bus->chipco, ~0, 0);
	chipco_set32(chipco, SSB_CHIPCO_IRQMASK, SSB_CHIPCO_IRQ_GPIO);

	return 0;

err_req_irq:
	for (gpio = 0; gpio < chip->ngpio; gpio++) {
		int irq = irq_find_mapping(bus->irq_domain, gpio);

		irq_dispose_mapping(irq);
	}
	irq_domain_remove(bus->irq_domain);
err_irq_domain:
	return err;
}
int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip) { struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); int i; pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np, MAX_MSI_IRQS, &ks_dw_pcie_msi_domain_ops, chip); if (!pp->irq_domain) { dev_err(pp->dev, "irq domain init failed\n"); return -ENXIO; } for (i = 0; i < MAX_MSI_IRQS; i++) irq_create_mapping(pp->irq_domain, i); return 0; }
/*
 * Configure and register the per-CPU ccount clock event device for
 * @cpu: oneshot-only, driven by the internal timer interrupt mapped
 * through the default irq domain.
 */
void local_timer_setup(unsigned cpu)
{
	struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
	struct clock_event_device *clockevent = &timer->evt;

	timer->irq_enabled = 1;
	/*
	 * name points at timer->name's storage, so assigning it before
	 * snprintf() fills the buffer is fine.
	 */
	clockevent->name = timer->name;
	snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
	clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
	clockevent->rating = 300;
	clockevent->set_next_event = ccount_timer_set_next_event;
	clockevent->set_mode = ccount_timer_set_mode;
	clockevent->cpumask = cpumask_of(cpu);
	clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
	if (WARN(!clockevent->irq, "error: can't map timer irq"))
		return;
	/* min delta 0xf ticks, max delta full 32-bit ccount range */
	clockevents_config_and_register(clockevent, ccount_freq,
					0xf, 0xffffffff);
}
/*
 * Create the irq domain for BCMA chipcommon GPIOs, map one virq per
 * GPIO line, hook the shared GPIO interrupt handler, and enable GPIO
 * interrupts in the chipcommon IRQ mask.
 *
 * Only applies to SoC-hosted buses; returns 0 immediately otherwise.
 * On failure all mappings and the domain are torn down and a negative
 * errno is returned.
 */
static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
{
	struct gpio_chip *chip = &cc->gpio;
	int gpio, hwirq, err;

	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
		return 0;

	cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
					       &irq_domain_simple_ops, cc);
	if (!cc->irq_domain) {
		err = -ENODEV;
		goto err_irq_domain;
	}
	/* pre-map every GPIO so gpio_to_irq() translation is cheap */
	for (gpio = 0; gpio < chip->ngpio; gpio++) {
		int irq = irq_create_mapping(cc->irq_domain, gpio);

		irq_set_chip_data(irq, cc);
		irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
					 handle_simple_irq);
	}

	hwirq = bcma_core_irq(cc->core, 0);
	err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
			  cc);
	if (err)
		goto err_req_irq;

	/* mask all lines initially; they are enabled individually later */
	bcma_chipco_gpio_intmask(cc, ~0, 0);
	bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);

	return 0;

err_req_irq:
	for (gpio = 0; gpio < chip->ngpio; gpio++) {
		int irq = irq_find_mapping(cc->irq_domain, gpio);

		irq_dispose_mapping(irq);
	}
	irq_domain_remove(cc->irq_domain);
err_irq_domain:
	return err;
}
/*
 * Probe and initialise the Ralink interrupt controller: map its MMIO
 * window, mask all interrupts, route them to MIPS HW0, register the
 * legacy irq domain, install the chained handler on the parent IRQ,
 * and publish the perfcount irq.
 *
 * Fatal setup failures panic(); always returns 0 otherwise.
 */
static int __init intc_of_init(struct device_node *node,
			       struct device_node *parent)
{
	struct resource res;
	struct irq_domain *domain;
	int irq;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq)
		panic("Failed to get INTC IRQ");

	if (of_address_to_resource(node, 0, &res))
		panic("Failed to get intc memory range");

	/*
	 * FIX: request_mem_region() returns a struct resource pointer
	 * (NULL on failure), not a negative errno — the original
	 * "< 0" comparison against a pointer could never be true.
	 */
	if (!request_mem_region(res.start, resource_size(&res), res.name))
		pr_err("Failed to request intc memory");

	rt_intc_membase = ioremap_nocache(res.start, resource_size(&res));
	if (!rt_intc_membase)
		panic("Failed to remap intc memory");

	/* disable all interrupts */
	rt_intc_w32(~0, INTC_REG_DISABLE);

	/* route all INTC interrupts to MIPS HW0 interrupt */
	rt_intc_w32(0, INTC_REG_TYPE);

	domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
				       RALINK_INTC_IRQ_BASE, 0,
				       &irq_domain_ops, NULL);
	if (!domain)
		panic("Failed to add irqdomain");

	rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);

	irq_set_chained_handler(irq, ralink_intc_irq_handler);
	irq_set_handler_data(irq, domain);

	/* tell the kernel which irq is used for performance monitoring */
	cp0_perfcount_irq = irq_create_mapping(domain, 9);

	return 0;
}
/*
 * Map a PS3 hypervisor outlet to a Linux virq on the given CPU binding
 * and attach the per-CPU private data as chip data.  The new virq is
 * returned masked via *virq.
 *
 * Returns 0 on success; -ENOMEM if the mapping fails, or the
 * irq_set_chip_data() error (with the mapping disposed).
 */
static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
			  unsigned int *virq)
{
	int result;
	struct ps3_private *pd;

	/* This defines the default interrupt distribution policy. */
	if (cpu == PS3_BINDING_CPU_ANY)
		cpu = 0;

	pd = &per_cpu(ps3_private, cpu);

	*virq = irq_create_mapping(NULL, outlet);
	if (*virq == NO_IRQ) {
		pr_debug("%s:%d: irq_create_mapping failed: outlet %lu\n",
			 __func__, __LINE__, outlet);
		result = -ENOMEM;
		goto fail_create;
	}

	pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
		 outlet, cpu, *virq);

	result = irq_set_chip_data(*virq, pd);
	if (result) {
		pr_debug("%s:%d: irq_set_chip_data failed\n", __func__,
			 __LINE__);
		goto fail_set;
	}

	/* start masked; callers unmask when ready to receive */
	ps3_chip_mask(irq_get_irq_data(*virq));

	return result;

fail_set:
	irq_dispose_mapping(*virq);
fail_create:
	return result;
}
/*
 * Initialise the ARC MCIP IDU (Interrupt Distribution Unit): verify
 * its presence via the build-config registers, create an irq domain
 * for the common interrupts, mask them all, and install the IDU
 * cascade handler on each parent (core-intc) virq.
 *
 * Always returns 0; absence of the IDU is fatal (panic) since the
 * DeviceTree claims it exists.
 */
static int __init idu_of_init(struct device_node *intc,
			      struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	/* NOTE(review): return value is not checked for NULL here —
	 * confirm irq_domain_add_linear() cannot fail in this context. */
	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */
	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Return parent uplink IRQs (towards core intc) 24,25,.....
		 * this step has been done before already
		 * however we need it to get the parent virq and set IDU handler
		 * as first level isr
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
/* Translate our of irq notation
 * format: <ctrl_num ctrl_irq parent_irq type>
 *
 * Decodes a 4-cell interrupt specifier into a hwirq/type pair and, for
 * sub-controllers, wires the parent controller's line to the demux
 * handler.  Returns 0 on success or a negative errno.
 */
static int s3c24xx_irq_xlate_of(struct irq_domain *d, struct device_node *n,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	struct s3c_irq_intc *intc;
	struct s3c_irq_intc *parent_intc;
	struct s3c_irq_data *irq_data;
	struct s3c_irq_data *parent_irq_data;
	int irqno;

	if (WARN_ON(intsize < 4))
		return -EINVAL;

	if (intspec[0] > 2 || !s3c_intc[intspec[0]]) {
		pr_err("controller number %d invalid\n", intspec[0]);
		return -EINVAL;
	}
	intc = s3c_intc[intspec[0]];

	/* hwirq space: 32 lines per controller */
	*out_hwirq = intspec[0] * 32 + intspec[2];
	*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;

	parent_intc = intc->parent;
	if (parent_intc) {
		/* record the parent/child relationship for the demuxer */
		irq_data = &intc->irqs[intspec[2]];
		irq_data->parent_irq = intspec[1];
		parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
		parent_irq_data->sub_intc = intc;
		parent_irq_data->sub_bits |= (1UL << intspec[2]);

		/* parent_intc is always s3c_intc[0], so no offset */
		irqno = irq_create_mapping(parent_intc->domain, intspec[1]);
		/*
		 * FIX: irq_create_mapping() returns an unsigned virq that
		 * is 0 on failure, never negative — the original
		 * "irqno < 0" check was dead code and a failure would
		 * have returned 0 (success).
		 */
		if (!irqno) {
			pr_err("irq: could not map parent interrupt\n");
			return -EINVAL;
		}

		irq_set_chained_handler(irqno, s3c_irq_demux);
	}

	return 0;
}
/*
 * Construct an I/O interrupt outlet for @interrupt_id via the
 * hypervisor and map it to a Linux virq, returned in *virq.
 *
 * Returns 0 on success or the lv1 error code from outlet construction.
 * NOTE(review): *virq is not checked against NO_IRQ here and the
 * outlet is not destructed on mapping failure — callers presumably
 * validate the virq; confirm.
 */
int ps3_alloc_io_irq(unsigned int interrupt_id, unsigned int *virq)
{
	int result;
	unsigned long outlet;

	result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
	if (result) {
		pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
			 __func__, __LINE__, ps3_result(result));
		return result;
	}

	*virq = irq_create_mapping(NULL, outlet);

	pr_debug("%s:%d: interrupt_id %u => outlet %lu, virq %u\n",
		 __func__, __LINE__, interrupt_id, outlet, *virq);

	return 0;
}
/*
 * Create the single-interrupt irq domain for the ATC260x on/off-reset
 * line and map its only hwirq (0).
 *
 * Returns 0 on success, -ENODEV on failure (with the domain removed if
 * it was created).
 */
static int atc260x_onoff_irq_chip_init(struct platform_device *pdev)
{
	onoff_reset_domain = irq_domain_add_linear(pdev->dev.of_node, 1,
						   &onoff_reset_irq_ops, NULL);
	if (!onoff_reset_domain) {
		pr_err("[onoff_reset_irq] %s: irq_domain_add_linear failed!\n",
		       __func__);
		return -ENODEV;
	}

	onoff_reset_irq = irq_create_mapping(onoff_reset_domain, 0);
	if (onoff_reset_irq <= 0) {
		pr_err("[onoff_reset_irq] %s: irq_create_mapping failed!\n",
		       __func__);
		/* FIX: don't leak the domain created above on failure */
		irq_domain_remove(onoff_reset_domain);
		onoff_reset_domain = NULL;
		return -ENODEV;
	}

	return 0;
}
/*
 * Resolve the PIC32 core timer interrupt from the devicetree, falling
 * back to a default mapping in the root irq domain when the infra node
 * or its interrupt property is absent.
 *
 * Returns the virq to use for the core timer.
 */
static unsigned int pic32_xlate_core_timer_irq(void)
{
	struct device_node *node;
	unsigned int irq;

	node = of_find_matching_node(NULL, pic32_infra_match);
	if (WARN_ON(!node))
		goto default_map;

	irq = irq_of_parse_and_map(node, 0);
	/*
	 * FIX: of_find_matching_node() returns the node with an elevated
	 * refcount; the original never dropped it, leaking a reference
	 * on every call.
	 */
	of_node_put(node);
	if (!irq)
		goto default_map;

	return irq;

default_map:
	return irq_create_mapping(NULL, DEFAULT_CORE_TIMER_INTERRUPT);
}
/*
 * Set up the AR5312 miscellaneous interrupt controller: install the
 * board dispatch hook, create the misc irq domain, register the AHB
 * bus-error handler, and chain the misc controller behind its parent
 * line.
 */
void __init ar5312_arch_init_irq(void)
{
	struct irq_domain *domain;
	unsigned irq;

	ath25_irq_dispatch = ar5312_irq_dispatch;

	domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT,
				       &ar5312_misc_irq_domain_ops, NULL);
	if (!domain)
		panic("Failed to add IRQ domain");

	/* AHB processor-error interrupt gets a dedicated action */
	irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC);
	setup_irq(irq, &ar5312_ahb_err_interrupt);

	irq_set_chained_handler(AR5312_IRQ_MISC, ar5312_misc_irq_handler);
	irq_set_handler_data(AR5312_IRQ_MISC, domain);

	/* keep the domain for later lookups by the misc handler */
	ar5312_misc_irq_domain = domain;
}
/*
 * Wire up interrupt handling for a Spreadtrum GPIO chip: optionally
 * install the muxed cascade handler on @irq, then map every GPIO
 * offset in the chip's irq domain and configure it as a valid
 * level-triggered interrupt using @irqchip.
 */
static void gpio_irq_init(int irq, struct gpio_chip *gpiochip,
			  struct irq_chip *irqchip)
{
	int n, i;
	struct sci_gpio_chip *sci_gpio = to_sci_gpio(gpiochip);
	struct irq_domain *irq_domain = sci_gpio->irq_domain;

	/* setup the cascade irq handlers */
	if (sci_gpio->is_adi_gpio) {	/*TODO*/
		irq_set_chained_handler(irq, gpio_muxed_flow_handler);
		irq_set_handler_data(irq, gpiochip);
	}

	for (i = 0; i < gpiochip->ngpio; i++) {
		n = irq_create_mapping(irq_domain, i);
		irq_set_chip_and_handler(n, irqchip, handle_level_irq);
		irq_set_chip_data(n, gpiochip);
		set_irq_flags(n, IRQF_VALID);
	}
}
/*
 * Construct a hypervisor event receive port and map its outlet to a
 * Linux virq, returned in *virq (NO_IRQ on failure).
 *
 * Returns 0 on success or the lv1 error code.
 * NOTE(review): the mapping result itself is not checked against
 * NO_IRQ — presumably callers validate *virq; confirm.
 */
int ps3_alloc_event_irq(unsigned int *virq)
{
	int result;
	unsigned long outlet;

	result = lv1_construct_event_receive_port(&outlet);
	if (result) {
		pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n",
			 __func__, __LINE__, ps3_result(result));
		*virq = NO_IRQ;
		return result;
	}

	*virq = irq_create_mapping(NULL, outlet);

	pr_debug("%s:%d: outlet %lu, virq %u\n", __func__, __LINE__, outlet,
		 *virq);

	return 0;
}
/**
 * gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
 * @chip: the gpio chip to add the irqchip to
 * @irqchip: the irqchip to add to the adapter
 * @first_irq: if not dynamically assigned, the base (first) IRQ to
 * allocate gpio irqs from
 * @handler: the irq handler to use (often a predefined irq core function)
 * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
 * to have the core avoid setting up any default type in the hardware.
 *
 * This function closely associates a certain irqchip with a certain
 * gpio chip, providing an irq domain to translate the local IRQs to
 * global irqs, and making sure that the gpio chip
 * is passed as chip data to all related functions. Driver callbacks
 * need to use container_of() to get their local state containers back
 * from the gpio chip passed as chip data. An irqdomain will be stored
 * in the gpio chip that shall be used by the driver to handle IRQ number
 * translation. The gpio chip will need to be initialized and registered
 * before calling this function.
 *
 * Returns 0 on success or -EINVAL on bad arguments or domain-creation
 * failure (in which case ggc->irqchip is reset to NULL).
 */
static int gb_gpio_irqchip_add(struct gpio_chip *chip,
			 struct irq_chip *irqchip,
			 unsigned int first_irq,
			 irq_flow_handler_t handler,
			 unsigned int type)
{
	struct gb_gpio_controller *ggc;
	unsigned int offset;
	unsigned int irq_base;

	if (!chip || !irqchip)
		return -EINVAL;

	ggc = gpio_chip_to_gb_gpio_controller(chip);

	ggc->irqchip = irqchip;
	ggc->irq_handler = handler;
	ggc->irq_default_type = type;
	/* lines are numbered 0..line_max, hence the +1 domain size */
	ggc->irqdomain = irq_domain_add_simple(NULL,
					ggc->line_max + 1, first_irq,
					&gb_gpio_domain_ops, chip);
	if (!ggc->irqdomain) {
		ggc->irqchip = NULL;
		return -EINVAL;
	}

	/*
	 * Prepare the mapping since the irqchip shall be orthogonal to
	 * any gpio calls. If the first_irq was zero, this is
	 * necessary to allocate descriptors for all IRQs.
	 */
	for (offset = 0; offset < (ggc->line_max + 1); offset++) {
		irq_base = irq_create_mapping(ggc->irqdomain, offset);
		/* remember the virq of line 0 as the chip's irq base */
		if (offset == 0)
			ggc->irq_base = irq_base;
	}

	return 0;
}
/*
 * Map and request every interrupt listed in the OPAL node's
 * "opal-interrupts" property, caching the resulting virqs in
 * opal_irqs[] for later teardown.  Failures on individual interrupts
 * are logged and skipped.
 */
static void __init opal_irq_init(struct device_node *dn)
{
	const __be32 *irqs;
	int i, irqlen;

	/* Get interrupt property */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	pr_debug("Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);
	/*
	 * FIX: of_get_property() does not write irqlen when the property
	 * is missing, so the original divided an uninitialized value.
	 */
	if (!irqs)
		return;

	/* Install interrupt handlers */
	opal_irq_count = irqlen / 4;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	/* FIX: the original indexed opal_irqs without a NULL check */
	if (!opal_irqs)
		return;
	for (i = 0; i < opal_irq_count; i++, irqs++) {
		unsigned int irq, virq;
		int rc;

		/* Get hardware and virtual IRQ */
		irq = be32_to_cpup(irqs);
		virq = irq_create_mapping(NULL, irq);
		if (virq == NO_IRQ) {
			pr_warn("Failed to map irq 0x%x\n", irq);
			continue;
		}

		/* Install interrupt handler */
		rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
		if (rc) {
			irq_dispose_mapping(virq);
			pr_warn("Error %d requesting irq %d (0x%x)\n",
				rc, virq, irq);
			continue;
		}

		/* Cache IRQ */
		opal_irqs[i] = virq;
	}
}
static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan) { u32 val; int chan_index = chan + sc->base_chan; int ret; struct pasemi_fnu_txring *ring; ring = &sc->tx[chan]; spin_lock_init(&ring->fill_lock); spin_lock_init(&ring->clean_lock); ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) * TX_RING_SIZE, GFP_KERNEL); if (!ring->desc_info) return -ENOMEM; /* Allocate descriptors */ ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev, TX_RING_SIZE * 2 * sizeof(u64), &ring->dma, GFP_KERNEL); if (!ring->desc) return -ENOMEM; memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64)); out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30); ring->total_pktcnt = 0; out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index), PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma)); val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32); val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2); out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val); out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index), PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_TATTR(chan) | PAS_DMA_TXCHAN_CFG_WT(2)); /* enable tx channel */ out_le32(sc->dma_regs + PAS_DMA_TXCHAN_TCMDSTA(chan_index), PAS_DMA_TXCHAN_TCMDSTA_EN); out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index), PAS_IOB_DMA_TXCH_CFG_CNTTH(1000)); ring->next_to_fill = 0; ring->next_to_clean = 0; snprintf(ring->irq_name, sizeof(ring->irq_name), "%s%d", "crypto", chan); ring->irq = irq_create_mapping(NULL, sc->base_irq + chan); ret = request_irq(ring->irq, (irq_handler_t) pasemi_intr, IRQF_DISABLED, ring->irq_name, sc); if (ret) { printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n", ring->irq, ret); ring->irq = -1; return ret; } setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc); return 0; }
/*
 * Map and request every interrupt advertised by @np, installing
 * @handler on each (up to 16).  Supports both the obsolete
 * "open-pic-interrupt" property and standard OF interrupt parsing.
 * Failures are logged with WARN_ON; the function returns at the first
 * request_irq() failure.
 */
void request_event_sources_irqs(struct device_node *np,
				irq_handler_t handler,
				const char *name)
{
	int i, index, count = 0;
	struct of_phandle_args oirq;
	const u32 *opicprop;
	unsigned int opicplen;
	unsigned int virqs[16];

	/* Check for obsolete "open-pic-interrupt" property. If present, then
	 * map those interrupts using the default interrupt host and default
	 * trigger
	 */
	opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
	if (opicprop) {
		opicplen /= sizeof(u32);	/* bytes -> cell count */
		for (i = 0; i < opicplen; i++) {
			if (count > 15)
				break;
			virqs[count] = irq_create_mapping(NULL,
							  *(opicprop++));
			if (virqs[count] == NO_IRQ) {
				pr_err("event-sources: Unable to allocate "
				       "interrupt number for %s\n",
				       np->full_name);
				WARN_ON(1);
			} else
				count++;
		}
	}
	/* Else use normal interrupt tree parsing */
	else {
		/* First try to do a proper OF tree parsing */
		for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
		     index++) {
			if (count > 15)
				break;
			virqs[count] = irq_create_of_mapping(&oirq);
			if (virqs[count] == NO_IRQ) {
				pr_err("event-sources: Unable to allocate "
				       "interrupt number for %s\n",
				       np->full_name);
				WARN_ON(1);
			} else
				count++;
		}
	}

	/* Now request them */
	for (i = 0; i < count; i++) {
		if (request_irq(virqs[i], handler, 0, name, NULL)) {
			pr_err("event-sources: Unable to request interrupt "
			       "%d for %s\n", virqs[i], np->full_name);
			WARN_ON(1);
			return;
		}
	}
}
/*
 * OPAL platform init: locate the /ibm,opal node, create platform
 * devices for its serial console children, map and request every
 * interrupt from "opal-interrupts", then bring up the sysfs, dump,
 * elog, flash, sys-param and msglog interfaces.
 *
 * Returns 0 on success, -ENODEV if the OPAL node is absent, -ENOMEM if
 * the irq table cannot be allocated.
 */
static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	const __be32 *irqs;
	int rc, i, irqlen;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Find all OPAL interrupts and request them */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	/*
	 * FIX: of_get_property() leaves irqlen untouched when the
	 * property is missing, so the original read an uninitialized
	 * value below.  Treat a missing property as zero interrupts.
	 */
	if (!irqs)
		irqlen = 0;
	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);

	opal_irq_count = irqlen / 4;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	/* FIX: the original dereferenced opal_irqs without checking */
	if (opal_irq_count && !opal_irqs)
		return -ENOMEM;
	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
		unsigned int hwirq = be32_to_cpup(irqs);
		unsigned int irq = irq_create_mapping(NULL, hwirq);
		if (irq == NO_IRQ) {
			pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
			continue;
		}
		rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
		if (rc)
			pr_warning("opal: Error %d requesting irq %d"
				   " (0x%x)\n", rc, irq, hwirq);
		opal_irqs[i] = irq;
	}

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	return 0;
}
/*
 * gpio_chip .to_irq callback: translate a GPIO offset into its Linux
 * virq via the controller's irq domain (creating the mapping on first
 * use).
 */
static int em_gio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return irq_create_mapping(gpio_to_priv(chip)->irq_domain, offset);
}
/*
 * Register the inter-processor interrupt: map the IPI hwirq through
 * the default irq domain and attach the static IPI irqaction.
 */
void ipi_init(void)
{
	unsigned virq = irq_create_mapping(NULL, IPI_IRQ);

	setup_irq(virq, &ipi_irqaction);
}
/*
 * gpio_chip .to_irq callback: translate a GPIO offset into its Linux
 * virq via the Langwell controller's irq domain.
 */
static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct lnw_gpio *priv = to_lnw_priv(chip);

	return irq_create_mapping(priv->domain, offset);
}
/*
 * Attach to a macio probed interface
 *
 * Allocates and initialises a pmac IDE hwif for the macio device:
 * claims the port (and optional DMA) resources, resolves the
 * interrupt (falling back to a hard-coded mapping of 13 when the
 * device advertises none), maps the registers, and hands the
 * interface to the common IDE layer.  Returns 0 on success or a
 * negative errno with all claimed resources released.
 */
static int pmac_ide_macio_attach(struct macio_dev *mdev,
				 const struct of_device_id *match)
{
	void __iomem *base;
	unsigned long regbase;
	pmac_ide_hwif_t *pmif;
	int irq, rc;
	struct ide_hw hw;

	pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
	if (pmif == NULL)
		return -ENOMEM;

	if (macio_resource_count(mdev) == 0) {
		printk(KERN_WARNING "ide-pmac: no address for %s\n",
		       mdev->ofdev.dev.of_node->full_name);
		rc = -ENXIO;
		goto out_free_pmif;
	}

	/* Request memory resource for IO ports */
	if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
		printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
				"%s!\n", mdev->ofdev.dev.of_node->full_name);
		rc = -EBUSY;
		goto out_free_pmif;
	}

	/* XXX This is bogus. Should be fixed in the registry by checking
	 * the kind of host interrupt controller, a bit like gatwick
	 * fixes in irq.c. That works well enough for the single case
	 * where that happens though...
	 */
	if (macio_irq_count(mdev) == 0) {
		printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
				    "13\n",
		       mdev->ofdev.dev.of_node->full_name);
		irq = irq_create_mapping(NULL, 13);
	} else
		irq = macio_irq(mdev, 0);

	base = ioremap(macio_resource_start(mdev, 0), 0x400);
	regbase = (unsigned long) base;

	pmif->mdev = mdev;
	pmif->node = mdev->ofdev.dev.of_node;
	pmif->regbase = regbase;
	pmif->irq = irq;
	pmif->kauai_fcr = NULL;

	/* DMA registers live in the (optional) second resource */
	if (macio_resource_count(mdev) >= 2) {
		if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
			printk(KERN_WARNING "ide-pmac: can't request DMA "
					    "resource for %s!\n",
			       mdev->ofdev.dev.of_node->full_name);
		else
			pmif->dma_regs =
				ioremap(macio_resource_start(mdev, 1), 0x1000);
	} else
		pmif->dma_regs = NULL;

	dev_set_drvdata(&mdev->ofdev.dev, pmif);

	memset(&hw, 0, sizeof(hw));
	pmac_ide_init_ports(&hw, pmif->regbase);
	hw.irq = irq;
	hw.dev = &mdev->bus->pdev->dev;
	hw.parent = &mdev->ofdev.dev;

	rc = pmac_ide_setup_device(pmif, &hw);
	if (rc != 0) {
		/* The interface is released to the common IDE layer */
		dev_set_drvdata(&mdev->ofdev.dev, NULL);
		iounmap(base);
		if (pmif->dma_regs) {
			iounmap(pmif->dma_regs);
			macio_release_resource(mdev, 1);
		}
		macio_release_resource(mdev, 0);
		kfree(pmif);
	}
	return rc;

out_free_pmif:
	kfree(pmif);
	return rc;
}