/*
 * asic2_irq_init - one-time IRQ setup for the iPAQ ASIC2 companion chip.
 *
 * Masks and acks every KPIO/GPIO interrupt source in hardware, then
 * registers a level-triggered irq_chip for each demuxed Linux IRQ.
 * The ADC IRQ is registered valid but not auto-enabled.
 */
static void asic2_irq_init(struct asic2_data *asic)
{
	int i;

	/* No KPIO interrupts are enabled yet; keep the shadow in sync. */
	asic->kpio_int_shadow = 0;

	/* Disable all IRQs and set up clock */
	__asic2_write_register(asic, IPAQ_ASIC2_KPIINTSTAT, 0);  /* Disable all interrupts */
	__asic2_write_register(asic, IPAQ_ASIC2_GPIINTSTAT, 0);

	__asic2_write_register(asic, IPAQ_ASIC2_KPIINTCLR, 0);   /* Clear all KPIO interrupts */
	__asic2_write_register(asic, IPAQ_ASIC2_GPIINTCLR, 0);   /* Clear all GPIO interrupts */

	/* Keypad I/O lines: level handler, probing allowed. */
	for ( i = 0 ; i < IPAQ_ASIC2_KPIO_IRQ_COUNT ; i++ ) {
		int irq = i + asic->irq_base + IPAQ_ASIC2_KPIO_IRQ_START;
		set_irq_chip(irq, &asic2_kpio_irq_chip);
		set_irq_chip_data(irq, asic);
		set_irq_handler(irq, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}

	/* General-purpose I/O lines: same treatment, different chip. */
	for ( i = 0 ; i < IPAQ_ASIC2_GPIO_IRQ_COUNT ; i++ ) {
		int irq = i + asic->irq_base + IPAQ_ASIC2_GPIO_IRQ_START;
		set_irq_chip(irq, &asic2_gpio_irq_chip);
		set_irq_chip_data(irq, asic);
		set_irq_handler(irq, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}

	/* Don't start up the ADC IRQ automatically */
	set_irq_flags(asic->irq_base + IRQ_IPAQ_ASIC2_ADC,
		      IRQF_VALID | IRQF_NOAUTOEN);
}
/*
 * vlynq_setup_irq - configure interrupt forwarding over a VLYNQ link and
 * install irq chips for the device's virtual IRQ range.
 *
 * Returns 0 on success, -EINVAL if local and remote vectors collide, or
 * -EAGAIN if the cascade interrupt cannot be requested.
 */
static int vlynq_setup_irq(struct vlynq_device *dev)
{
	u32 val;
	int i, virq;

	if (dev->local_irq == dev->remote_irq) {
		printk(KERN_ERR
		       "%s: local vlynq irq should be different from remote\n",
		       dev_name(&dev->dev));
		return -EINVAL;
	}

	/* Clear local and remote error bits */
	writel(readl(&dev->local->status), &dev->local->status);
	writel(readl(&dev->remote->status), &dev->remote->status);

	/* Now setup interrupts */
	val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq);
	val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL |
	       VLYNQ_CTRL_INT2CFG;
	val |= readl(&dev->local->control);
	writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr);
	writel(val, &dev->local->control);

	val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq);
	val |= VLYNQ_CTRL_INT_ENABLE;
	val |= readl(&dev->remote->control);
	writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr);
	/*
	 * NOTE(review): the control value is also written to int_ptr,
	 * overwriting the VLYNQ_INT_OFFSET stored just above -- confirm
	 * this double write is intentional for this hardware.
	 */
	writel(val, &dev->remote->int_ptr);
	writel(val, &dev->remote->control);

	for (i = dev->irq_start; i <= dev->irq_end; i++) {
		virq = i - dev->irq_start;
		if (virq == dev->local_irq) {
			/* Link-status interrupt of the local end. */
			set_irq_chip_and_handler(i, &vlynq_local_chip,
						 handle_level_irq);
			set_irq_chip_data(i, dev);
		} else if (virq == dev->remote_irq) {
			/* Link-status interrupt of the remote end. */
			set_irq_chip_and_handler(i, &vlynq_remote_chip,
						 handle_level_irq);
			set_irq_chip_data(i, dev);
		} else {
			/* Ordinary forwarded interrupt line. */
			set_irq_chip_and_handler(i, &vlynq_irq_chip,
						 handle_simple_irq);
			set_irq_chip_data(i, dev);
			writel(0, &dev->remote->int_device[virq >> 2]);
		}
	}

	if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) {
		printk(KERN_ERR "%s: request_irq failed\n",
		       dev_name(&dev->dev));
		return -EAGAIN;
	}

	return 0;
}
/*
 * syncpt_init_irq - set up the Tegra host1x syncpoint threshold IRQs.
 *
 * Maps the host1x sync register aperture, masks and acks all threshold
 * interrupts, registers a simple irq_chip per syncpoint IRQ, and chains
 * the demux handler onto the CPU-facing host1x interrupt.
 */
void __init syncpt_init_irq(void)
{
	void __iomem *sync_regs;
	unsigned int i;

	sync_regs = ioremap(TEGRA_HOST1X_BASE + HOST1X_SYNC_OFFSET,
			    HOST1X_SYNC_SIZE);
	/* Nothing below can proceed without the register aperture. */
	BUG_ON(!sync_regs);

	/* Mask every threshold interrupt, then clear any pending status. */
	writel(0xffffffffUL,
	       sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
	writel(0xffffffffUL,
	       sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);

	/* One simple irq per syncpoint threshold line. */
	for (i = INT_SYNCPT_THRESH_BASE; i < INT_GPIO_BASE; i++) {
		set_irq_chip(i, &syncpt_thresh_irq);
		set_irq_chip_data(i, sync_regs);
		set_irq_handler(i, handle_simple_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	/* The cascade handler recovers the register base from irq data. */
	if (set_irq_data(INT_HOST1X_MPCORE_SYNCPT, sync_regs))
		BUG();
	set_irq_chained_handler(INT_HOST1X_MPCORE_SYNCPT,
				syncpt_thresh_cascade);
}
/*
 * gic_dist_init - record per-GIC state and register its Linux IRQs.
 *
 * Hardware probing/masking is delegated to _gic_dist_init(); this wrapper
 * wires each interrupt to the shared gic_chip and re-enables the
 * distributor afterwards.
 */
void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
			  unsigned int irq_start)
{
	unsigned int max_irq;
	unsigned int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_data[gic_nr].dist_base = base;
	/* Align so hwirq <-> Linux irq translation works per 32-irq block. */
	gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;

	max_irq = _gic_dist_init(gic_nr);
	gic_data[gic_nr].max_irq = max_irq;

	/*
	 * Setup the Linux IRQ subsystem.
	 * NOTE(review): the upper bound is not clamped to NR_IRQS --
	 * confirm irq_offset + max_irq cannot exceed it on this platform.
	 */
	for (i = irq_start; i < gic_data[gic_nr].irq_offset + max_irq; i++) {
		set_irq_chip(i, &gic_chip);
		set_irq_chip_data(i, &gic_data[gic_nr]);
		set_irq_handler(i, handle_level_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	/* Turn the distributor back on. */
	writel(1, base + GIC_DIST_CTRL);
}
/*
 * msm_init_gpio - register irq chips for every MSM GPIO interrupt, add
 * the gpio chips, and hook the two summary group interrupts up to the
 * demux handler.
 *
 * Fix: the bank index 'j' was advanced without any bound, so a
 * NR_GPIO_IRQS larger than the chips' combined ngpio would index past
 * the end of msm_gpio_chips[]; 'j' is now clamped to the last entry
 * (matching the other MSM GPIO init variants).
 */
static int __init msm_init_gpio(void)
{
	int i, j = 0;

	for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
		/*
		 * Advance to the next bank once this IRQ is past the end
		 * of the current chip's GPIO range, but never run off the
		 * end of the table.
		 */
		if (i - FIRST_GPIO_IRQ >= msm_gpio_chips[j].chip.base +
					  msm_gpio_chips[j].chip.ngpio) {
			if (j < ARRAY_SIZE(msm_gpio_chips) - 1)
				j++;
		}
		set_irq_chip_data(i, &msm_gpio_chips[j]);
		set_irq_chip(i, &msm_gpio_irq_chip);
		set_irq_handler(i, handle_edge_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
		spin_lock_init(&msm_gpio_chips[i].lock);
		/* Mask the whole bank until a consumer enables a line. */
		writel(0, msm_gpio_chips[i].regs.int_en);
		gpiochip_add(&msm_gpio_chips[i].chip);
	}

	set_irq_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
	set_irq_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
	/*
	 * Any non-zero second argument enables wake; the previous literal
	 * "2" for GROUP2 was a typo with identical effect -- use 1.
	 */
	set_irq_wake(INT_GPIO_GROUP1, 1);
	set_irq_wake(INT_GPIO_GROUP2, 1);
	return 0;
}
/*
 * Called from the processor-specific init to enable GPIO interrupt support.
 * Walks every PIO bank: enables its clock, masks all pin interrupts,
 * registers a simple irq_chip per pin, and chains the bank demux handler
 * onto the controller's AIC interrupt.
 */
void __init at91_gpio_irq_setup(void)
{
	unsigned pioc, pin;

	for (pioc = 0, pin = PIN_BASE; pioc < gpio_banks; pioc++) {
		void __iomem *controller;
		unsigned id = gpio[pioc].id;
		unsigned i;

		clk_enable(gpio[pioc].clock);	/* enable PIO controller's clock */

		controller = (void __iomem *) AT91_VA_BASE_SYS + gpio[pioc].offset;
		/* Mask every pin interrupt before wiring the bank up. */
		__raw_writel(~0, controller + PIO_IDR);

		/* The bank demux needs its first pin number and registers. */
		set_irq_data(id, (void *) pin);
		set_irq_chip_data(id, controller);

		for (i = 0; i < 32; i++, pin++) {
			/*
			 * Can use the "simple" and not "edge" handler since it's
			 * shorter, and the AIC handles interrupts sanely.
			 */
			set_irq_chip(pin, &gpio_irqchip);
			set_irq_handler(pin, handle_simple_irq);
			set_irq_flags(pin, IRQF_VALID);
		}

		set_irq_chained_handler(id, gpio_irq_handler);
	}
	pr_info("AT91: %d gpio irqs in %d banks\n", pin - PIN_BASE, gpio_banks);
}
/*
 * ilc_demux_init - register the ILC demuxed interrupt lines and chain
 * the demux handler onto every parent interrupt of the platform device.
 */
static void __init ilc_demux_init(struct platform_device *pdev)
{
	struct ilc *ilc = platform_get_drvdata(pdev);
	int irq;
	int i;

	/* Default all interrupts to active high. */
	for (i = 0, irq = ilc->first_irq; i < ilc->inputs_num; i++, irq++) {
		ILC_SET_TRIGMODE(ilc->base, i, ILC_TRIGGERMODE_HIGH);

		/* SIM: Should we do the masking etc in ilc_irq_demux and
		 * then change this to handle_simple_irq? */
		set_irq_chip_and_handler_name(irq, &ilc_chip, handle_level_irq,
					      ilc->name);
		set_irq_chip_data(irq, ilc);
	}

	/* Chain the demux onto every parent IRQ the device provides;
	 * platform_get_irq() returns a negative value past the last one. */
	i = 0;
	irq = platform_get_irq(pdev, i++);
	while (irq >= 0) {
		set_irq_chip_and_handler(irq, &dummy_irq_chip, ilc_irq_demux);
		set_irq_data(irq, ilc);
		irq = platform_get_irq(pdev, i++);
	}

	return;
}
/*
 * msm_init_gpio - register irq chips for every MSM GPIO interrupt,
 * register the gpio chips, and hook the two summary group interrupts
 * up to the demux handler.  Always returns 0.
 */
static int __init msm_init_gpio(void)
{
	int i, j = 0;

	for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
		/*
		 * Advance to the next GPIO bank once this IRQ moves past
		 * the current chip's range; the bound on j keeps a stale
		 * NR_GPIO_IRQS from indexing off the end of the table.
		 * NOTE(review): compares with '>' against chip.end --
		 * assumes .end is the bank's last GPIO, inclusive; confirm.
		 */
		if (i - FIRST_GPIO_IRQ > msm_gpio_chips[j].chip.end) {
			if (j < ARRAY_SIZE(msm_gpio_chips) - 1)
				j++;
		}
		set_irq_chip_data(i, &msm_gpio_chips[j]);
		set_irq_chip(i, &msm_gpio_irq_chip);
		set_irq_handler(i, handle_edge_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
		/* Mask the whole bank until a consumer enables a line. */
		writel(0, msm_gpio_chips[i].regs.int_en);
		register_gpio_chip(&msm_gpio_chips[i].chip);
	}

	set_irq_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
	set_irq_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
	set_irq_wake(INT_GPIO_GROUP1, 1);
	/* Non-zero means "enable wake"; the literal 2 behaves like 1. */
	set_irq_wake(INT_GPIO_GROUP2, 2);
	return 0;
}
/*
 * Initialize IRQ setting
 *
 * Masks the FPGA interrupt sources, allocates a dynamic IRQ per FPGA
 * line, and chains the demux handler onto the two CPU IRQ pins.
 */
void __init init_se7722_IRQ(void)
{
	int i, irq;

	ctrl_outw(0, IRQ01_MASK);       /* disable all irqs */
	ctrl_outw(0x2000, 0xb03fffec);  /* mrshpc irq enable */

	for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) {
		irq = create_irq();
		/*
		 * NOTE(review): bailing out here leaves earlier FPGA IRQs
		 * registered without the demux handlers installed below --
		 * confirm create_irq() cannot fail mid-loop in practice.
		 */
		if (irq < 0)
			return;
		se7722_fpga_irq[i] = irq;

		set_irq_chip_and_handler_name(se7722_fpga_irq[i],
					      &se7722_irq_chip,
					      handle_level_irq, "level");
		/* The chip callbacks recover the FPGA bit index from this. */
		set_irq_chip_data(se7722_fpga_irq[i], (void *)i);
	}

	set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux);
	set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);

	set_irq_chained_handler(IRQ1_IRQ, se7722_irq_demux);
	set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
}
/*
 * The shift value is now the number of bits to shift, not the number of
 * bits/4.  This is to make it easier to read the value directly from the
 * datasheets.  The IPR address is calculated using the ipr_offset table.
 *
 * register_ipr_controller - hook every IRQ described by @desc into genirq,
 * leaving each line masked at both the genirq and IPR level.
 */
void register_ipr_controller(struct ipr_desc *desc)
{
	int i;

	/* All IPR chips share the same mask/unmask implementation. */
	desc->chip.mask = disable_ipr_irq;
	desc->chip.unmask = enable_ipr_irq;
	desc->chip.mask_ack = disable_ipr_irq;

	for (i = 0; i < desc->nr_irqs; i++) {
		struct ipr_data *p = desc->ipr_data + i;
		struct irq_desc *irq_desc;

		/* Every entry must reference a valid IPR register offset. */
		BUG_ON(p->ipr_idx >= desc->nr_offsets);
		BUG_ON(!desc->ipr_offsets[p->ipr_idx]);

		irq_desc = irq_to_desc_alloc_node(p->irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			printk(KERN_INFO "can not get irq_desc for %d\n",
			       p->irq);
			continue;
		}

		disable_irq_nosync(p->irq);
		set_irq_chip_and_handler_name(p->irq, &desc->chip,
					      handle_level_irq, "level");
		set_irq_chip_data(p->irq, p);
		/* Start with the line masked in the IPR register as well. */
		disable_ipr_irq(p->irq);
	}
}
/*
 * msm_init_gpio - register irq chips for every MSM GPIO interrupt, hook
 * the summary group interrupts to the demux handler, add the gpio chips,
 * and set up the debug proc entry.
 *
 * Fix: the bank index 'j' was advanced without any bound, so a
 * NR_GPIO_IRQS larger than the chips' combined ngpio would index past
 * the end of msm_gpio_chips[]; 'j' is now clamped to the last entry
 * (matching the other MSM GPIO init variants).
 */
static int __init msm_init_gpio(void)
{
	int i, j = 0;

	for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
		/*
		 * Advance to the next bank once this IRQ is past the end
		 * of the current chip's GPIO range, but never run off the
		 * end of the table.
		 */
		if (i - FIRST_GPIO_IRQ >= msm_gpio_chips[j].chip.base +
					  msm_gpio_chips[j].chip.ngpio) {
			if (j < ARRAY_SIZE(msm_gpio_chips) - 1)
				j++;
		}
		set_irq_chip_data(i, &msm_gpio_chips[j]);
		set_irq_chip(i, &msm_gpio_irq_chip);
		set_irq_handler(i, handle_edge_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	set_irq_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
	set_irq_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);

	for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
		spin_lock_init(&msm_gpio_chips[i].lock);
		/* Mask the whole bank until a consumer enables a line. */
		__raw_writel(0, msm_gpio_chips[i].regs.int_en);
		gpiochip_add(&msm_gpio_chips[i].chip);
	}
	/* Order the relaxed register writes before enabling wake. */
	mb();
	/*
	 * Any non-zero second argument enables wake; the previous literal
	 * "2" for GROUP2 was a typo with identical effect -- use 1.
	 */
	set_irq_wake(INT_GPIO_GROUP1, 1);
	set_irq_wake(INT_GPIO_GROUP2, 1);

	proc_create_data("gdump", 0, NULL, &gdump_proc_fops, NULL);
	/* Debug dump buffer; allocation failure is tolerated by users. */
	msm_gpio_buf = kzalloc(512, GFP_KERNEL);
	return 0;
}
/*
 * irq_host map hook for the PQ2ADS PCI interrupt controller: every
 * hardware source is level triggered and handled by pq2ads_pci_ic.
 */
static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct irq_desc *desc = get_irq_desc(virq);

	desc->status |= IRQ_LEVEL;
	set_irq_chip_data(virq, h->host_data);
	set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);

	return 0;
}
/*
 * irq_host map hook for the "Flipper" interrupt controller: all sources
 * are level triggered and routed through the flipper_pic chip.
 */
static int flipper_pic_map(struct irq_host *h, unsigned int virq,
			   irq_hw_number_t hwirq)
{
	struct irq_desc *desc = irq_to_desc(virq);

	set_irq_chip_data(virq, h->host_data);
	desc->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq);

	return 0;
}
/*
 * pca953x_irq_setup - optional interrupt support for the PCA953x expander.
 *
 * Only active when the platform supplies an irq_base and the chip variant
 * advertises an INT line.  Registers a nested irq_chip per GPIO and a
 * threaded handler on the i2c client's interrupt.
 * Returns 0 on success or a negative errno (irq_base is reset on failure).
 */
static int pca953x_irq_setup(struct pca953x_chip *chip,
			     const struct i2c_device_id *id)
{
	struct i2c_client *client = chip->client;
	/* NOTE(review): pdata is dereferenced unconditionally -- confirm
	 * platform data is guaranteed present by the probe path. */
	struct pca953x_platform_data *pdata = client->dev.platform_data;
	int ret;

	if (pdata->irq_base && (id->driver_data & PCA953X_INT)) {
		int lvl;

		/* Snapshot the input lines for later edge detection. */
		ret = pca953x_read_reg(chip, PCA953X_INPUT, &chip->irq_stat);
		if (ret)
			goto out_failed;

		/*
		 * There is no way to know which GPIO line generated the
		 * interrupt.  We have to rely on the previous read for
		 * this purpose.
		 */
		chip->irq_stat &= chip->reg_direction;
		chip->irq_base = pdata->irq_base;

		mutex_init(&chip->irq_lock);

		for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
			int irq = lvl + chip->irq_base;

			set_irq_chip_data(irq, chip);
			set_irq_chip_and_handler(irq, &pca953x_irq_chip,
						 handle_edge_irq);
			/* Handlers run from the threaded parent handler. */
			set_irq_nested_thread(irq, 1);
#ifdef CONFIG_ARM
			set_irq_flags(irq, IRQF_VALID);
#else
			set_irq_noprobe(irq);
#endif
		}

		ret = request_threaded_irq(client->irq, NULL,
					   pca953x_irq_handler,
					   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					   dev_name(&client->dev), chip);
		if (ret) {
			dev_err(&client->dev, "failed to request irq %d\n",
				client->irq);
			goto out_failed;
		}

		chip->gpio_chip.to_irq = pca953x_gpio_to_irq;
	}

	return 0;

out_failed:
	chip->irq_base = 0;
	return ret;
}
/*
 * gsc_assign_irq - hand out the next free GSC interrupt number and bind
 * the given chip and data to it.  Returns the assigned IRQ, or NO_IRQ
 * once the GSC range is exhausted.
 */
int gsc_assign_irq(struct irq_chip *type, void *data)
{
	static int irq = GSC_IRQ_BASE;
	int assigned;

	if (irq > GSC_IRQ_MAX)
		return NO_IRQ;

	set_irq_chip_and_handler(irq, type, handle_simple_irq);
	set_irq_chip_data(irq, data);

	assigned = irq;
	irq++;
	return assigned;
}
/*
 * ps3_alloc_irq - allocate a virq and bind a hypervisor outlet to it.
 * @cpu:    cpu binding policy (PS3_BINDING_CPU_ANY selects cpu 0)
 * @outlet: hypervisor interrupt outlet to connect
 * @virq:   out parameter receiving the new Linux virtual irq
 *
 * Returns 0 on success; on failure unwinds the plug connection and/or
 * the irq mapping and returns a negative errno.
 */
int ps3_alloc_irq(enum ps3_cpu_binding cpu, unsigned long outlet,
		  unsigned int *virq)
{
	int result;
	struct ps3_private *pd;

	/* This defines the default interrupt distribution policy. */
	if (cpu == PS3_BINDING_CPU_ANY)
		cpu = 0;

	pd = &per_cpu(ps3_private, cpu);

	*virq = irq_create_mapping(NULL, outlet);
	if (*virq == NO_IRQ) {
		pr_debug("%s:%d: irq_create_mapping failed: outlet %lu\n",
			 __func__, __LINE__, outlet);
		result = -ENOMEM;
		goto fail_create;
	}

	/* Binds outlet to cpu + virq. */
	result = lv1_connect_irq_plug_ext(pd->node, pd->cpu, *virq, outlet, 0);
	if (result) {
		pr_info("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		result = -EPERM;
		goto fail_connect;
	}

	pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
		 outlet, cpu, *virq);

	/* The chip callbacks need the per-cpu private data later. */
	result = set_irq_chip_data(*virq, pd);
	if (result) {
		pr_debug("%s:%d: set_irq_chip_data failed\n",
			 __func__, __LINE__);
		goto fail_set;
	}

	return result;

fail_set:
	lv1_disconnect_irq_plug_ext(pd->node, pd->cpu, *virq);
fail_connect:
	irq_dispose_mapping(*virq);
fail_create:
	return result;
}
/*
 * adp5588_irq_setup - interrupt support for the ADP5588 GPIO expander.
 *
 * Clears pending hardware state, registers a nested level irq_chip per
 * GPIO, requests a threaded handler on the i2c client's interrupt, and
 * finally enables interrupt generation in the chip's CFG register.
 * Returns 0 on success or a negative errno (irq_base is reset on failure).
 */
static int adp5588_irq_setup(struct adp5588_gpio *dev)
{
	struct i2c_client *client = dev->client;
	/* NOTE(review): pdata is dereferenced unconditionally -- confirm
	 * platform data is guaranteed present by the probe path. */
	struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
	unsigned gpio;
	int ret;

	adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC);
	adp5588_gpio_write(client, INT_STAT, -1);	/* status is W1C */
	adp5588_gpio_read_intstat(client, dev->irq_stat); /* read to clear */

	dev->irq_base = pdata->irq_base;
	mutex_init(&dev->irq_lock);

	for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) {
		int irq = gpio + dev->irq_base;

		set_irq_chip_data(irq, dev);
		set_irq_chip_and_handler(irq, &adp5588_irq_chip,
					 handle_level_irq);
		/* Handlers run from the threaded parent handler. */
		set_irq_nested_thread(irq, 1);
#ifdef CONFIG_ARM
		/*
		 * ARM needs us to explicitly flag the IRQ as VALID,
		 * once we do so, it will also set the noprobe.
		 */
		set_irq_flags(irq, IRQF_VALID);
#else
		set_irq_noprobe(irq);
#endif
	}

	ret = request_threaded_irq(client->irq, NULL, adp5588_irq_handler,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(&client->dev), dev);
	if (ret) {
		dev_err(&client->dev, "failed to request irq %d\n",
			client->irq);
		goto out;
	}

	dev->gpio_chip.to_irq = adp5588_gpio_to_irq;
	/* Only now allow the part to raise GPI interrupts. */
	adp5588_gpio_write(client, CFG,
			   ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_GPI_INT);

	return 0;

out:
	dev->irq_base = 0;
	return ret;
}
/*
 * gic_dist_init - probe and fully initialise a GIC distributor, then
 * register its global interrupts with genirq.
 */
void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
			  unsigned int irq_start)
{
	unsigned int max_irq, i;
	u32 cpumask = 1 << smp_processor_id();

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	/* Replicate this CPU's mask into all four byte lanes. */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	gic_data[gic_nr].dist_base = base;
	/* Align so hwirq <-> Linux irq translation works per 32-irq block. */
	gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;

	/* Keep the distributor disabled while reprogramming it. */
	writel(0, base + GIC_DIST_CTRL);

	/* How many interrupt lines the implementation supports; the GIC
	 * architecture caps the total at 1020. */
	max_irq = readl(base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;
	if (max_irq > 1020)
		max_irq = 1020;

	/* Config: 2 bits per irq => 16 irqs per register; 0 = level. */
	for (i = 32; i < max_irq; i += 16)
		writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/* Target: 1 byte per irq => 4 irqs per register; route to us. */
	for (i = 32; i < max_irq; i += 4)
		writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/* Priority: 1 byte per irq; 0xa0 everywhere. */
	for (i = 0; i < max_irq; i += 4)
		writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/* Disable: 1 bit per irq => 32 irqs per clear-enable register. */
	for (i = 0; i < max_irq; i += 32)
		writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/* Register the interrupts with genirq, clamped to NR_IRQS. */
	for (i = irq_start;
	     i < NR_IRQS && i < gic_data[gic_nr].irq_offset + max_irq; i++) {
		set_irq_chip(i, &gic_chip);
		set_irq_chip_data(i, &gic_data[gic_nr]);
		set_irq_handler(i, handle_level_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	/* Turn the distributor back on. */
	writel(1, base + GIC_DIST_CTRL);
}
/*
 * max732x_irq_setup - optional interrupt support for the MAX732x expander.
 *
 * Only active when the platform supplies an irq_base and the feature word
 * (upper 32 bits of driver_data) indicates an interrupt-capable variant.
 * Registers a nested edge irq_chip for each input-capable GPIO and a
 * threaded handler on the i2c client's interrupt.
 * Returns 0 on success or a negative errno (irq_base is reset on failure).
 */
static int max732x_irq_setup(struct max732x_chip *chip,
			     const struct i2c_device_id *id)
{
	struct i2c_client *client = chip->client;
	struct max732x_platform_data *pdata = client->dev.platform_data;
	/* Interrupt capability lives in the upper half of driver_data. */
	int has_irq = max732x_features[id->driver_data] >> 32;
	int ret;

	if (pdata->irq_base && has_irq != INT_NONE) {
		int lvl;

		chip->irq_base = pdata->irq_base;
		chip->irq_features = has_irq;
		mutex_init(&chip->irq_lock);

		for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
			int irq = lvl + chip->irq_base;

			/* Only input-capable pins can raise interrupts. */
			if (!(chip->dir_input & (1 << lvl)))
				continue;

			set_irq_chip_data(irq, chip);
			set_irq_chip_and_handler(irq, &max732x_irq_chip,
						 handle_edge_irq);
			/* Handlers run from the threaded parent handler. */
			set_irq_nested_thread(irq, 1);
#ifdef CONFIG_ARM
			set_irq_flags(irq, IRQF_VALID);
#else
			set_irq_noprobe(irq);
#endif
		}

		ret = request_threaded_irq(client->irq, NULL,
					   max732x_irq_handler,
					   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					   dev_name(&client->dev), chip);
		if (ret) {
			dev_err(&client->dev, "failed to request irq %d\n",
				client->irq);
			goto out_failed;
		}

		chip->gpio_chip.to_irq = max732x_gpio_to_irq;
	}

	return 0;

out_failed:
	chip->irq_base = 0;
	return ret;
}
/*
 * ps3_virq_destroy - release a virtual irq: drop the chip data binding
 * and dispose of the hwirq mapping.  Always returns 0.
 */
int ps3_virq_destroy(unsigned int virq)
{
	const struct ps3_private *priv = get_irq_chip_data(virq);

	pr_debug("%s:%d: node %lu, cpu %d, virq %u\n", __func__, __LINE__,
		 priv->node, priv->cpu, virq);

	set_irq_chip_data(virq, NULL);
	irq_dispose_mapping(virq);

	pr_debug("%s:%d <-\n", __func__, __LINE__);
	return 0;
}
/*
 * ps3_virq_destroy - release a virtual irq: drop the chip data binding
 * and dispose of the hwirq mapping.  Always returns 0.
 */
int ps3_virq_destroy(unsigned int virq)
{
	const struct ps3_private *priv = get_irq_chip_data(virq);

	pr_debug("%s:%d: ppe_id %lu, thread_id %lu, virq %u\n", __func__,
		 __LINE__, priv->ppe_id, priv->thread_id, virq);

	set_irq_chip_data(virq, NULL);
	irq_dispose_mapping(virq);

	pr_debug("%s:%d <-\n", __func__, __LINE__);
	return 0;
}
/*
 * tc3589x_irq_remove - tear down every internal IRQ of the tc3589x,
 * clearing flags, handler, chip and chip data for each line.
 */
static void tc3589x_irq_remove(struct tc3589x *tc3589x)
{
	int irq;
	int first = tc3589x->irq_base;
	int last = first + TC3589x_NR_INTERNAL_IRQS;

	for (irq = first; irq < last; irq++) {
#ifdef CONFIG_ARM
		set_irq_flags(irq, 0);
#endif
		set_irq_chip_and_handler(irq, NULL, NULL);
		set_irq_chip_data(irq, NULL);
	}
}
/*
 * samsung_irq_gpio_add - register level-triggered irq chips for every
 * GPIO of the given bank and install the gpio-to-irq translation hook.
 */
void __init samsung_irq_gpio_add(struct s3c_gpio_chip *chip)
{
	int offs;

	chip->chip.to_irq = samsung_irq_gpio_to_irq;

	for (offs = 0; offs < chip->chip.ngpio; offs++) {
		int irq = IRQ_GPIO_GROUP(chip->group) + offs;

		set_irq_chip(irq, &samsung_irq_gpio);
		set_irq_chip_data(irq, chip);
		set_irq_handler(irq, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
}
/*
 * media5200_irq_map - irq_host map hook for the Media5200 FPGA cascade.
 * Every FPGA source is active-low level triggered; mirror that in both
 * the genirq trigger type and the descriptor status flags.
 */
static int media5200_irq_map(struct irq_host *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	struct irq_desc *desc = irq_to_desc(virq);

	pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
	set_irq_chip_data(virq, &media5200_irq);
	set_irq_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq);
	set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	/* Replace any stale sense bits with level/low. */
	desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
	desc->status |= IRQ_TYPE_LEVEL_LOW | IRQ_LEVEL;

	return 0;
}
/*
 * xilinx_intc_map - irq_host map hook: choose the level or edge chip for
 * each hardware source according to the static sense table.
 */
static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
			   irq_hw_number_t irq)
{
	unsigned int sense = xilinx_intc_typetable[irq];

	set_irq_chip_data(virq, h->host_data);

	switch (sense) {
	case IRQ_TYPE_LEVEL_HIGH:
	case IRQ_TYPE_LEVEL_LOW:
		set_irq_chip_and_handler(virq, &xilinx_intc_level_irqchip,
					 handle_level_irq);
		break;
	default:
		set_irq_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
					 handle_edge_irq);
		break;
	}
	return 0;
}
/*
 * cpu_claim_irq - claim a CPU interrupt line for an external chip.
 * Returns -EBUSY if the line already has an action or does not belong
 * to the CPU interrupt type; 0 otherwise.
 */
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	/* Refuse lines already in use or not owned by the CPU PIC. */
	if (irq_desc[irq].action || irq_desc[irq].chip != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		set_irq_chip_and_handler(irq, type, handle_percpu_irq);
		set_irq_chip_data(irq, data);
		cpu_unmask_irq(irq);
	}
	return 0;
}
static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip) { unsigned int first_irq; int i; first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base); for (i = first_irq; i < first_irq + NMK_GPIO_PER_CHIP; i++) { set_irq_chip(i, &nmk_gpio_irq_chip); set_irq_handler(i, handle_edge_irq); set_irq_flags(i, IRQF_VALID); set_irq_chip_data(i, nmk_chip); } set_irq_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler); set_irq_data(nmk_chip->parent_irq, nmk_chip); return 0; }
/*
 * tc6393xb_detach_irq - tear down the tc6393xb interrupt demux: unhook
 * the cascade first, then clear every demuxed line.
 */
static void tc6393xb_detach_irq(struct platform_device *dev)
{
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	unsigned int irq;
	unsigned int first = tc6393xb->irq_base;

	/* Detach the cascade so no demux runs during teardown. */
	set_irq_chained_handler(tc6393xb->irq, NULL);
	set_irq_data(tc6393xb->irq, NULL);

	for (irq = first; irq < first + TC6393XB_NR_IRQS; irq++) {
		set_irq_flags(irq, 0);
		set_irq_chip(irq, NULL);
		set_irq_chip_data(irq, NULL);
	}
}
/*
 * init_mux_irq - register @count muxed (secondary) ICU interrupt lines
 * starting at Linux irq @mux_start, leaving them all masked.
 */
static void init_mux_irq(struct icu_mux_irq_chip_data *chip_data,
			 int mux_start, int count)
{
	int irq;
	u32 r;

	/* mask all the irqs */
	r = __raw_readl(chip_data->mask) | ((1 << count) - 1);
	__raw_writel(r, chip_data->mask);

	for (irq = mux_start; count > 0; irq++, count--) {
		set_irq_chip(irq, &icu_mux_irq_chip);
		set_irq_chip_data(irq, chip_data);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		set_irq_handler(irq, handle_level_irq);
	}
}
/*
 * t7l66xb_detach_irq - tear down the t7l66xb interrupt demux: unhook
 * the cascade first, then clear every demuxed line.
 */
static void t7l66xb_detach_irq(struct platform_device *dev)
{
	struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
	unsigned int irq;
	unsigned int first = t7l66xb->irq_base;

	/* Detach the cascade so no demux runs during teardown. */
	set_irq_chained_handler(t7l66xb->irq, NULL);
	set_irq_data(t7l66xb->irq, NULL);

	for (irq = first; irq < first + T7L66XB_NR_IRQS; irq++) {
#ifdef CONFIG_ARM
		set_irq_flags(irq, 0);
#endif
		set_irq_chip(irq, NULL);
		set_irq_chip_data(irq, NULL);
	}
}