/*
 * Route the VMI alarm through the local APIC timer (LVTT) on the boot CPU,
 * delivering timer events via IRQ 0.  Clockevents are suspended and local
 * interrupts disabled around the rewiring so no timer tick can arrive while
 * the IRQ 0 chip/handler and LVTT vector are being switched over.
 */
void __devinit vmi_time_bsp_init(void)
{
	/*
	 * On APIC systems, we want local timers to fire on each cpu.  We do
	 * this by programming LVTT to deliver timer events to the IRQ handler
	 * for IRQ-0, since we can't re-use the APIC local timer handler
	 * without interfering with that code.
	 */
	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	local_irq_disable();
#ifdef CONFIG_SMP
	/*
	 * XXX handle_percpu_irq only defined for SMP; we need to switch over
	 * to using it, since this is a local interrupt, which each CPU must
	 * handle individually without locking out or dropping simultaneous
	 * local timers on other CPUs.  We also don't want to trigger the
	 * quirk workaround code for interrupts which gets invoked from
	 * handle_percpu_irq via eoi, so we use our own IRQ chip.
	 */
	set_irq_chip_and_handler_name(0, &vmi_chip, handle_percpu_irq, "lvtt");
#else
	set_irq_chip_and_handler_name(0, &vmi_chip, handle_edge_irq, "lvtt");
#endif
	vmi_wiring = VMI_ALARM_WIRED_LVTT;
	apic_write(APIC_LVTT, vmi_get_timer_vector());
	local_irq_enable();
	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
}
/*
 * Set up the SE7722 FPGA interrupt mux: dynamically allocate a virtual irq
 * for every FPGA input, give each one the level-triggered FPGA chip, and
 * chain the demux handler onto the two CPU irq lines feeding the FPGA.
 */
void __init init_se7722_IRQ(void)
{
	int idx;

	ctrl_outw(0, IRQ01_MASK);		/* disable all irqs */
	ctrl_outw(0x2000, 0xb03fffec);		/* mrshpc irq enable */

	for (idx = 0; idx < SE7722_FPGA_IRQ_NR; idx++) {
		int virq = create_irq();

		/* Out of virtual irqs -- give up without chaining. */
		if (virq < 0)
			return;

		se7722_fpga_irq[idx] = virq;
		set_irq_chip_and_handler_name(virq, &se7722_irq_chip,
					      handle_level_irq, "level");
		/* Remember which FPGA input this virq represents. */
		set_irq_chip_data(virq, (void *)idx);
	}

	set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux);
	set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
	set_irq_chained_handler(IRQ1_IRQ, se7722_irq_demux);
	set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
}
/* * The shift value is now the number of bits to shift, not the number of * bits/4. This is to make it easier to read the value directly from the * datasheets. The IPR address is calculated using the ipr_offset table. */ void register_ipr_controller(struct ipr_desc *desc) { int i; desc->chip.mask = disable_ipr_irq; desc->chip.unmask = enable_ipr_irq; desc->chip.mask_ack = disable_ipr_irq; for (i = 0; i < desc->nr_irqs; i++) { struct ipr_data *p = desc->ipr_data + i; struct irq_desc *irq_desc; BUG_ON(p->ipr_idx >= desc->nr_offsets); BUG_ON(!desc->ipr_offsets[p->ipr_idx]); irq_desc = irq_to_desc_alloc_node(p->irq, numa_node_id()); if (unlikely(!irq_desc)) { printk(KERN_INFO "can not get irq_desc for %d\n", p->irq); continue; } disable_irq_nosync(p->irq); set_irq_chip_and_handler_name(p->irq, &desc->chip, handle_level_irq, "level"); set_irq_chip_data(p->irq, p); disable_ipr_irq(p->irq); } }
/*
 * Wire up the ILC interrupt demultiplexer: register every ILC input as a
 * level-triggered interrupt (active high by default), then hook the demux
 * handler onto each parent interrupt the platform device provides.
 */
static void __init ilc_demux_init(struct platform_device *pdev)
{
	struct ilc *ilc = platform_get_drvdata(pdev);
	int input, idx, parent;

	/* Default all interrupts to active high. */
	for (input = 0; input < ilc->inputs_num; input++) {
		int virq = ilc->first_irq + input;

		ILC_SET_TRIGMODE(ilc->base, input, ILC_TRIGGERMODE_HIGH);

		/* SIM: Should we do the masking etc in ilc_irq_demux and
		 * then change this to handle_simple_irq? */
		set_irq_chip_and_handler_name(virq, &ilc_chip,
					      handle_level_irq, ilc->name);
		set_irq_chip_data(virq, ilc);
	}

	/* Chain the demux onto every parent irq declared for this device. */
	for (idx = 0; (parent = platform_get_irq(pdev, idx)) >= 0; idx++) {
		set_irq_chip_and_handler(parent, &dummy_irq_chip,
					 ilc_irq_demux);
		set_irq_data(parent, ilc);
	}
}
/* Register the level-triggered EMMA2RH controller for every EMMA2RH irq. */
void emma2rh_irq_init(void)
{
	u32 n;

	for (n = 0; n < NUM_EMMA2RH_IRQ; n++)
		set_irq_chip_and_handler_name(EMMA2RH_IRQ_BASE + n,
					      &emma2rh_irq_controller,
					      handle_level_irq, "level");
}
/* Register the edge-triggered GPIO controller for every EMMA2RH GPIO irq. */
void emma2rh_gpio_irq_init(void)
{
	u32 n;

	for (n = 0; n < NUM_EMMA2RH_IRQ_GPIO; n++)
		set_irq_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + n,
					      &emma2rh_gpio_irq_controller,
					      handle_edge_irq, "edge");
}
/*
 * Register the X3 baseboard GPIO chip and allocate one virtual irq per GPIO,
 * all demuxed off a single ILSEL key interrupt.
 *
 * Returns 0 on success or a negative errno; on failure, every resource
 * acquired so far (irqs, gpiochip, ILSEL slot) is released.
 */
int __init x3proto_gpio_setup(void)
{
	int ilsel;
	int ret, i;

	ilsel = ilsel_enable(ILSEL_KEY);
	if (unlikely(ilsel < 0))
		return ilsel;

	ret = gpiochip_add(&x3proto_gpio_chip);
	if (unlikely(ret))
		goto err_gpio;

	for (i = 0; i < NR_BASEBOARD_GPIOS; i++) {
		unsigned long flags;
		int irq = create_irq();

		if (unlikely(irq < 0)) {
			ret = -EINVAL;
			goto err_irq;
		}

		spin_lock_irqsave(&x3proto_gpio_lock, flags);
		x3proto_gpio_irq_map[i] = irq;
		set_irq_chip_and_handler_name(irq, &dummy_irq_chip,
					      handle_simple_irq, "gpio");
		spin_unlock_irqrestore(&x3proto_gpio_lock, flags);
	}

	pr_info("registering '%s' support, handling GPIOs %u -> %u, "
		"bound to IRQ %u\n",
		x3proto_gpio_chip.label, x3proto_gpio_chip.base,
		x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio,
		ilsel);

	set_irq_chained_handler(ilsel, x3proto_gpio_irq_handler);
	set_irq_wake(ilsel, 1);

	return 0;

err_irq:
	/*
	 * Tear down only the irqs actually created: entry 'i' is the one
	 * that just failed and was never written to the map, so start the
	 * unwind at i - 1.
	 */
	for (--i; i >= 0; --i)
		if (x3proto_gpio_irq_map[i])
			destroy_irq(x3proto_gpio_irq_map[i]);

	/*
	 * Do not assign gpiochip_remove()'s result to 'ret': doing so
	 * clobbered the -EINVAL error and made a failed setup report
	 * success whenever the removal itself succeeded.
	 */
	if (unlikely(gpiochip_remove(&x3proto_gpio_chip)))
		pr_err("Failed deregistering GPIO\n");

err_gpio:
	synchronize_irq(ilsel);
	ilsel_disable(ILSEL_KEY);

	return ret;
}
/*
 * Initialize the LANDISK interrupt lines (5..13): attach the level-triggered
 * LANDISK chip to each, enable it at the board level, and clear any latched
 * power-button interrupt.
 */
void __init init_landisk_IRQ(void)
{
	int irq;

	for (irq = 5; irq < 14; irq++) {
		/* Keep the line quiet while the chip/handler is swapped in. */
		disable_irq_nosync(irq);
		set_irq_chip_and_handler_name(irq, &landisk_irq_chip,
					      handle_level_irq, "level");
		enable_landisk_irq(irq);
	}

	/* Ack any pending power-button interrupt. */
	__raw_writeb(0x00, PA_PWRINT_CLR);
}
/*
 * Compose and program the MSI message for a DMAR (IOMMU fault) interrupt
 * and register it as edge-triggered.  Returns 0 on success or the negative
 * error from message composition.
 */
int arch_setup_dmar_msi(unsigned int irq)
{
	struct msi_msg msg;
	int ret = msi_compose_msg(NULL, irq, &msg);

	if (ret < 0)
		return ret;

	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");

	return 0;
}
/*
 * Initialize the HARP external interrupt lines: silence everything first,
 * then register each line with its own level-triggered chip, leaving it
 * masked until requested.
 */
void __init harp_init_irq(void)
{
	int i;

	disable_all_interrupts();

	for (i = 0; i < NUM_EXTERNAL_IRQS; i++) {
		disable_irq_nosync(i);
		set_irq_chip_and_handler_name(i, &harp_chips[i],
					      handle_level_irq, "level");
		disable_harp_irq(i);
	}
}
/*
 * Program the MSC01 interrupt controller: map its register window, reset it,
 * then configure each entry of the imp[] map as edge- or level-triggered,
 * and finally enable interrupt generation.
 */
void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
{
	_icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);

	/* Reset interrupt controller - initialises all registers to 0 */
	MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);

	board_bind_eic_interrupt = &msc_bind_eic_interrupt;

	/*
	 * NOTE(review): the '>= 0' bound walks nirq + 1 entries of imp[].
	 * Confirm callers pass the index of the LAST entry rather than a
	 * count, otherwise this reads one element past the map.
	 */
	for (; nirq >= 0; nirq--, imp++) {
		int n = imp->im_irq;

		switch (imp->im_type) {
		case MSC01_IRQ_EDGE:
			set_irq_chip_and_handler_name(irqbase + n,
						      &msc_edgeirq_type,
						      handle_edge_irq, "edge");
			/* Without an external controller (veic) the level
			 * bits are programmed alongside the edge bit. */
			if (cpu_has_veic)
				MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
			else
				MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
			break;
		case MSC01_IRQ_LEVEL:
			set_irq_chip_and_handler_name(irqbase+n,
						      &msc_levelirq_type,
						      handle_level_irq, "level");
			if (cpu_has_veic)
				MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
			else
				MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);
			/* No default case: entries with any other im_type
			 * are silently skipped. */
		}
	}

	irq_base = irqbase;

	/* Enable interrupt generation */
	MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT);
}
/*
 * Set up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except vector 128, reserved for system calls), and tell the
 * generic irq layer that every interrupt is driven by our level-based
 * lguest interrupt controller.
 */
static void __init lguest_init_IRQ(void)
{
	unsigned int irq;

	for (irq = 0; irq < LGUEST_IRQS; irq++) {
		int vector = FIRST_EXTERNAL_VECTOR + irq;

		/* Leave the system-call vector alone. */
		if (vector == SYSCALL_VECTOR)
			continue;

		set_intr_gate(vector, interrupt[irq]);
		set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
					      handle_level_irq, "level");
	}

	/* Required for 4k stacks: separate stacks for hard and soft irqs. */
	irq_ctx_init(smp_processor_id());
}
/*
 * Set up the SE7722 FPGA interrupts at a fixed virq base: register each
 * input level-triggered, then chain the FPGA demux onto the two CPU irq
 * lines, both active-low level-sensitive.
 */
void __init init_se7722_IRQ(void)
{
	int n;

	ctrl_outw(0, IRQ01_MASK);		/* disable all irqs */
	ctrl_outw(0x2000, 0xb03fffec);		/* mrshpc irq enable */

	for (n = 0; n < SE7722_FPGA_IRQ_NR; n++)
		set_irq_chip_and_handler_name(SE7722_FPGA_IRQ_BASE + n,
					      &se7722_irq_chip,
					      handle_level_irq, "level");

	set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux);
	set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
	set_irq_chained_handler(IRQ1_IRQ, se7722_irq_demux);
	set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
}
/*
 * Initialize the BCSR (board control/status register) interrupt block:
 * quiesce the hardware, register every irq in [csc_start, csc_end] as
 * level-triggered, and chain the demux handler onto hook_irq.
 */
void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
{
	unsigned int n;

	/* mask & disable & ack all */
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTCLR);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
	wmb();		/* make sure the quiesce writes hit the device */

	/* Remember where the CSC irq range starts for the demux handler. */
	bcsr_csc_base = csc_start;

	for (n = csc_start; n <= csc_end; n++)
		set_irq_chip_and_handler_name(n, &bcsr_irq_type,
					      handle_level_irq, "level");

	set_irq_chained_handler(hook_irq, bcsr_csc_handler);
}
static int stm_gpio_irq_init(int port_no) { struct stm_gpio_pin *pin; unsigned int pin_irq; int pin_no; pin = stm_gpio_ports[port_no].pins; pin_irq = stm_gpio_irq_base + (port_no * STM_GPIO_PINS_PER_PORT); for (pin_no = 0; pin_no < STM_GPIO_PINS_PER_PORT; pin_no++) { set_irq_chip_and_handler_name(pin_irq, &stm_gpio_irq_chip, handle_simple_irq, "stm_gpio"); set_irq_chip_data(pin_irq, pin); stm_gpio_irq_chip_type(pin_irq, IRQ_TYPE_LEVEL_HIGH); pin++; pin_irq++; } return 0; }
/* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller. */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = 0; i < LGUEST_IRQS; i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;

		/* Some systems map "vectors" to interrupts weirdly.  Lguest has
		 * a straightforward 1 to 1 mapping, so force that here. */
		__get_cpu_var(vector_irq)[vector] = i;
		if (vector != SYSCALL_VECTOR) {
			/*
			 * NOTE(review): this indexes the gate-stub array by
			 * 'vector', while the sibling variant of this function
			 * in this file uses interrupt[i] (i.e. vector -
			 * FIRST_EXTERNAL_VECTOR).  Confirm which indexing the
			 * interrupt[] array in this tree expects -- one of the
			 * two is off by FIRST_EXTERNAL_VECTOR.
			 */
			set_intr_gate(vector, interrupt[vector]);
			set_irq_chip_and_handler_name(i, &lguest_irq_controller,
						      handle_level_irq,
						      "level");
		}
	}

	/* This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts. */
	irq_ctx_init(smp_processor_id());
}
/*
 * Initialize the legacy ISA interrupts: bring up the BSP APIC and the 8259A,
 * then reset each legacy irq_desc to a clean, disabled state and attach the
 * 8259A chip with the classic "XT" level handler.
 */
static void __init init_ISA_irqs(void)
{
	int irq;

	init_bsp_APIC();
	init_8259A(0);

	/*
	 * 16 old-style INTA-cycle interrupts:
	 */
	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);

		/* Start from a known state: masked, no action installed. */
		desc->status = IRQ_DISABLED;
		desc->action = NULL;
		desc->depth = 1;

		set_irq_chip_and_handler_name(irq, &i8259A_chip,
					      handle_level_irq, "XT");
	}
}
/* * The shift value is now the number of bits to shift, not the number of * bits/4. This is to make it easier to read the value directly from the * datasheets. The IPR address is calculated using the ipr_offset table. */ void register_ipr_controller(struct ipr_desc *desc) { int i; desc->chip.mask = disable_ipr_irq; desc->chip.unmask = enable_ipr_irq; desc->chip.mask_ack = disable_ipr_irq; for (i = 0; i < desc->nr_irqs; i++) { struct ipr_data *p = desc->ipr_data + i; BUG_ON(p->ipr_idx >= desc->nr_offsets); BUG_ON(!desc->ipr_offsets[p->ipr_idx]); disable_irq_nosync(p->irq); set_irq_chip_and_handler_name(p->irq, &desc->chip, handle_level_irq, "level"); set_irq_chip_data(p->irq, p); disable_ipr_irq(p->irq); } }
/*
 * Initialize the legacy PIC interrupts: bring up the BSP APIC where one is
 * configured, initialize the platform's legacy PIC, then reset each legacy
 * irq_desc and attach the 8259A chip with the classic "XT" level handler.
 */
void __init init_ISA_irqs(void)
{
	int irq;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	init_bsp_APIC();
#endif
	legacy_pic->init(0);

	/*
	 * 16 old-style INTA-cycle interrupts:
	 */
	for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);

		/* Reset to a known state: masked, no handler installed. */
		desc->status = IRQ_DISABLED;
		desc->action = NULL;
		desc->depth = 1;

		set_irq_chip_and_handler_name(irq, &i8259A_chip,
					      handle_level_irq, "XT");
	}
}
static int sparc64_setup_msi_irq(unsigned int *virt_irq_p, struct pci_dev *pdev, struct msi_desc *entry) { struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; const struct sparc64_msiq_ops *ops = pbm->msi_ops; struct msi_msg msg; int msi, err; u32 msiqid; *virt_irq_p = virt_irq_alloc(0, 0); err = -ENOMEM; if (!*virt_irq_p) goto out_err; set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq, handle_simple_irq, "MSI"); err = alloc_msi(pbm); if (unlikely(err < 0)) goto out_virt_irq_free; msi = err; msiqid = pick_msiq(pbm); err = ops->msi_setup(pbm, msiqid, msi, (entry->msi_attrib.is_64 ? 1 : 0)); if (err) goto out_msi_free; pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p; if (entry->msi_attrib.is_64) { msg.address_hi = pbm->msi64_start >> 32; msg.address_lo = pbm->msi64_start & 0xffffffff; } else {
/*
 * Initialize the 16 legacy ISA interrupts: optional BSP APIC bring-up, 8259A
 * init, then a clean irq_desc plus the 8259A "XT" level handler per line.
 */
void __init init_ISA_irqs (void)
{
	int irq;

#ifdef CONFIG_X86_LOCAL_APIC
	init_bsp_APIC();
#endif
	init_8259A(0);

	/*
	 * 16 old-style INTA-cycle interrupts:
	 */
	for (irq = 0; irq < 16; irq++) {
		/* first time call this irq_desc */
		struct irq_desc *desc = irq_to_desc(irq);

		desc->status = IRQ_DISABLED;
		desc->action = NULL;
		desc->depth = 1;

		set_irq_chip_and_handler_name(irq, &i8259A_chip,
					      handle_level_irq, "XT");
	}
}
void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs) { int i; for (i = 0; i < nr_irqs; i++) { unsigned int irq = table[i].irq; if (!irq) irq = table[i].irq = i; /* could the IPR index be mapped, if not we ignore this */ if (!table[i].addr) { table[i].addr = map_ipridx_to_addr(table[i].ipr_idx); if (!table[i].addr) continue; } disable_irq_nosync(irq); set_irq_chip_and_handler_name(irq, &ipr_irq_chip, handle_level_irq, "level"); set_irq_chip_data(irq, &table[i]); enable_ipr_irq(irq); } }
/*
 * Reserve a single interrupt for an lguest device: make sure an irq_desc
 * exists (allocated on node 0), then attach the level-based lguest
 * interrupt controller to it.
 */
void lguest_setup_irq(unsigned int irq)
{
	/* The desc must exist before a chip/handler can be attached. */
	irq_to_desc_alloc_node(irq, 0);
	set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
				      handle_level_irq, "level");
}
/*
 * IP32 (SGI O2) interrupt setup: quiesce the CRIME and MACE interrupt
 * sources, then walk the whole irq range and attach the chip/handler that
 * matches each hardware block, install the error-interrupt actions, and
 * finally unmask the CPU interrupt lines.
 */
void __init arch_init_irq(void)
{
	unsigned int irq;

	/* Install our interrupt handler, then clear and disable all
	 * CRIME and MACE interrupts. */
	crime->imask = 0;
	crime->hard_int = 0;
	crime->soft_int = 0;
	mace->perif.ctrl.istat = 0;
	mace->perif.ctrl.imask = 0;

	mips_cpu_irq_init();

	for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) {
		switch (irq) {
		/* MACE video/graphics interrupts: level-triggered. */
		case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ:
			set_irq_chip_and_handler_name(irq,&ip32_mace_interrupt,
						      handle_level_irq,
						      "level");
			break;
		/* MACE PCI interrupts: level-triggered. */
		case MACEPCI_SCSI0_IRQ ... MACEPCI_SHARED2_IRQ:
			set_irq_chip_and_handler_name(irq,
						      &ip32_macepci_interrupt,
						      handle_level_irq,
						      "level");
			break;
		/* CRIME CPU/memory error interrupts: level-triggered. */
		case CRIME_CPUERR_IRQ:
		case CRIME_MEMERR_IRQ:
			set_irq_chip_and_handler_name(irq,
						      &crime_level_interrupt,
						      handle_level_irq,
						      "level");
			break;
		/* CRIME GBE / rendering-engine / soft / VICE: edge. */
		case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ:
		case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ:
		case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ:
		case CRIME_VICE_IRQ:
			set_irq_chip_and_handler_name(irq,
						      &crime_edge_interrupt,
						      handle_edge_irq, "edge");
			break;
		/* A few MACE ISA sources are edge-triggered... */
		case MACEISA_PARALLEL_IRQ:
		case MACEISA_SERIAL1_TDMAPR_IRQ:
		case MACEISA_SERIAL2_TDMAPR_IRQ:
			set_irq_chip_and_handler_name(irq,
						      &ip32_maceisa_edge_interrupt,
						      handle_edge_irq, "edge");
			break;
		/* ...everything else on MACE ISA is level-triggered. */
		default:
			set_irq_chip_and_handler_name(irq,
						      &ip32_maceisa_level_interrupt,
						      handle_level_irq,
						      "level");
			break;
		}
	}

	/* Install the memory/CPU error handlers before unmasking anything. */
	setup_irq(CRIME_MEMERR_IRQ, &memerr_irq);
	setup_irq(CRIME_CPUERR_IRQ, &cpuerr_irq);

#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)
	/* Unmask all six CPU interrupt lines. */
	change_c0_status(ST0_IM, ALLINTS);
}
/*
 * Probe the device tree for a Xilinx interrupt controller, map its
 * registers, validate the per-input trigger-kind mask, and register every
 * input with an edge or level flow handler as indicated by the mask.
 */
void __init init_IRQ(void)
{
	u32 i, j, intr_type;
	struct device_node *intc = NULL;
#ifdef CONFIG_SELFMOD_INTC
	unsigned int intc_baseaddr = 0;
	static int arr_func[] = {
				(int)&get_irq,
				(int)&intc_enable_or_unmask,
				(int)&intc_disable_or_mask,
				(int)&intc_mask_ack,
				(int)&intc_ack,
				(int)&intc_end,
				0
			};
#endif
	/* Compatible strings, newest binding first. */
	static char *intc_list[] = {
				"xlnx,xps-intc-1.00.a",
				"xlnx,opb-intc-1.00.c",
				"xlnx,opb-intc-1.00.b",
				"xlnx,opb-intc-1.00.a",
				NULL
			};

	for (j = 0; intc_list[j] != NULL; j++) {
		intc = of_find_compatible_node(NULL, NULL, intc_list[j]);
		if (intc)
			break;
	}
	BUG_ON(!intc);

	intc_baseaddr = *(int *) of_get_property(intc, "reg", NULL);
	intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE);
	nr_irq = *(int *) of_get_property(intc, "xlnx,num-intr-inputs", NULL);

	intr_type = *(int *) of_get_property(intc, "xlnx,kind-of-intr", NULL);
	/*
	 * 'kind-of-intr' is a bitmask with one bit per interrupt input, so
	 * any bit at position >= nr_irq means the device tree is
	 * inconsistent.  The previous check compared against
	 * (1 << (nr_irq + 1)), which let a stray bit at position nr_irq
	 * through and invoked undefined behaviour for nr_irq == 31; the
	 * shift form avoids both.
	 */
	if (intr_type >> nr_irq)
		printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");

#ifdef CONFIG_SELFMOD_INTC
	selfmod_function((int *) arr_func, intc_baseaddr);
#endif
	printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
		intc_list[j], intc_baseaddr, nr_irq, intr_type);

	/*
	 * Disable all external interrupts until they are
	 * explicity requested.
	 */
	out_be32(intc_baseaddr + IER, 0);

	/* Acknowledge any pending interrupts just in case. */
	out_be32(intc_baseaddr + IAR, 0xffffffff);

	/* Turn on the Master Enable. */
	out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);

	for (i = 0; i < nr_irq; ++i) {
		if (intr_type & (0x00000001 << i)) {
			set_irq_chip_and_handler_name(i, &intc_dev,
				handle_edge_irq, intc_dev.name);
			irq_desc[i].status &= ~IRQ_LEVEL;
		} else {
			set_irq_chip_and_handler_name(i, &intc_dev,
				handle_level_irq, intc_dev.name);
			irq_desc[i].status |= IRQ_LEVEL;
		}
	}
}
/*
 * Probe the Intel PMIC GPIO expander: validate platform data, map the
 * GPIOINT SRAM register, register the gpio_chip, and set up the eight
 * demuxed PMIC irqs behind the chained parent interrupt.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released.
 */
static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq = platform_get_irq(pdev, 0);
	struct intel_pmic_gpio_platform_data *pdata = dev->platform_data;

	struct pmic_gpio *pg;
	int retval;
	int i;

	if (irq < 0) {
		dev_dbg(dev, "no IRQ line\n");
		return -EINVAL;
	}

	if (!pdata || !pdata->gpio_base || !pdata->irq_base) {
		dev_dbg(dev, "incorrect or missing platform data\n");
		return -EINVAL;
	}

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return -ENOMEM;

	dev_set_drvdata(dev, pg);

	pg->irq = irq;
	/* setting up SRAM mapping for GPIOINT register */
	pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
	if (!pg->gpiointr) {
		printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__);
		retval = -EINVAL;
		goto err2;
	}
	pg->irq_base = pdata->irq_base;
	pg->chip.label = "intel_pmic";
	pg->chip.direction_input = pmic_gpio_direction_input;
	pg->chip.direction_output = pmic_gpio_direction_output;
	pg->chip.get = pmic_gpio_get;
	pg->chip.set = pmic_gpio_set;
	pg->chip.to_irq = pmic_gpio_to_irq;
	pg->chip.base = pdata->gpio_base;
	pg->chip.ngpio = NUM_GPIO;
	pg->chip.can_sleep = 1;
	/* Was assigned twice; the redundant second assignment is removed. */
	pg->chip.dev = dev;

	INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work);
	spin_lock_init(&pg->irqtypes.lock);

	retval = gpiochip_add(&pg->chip);
	if (retval) {
		printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
		goto err;
	}

	/* Chain the demux on the parent irq, then set up the child irqs. */
	set_irq_data(pg->irq, pg);
	set_irq_chained_handler(pg->irq, pmic_irq_handler);
	for (i = 0; i < 8; i++) {
		set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
					handle_simple_irq, "demux");
		set_irq_chip_data(i + pg->irq_base, pg);
	}
	return 0;
err:
	iounmap(pg->gpiointr);
err2:
	kfree(pg);
	return retval;
}
/*
 * Configure the trigger mode of one Alchemy Au1x00 interrupt line.  The two
 * interrupt controllers encode the mode in three configuration registers
 * (CFG2:CFG1:CFG0, one bit per line); each mode below sets/clears the three
 * bits accordingly and installs the matching flow handler.  Returns 0 on
 * success or -EINVAL for an unsupported flow_type.
 */
static int au1x_ic_settype(unsigned int irq, unsigned int flow_type)
{
	struct irq_chip *chip;
	unsigned long icr[6];
	unsigned int bit, ic;
	int ret;

	/* Figure out which of the two controllers owns this line. */
	if (irq >= AU1000_INTC1_INT_BASE) {
		bit = irq - AU1000_INTC1_INT_BASE;
		chip = &au1x_ic1_chip;
		ic = 1;
	} else {
		bit = irq - AU1000_INTC0_INT_BASE;
		chip = &au1x_ic0_chip;
		ic = 0;
	}

	/* Each controller handles 32 lines. */
	if (bit > 31)
		return -EINVAL;

	/* icr[0..2]: CFG0/CFG1/CFG2 "set" registers of the owning
	 * controller; icr[3..5]: the corresponding "clear" registers. */
	icr[0] = ic ? IC1_CFG0SET : IC0_CFG0SET;
	icr[1] = ic ? IC1_CFG1SET : IC0_CFG1SET;
	icr[2] = ic ? IC1_CFG2SET : IC0_CFG2SET;
	icr[3] = ic ? IC1_CFG0CLR : IC0_CFG0CLR;
	icr[4] = ic ? IC1_CFG1CLR : IC0_CFG1CLR;
	icr[5] = ic ? IC1_CFG2CLR : IC0_CFG2CLR;

	ret = 0;

	switch (flow_type) {	/* cfgregs 2:1:0 */
	case IRQ_TYPE_EDGE_RISING:	/* 0:0:1 */
		au_writel(1 << bit, icr[5]);
		au_writel(1 << bit, icr[4]);
		au_writel(1 << bit, icr[0]);
		set_irq_chip_and_handler_name(irq, chip,
					      handle_edge_irq, "riseedge");
		break;
	case IRQ_TYPE_EDGE_FALLING:	/* 0:1:0 */
		au_writel(1 << bit, icr[5]);
		au_writel(1 << bit, icr[1]);
		au_writel(1 << bit, icr[3]);
		set_irq_chip_and_handler_name(irq, chip,
					      handle_edge_irq, "falledge");
		break;
	case IRQ_TYPE_EDGE_BOTH:	/* 0:1:1 */
		au_writel(1 << bit, icr[5]);
		au_writel(1 << bit, icr[1]);
		au_writel(1 << bit, icr[0]);
		set_irq_chip_and_handler_name(irq, chip,
					      handle_edge_irq, "bothedge");
		break;
	case IRQ_TYPE_LEVEL_HIGH:	/* 1:0:1 */
		au_writel(1 << bit, icr[2]);
		au_writel(1 << bit, icr[4]);
		au_writel(1 << bit, icr[0]);
		set_irq_chip_and_handler_name(irq, chip,
					      handle_level_irq, "hilevel");
		break;
	case IRQ_TYPE_LEVEL_LOW:	/* 1:1:0 */
		au_writel(1 << bit, icr[2]);
		au_writel(1 << bit, icr[1]);
		au_writel(1 << bit, icr[3]);
		set_irq_chip_and_handler_name(irq, chip,
					      handle_level_irq, "lowlevel");
		break;
	case IRQ_TYPE_NONE:		/* 0:0:0 */
		au_writel(1 << bit, icr[5]);
		au_writel(1 << bit, icr[4]);
		au_writel(1 << bit, icr[3]);
		/* set at least chip so we can call set_irq_type() on it */
		set_irq_chip(irq, chip);
		break;
	default:
		ret = -EINVAL;
	}
	au_sync();	/* flush the register writes to the controller */

	return ret;
}