/*
 * Release the managed Linux IRQ backing codec interrupt @irq and mark the
 * interrupt as masked again in the shadow mask. Always returns 0.
 */
int wcd9xxx_spmi_free_irq(int irq, void *priv)
{
	int reg = BIT_BYTE(irq);

	devm_free_irq(&map.spmi[reg]->dev, map.linuxirq[irq], priv);
	/* Re-mask in the shadow copy so the handler ignores this bit. */
	map.mask[reg] |= BYTE_BIT_MASK(irq);
	return 0;
}
/*
 * Look up the SPMI IRQ for codec interrupt @irq by name, request it as a
 * threaded, one-shot IRQ, record @handler in the dispatch table, unmask the
 * interrupt in the shadow mask and enable it as a wakeup source.
 * Returns 0 on success or the negative error from the request.
 */
int wcd9xxx_spmi_request_irq(int irq, irq_handler_t handler, const char *name,
			     void *priv)
{
	int reg = BIT_BYTE(irq);
	struct device *dev = &map.spmi[reg]->dev;
	int rc;

	map.linuxirq[irq] = spmi_get_irq_byname(map.spmi[reg], NULL,
						irq_names[irq]);
	rc = devm_request_threaded_irq(dev, map.linuxirq[irq], NULL,
				       wcd9xxx_spmi_irq_handler,
				       IRQF_TRIGGER_RISING |
				       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				       name, priv);
	if (rc < 0) {
		dev_err(dev, "Can't request %d IRQ\n", irq);
		return rc;
	}

	dev_dbg(dev, "irq %d linuxIRQ: %d\n", irq, map.linuxirq[irq]);
	map.mask[reg] &= ~BYTE_BIT_MASK(irq);
	map.handler[irq] = handler;
	enable_irq_wake(map.linuxirq[irq]);
	return 0;
}
static void wcd9xxx_irq_dispatch(struct wcd9xxx_core_resource *wcd9xxx_res, struct intr_data *irqdata) { int irqbit = irqdata->intr_num; if (!wcd9xxx_res->codec_reg_write) { pr_err("%s: codec read/write callback not defined\n", __func__); return; } if (irqdata->clear_first) { wcd9xxx_nested_irq_lock(wcd9xxx_res); wcd9xxx_res->codec_reg_write(wcd9xxx_res, WCD9XXX_A_INTR_CLEAR0 + BIT_BYTE(irqbit), BYTE_BIT_MASK(irqbit)); if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) wcd9xxx_res->codec_reg_write(wcd9xxx_res, WCD9XXX_A_INTR_MODE, 0x02); handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit)); wcd9xxx_nested_irq_unlock(wcd9xxx_res); } else { wcd9xxx_nested_irq_lock(wcd9xxx_res); handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit)); wcd9xxx_res->codec_reg_write(wcd9xxx_res, WCD9XXX_A_INTR_CLEAR0 + BIT_BYTE(irqbit), BYTE_BIT_MASK(irqbit)); if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) wcd9xxx_res->codec_reg_write(wcd9xxx_res, WCD9XXX_A_INTR_MODE, 0x02); wcd9xxx_nested_irq_unlock(wcd9xxx_res); } }
void wcd9xxx_spmi_disable_irq(int irq) { pr_debug("%s: irqno =%d\n", __func__, irq); if ((irq >= 0) && (irq <= 7)) { snd_soc_update_bits(map.codec, MSM8X16_WCD_A_DIGITAL_INT_EN_SET, (0x01 << (irq)), 0x00); snd_soc_update_bits(map.codec, MSM8X16_WCD_A_DIGITAL_INT_EN_CLR, (0x01 << irq), (0x01 << irq)); } if ((irq > 7) && (irq <= 15)) { snd_soc_update_bits(map.codec, MSM8X16_WCD_A_ANALOG_INT_EN_SET, (0x01 << (irq - 8)), 0x00); snd_soc_update_bits(map.codec, MSM8X16_WCD_A_ANALOG_INT_EN_CLR, (0x01 << (irq - 8)), (0x01 << (irq - 8))); } if (map.mask[BIT_BYTE(irq)] & (BYTE_BIT_MASK(irq))) return; map.mask[BIT_BYTE(irq)] |= (BYTE_BIT_MASK(irq)); disable_irq_nosync(map.linuxirq[irq]); }
/*
 * irq_chip .irq_enable callback: clear the mask bit for this interrupt in
 * the shadow mask. Out-of-range register indices are ignored.
 */
static void wcd9xxx_irq_enable(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
		irq_data_get_irq_chip_data(data);
	int phyirq = virq_to_phyirq(wcd9xxx_res, data->irq);
	int reg = BIT_BYTE(phyirq);

	if (reg >= WCD9XXX_MAX_IRQ_REGS)
		return;

	wcd9xxx_res->irq_masks_cur[reg] &= ~BYTE_BIT_MASK(phyirq);
}
/*
 * Threaded handler for the codec's single upstream IRQ line. Reads the
 * interrupt status registers, masks out disabled bits, and dispatches each
 * pending bit to its nested handler via wcd9xxx_irq_dispatch().
 * Returns IRQ_HANDLED on success, IRQ_NONE if the sleep lock or the status
 * read fails (the latter also disables the upstream IRQ).
 */
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct wcd9xxx *wcd9xxx = data;
	int num_irq_regs = wcd9xxx_num_irq_regs(wcd9xxx);
	u8 status[num_irq_regs];

	/* Keep the system awake while interrupts are being serviced. */
	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx) == false)) {
		dev_err(wcd9xxx->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}
	ret = wcd9xxx_bulk_read(wcd9xxx, WCD9XXX_A_INTR_STATUS0,
				num_irq_regs, status);
	if (ret < 0) {
		dev_err(wcd9xxx->dev,
			"Failed to read interrupt status: %d\n", ret);
		/* Status is unreadable; stop the IRQ to avoid a storm. */
		dev_err(wcd9xxx->dev, "Disable irq %d\n", wcd9xxx->irq);
		disable_irq_wake(wcd9xxx->irq);
		disable_irq_nosync(wcd9xxx->irq);
		wcd9xxx_unlock_sleep(wcd9xxx);
		return IRQ_NONE;
	}
	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx->irq_masks_cur[i];
	/* Find out which interrupt was triggered and call that interrupt's
	 * handler function
	 */
	if (status[BIT_BYTE(WCD9XXX_IRQ_SLIMBUS)] &
	    BYTE_BIT_MASK(WCD9XXX_IRQ_SLIMBUS))
		wcd9xxx_irq_dispatch(wcd9xxx, WCD9XXX_IRQ_SLIMBUS);
	/* Since codec has only one hardware irq line which is shared by
	 * codec's different internal interrupts, so it's possible master irq
	 * handler dispatches multiple nested irq handlers after breaking
	 * order. Dispatch MBHC interrupts order to follow MBHC state
	 * machine's order */
	/* MBHC range is walked downward on purpose — do not reorder. */
	for (i = WCD9XXX_IRQ_MBHC_INSERTION;
	     i >= WCD9XXX_IRQ_MBHC_REMOVAL; i--) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	/* Remaining (non-MBHC, non-SLIMBUS) interrupts in ascending order. */
	for (i = WCD9XXX_IRQ_BG_PRECHARGE; i < wcd9xxx->num_irqs; i++) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	wcd9xxx_unlock_sleep(wcd9xxx);
	return IRQ_HANDLED;
}
/*
 * Look up and request the SPMI IRQ backing codec interrupt @irq, record
 * @handler for dispatch, unmask the interrupt and enable it as a wakeup
 * source. Returns 0 on success or a negative error code.
 *
 * Fixes vs. the previous revision:
 *  - irq_flags was computed by the VENDOR_EDIT block but never passed to
 *    devm_request_threaded_irq(), so the IRQF_NO_SUSPEND flag intended for
 *    "mbhc sw intr" was silently dropped. The flags are now actually used.
 *  - With VENDOR_EDIT undefined, the request call was commented out and rc
 *    was returned uninitialized. The non-vendor path now performs the
 *    request with the default flags.
 */
int wcd9xxx_spmi_request_irq(int irq, irq_handler_t handler, const char *name,
			     void *priv)
{
	int rc;
	unsigned long irq_flags;

#ifdef VENDOR_EDIT
	/* [email protected], 2015/07/01: keep "mbhc sw intr" armed across
	 * suspend so the system resumes on headset events.
	 * Note: strcmp() == 0 means the names match, so the NO_SUSPEND flag
	 * is applied only to "mbhc sw intr". */
	if (strcmp(name, "mbhc sw intr")) {
		irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			    IRQF_ONESHOT;
	} else {
		irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			    IRQF_ONESHOT | IRQF_NO_SUSPEND;
	}
#else
	irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
#endif /* VENDOR_EDIT */

	map.linuxirq[irq] = spmi_get_irq_byname(map.spmi[BIT_BYTE(irq)],
						NULL, irq_names[irq]);

	rc = devm_request_threaded_irq(&map.spmi[BIT_BYTE(irq)]->dev,
				       map.linuxirq[irq], NULL,
				       wcd9xxx_spmi_irq_handler,
				       irq_flags, name, priv);
	if (rc < 0) {
		dev_err(&map.spmi[BIT_BYTE(irq)]->dev,
			"Can't request %d IRQ\n", irq);
		return rc;
	}

	dev_dbg(&map.spmi[BIT_BYTE(irq)]->dev,
		"irq %d linuxIRQ: %d\n", irq, map.linuxirq[irq]);
	map.mask[BIT_BYTE(irq)] &= ~BYTE_BIT_MASK(irq);
	map.handler[irq] = handler;
	enable_irq_wake(map.linuxirq[irq]);
	return 0;
}
/* Return true iff bit @bitPos is set in @bs. @bitPos must be < bs->size. */
bool BitSet_isSet(const struct BitSet *bs, unsigned int bitPos)
{
	assert(bitPos < bs->size);
	return (bs->bits[BIT_BYTE(bitPos)] >> BIT_BIT(bitPos)) & 1;
}
/* Clear bit @bitPos in @bs. @bitPos must be < bs->size. */
void BitSet_clear(struct BitSet *bs, unsigned int bitPos)
{
	unsigned int byteIdx = BIT_BYTE(bitPos);

	assert(bitPos < bs->size);
	bs->bits[byteIdx] &= ~(1 << BIT_BIT(bitPos));
}
/*
 * irq_chip .irq_disable callback: set the mask bit for this interrupt in
 * the shadow mask.
 */
static void wcd9xxx_irq_disable(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int phyirq = virq_to_phyirq(wcd9xxx, data->irq);

	wcd9xxx->irq_masks_cur[BIT_BYTE(phyirq)] |= BYTE_BIT_MASK(phyirq);
}
static void wcd9xxx_irq_dispatch(struct wcd9xxx *wcd9xxx, int irqbit) { if ((irqbit <= WCD9XXX_IRQ_MBHC_INSERTION) && (irqbit >= WCD9XXX_IRQ_MBHC_REMOVAL)) { wcd9xxx_nested_irq_lock(wcd9xxx); wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_CLEAR0 + BIT_BYTE(irqbit), BYTE_BIT_MASK(irqbit)); if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_MODE, 0x02); handle_nested_irq(phyirq_to_virq(wcd9xxx, irqbit)); wcd9xxx_nested_irq_unlock(wcd9xxx); } else { wcd9xxx_nested_irq_lock(wcd9xxx); handle_nested_irq(phyirq_to_virq(wcd9xxx, irqbit)); wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_CLEAR0 + BIT_BYTE(irqbit), BYTE_BIT_MASK(irqbit)); if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_MODE, 0x02); wcd9xxx_nested_irq_unlock(wcd9xxx); } }
/*
 * One-time init of the SPMI IRQ map: mask every interrupt in the shadow
 * mask and set up the PM bookkeeping (lock, wait queue, QoS request).
 * Always returns 0.
 */
int wcd9xxx_spmi_irq_init(void)
{
	int i;

	/* Start with every codec interrupt masked. */
	for (i = 0; i < MAX_NUM_IRQS; i++)
		map.mask[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);

	mutex_init(&map.pm_lock);
	map.wlock_holders = 0;
	map.pm_state = WCD9XXX_PM_SLEEPABLE;
	init_waitqueue_head(&map.pm_wq);
	pm_qos_add_request(&map.pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	return 0;
}
/*
 * Set @len consecutive bits starting at bit @start in the bitmap @map,
 * addressing the bitmap byte-by-byte (little-endian bit order within each
 * byte). The first and last bytes are handled with partial masks; whole
 * bytes in between are set with ~0.
 */
static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
{
	/* Byte containing the first bit to set. */
	u8 *p = ((u8 *)map) + BIT_BYTE(start);
	/* One past the last bit position, used for the tail mask. */
	const unsigned int size = start + len;
	/* Bits available in the (possibly partial) first byte. */
	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);

	/* Fill every byte that is completely covered by the range. */
	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	/* Partial tail byte, if any bits remain. */
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}
static irqreturn_t wcd9xxx_spmi_irq_handler(int linux_irq, void *data) { int irq, i, j; unsigned long status[NUM_IRQ_REGS] = {0}; printk("%s\n", __func__); if (unlikely(wcd9xxx_spmi_lock_sleep() == false)) { pr_err("Failed to hold suspend\n"); return IRQ_NONE; } irq = get_irq_bit(linux_irq); if (irq == MAX_NUM_IRQS) return IRQ_HANDLED; status[BIT_BYTE(irq)] |= BYTE_BIT_MASK(irq); for (i = 0; i < NUM_IRQ_REGS; i++) { status[i] |= snd_soc_read(map.codec, BIT_BYTE(irq) * 0x100 + MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS); status[i] &= ~map.mask[i]; } for (i = 0; i < MAX_NUM_IRQS; i++) { j = get_order_irq(i); if ((status[BIT_BYTE(j)] & BYTE_BIT_MASK(j)) && ((map.handled[BIT_BYTE(j)] & BYTE_BIT_MASK(j)) == 0)) { map.handler[j](irq, data); map.handled[BIT_BYTE(j)] |= BYTE_BIT_MASK(j); } } map.handled[BIT_BYTE(irq)] &= ~BYTE_BIT_MASK(irq); wcd9xxx_spmi_unlock_sleep(); return IRQ_HANDLED; }
/*
 * Initialize the codec's interrupt controller: acquire the upstream IRQ,
 * set up the downstream nested IRQ domain, mask every interrupt in hardware
 * and in the shadow masks, program per-interrupt level registers, and
 * request the threaded upstream handler as a wake source.
 * Returns 0 on success, -EPROBE_DEFER if the upstream irq driver is not yet
 * ready, or a negative error (with all partial setup undone).
 */
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int i, ret;
	u8 irq_level[wcd9xxx_res->num_irq_regs];

	mutex_init(&wcd9xxx_res->irq_lock);
	mutex_init(&wcd9xxx_res->nested_irq_lock);
	wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
	if (!wcd9xxx_res->irq) {
		/* Upstream irq provider not bound yet; retry probe later. */
		pr_warn("%s: irq driver is not yet initialized\n", __func__);
		mutex_destroy(&wcd9xxx_res->irq_lock);
		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
		return -EPROBE_DEFER;
	}
	pr_debug("%s: probed irq %d\n", __func__, wcd9xxx_res->irq);

	/* Create the nested (downstream) irq mappings. */
	ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx_res);
	if (ret) {
		pr_err("%s: Failed to setup downstream IRQ\n", __func__);
		wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
		mutex_destroy(&wcd9xxx_res->irq_lock);
		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
		return ret;
	}

	/* Only irq 0 is level-high; see irq_level programming below. */
	wcd9xxx_res->irq_level_high[0] = true;
	memset(irq_level, 0, wcd9xxx_res->num_irq_regs);
	/* Mask everything in the shadow masks and build the level bytes. */
	for (i = 0; i < wcd9xxx_res->num_irqs; i++) {
		wcd9xxx_res->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx_res->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		irq_level[BIT_BYTE(i)] |= wcd9xxx_res->irq_level_high[i] <<
					  (i % BITS_PER_BYTE);
	}

	if (!wcd9xxx_res->codec_reg_write) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec Register write callback not defined\n",
			__func__);
		ret = -EINVAL;
		goto fail_irq_init;
	}

	/* Program level and mask registers for every interrupt bank. */
	for (i = 0; i < wcd9xxx_res->num_irq_regs; i++) {
		wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					     WCD9XXX_A_INTR_LEVEL0 + i,
					     irq_level[i]);
		wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					     WCD9XXX_A_INTR_MASK0 + i,
					     wcd9xxx_res->irq_masks_cur[i]);
	}

	ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   "wcd9xxx", wcd9xxx_res);
	if (ret != 0)
		dev_err(wcd9xxx_res->dev, "Failed to request IRQ %d: %d\n",
			wcd9xxx_res->irq, ret);
	else {
		ret = enable_irq_wake(wcd9xxx_res->irq);
		if (ret)
			dev_err(wcd9xxx_res->dev,
				"Failed to set wake interrupt on IRQ %d: %d\n",
				wcd9xxx_res->irq, ret);
		if (ret)
			/* Wake setup failed: release the irq we just got. */
			free_irq(wcd9xxx_res->irq, wcd9xxx_res);
	}

	if (ret)
		goto fail_irq_init;

	return ret;

fail_irq_init:
	/* Undo everything done above before returning the error. */
	dev_err(wcd9xxx_res->dev, "%s: Failed to init wcd9xxx irq\n",
		__func__);
	wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
	mutex_destroy(&wcd9xxx_res->irq_lock);
	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
	return ret;
}
/*
 * Threaded handler for the codec's single upstream IRQ line. Reads all
 * interrupt status registers, applies the shadow mask, and dispatches each
 * pending bit in the order given by the driver's interrupt table. If no
 * pending bit was handled at all, every status register is cleared as a
 * failsafe against an interrupt storm (rate-limited warning).
 * Returns IRQ_HANDLED, or IRQ_NONE on sleep-lock or read failure (the
 * latter also disables the upstream IRQ).
 */
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	u8 status[num_irq_regs], status1[num_irq_regs];

	/* Keep the system awake while interrupts are being serviced. */
	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->codec_bulk_read) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec Bulk Register read callback not supplied\n",
			__func__);
		goto err_disable_irq;
	}

	ret = wcd9xxx_res->codec_bulk_read(wcd9xxx_res,
					   WCD9XXX_A_INTR_STATUS0,
					   num_irq_regs, status);
	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
			"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}

	/* Drop masked interrupts from the pending set. */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];

	/* status1 tracks which pending bits get dispatched below. */
	memcpy(status1, status, sizeof(status1));

	/* Dispatch pending interrupts in interrupt-table order, clearing
	 * each dispatched bit from status1. */
	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
		    BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
				~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	/* status unchanged means nothing was dispatched: clear all status
	 * registers as a failsafe against an interrupt storm. */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}

		memset(status, 0xff, num_irq_regs);
		/* NOTE(review): codec_bulk_write return value is ignored
		 * here (the sibling copy of this function assigns it to ret
		 * but also does not act on it) — confirm intent. */
		wcd9xxx_res->codec_bulk_write(wcd9xxx_res,
					      WCD9XXX_A_INTR_CLEAR0,
					      num_irq_regs, status);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
						     WCD9XXX_A_INTR_MODE,
						     0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
	/* Codec is unreadable; stop the IRQ to avoid a storm. */
	dev_err(wcd9xxx_res->dev,
		"Disable irq %d\n", wcd9xxx_res->irq);

	disable_irq_wake(wcd9xxx_res->irq);
	disable_irq_nosync(wcd9xxx_res->irq);
	wcd9xxx_unlock_sleep(wcd9xxx_res);
	return IRQ_NONE;
}
/*
 * Threaded handler for the codec's single upstream IRQ line. Reads all
 * interrupt status registers, applies the shadow mask, and dispatches each
 * pending bit in the order given by the driver's interrupt table. If no
 * pending bit was handled at all, every status register is cleared as a
 * failsafe against an interrupt storm (rate-limited warning).
 * Returns IRQ_HANDLED, or IRQ_NONE on sleep-lock or read failure (the
 * latter also disables the upstream IRQ).
 */
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	u8 status[num_irq_regs], status1[num_irq_regs];

	/* Keep the system awake while interrupts are being serviced. */
	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->codec_bulk_read) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec Bulk Register read callback not supplied\n",
			__func__);
		goto err_disable_irq;
	}

	ret = wcd9xxx_res->codec_bulk_read(wcd9xxx_res,
					   WCD9XXX_A_INTR_STATUS0,
					   num_irq_regs, status);
	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
			"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];

	/* status1 tracks which pending bits get dispatched below. */
	memcpy(status1, status, sizeof(status1));

	/* Find out which interrupt was triggered and call that interrupt's
	 * handler function
	 *
	 * Since codec has only one hardware irq line which is shared by
	 * codec's different internal interrupts, so it's possible master irq
	 * handler dispatches multiple nested irq handlers after breaking
	 * order. Dispatch interrupts in the order that is maintained by
	 * the interrupt table.
	 */
	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
		    BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
				~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	/*
	 * As a failsafe if unhandled irq is found, clear it to prevent
	 * interrupt storm.
	 * Note that we can say there was an unhandled irq only when no irq
	 * handled by nested irq handler since Taiko supports qdsp as irqs'
	 * destination for few irqs. Therefore driver shouldn't clear pending
	 * irqs when few handled while few others not.
	 */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}

		memset(status, 0xff, num_irq_regs);
		/* NOTE(review): ret is assigned here but not checked —
		 * confirm whether a write failure should be handled. */
		ret = wcd9xxx_res->codec_bulk_write(wcd9xxx_res,
						    WCD9XXX_A_INTR_CLEAR0,
						    num_irq_regs, status);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
						     WCD9XXX_A_INTR_MODE,
						     0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
	/* Codec is unreadable; stop the IRQ to avoid a storm. */
	dev_err(wcd9xxx_res->dev,
		"Disable irq %d\n", wcd9xxx_res->irq);

	disable_irq_wake(wcd9xxx_res->irq);
	disable_irq_nosync(wcd9xxx_res->irq);
	wcd9xxx_unlock_sleep(wcd9xxx_res);
	return IRQ_NONE;
}
int wcd9xxx_irq_init(struct wcd9xxx *wcd9xxx) { int i, ret; u8 irq_level[wcd9xxx_num_irq_regs(wcd9xxx)]; mutex_init(&wcd9xxx->irq_lock); mutex_init(&wcd9xxx->nested_irq_lock); wcd9xxx->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx); if (!wcd9xxx->irq) { pr_warn("%s: irq driver is not yet initialized\n", __func__); mutex_destroy(&wcd9xxx->irq_lock); mutex_destroy(&wcd9xxx->nested_irq_lock); return -EPROBE_DEFER; } pr_debug("%s: probed irq %d\n", __func__, wcd9xxx->irq); /* Setup downstream IRQs */ ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx); if (ret) { pr_err("%s: Failed to setup downstream IRQ\n", __func__); wcd9xxx_irq_put_upstream_irq(wcd9xxx); mutex_destroy(&wcd9xxx->irq_lock); mutex_destroy(&wcd9xxx->nested_irq_lock); return ret; } /* All other wcd9xxx interrupts are edge triggered */ wcd9xxx->irq_level_high[0] = true; /* mask all the interrupts */ memset(irq_level, 0, wcd9xxx_num_irq_regs(wcd9xxx)); for (i = 0; i < wcd9xxx->num_irqs; i++) { wcd9xxx->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i); wcd9xxx->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i); irq_level[BIT_BYTE(i)] |= wcd9xxx->irq_level_high[i] << (i % BITS_PER_BYTE); } for (i = 0; i < wcd9xxx_num_irq_regs(wcd9xxx); i++) { /* Initialize interrupt mask and level registers */ wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_LEVEL0 + i, irq_level[i]); wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_MASK0 + i, wcd9xxx->irq_masks_cur[i]); } ret = request_threaded_irq(wcd9xxx->irq, NULL, wcd9xxx_irq_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "wcd9xxx", wcd9xxx); if (ret != 0) dev_err(wcd9xxx->dev, "Failed to request IRQ %d: %d\n", wcd9xxx->irq, ret); else { ret = enable_irq_wake(wcd9xxx->irq); if (ret == 0) { ret = device_init_wakeup(wcd9xxx->dev, 1); if (ret) { dev_err(wcd9xxx->dev, "Failed to init device" "wakeup : %d\n", ret); disable_irq_wake(wcd9xxx->irq); } } else dev_err(wcd9xxx->dev, "Failed to set wake interrupt on" " IRQ %d: %d\n", wcd9xxx->irq, ret); if (ret) free_irq(wcd9xxx->irq, wcd9xxx); } if (ret) { 
pr_err("%s: Failed to init wcd9xxx irq\n", __func__); wcd9xxx_irq_put_upstream_irq(wcd9xxx); mutex_destroy(&wcd9xxx->irq_lock); mutex_destroy(&wcd9xxx->nested_irq_lock); } return ret; }