/*
 * Put the controller into slave mode with the given addressing mode and
 * address, and optionally attach an event callback.
 *
 * \param id        I2C controller id
 * \param addr_mode 7-bit or 10-bit slave addressing mode
 * \param addr      slave address to respond to
 * \param cb        event callback; may be NULL, in which case no interrupt
 *                  handler is installed at all
 */
void hw_i2c_setup_slave(HW_I2C_ID id, HW_I2C_ADDRESSING addr_mode, uint16_t addr, hw_i2c_event_cb cb)
{
        struct i2c *bus = get_i2c(id);

        hw_i2c_set_mode(id, HW_I2C_MODE_SLAVE);
        hw_i2c_set_slave_addressing_mode(id, addr_mode);
        hw_i2c_set_slave_address(id, addr);
        bus->event_cb = cb;

        /*
         * No event callback means the application does not care about events,
         * so there is no point in servicing interrupts either.
         */
        if (cb == NULL) {
                return;
        }

        /*
         * Keep the RX threshold at its minimum so the interrupt fires as early
         * as possible; a higher threshold makes overruns likely.
         */
        hw_i2c_set_rx_fifo_threshold(id, 0);

        hw_i2c_register_int(id, intr_slave_handler,
                HW_I2C_INT_READ_REQUEST | HW_I2C_INT_RX_FULL |
                HW_I2C_INT_RX_OVERFLOW | HW_I2C_INT_TX_ABORT);
}
static void max77665_irq_sync_unlock(struct irq_data *data) { struct max77665_dev *max77665 = irq_get_chip_data(data->irq); int i; for (i = 0; i < MAX77665_IRQ_GROUP_NR; i++) { u8 mask_reg = max77665_mask_reg[i]; struct i2c_client *i2c = get_i2c(max77665, i); if (mask_reg == MAX77665_REG_INVALID || IS_ERR_OR_NULL(i2c)) continue; max77665->irq_masks_cache[i] = max77665->irq_masks_cur[i]; if (max77665->irq_masks_cur[i] != 0xff) { u8 reg_data; max77665_read_reg(i2c, MAX77665_PMIC_REG_INTSRC_MASK, ®_data); reg_data &= ~(1<<i); max77665_write_reg(i2c, MAX77665_PMIC_REG_INTSRC_MASK,reg_data); } else { u8 reg_data; max77665_read_reg(i2c, MAX77665_PMIC_REG_INTSRC_MASK, ®_data); reg_data |= (1<<i); max77665_write_reg(i2c, MAX77665_PMIC_REG_INTSRC_MASK,reg_data); } max77665_write_reg(i2c, max77665_mask_reg[i], max77665->irq_masks_cur[i]); } mutex_unlock(&max77665->irqlock); }
/*
 * Start an interrupt-driven master read of \p len bytes into \p data.
 *
 * \param id      I2C controller id
 * \param data    destination buffer (must remain valid until the callback)
 * \param len     number of bytes to read; must be non-zero
 * \param cb      completion callback; required for async operation
 * \param cb_data opaque pointer passed back to \p cb
 * \param flags   transfer flags (unused on the read path)
 *
 * \return number of bytes scheduled, or -1 on invalid arguments (in which
 *         case \p cb, if given, is invoked with success == false)
 *
 * Fix: dropped the spurious (ssize_t) cast on the return value — the
 * function is declared to return int, so the cast only introduced a silent
 * narrowing conversion without any effect.
 */
int hw_i2c_read_buffer_async(HW_I2C_ID id, uint8_t *data, uint16_t len,
        hw_i2c_complete_cb cb, void *cb_data, uint32_t flags)
{
        struct i2c *i2c = get_i2c(id);

        if (!cb || !data || len == 0) {
                /* let the caller know the transfer never started */
                if (cb) {
                        cb(id, cb_data, 0, false);
                }
                return -1;
        }

        i2c->rx_state.data = data;
        i2c->rx_state.len = len;
        i2c->rx_state.num = 0;
        i2c->rx_state.rr = 0;
        i2c->rx_state.cb = cb;
        i2c->rx_state.cb_data = cb_data;

        /* interrupt on the very first received byte to avoid RX overruns */
        hw_i2c_set_rx_fifo_threshold(id, 0);

        hw_i2c_reset_int_tx_abort(id);

        hw_i2c_register_int(id, intr_read_buffer_handler,
                HW_I2C_INT_TX_EMPTY | HW_I2C_INT_RX_FULL | HW_I2C_INT_TX_ABORT);

        return len;
}
/*
 * Start an interrupt-driven master write of \p len bytes from \p data.
 *
 * \param id      I2C controller id
 * \param data    source buffer (must remain valid until the callback)
 * \param len     number of bytes to write; must be non-zero
 * \param cb      completion callback; required for async operation
 * \param cb_data opaque pointer passed back to \p cb
 * \param flags   HW_I2C_F_WAIT_FOR_STOP delays the callback until the STOP
 *                condition is seen on the bus
 *
 * \return 0 on success, -1 on invalid arguments (in which case \p cb, if
 *         given, is invoked with success == false)
 */
int hw_i2c_write_buffer_async(HW_I2C_ID id, const uint8_t *data, uint16_t len,
        hw_i2c_complete_cb cb, void *cb_data, uint32_t flags)
{
        struct i2c *bus = get_i2c(id);
        uint16_t int_mask = HW_I2C_INT_TX_EMPTY | HW_I2C_INT_TX_ABORT;

        if (!data || !cb || len == 0) {
                if (cb) {
                        cb(id, cb_data, 0, false);
                }
                return -1;
        }

        bus->tx_state.data = data;
        bus->tx_state.len = len;
        bus->tx_state.num = 0;
        bus->tx_state.cb = cb;
        bus->tx_state.cb_data = cb_data;
        bus->tx_state.flags = flags;

        hw_i2c_reset_int_tx_abort(id);

        if (flags & HW_I2C_F_WAIT_FOR_STOP) {
                hw_i2c_reset_int_stop_detected(id);
                int_mask |= HW_I2C_INT_STOP_DETECTED;
        }

        /* we want TX_EMPTY as soon as FIFO is not completely full */
        hw_i2c_set_tx_fifo_threshold(id, I2C_FIFO_DEPTH - 1);

        hw_i2c_register_int(id, intr_write_buffer_handler, int_mask);

        return 0;
}
/*
 * Install \p cb as the interrupt callback for controller \p id and program
 * the hardware interrupt mask register with \p mask.
 */
void hw_i2c_register_int(HW_I2C_ID id, hw_i2c_interrupt_cb cb, uint16_t mask)
{
        get_i2c(id)->intr_cb = cb;
        IBA(id)->I2C_INTR_MASK_REG = mask;
}
/*
 * Common interrupt dispatch: forward the interrupt status \p mask to the
 * callback registered via hw_i2c_register_int(), if any.
 */
static inline void intr_handler(HW_I2C_ID id, uint16_t mask)
{
        hw_i2c_interrupt_cb cb = get_i2c(id)->intr_cb;

        if (cb) {
                cb(id, mask);
        }
}
/*
 * DMA completion callback for writes that do NOT wait for a STOP condition:
 * detach the DMA from the controller and report the result immediately.
 */
static void notify_on_dma_write_end_no_stop_cb(void *user_data, uint16 len)
{
        HW_I2C_ID id = (HW_I2C_ID) user_data;
        struct i2c *bus = get_i2c(id);

        /* disable I2C DMA */
        IBA(id)->I2C_DMA_CR_REG = 0;

        /* success only if the DMA moved the full buffer */
        dma_tx_reply(id, len == bus->tx_state.len);
}
/*
 * Finish a buffered read: unregister the interrupt handler, clear the
 * transfer state and invoke the user's completion callback (if any) with
 * the number of bytes actually received.
 */
static void rx_reply(HW_I2C_ID id, bool success)
{
        struct rx_state *rxs = &get_i2c(id)->rx_state;

        hw_i2c_unregister_int(id);
        rxs->data = NULL;

        if (rxs->cb) {
                rxs->cb(id, rxs->cb_data, rxs->num, success);
        }
}
/*
 * DMA completion callback for writes that wait for a STOP condition:
 * record the transferred length and detach the DMA; the user callback
 * fires later, from the STOP/ABORT interrupt handler.
 */
static void notify_on_dma_write_end_cb(void *user_data, uint16 len)
{
        HW_I2C_ID id = (HW_I2C_ID) user_data;

        /* store len, to pass to user's cb when STOP/ABORT is detected */
        get_i2c(id)->tx_state.num = len;

        /* disable I2C DMA */
        IBA(id)->I2C_DMA_CR_REG = 0;
}
/*
 * Generic DMA completion callback: deliver the user's one-shot callback and
 * detach the DMA from the controller.
 *
 * NOTE(review): success is hard-coded to false here — presumably this path
 * is only taken on abnormal completion; confirm against the code that
 * installs hw_i2c_dma_cb.
 */
static void hw_i2c_dma_cb(void *user_data, uint16_t len)
{
        HW_I2C_ID id = (HW_I2C_ID) user_data;
        struct i2c *bus = get_i2c(id);

        if (bus->dma_state.cb) {
                /* one-shot: clear the callback after delivering it */
                bus->dma_state.cb(id, bus->dma_state.cb_data, len, false);
                bus->dma_state.cb = NULL;
        }

        /* disable I2C DMA */
        IBA(id)->I2C_DMA_CR_REG = 0;
}
/*
 * DMA completion callback for reads: record the received length, detach the
 * DMA from the controller and report the result to the user.
 */
static void notify_on_dma_read_end_cb(void *user_data, uint16 len)
{
        HW_I2C_ID id = (HW_I2C_ID) user_data;
        struct rx_state *rxs = &get_i2c(id)->rx_state;

        rxs->num = len;

        /* disable I2C DMA */
        IBA(id)->I2C_DMA_CR_REG = 0;

        /* success only if the DMA filled the whole buffer */
        dma_rx_reply(id, rxs->num == rxs->len);
}
/*
 * Read \p len bytes from the bus as master.
 *
 * With \p cb == NULL the transfer is synchronous: the function polls,
 * queuing read commands into the TX FIFO and draining the RX FIFO until
 * all bytes arrive, and returns only after the bus is idle.
 * With a callback the transfer is handed over to the interrupt handler and
 * the function returns immediately; \p cb fires on completion or abort.
 *
 * \return false on a NULL buffer or (synchronous path only) on a detected
 *         abort; true otherwise
 */
bool hw_i2c_read_buffer(HW_I2C_ID id, uint8_t *data, uint16_t len,
        hw_i2c_complete_cb cb, void *cb_data)
{
        uint16_t received = 0;
        uint16_t triggered = 0;

        if (!data) {
                return false;
        }

        if (cb) {
                /* asynchronous path: state is handed to the interrupt handler */
                struct i2c *bus = get_i2c(id);

                bus->rx_state.data = data;
                bus->rx_state.len = len;
                bus->rx_state.num = 0;
                bus->rx_state.rr = 0;
                bus->rx_state.cb = cb;
                bus->rx_state.cb_data = cb_data;

                /* lowest threshold: interrupt on the very first received byte */
                hw_i2c_set_rx_fifo_threshold(id, 0);
                hw_i2c_reset_int_tx_abort(id);
                hw_i2c_register_int(id, intr_read_buffer_handler,
                        HW_I2C_INT_TX_EMPTY | HW_I2C_INT_RX_FULL | HW_I2C_INT_TX_ABORT);
                return true;
        }

        /* synchronous path: poll until every byte has been read back */
        while (received < len) {
                /* queue as many read commands as the TX FIFO will accept */
                while (triggered < len && hw_i2c_is_tx_fifo_not_full(id)) {
                        hw_i2c_read_byte_trigger(id);
                        triggered++;
                }

                if (hw_i2c_get_abort_source(id)) {
                        return false;
                }

                /* drain whatever has arrived in the RX FIFO */
                while (received < len && hw_i2c_get_rx_fifo_level(id)) {
                        data[received] = hw_i2c_read_byte(id);
                        received++;
                }
        }

        /* wait for the bus to go idle, then do a final abort check */
        while (hw_i2c_is_master_busy(id));

        if (hw_i2c_get_abort_source(id)) {
                return false;
        }

        return true;
}
/*
 * Write \p len bytes to the bus as master.
 *
 * With \p cb == NULL the transfer is synchronous: bytes are pushed into the
 * TX FIFO under polling and the function returns once the FIFO drains and
 * the bus goes idle. With a callback the transfer is handed over to the
 * interrupt handler and the function returns immediately.
 *
 * \param wait_for_stop when true, the completion callback is delayed until
 *        the STOP condition is detected on the bus
 *
 * \return false on a NULL buffer or (synchronous path only) on a detected
 *         abort; true otherwise
 *
 * Fix: program the TX FIFO threshold BEFORE unmasking TX_EMPTY via
 * hw_i2c_register_int(). The original did it in the opposite order, so the
 * first TX_EMPTY interrupt could fire with a stale threshold; this also
 * matches the ordering used by hw_i2c_write_buffer_async().
 */
bool hw_i2c_write_buffer(HW_I2C_ID id, const uint8_t *data, uint16_t len,
        hw_i2c_complete_cb cb, void *cb_data, bool wait_for_stop)
{
        if (!data) {
                return false;
        }

        if (!cb) {
                /* synchronous (polling) transfer */
                while (len--) {
                        while (!hw_i2c_is_tx_fifo_not_full(id));
                        hw_i2c_write_byte(id, *data);
                        data++;
                        if (hw_i2c_get_abort_source(id)) {
                                return false;
                        }
                }
                /* wait for FIFO drain and bus idle, then final abort check */
                while (!hw_i2c_is_tx_fifo_empty(id));
                while (hw_i2c_is_master_busy(id));
                if (hw_i2c_get_abort_source(id)) {
                        return false;
                }
        } else {
                struct i2c *i2c = get_i2c(id);

                i2c->tx_state.data = data;
                i2c->tx_state.len = len;
                i2c->tx_state.num = 0;
                i2c->tx_state.cb = cb;
                i2c->tx_state.cb_data = cb_data;
                i2c->tx_state.flags = wait_for_stop ? HW_I2C_F_WAIT_FOR_STOP : HW_I2C_F_NONE;

                hw_i2c_reset_int_tx_abort(id);
                if (wait_for_stop) {
                        hw_i2c_reset_int_stop_detected(id);
                }

                /* we want TX_EMPTY as soon as FIFO is not completely full */
                hw_i2c_set_tx_fifo_threshold(id, I2C_FIFO_DEPTH - 1);

                hw_i2c_register_int(id, intr_write_buffer_handler,
                        HW_I2C_INT_TX_EMPTY |
                        (wait_for_stop ? HW_I2C_INT_STOP_DETECTED : 0) |
                        HW_I2C_INT_TX_ABORT);
        }

        return true;
}
/*
 * Interrupt handler driving buffered master writes started by
 * hw_i2c_write_buffer()/hw_i2c_write_buffer_async().
 *
 * Handles, in priority order: TX_ABORT (fail the transfer), STOP_DETECTED
 * (finish the transfer), TX_EMPTY (top up the TX FIFO). Any other source
 * is treated as a failure.
 */
static void intr_write_buffer_handler(HW_I2C_ID id, uint16_t mask)
{
        struct i2c *i2c = get_i2c(id);
        struct tx_state *txs = &i2c->tx_state;

        /* no transfer in progress, or spurious interrupt */
        if (!txs->data || mask == 0) {
                return;
        }

        if (mask & HW_I2C_INT_TX_ABORT) {
                tx_reply(id, false);
                /* clear abort */
                hw_i2c_reset_int_tx_abort(id);
                return;
        }

        if (mask & HW_I2C_INT_STOP_DETECTED) {
                /* success only if every byte made it into the TX FIFO */
                tx_reply(id, txs->num == txs->len);
                hw_i2c_reset_int_stop_detected(id);
                return;
        }

        /* any remaining source other than TX_EMPTY is unexpected - fail */
        if (!(mask & HW_I2C_INT_TX_EMPTY)) {
                tx_reply(id, false);
                return;
        }

        /* top up the TX FIFO with pending bytes */
        while (txs->num < txs->len && hw_i2c_is_tx_fifo_not_full(id)) {
                hw_i2c_write_byte(id, txs->data[txs->num]);
                txs->num++;
        }

        /*
         * trigger reply when all data were written to TX FIFO and either TX FIFO is empty
         * (controller will generate STOP condition on bus) or caller requested immediate callback
         * (caller can continue with another transfer immediately).
         */
        if (txs->num == txs->len) {
                if (txs->flags & HW_I2C_F_WAIT_FOR_STOP) {
                        /* stop TX_EMPTY interrupts; wait for STOP_DETECTED instead */
                        hw_i2c_set_int_mask(id, hw_i2c_get_int_mask(id) & ~HW_I2C_INT_TX_EMPTY);
                } else {
                        tx_reply(id, true);
                }
        }
}
/*
 * irq_chip .irq_bus_sync_unlock callback: write the cached per-group
 * interrupt masks to the chip and release the lock taken by the matching
 * .irq_bus_lock callback.
 */
static void max77693_irq_sync_unlock(struct irq_data *data)
{
	struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
	int grp;

	for (grp = 0; grp < MAX77693_IRQ_GROUP_NR; grp++) {
		u8 mask_reg = max77693_mask_reg[grp];
		struct i2c_client *client = get_i2c(max77693, grp);

		/* skip groups without a usable mask register or i2c handle */
		if (mask_reg == MAX77693_REG_INVALID || IS_ERR_OR_NULL(client))
			continue;

		max77693->irq_masks_cache[grp] = max77693->irq_masks_cur[grp];
		max77693_write_reg(client, max77693_mask_reg[grp],
				   max77693->irq_masks_cur[grp]);
	}

	mutex_unlock(&max77693->irqlock);
}
/*
 * Interrupt handler used by hw_i2c_prepare_dma_ex() to handle STOP and ABORT for DMA writes.
 *
 * TX_ABORT: tear down the DMA and report failure.
 * STOP_DETECTED while DMA is still enabled: a NACK occurred - only clear
 * the interrupt and let the upcoming TX_ABORT deliver the reply.
 * STOP_DETECTED after DMA finished: report success iff the whole buffer
 * was transferred.
 */
static void intr_write_buffer_dma_handler(HW_I2C_ID id, uint16_t mask)
{
        struct i2c *i2c = get_i2c(id);
        struct tx_state *txs = &i2c->tx_state;

        /* Must provide a valid (> 0) mask */
        ASSERT_WARNING(mask != 0);

        if (mask & HW_I2C_INT_TX_ABORT) {
                /* disable I2C DMA */
                IBA(id)->I2C_DMA_CR_REG = 0;
                dma_tx_reply(id, false);
                /* clear abort */
                hw_i2c_reset_int_tx_abort(id);
                return;
        }

        if (mask & HW_I2C_INT_STOP_DETECTED) {
                if (IBA(id)->I2C_DMA_CR_REG != 0) {
                        hw_i2c_reset_int_stop_detected(id);
                        /*
                         * A STOP while DMA is still enabled is caused by a NACK from the slave.
                         * While servicing the STOP_DETECTED interrupt we don't need to call the
                         * reply callback. This will be done when servicing the TX_ABORT interrupt
                         * that will follow.
                         */
                        return;
                }
                /* DMA finished: success iff the complete buffer was moved */
                dma_tx_reply(id, txs->num == txs->len);
                hw_i2c_reset_int_stop_detected(id);
                return;
        }
}
/*
 * Interrupt handler driving buffered master reads started by
 * hw_i2c_read_buffer()/hw_i2c_read_buffer_async().
 *
 * TX_EMPTY is used to queue read commands into the TX FIFO; RX_FULL to
 * drain received bytes; TX_ABORT fails the transfer.
 */
static void intr_read_buffer_handler(HW_I2C_ID id, uint16_t mask)
{
        struct i2c *i2c = get_i2c(id);
        struct rx_state *rxs = &i2c->rx_state;

        if (mask & HW_I2C_INT_TX_ABORT) {
                rx_reply(id, false);
                /* clear abort */
                hw_i2c_reset_int_tx_abort(id);
                return;
        }

        /* ignore interrupts when no transfer is pending or no relevant source */
        if (!rxs->data || !(mask & (HW_I2C_INT_RX_FULL | HW_I2C_INT_TX_EMPTY))) {
                return;
        }

        /* queue read commands while the TX FIFO has room (rr counts triggers) */
        while ((rxs->rr < rxs->len) && hw_i2c_is_tx_fifo_not_full(id)) {
                rxs->rr++;
                hw_i2c_read_byte_trigger(id);
        }

        /* drain received bytes from the RX FIFO */
        while (hw_i2c_get_rx_fifo_level(id) && rxs->num < rxs->len) {
                rxs->data[rxs->num] = hw_i2c_read_byte(id);
                rxs->num++;
        }

        if (rxs->num == rxs->len) {
                rx_reply(id, true);
                return;
        }

        if (mask & HW_I2C_INT_TX_EMPTY) {
                /*
                 * NOTE(review): once TX_EMPTY has been serviced, the handler
                 * stops listening for it and relies on RX_FULL alone -
                 * presumably all read commands have been queued by this
                 * point; confirm for transfers longer than the FIFO depth.
                 */
                hw_i2c_register_int(id, intr_read_buffer_handler,
                        HW_I2C_INT_RX_FULL | HW_I2C_INT_TX_ABORT);
        }
}
/*
 * Initialize an I2C controller block: select its IRQ line, reset the driver
 * state, enable the peripheral clock, disable the controller, mask all
 * interrupt sources, apply the configuration and enable the NVIC interrupt.
 *
 * \param id  must be HW_I2C1 or HW_I2C2 (asserts otherwise)
 * \param cfg configuration forwarded to hw_i2c_configure()
 */
void hw_i2c_init(HW_I2C_ID id, const i2c_config *cfg)
{
        IRQn_Type irq_type = I2C_IRQn;
        int enable_loop_cnt = 0;

        if (id == HW_I2C2) {
                irq_type = I2C2_IRQn;
        } else if (id != HW_I2C1) {
                /* Requested ID must be one of HW_I2C1 or HW_I2C2 */
                ASSERT_ERROR(0);
        }

        /* reset cached driver state for this controller */
        struct i2c *i2c = get_i2c(id);
        memset(i2c, 0, sizeof(*i2c));

        /* read-modify-write of the shared clock register with interrupts off */
        GLOBAL_INT_DISABLE();
        uint32_t clk_per_reg_local = CRG_PER->CLK_PER_REG;
        REG_SET_FIELD(CRG_PER, CLK_PER_REG, I2C_CLK_SEL, clk_per_reg_local, 0);
        REG_SET_FIELD(CRG_PER, CLK_PER_REG, I2C_ENABLE, clk_per_reg_local, 1);
        CRG_PER->CLK_PER_REG = clk_per_reg_local;
        GLOBAL_INT_RESTORE();

        hw_i2c_disable(id);
        /* poll until the controller reports itself disabled */
        while (hw_i2c_get_enable_status(id) & I2C_I2C_ENABLE_STATUS_REG_IC_EN_Msk) {
                hw_cpm_delay_usec(500);
                enable_loop_cnt++;
                /* we shouldn't get stuck here, the HW I2C block should eventually be enabled */
                ASSERT_ERROR(enable_loop_cnt < I2C_ENABLE_LOOP_LIMIT);
        }

        /* mask every interrupt source until a handler is registered */
        IBA(id)->I2C_INTR_MASK_REG = 0x0000;

        hw_i2c_configure(id, cfg);

        NVIC_EnableIRQ(irq_type);
}
/*
 * Interrupt handler for slave mode, installed by hw_i2c_setup_slave():
 * translate hardware interrupt sources into HW_I2C_EVENT_* notifications
 * for the application callback and acknowledge the sources that need it.
 */
static void intr_slave_handler(HW_I2C_ID id, uint16_t mask)
{
        hw_i2c_event_cb cb = get_i2c(id)->event_cb;

        if (mask & HW_I2C_INT_READ_REQUEST) {
                if (cb) {
                        cb(id, HW_I2C_EVENT_READ_REQUEST);
                }
                hw_i2c_reset_int_read_request(id);
        }

        /* RX_FULL needs no explicit acknowledge */
        if (mask & HW_I2C_INT_RX_FULL) {
                if (cb) {
                        cb(id, HW_I2C_EVENT_DATA_READY);
                }
        }

        if (mask & HW_I2C_INT_TX_ABORT) {
                if (cb) {
                        cb(id, HW_I2C_EVENT_TX_ABORT);
                }
                /* clear abort */
                hw_i2c_reset_int_tx_abort(id);
        }

        if (mask & HW_I2C_INT_RX_OVERFLOW) {
                if (cb) {
                        cb(id, HW_I2C_EVENT_RX_OVERFLOW);
                }
                hw_i2c_reset_int_rx_overflow(id);
        }
}
/*
 * Set up max77665 interrupt handling: mask all per-group interrupt sources,
 * register each sub-interrupt with genirq, unmask the charger and MUIC
 * top-level sources and request the threaded chip IRQ.
 *
 * Returns 0 on success (or when no IRQ is configured), negative errno on
 * register-read or request_threaded_irq failure.
 */
int max77665_irq_init(struct max77665_dev *max77665)
{
	int i;
	int cur_irq;
	int ret;
	u8 i2c_data;

	if (!max77665->irq) {
		dev_warn(max77665->dev, "No interrupt specified.\n");
		max77665->irq_base = 0;
		return 0;
	}

	if (!max77665->irq_base) {
		/* NOTE(review): reported via dev_err but returns 0 - confirm
		 * callers really treat a missing irq_base as non-fatal */
		dev_err(max77665->dev, "No interrupt base specified.\n");
		return 0;
	}

	mutex_init(&max77665->irqlock);

	/* Mask individual interrupt sources */
	for (i = 0; i < MAX77665_IRQ_GROUP_NR; i++) {
		struct i2c_client *i2c;

		/* MUIC_INT1 group is initialized to 0x00, all others to 0xff
		 * - presumably the MUIC mask register uses inverted polarity
		 * (0:MASK, 1:NOT MASK); see the max77693 variant below */
		if (i == MUIC_INT1) {
			max77665->irq_masks_cur[i] = 0x00;
			max77665->irq_masks_cache[i] = 0x00;
		} else {
			max77665->irq_masks_cur[i] = 0xff;
			max77665->irq_masks_cache[i] = 0xff;
		}
		i2c = get_i2c(max77665, i);

		if (IS_ERR_OR_NULL(i2c))
			continue;
		if (max77665_mask_reg[i] == MAX77665_REG_INVALID)
			continue;

		if (i == MUIC_INT1) {
			max77665_write_reg(i2c, max77665_mask_reg[i], 0x00);
		} else {
			max77665_write_reg(i2c, max77665_mask_reg[i], 0xff);
		}
	}

	/* Register with genirq */
	for (i = 0; i < MAX77665_IRQ_NR; i++) {
		cur_irq = i + max77665->irq_base;
		irq_set_chip_data(cur_irq, max77665);
		irq_set_chip_and_handler(cur_irq, &max77665_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(cur_irq, 1);
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	/* Unmask max77665 interrupt */
	ret = max77665_read_reg(max77665->i2c, MAX77665_PMIC_REG_INTSRC_MASK,
				&i2c_data);
	if (ret) {
		dev_err(max77665->dev, "%s: fail to read muic reg\n",
			__func__);
		return ret;
	}

	i2c_data &= ~(MAX77665_IRQSRC_CHG);	/* Unmask charger interrupt */
	i2c_data &= ~(MAX77665_IRQSRC_MUIC);	/* Unmask muic interrupt */
	max77665_write_reg(max77665->i2c, MAX77665_PMIC_REG_INTSRC_MASK,
			   i2c_data);

	ret = request_threaded_irq(max77665->irq, NULL, max77665_irq_thread,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   "max77665-irq", max77665);
	if (ret) {
		dev_err(max77665->dev, "Failed to request IRQ %d: %d\n",
			max77665->irq, ret);
		return ret;
	}

	return 0;
}
/*
 * Set up max77693 interrupt handling: map the interrupt GPIO to an IRQ,
 * mask all per-group interrupt sources, register each sub-interrupt with
 * genirq, unmask the charger and MUIC top-level sources and request the
 * threaded chip IRQ.
 *
 * Returns 0 on success (or when no IRQ GPIO is configured), negative errno
 * on GPIO request, register-read or request_threaded_irq failure.
 */
int max77693_irq_init(struct max77693_dev *max77693)
{
	int i;
	int cur_irq;
	int ret;
	u8 i2c_data;

	if (!max77693->irq_gpio) {
		dev_warn(max77693->dev, "No interrupt specified.\n");
		max77693->irq_base = 0;
		return 0;
	}

	if (!max77693->irq_base) {
		/* NOTE(review): reported via dev_err but returns 0 - confirm
		 * callers really treat a missing irq_base as non-fatal */
		dev_err(max77693->dev, "No interrupt base specified.\n");
		return 0;
	}

	mutex_init(&max77693->irqlock);

	max77693->irq = gpio_to_irq(max77693->irq_gpio);

	/* request the GPIO only long enough to configure it as an input */
	ret = gpio_request(max77693->irq_gpio, "if_pmic_irq");
	if (ret) {
		dev_err(max77693->dev, "%s: failed requesting gpio %d\n",
			__func__, max77693->irq_gpio);
		return ret;
	}
	gpio_direction_input(max77693->irq_gpio);
	gpio_free(max77693->irq_gpio);

	/* Mask individual interrupt sources */
	for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
		struct i2c_client *i2c;
		/* MUIC IRQ 0:MASK 1:NOT MASK */
		/* Other IRQ 1:MASK 0:NOT MASK */
		if (i >= MUIC_INT1 && i <= MUIC_INT3) {
			max77693->irq_masks_cur[i] = 0x00;
			max77693->irq_masks_cache[i] = 0x00;
		} else {
			max77693->irq_masks_cur[i] = 0xff;
			max77693->irq_masks_cache[i] = 0xff;
		}
		i2c = get_i2c(max77693, i);

		if (IS_ERR_OR_NULL(i2c))
			continue;
		if (max77693_mask_reg[i] == MAX77693_REG_INVALID)
			continue;

		if (i >= MUIC_INT1 && i <= MUIC_INT3)
			max77693_write_reg(i2c, max77693_mask_reg[i], 0x00);
		else
			max77693_write_reg(i2c, max77693_mask_reg[i], 0xff);
	}

	/* Register with genirq */
	for (i = 0; i < MAX77693_IRQ_NR; i++) {
		cur_irq = i + max77693->irq_base;
		irq_set_chip_data(cur_irq, max77693);
		irq_set_chip_and_handler(cur_irq, &max77693_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(cur_irq, 1);
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	/* Unmask max77693 interrupt */
	ret = max77693_read_reg(max77693->i2c, MAX77693_PMIC_REG_INTSRC_MASK,
				&i2c_data);
	if (ret) {
		dev_err(max77693->dev, "%s: fail to read muic reg\n",
			__func__);
		return ret;
	}

	i2c_data &= ~(MAX77693_IRQSRC_CHG);	/* Unmask charger interrupt */
	i2c_data &= ~(MAX77693_IRQSRC_MUIC);	/* Unmask muic interrupt */
	max77693_write_reg(max77693->i2c,
			   MAX77693_PMIC_REG_INTSRC_MASK, i2c_data);

	ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   "max77693-irq", max77693);
	if (ret) {
		dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
			max77693->irq, ret);
		return ret;
	}

	return 0;
}
int max8997_irq_init(struct max8997_dev *max8997) { struct irq_domain *domain; int i; int ret; u8 val; if (!max8997->irq) { dev_warn(max8997->dev, "No interrupt specified.\n"); return 0; } mutex_init(&max8997->irqlock); /* Mask individual interrupt sources */ for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) { struct i2c_client *i2c; max8997->irq_masks_cur[i] = 0xff; max8997->irq_masks_cache[i] = 0xff; i2c = get_i2c(max8997, i); if (IS_ERR_OR_NULL(i2c)) continue; if (max8997_mask_reg[i] == MAX8997_REG_INVALID) continue; max8997_write_reg(i2c, max8997_mask_reg[i], 0xff); } for (i = 0; i < MAX8997_NUM_GPIO; i++) { max8997->gpio_status[i] = (max8997_read_reg(max8997->i2c, MAX8997_REG_GPIOCNTL1 + i, &val) & MAX8997_GPIO_DATA_MASK) ? true : false; } domain = irq_domain_add_linear(NULL, MAX8997_IRQ_NR, &max8997_irq_domain_ops, max8997); if (!domain) { dev_err(max8997->dev, "could not create irq domain\n"); return -ENODEV; } max8997->irq_domain = domain; ret = request_threaded_irq(max8997->irq, NULL, max8997_irq_thread, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "max8997-irq", max8997); if (ret) { dev_err(max8997->dev, "Failed to request IRQ %d: %d\n", max8997->irq, ret); return ret; } if (!max8997->ono) return 0; ret = request_threaded_irq(max8997->ono, NULL, max8997_irq_thread, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, "max8997-ono", max8997); if (ret) dev_err(max8997->dev, "Failed to request ono-IRQ %d: %d\n", max8997->ono, ret); return 0; }
/*
 * Set up max77828 interrupt handling: map the interrupt GPIO to an IRQ,
 * mask all per-group interrupt sources, register each sub-interrupt with
 * genirq and request the threaded chip IRQ.
 *
 * Returns 0 on success (or when no IRQ GPIO is configured), negative errno
 * on GPIO request or request_threaded_irq failure.
 */
int max77828_irq_init(struct max77828_dev *max77828)
{
	int i;
	int cur_irq;
	int ret;

	if (!max77828->irq_gpio) {
		dev_warn(max77828->dev, "No interrupt specified.\n");
		max77828->irq_base = 0;
		return 0;
	}

	if (!max77828->irq_base) {
		/* NOTE(review): reported via dev_err but returns 0 - confirm
		 * callers really treat a missing irq_base as non-fatal */
		dev_err(max77828->dev, "No interrupt base specified.\n");
		return 0;
	}

	mutex_init(&max77828->irqlock);

	max77828->irq = gpio_to_irq(max77828->irq_gpio);
	pr_info("%s:%s irq=%d, irq->gpio=%d\n", MFD_DEV_NAME, __func__,
		max77828->irq, max77828->irq_gpio);

	/* request the GPIO only long enough to configure it as an input */
	ret = gpio_request(max77828->irq_gpio, "if_pmic_irq");
	if (ret) {
		dev_err(max77828->dev, "%s: failed requesting gpio %d\n",
			__func__, max77828->irq_gpio);
		return ret;
	}
	gpio_direction_input(max77828->irq_gpio);
	gpio_free(max77828->irq_gpio);

	/* Mask individual interrupt sources */
	for (i = 0; i < MAX77828_IRQ_GROUP_NR; i++) {
		struct i2c_client *i2c;
		/* MUIC IRQ 0:MASK 1:NOT MASK */
		/* Other IRQ 1:MASK 0:NOT MASK */
		if (i >= MUIC_INT1 && i <= MUIC_INT3) {
			max77828->irq_masks_cur[i] = 0x00;
			max77828->irq_masks_cache[i] = 0x00;
		} else {
			max77828->irq_masks_cur[i] = 0xff;
			max77828->irq_masks_cache[i] = 0xff;
		}
		i2c = get_i2c(max77828, i);

		if (IS_ERR_OR_NULL(i2c))
			continue;
		if (max77828_mask_reg[i] == MAX77828_REG_INVALID)
			continue;

		if (i >= MUIC_INT1 && i <= MUIC_INT3)
			max77828_write_reg(i2c, max77828_mask_reg[i], 0x00);
		else
			max77828_write_reg(i2c, max77828_mask_reg[i], 0xff);
	}

	/* Register with genirq */
	for (i = 0; i < MAX77828_IRQ_NR; i++) {
		cur_irq = i + max77828->irq_base;
		irq_set_chip_data(cur_irq, max77828);
		irq_set_chip_and_handler(cur_irq, &max77828_irq_chip,
					 handle_level_irq);
		irq_set_nested_thread(cur_irq, 1);
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	ret = request_threaded_irq(max77828->irq, NULL, max77828_irq_thread,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   "max77828-irq", max77828);
	if (ret) {
		dev_err(max77828->dev, "Failed to request IRQ %d: %d\n",
			max77828->irq, ret);
		return ret;
	}

	return 0;
}
/*
 * Configure a DMA channel pair for an I2C transfer.
 *
 * Channel 'channel' (forced even) is the RX channel, 'channel + 1' the TX
 * channel. The TX channel is also used for master reads: it streams the
 * read command (0x100) into the TX FIFO to clock data in. Depending on
 * 'type' only one direction may be set up. I2C-side DMA is left disabled
 * here; the caller enables it to start the transfer.
 *
 * \param id             I2C controller id
 * \param channel        DMA channel pair base (rounded down to even)
 * \param data           transfer buffer
 * \param len            transfer length in elements
 * \param type           read/write/slave-read transfer type
 * \param cb             user completion callback, stored in dma_state
 * \param cb_data        opaque pointer passed back to \p cb
 * \param notify_on_stop for writes: delay the callback until STOP/ABORT
 */
void hw_i2c_prepare_dma_ex(HW_I2C_ID id, uint8_t channel, uint16_t *data, uint16_t len,
        HW_I2C_DMA_TRANSFER type, hw_i2c_complete_cb cb, void *cb_data, bool notify_on_stop)
{
        static volatile uint16_t read_cmd = 0x100; /* must be in RAM for faster access */
        DMA_setup dma;
        struct i2c *i2c = get_i2c(id);

        /* for sanity so even if channel is set to odd number, we'll use proper pair */
        channel &= 0xfe;

        /* make sure I2C DMA is off so it's not unexpectedly triggered when channels are enabled */
        IBA(id)->I2C_DMA_CR_REG = 0;

        i2c->dma_state.cb = cb;
        i2c->dma_state.cb_data = cb_data;

        /* RX channel, not used only when writing data */
        if (type != HW_I2C_DMA_TRANSFER_WRITE) {
                dma.channel_number = channel;
                dma.bus_width = HW_DMA_BW_BYTE;
                dma.irq_enable = HW_DMA_IRQ_STATE_ENABLED;
                dma.irq_nr_of_trans = 0;
                dma.dreq_mode = HW_DMA_DREQ_TRIGGERED;
                dma.a_inc = HW_DMA_AINC_FALSE;
                dma.b_inc = HW_DMA_BINC_TRUE;
                dma.circular = HW_DMA_MODE_NORMAL;
                /*
                 * Set DMA priority to highest; see Tx channel setup below for explanation.
                 */
                dma.dma_prio = HW_DMA_PRIO_7;
                dma.dma_idle = HW_DMA_IDLE_INTERRUPTING_MODE; /* Not used by the HW in this case */
                dma.dma_init = HW_DMA_INIT_AX_BX_AY_BY;
                dma.dma_req_mux = id == HW_I2C2 ?
                        HW_DMA_TRIG_I2C2_RXTX : HW_DMA_TRIG_I2C_RXTX;
                dma.src_address = (uint32) &IBA(id)->I2C_DATA_CMD_REG;
                dma.dest_address = (uint32_t) data;
                dma.length = len;
                dma.callback = notify_on_dma_read_end_cb;
                i2c->rx_state.num = 0;
                i2c->rx_state.len = len;
                dma.user_data = (void *) id;
                hw_dma_channel_initialization(&dma);
                hw_dma_channel_enable(channel, HW_DMA_STATE_ENABLED);
        }

        /*
         * TX channel
         * used also when reading as master since we need to trigger read by writing read command
         * to TX FIFO
         */
        if (type != HW_I2C_DMA_TRANSFER_SLAVE_READ) {
                bool is_rx = (type != HW_I2C_DMA_TRANSFER_WRITE);

                dma.channel_number = channel + 1;
                dma.bus_width = HW_DMA_BW_HALFWORD;
                dma.irq_enable = HW_DMA_IRQ_STATE_ENABLED;
                dma.irq_nr_of_trans = 0;
                dma.dreq_mode = HW_DMA_DREQ_TRIGGERED;
                /* for RX no need to increment Ax, we read single value only */
                dma.a_inc = is_rx ? HW_DMA_AINC_FALSE : HW_DMA_AINC_TRUE;
                dma.b_inc = HW_DMA_BINC_FALSE;
                dma.circular = HW_DMA_MODE_NORMAL;
                /*
                 * Set DMA priority to highest, to avoid case of bus starvation due to a
                 * higher-priority DMA transaction, which will drain the FIFO and
                 * introduce a STOP bit.
                 * If both I2C and I2C2 are transmitting via DMA, their relative priority
                 * will be defined by the DMA channels they are assigned.
                 * However, the I2C bus frequency is much lower than the frequency that the
                 * DMA controller runs at, so it is not expected that the DMA for I2C will
                 * cause bus starvation to the DMA for I2C2 (and vice versa).
                 */
                dma.dma_prio = HW_DMA_PRIO_7;
                dma.dma_idle = HW_DMA_IDLE_INTERRUPTING_MODE; /* Not used by the HW in this case */
                /*
                 * We don't use HW_DMA_INIT_AX_BX_BY because it will lock the bus until
                 * the DMA transaction is finished, which might cause bus starvation to
                 * other peripherals.
                 */
                dma.dma_init = HW_DMA_INIT_AX_BX_AY_BY;
                dma.dma_req_mux = id == HW_I2C2 ? HW_DMA_TRIG_I2C2_RXTX : HW_DMA_TRIG_I2C_RXTX;
                /* for RX we store read command separately */
                dma.src_address = (uint32_t) (is_rx ?
                        &read_cmd : data);
                dma.dest_address = (uint32) &IBA(id)->I2C_DATA_CMD_REG;
                dma.length = len;
                dma.user_data = (void *) id;

                if (type == HW_I2C_DMA_TRANSFER_WRITE) {
                        uint16_t int_mask = HW_I2C_INT_TX_ABORT;

                        hw_i2c_reset_int_tx_abort(id);
                        i2c->tx_state.num = 0;
                        i2c->tx_state.len = len;

                        if (notify_on_stop) {
                                int_mask |= HW_I2C_INT_STOP_DETECTED;
                                hw_i2c_reset_int_stop_detected(id);
                                dma.callback = notify_on_dma_write_end_cb;
                                /*
                                 * install an interrupt handler to detect STOP or ABORT,
                                 * which will trigger user's cb
                                 */
                                hw_i2c_register_int(id, intr_write_buffer_dma_handler, int_mask);
                                /* we want TX_EMPTY as soon as FIFO is empty */
                                hw_i2c_set_tx_fifo_threshold(id, 0);
                        } else {
                                dma.callback = notify_on_dma_write_end_no_stop_cb;
                                /*
                                 * install an interrupt handler to detect ABORT,
                                 * which will disable I2C DMA, which will trigger user's cb
                                 */
                                hw_i2c_register_int(id, intr_write_buffer_dma_no_stop_handler, int_mask);
                        }
                } else {
                        /* Rx DMA has been taken care of already */
                        dma.callback = NULL;
                }

                hw_dma_channel_initialization(&dma);
                hw_dma_channel_enable(channel + 1, HW_DMA_STATE_ENABLED);
        }

        /* we can set both, does not matter than one of them won't be used */
        IBA(id)->I2C_DMA_TDLR_REG = 2;
        IBA(id)->I2C_DMA_RDLR_REG = 0;
}