/*
 * vpe_parse_irq - hard-IRQ top half for the VPE (Video Processing Engine).
 *
 * Reads the interrupt status register, acks (clears) it back-to-back,
 * masks all further VPE interrupts, and defers the real work to
 * vpe_tasklet.  The raw status is stashed in vpe_ctrl->irq_status for
 * the tasklet bottom half to consume.
 *
 * @irq_num: interrupt line number (unused here)
 * @data:    cookie registered with request_irq (unused here)
 *
 * Always returns IRQ_HANDLED, including the not-ready guard path.
 */
static irqreturn_t vpe_parse_irq(int irq_num, void *data)
{
	/* Guard against a spurious IRQ before/after the VPE is mapped. */
	if (!vpe_ctrl || !vpe_ctrl->vpebase)
		return IRQ_HANDLED;
	/*
	 * Read and clear back-to-back so a new event raised after the
	 * read is not accidentally acked away.
	 */
	vpe_ctrl->irq_status = msm_camera_io_r_mb(vpe_ctrl->vpebase +
		VPE_INTR_STATUS_OFFSET);
	msm_camera_io_w_mb(vpe_ctrl->irq_status, vpe_ctrl->vpebase +
		VPE_INTR_CLEAR_OFFSET);
	/*
	 * Mask all VPE interrupts until the bottom half runs.
	 * NOTE(review): assumes the tasklet (or next frame setup)
	 * re-enables them — confirm in the bottom-half code.
	 */
	msm_camera_io_w(0, vpe_ctrl->vpebase + VPE_INTR_ENABLE_OFFSET);
	D("%s: vpe_parse_irq =0x%x.\n", __func__, vpe_ctrl->irq_status);
	/* Defer processing of vpe_ctrl->irq_status to the bottom half. */
	tasklet_schedule(&vpe_tasklet);
	return IRQ_HANDLED;
}
static int32_t msm_cci_get_queue_free_size(struct cci_device *cci_dev, enum cci_i2c_master_t master, enum cci_i2c_queue_t queue) { uint32_t read_val = 0; uint32_t reg_offset = master * 0x200 + queue * 0x100; read_val = msm_camera_io_r_mb(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset); CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d\n", __func__, __LINE__, read_val, cci_dev->cci_i2c_queue_info[master][queue].max_queue_size); return (cci_dev-> cci_i2c_queue_info[master][queue].max_queue_size) - read_val; }
/*
 * msm_cci_validate_queue - make sure @len command words fit in the queue.
 *
 * If the queue's current word count plus @len (plus one word for a
 * trailing REPORT command) would exceed its capacity, the pending queue
 * contents are pushed to the bus first: a REPORT command is appended,
 * the exec word count programmed, the queue kicked, and the caller
 * blocks until the master signals completion or CCI_TIMEOUT expires.
 *
 * @cci_dev: CCI device state
 * @len:     number of command words the caller is about to load
 * @master:  which I2C master block
 * @queue:   which command queue of that master
 *
 * Returns 0 when space is (or becomes) available, -ETIMEDOUT on
 * completion timeout, or the master's recorded error status.
 */
static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
	uint32_t len,
	enum cci_i2c_master_t master,
	enum cci_i2c_queue_t queue)
{
	int32_t rc = 0;
	uint32_t read_val = 0;
	/* Register banks: 0x200 apart per master, 0x100 per queue. */
	uint32_t reg_offset = master * 0x200 + queue * 0x100;

	/* How many words the hardware already holds for this queue. */
	read_val = msm_camera_io_r_mb(cci_dev->base +
		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
		__func__, __LINE__, read_val, len,
		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
	/* +1 reserves room for the REPORT command appended below. */
	if ((read_val + len + 1) > cci_dev->
		cci_i2c_queue_info[master][queue].max_queue_size) {
		uint32_t reg_val = 0;
		/* Bit 8 requests an IRQ when the REPORT executes. */
		uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);

		/* Append REPORT so completion of the flush is signalled. */
		CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
		msm_camera_io_w_mb(report_val, cci_dev->base +
			CCI_I2C_M0_Q0_LOAD_DATA_ADDR + reg_offset);
		/* Account for the REPORT word we just loaded. */
		read_val++;
		CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
			__func__, __LINE__, read_val);
		msm_camera_io_w_mb(read_val, cci_dev->base +
			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
		/* One start bit per (master, queue) pair. */
		reg_val = 1 << ((master * 2) + queue);
		CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
		msm_camera_io_w_mb(reg_val, cci_dev->base +
			CCI_QUEUE_START_ADDR);
		/* Block until the IRQ handler completes reset_complete. */
		CDBG("%s line %d wait_for_completion_timeout\n",
			__func__, __LINE__);
		rc = wait_for_completion_timeout(&cci_dev->
			cci_master_info[master].reset_complete, CCI_TIMEOUT);
		if (rc <= 0) {
			pr_err("%s: wait_for_completion_timeout %d\n",
				__func__, __LINE__);
			/* 0 means timeout (no error code from the wait). */
			if (rc == 0)
				rc = -ETIMEDOUT;
			/* Drop whatever is stuck in the queue. */
			msm_cci_flush_queue(cci_dev, master);
			return rc;
		}
		/* Completion fired: report any error the ISR recorded. */
		rc = cci_dev->cci_master_info[master].status;
		if (rc < 0)
			pr_err("%s failed rc %d\n", __func__, rc);
	}
	return rc;
}
/*
 * vpe_parse_irq - hard-IRQ top half for the VPE, queue-based variant.
 *
 * Reads and acks the interrupt status back-to-back, masks further VPE
 * interrupts, then (for bit 0 only) queues a vpe_isr_queue_cmd_type
 * onto vpe_ctrl->tasklet_q and schedules the tasklet bottom half.
 *
 * @irq_num: interrupt line number (unused here)
 * @data:    cookie registered with request_irq (unused here)
 *
 * Always returns IRQ_HANDLED, including the error paths.
 */
static irqreturn_t vpe_parse_irq(int irq_num, void *data)
{
	unsigned long flags;
	uint32_t irq_status = 0;
	struct vpe_isr_queue_cmd_type *qcmd;

	CDBG("vpe_parse_irq.\n");
	/*
	 * Read and clear back-to-back so an event raised after the read
	 * is not accidentally acked away.
	 */
	irq_status = msm_camera_io_r_mb(vpe_device->vpebase +
		VPE_INTR_STATUS_OFFSET);
	msm_camera_io_w_mb(irq_status, vpe_device->vpebase +
		VPE_INTR_CLEAR_OFFSET);
	/*
	 * Mask all VPE interrupts until the bottom half has run.
	 * NOTE(review): assumes they are re-enabled elsewhere — confirm.
	 */
	msm_camera_io_w(0, vpe_device->vpebase + VPE_INTR_ENABLE_OFFSET);
	/* A zero status on a taken IRQ is unexpected (likely spurious). */
	if (irq_status == 0) {
		pr_err("%s: irq_status = 0,Something is wrong!\n", __func__);
		return IRQ_HANDLED;
	}
	/* Apply mask: only interested in bit 0 (the done event). */
	irq_status &= 0x1;
	if (irq_status) {
		/* GFP_ATOMIC: we are in hard-IRQ context, cannot sleep. */
		qcmd = kzalloc(sizeof(struct vpe_isr_queue_cmd_type),
			GFP_ATOMIC);
		if (!qcmd) {
			/* Drop the event rather than crash; frame is lost. */
			pr_err("%s: qcmd malloc failed!\n", __func__);
			return IRQ_HANDLED;
		}
		/*
		 * Must be 0x1 at this point, so the bottom half does not
		 * really need to re-check it.
		 */
		qcmd->irq_status = irq_status & 0x1;
		/* tasklet_q is shared with the bottom half; lock it. */
		spin_lock_irqsave(&vpe_ctrl->tasklet_lock, flags);
		list_add_tail(&qcmd->list, &vpe_ctrl->tasklet_q);
		spin_unlock_irqrestore(&vpe_ctrl->tasklet_lock, flags);
		tasklet_schedule(&vpe_tasklet);
	}
	return IRQ_HANDLED;
}