Пример #1
0
/*
 * jpeg_watchdog_worker - recover from a JPEG hardware timeout.
 *
 * Marks the hardware idle, fails the in-flight source/destination
 * buffers of the current m2m context (if any) and finishes the job so
 * the m2m framework can schedule the next one.
 *
 * Fix: the original code locked ctx->slock at function entry and
 * unlocked it at exit while ctx was still uninitialized (and possibly
 * NULL) -- an uninitialized-pointer dereference.  The lock is now taken
 * only after the current context has been looked up and validated, and
 * possibly-NULL buffers are no longer passed to v4l2_m2m_buf_done().
 */
static void jpeg_watchdog_worker(struct work_struct *work)
{
	struct jpeg_dev *dev;
	struct jpeg_ctx *ctx;
	unsigned long flags;
	struct vb2_buffer *src_vb, *dst_vb;

	printk(KERN_DEBUG "jpeg_watchdog_worker\n");
	dev = container_of(work, struct jpeg_dev, watchdog_work);

	/* The watchdog fired, so the hardware is no longer running. */
	clear_bit(0, &dev->hw_run);

	if (dev->mode == ENCODING)
		ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev_enc);
	else
		ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev_dec);

	if (!ctx) {
		printk(KERN_ERR "watchdog_ctx is NULL\n");
		return;
	}

	spin_lock_irqsave(&ctx->slock, flags);

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	/* The queues may already be empty; only complete real buffers. */
	if (src_vb)
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
	if (dst_vb)
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);

	if (dev->mode == ENCODING)
		v4l2_m2m_job_finish(dev->m2m_dev_enc, ctx->m2m_ctx);
	else
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);

	spin_unlock_irqrestore(&ctx->slock, flags);
}
Пример #2
0
/*
 * bdisp_ctx_stop_req - request the running job for @ctx to stop.
 *
 * Returns 0 when the context is not currently running (nothing to do)
 * or when the stop request was acknowledged by the IRQ handler within
 * BDISP_WORK_TIMEOUT; returns -ETIMEDOUT otherwise.
 */
static int bdisp_ctx_stop_req(struct bdisp_ctx *ctx)
{
	struct bdisp_dev *bdisp = ctx->bdisp_dev;
	struct bdisp_ctx *running_ctx;
	int remaining;

	dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);

	cancel_delayed_work(&bdisp->timeout_work);

	/* Nothing to stop unless this very context is the one running. */
	running_ctx = v4l2_m2m_get_curr_priv(bdisp->m2m.m2m_dev);
	if (running_ctx != ctx || !test_bit(ST_M2M_RUNNING, &bdisp->state))
		return 0;

	bdisp_ctx_state_lock_set(BDISP_CTX_STOP_REQ, ctx);

	/* The IRQ handler clears BDISP_CTX_STOP_REQ and wakes us up. */
	remaining = wait_event_timeout(bdisp->irq_queue,
			!bdisp_ctx_state_is_set(BDISP_CTX_STOP_REQ, ctx),
			BDISP_WORK_TIMEOUT);
	if (remaining)
		return 0;

	dev_err(ctx->bdisp_dev->dev, "%s IRQ timeout\n", __func__);
	return -ETIMEDOUT;
}
Пример #3
0
/*
 * gsc_op_timer_handler - G-Scaler watchdog: the interrupt never arrived.
 *
 * Dumps diagnostic state, clears the run flag and drops the runtime-PM
 * reference, then fails the in-flight buffers of the current context.
 *
 * Fix: v4l2_m2m_get_curr_priv() may return NULL when no job is active,
 * but the original dereferenced ctx->m2m_ctx unconditionally.  Device
 * state is still cleaned up before bailing out on a NULL context.
 */
void gsc_op_timer_handler(unsigned long arg)
{
	struct gsc_dev *gsc = (struct gsc_dev *)arg;
	struct gsc_ctx *ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	struct vb2_buffer *src_vb, *dst_vb;
#ifdef GSC_PERF
	gsc->end_time = sched_clock();
	gsc_err("expire time: %llu\n", gsc->end_time - gsc->start_time);
#endif
	gsc_dump_registers(gsc);
	exynos_iommu_dump_status(&gsc->pdev->dev);

	clear_bit(ST_M2M_RUN, &gsc->state);
	pm_runtime_put(&gsc->pdev->dev);
	gsc->runtime_put_cnt++;

	if (!ctx) {
		gsc_err("GSCALER[%d] timeout but no active context", gsc->id);
		return;
	}

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}
	gsc_err("GSCALER[%d] interrupt hasn't been triggered", gsc->id);
	gsc_err("erro ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
}
Пример #4
0
/*
 * fimc_irq_handler - FIMC interrupt service routine.
 *
 * Dispatches between the mem2mem completion path and the capture path.
 * Lock discipline is asymmetric: the m2m success path releases
 * fimc->slock *before* finishing the job and returns directly, while
 * every other path exits through the 'out' unlock.
 */
static irqreturn_t fimc_irq_handler(int irq, void *priv)
{
	struct fimc_dev *fimc = priv;
	struct fimc_ctx *ctx;

	/* Acknowledge the interrupt in hardware before any processing. */
	fimc_hw_clear_irq(fimc);

	spin_lock(&fimc->slock);

	if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
		/* Suspend handshake: mark suspended, wake the waiter. */
		if (test_and_clear_bit(ST_M2M_SUSPENDING, &fimc->state)) {
			set_bit(ST_M2M_SUSPENDED, &fimc->state);
			wake_up(&fimc->irq_queue);
			goto out;
		}
		ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
		if (ctx != NULL) {
			/* Drop the device lock first -- presumably
			 * fimc_m2m_job_finish() must not be called with
			 * it held; confirm against the callee. */
			spin_unlock(&fimc->slock);
			fimc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);

			/* A stop request was waiting on this job. */
			if (ctx->state & FIMC_CTX_SHUT) {
				ctx->state &= ~FIMC_CTX_SHUT;
				wake_up(&fimc->irq_queue);
			}
			/* Lock already released on this path. */
			return IRQ_HANDLED;
		}
	} else if (test_bit(ST_CAPT_PEND, &fimc->state)) {
		/* With a single requested JPEG buffer this is the last
		 * one, so tell the capture handler not to re-queue. */
		int last_buf = test_bit(ST_CAPT_JPEG, &fimc->state) &&
				fimc->vid_cap.reqbufs_count == 1;
		fimc_capture_irq_handler(fimc, !last_buf);
	}
out:
	spin_unlock(&fimc->slock);
	return IRQ_HANDLED;
}
Пример #5
0
/*
 * gsc_op_timer_handler - G-Scaler watchdog: the interrupt never arrived.
 *
 * Bails out if the device is no longer marked running (the interrupt
 * won the race), otherwise clears the run state, drops the runtime-PM
 * reference and fails the in-flight buffers of the current context.
 *
 * Fix: v4l2_m2m_get_curr_priv() may return NULL when no job is active,
 * but the original dereferenced ctx->m2m_ctx unconditionally.  Device
 * state is still cleaned up before bailing out on a NULL context.
 */
void gsc_op_timer_handler(unsigned long arg)
{
	struct gsc_dev *gsc = (struct gsc_dev *)arg;
	struct gsc_ctx *ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	struct vb2_buffer *src_vb, *dst_vb;

	if (!test_bit(ST_M2M_RUN, &gsc->state)) {
		gsc_warn("gsc state is 0x%lx", gsc->state);
		return;
	}

	gsc_dump_registers(gsc);

	clear_bit(ST_M2M_RUN, &gsc->state);
	pm_runtime_put(&gsc->pdev->dev);

	if (!ctx) {
		gsc_err("GSCALER[%d] timeout but no active context", gsc->id);
		return;
	}

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}
	gsc_err("GSCALER[%d] interrupt hasn't been triggered", gsc->id);
	gsc_err("erro ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
}
Пример #6
0
/*
 * fimc_isr - FIMC interrupt service routine (videobuf-based m2m path).
 *
 * Completes the current mem2mem job: removes the in-flight buffers,
 * marks them done and wakes any waiters.  Two locks nest here:
 * fimc->slock protects the device/m2m state, fimc->irqlock protects
 * the videobuf buffer state -- the nesting order must not change.
 */
static irqreturn_t fimc_isr(int irq, void *priv)
{
    struct fimc_vid_buffer *src_buf, *dst_buf;
    struct fimc_dev *fimc = (struct fimc_dev *)priv;
    struct fimc_ctx *ctx;

    BUG_ON(!fimc);
    /* Acknowledge the interrupt in hardware before any processing. */
    fimc_hw_clear_irq(fimc);

    spin_lock(&fimc->slock);

    if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
        /* No current context means there is no job to complete. */
        ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
        if (!ctx || !ctx->m2m_ctx)
            goto isr_unlock;
        src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
        dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
        if (src_buf && dst_buf) {
            /* irqlock guards the videobuf state/done fields. */
            spin_lock(&fimc->irqlock);
            src_buf->vb.state = dst_buf->vb.state =  VIDEOBUF_DONE;
            wake_up(&src_buf->vb.done);
            wake_up(&dst_buf->vb.done);
            spin_unlock(&fimc->irqlock);
            v4l2_m2m_job_finish(fimc->m2m.m2m_dev, ctx->m2m_ctx);
        }
    }

isr_unlock:
    spin_unlock(&fimc->slock);
    return IRQ_HANDLED;
}
Пример #7
0
/*
 * rockchip_vpu_watchdog - handle a frame-processing timeout.
 *
 * If a context is still current when the watchdog fires, reset the
 * codec hardware through the context ops and fail the job.
 */
void rockchip_vpu_watchdog(struct work_struct *work)
{
	struct rockchip_vpu_dev *vpu = container_of(to_delayed_work(work),
						    struct rockchip_vpu_dev,
						    watchdog_work);
	struct rockchip_vpu_ctx *cur_ctx;

	cur_ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
	if (!cur_ctx)
		return;

	vpu_err("frame processing timed out!\n");
	cur_ctx->codec_ops->reset(cur_ctx);
	rockchip_vpu_job_finish(vpu, cur_ctx, 0, VB2_BUF_STATE_ERROR);
}
Пример #8
0
/*
 * rockchip_vpu_irq_done - finish the current job from the IRQ path.
 * @vpu:       VPU device.
 * @bytesused: number of bytes produced by the hardware.
 * @result:    buffer completion state to report.
 *
 * If the watchdog could not be cancelled it has already expired and
 * will finish the job itself, so we must not finish it here too.
 */
void rockchip_vpu_irq_done(struct rockchip_vpu_dev *vpu,
			   unsigned int bytesused,
			   enum vb2_buffer_state result)
{
	struct rockchip_vpu_ctx *ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);

	if (!cancel_delayed_work(&vpu->watchdog_work))
		return;

	rockchip_vpu_job_finish(vpu, ctx, bytesused, result);
}
Пример #9
0
/*
 * gsc_m2m_ctx_stop_req - ask the running job for @ctx to stop.
 *
 * Returns 0 when @ctx is not the pending context, -ETIMEDOUT when the
 * IRQ handler did not acknowledge the request in time, otherwise the
 * positive number of jiffies remaining (success).
 */
static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
{
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_ctx *active;
	int remaining;

	active = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	if (active != ctx || !gsc_m2m_pending(gsc))
		return 0;

	gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);

	/* The IRQ handler clears GSC_CTX_STOP_REQ and wakes us up. */
	remaining = wait_event_timeout(gsc->irq_queue,
			!gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
			GSC_SHUTDOWN_TIMEOUT);

	return remaining ? remaining : -ETIMEDOUT;
}
Пример #10
0
/*
 * gsc_ctx_stop_req - ask the running job for @ctx to stop.
 *
 * Returns 0 when @ctx is not currently running, -EBUSY when the stop
 * request was not acknowledged within GSC_SHUTDOWN_TIMEOUT, otherwise
 * the positive number of jiffies remaining (success).
 */
static int gsc_ctx_stop_req(struct gsc_ctx *ctx)
{
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_ctx *active;
	int remaining;

	active = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	if (active != ctx || !gsc_m2m_run(gsc))
		return 0;

	ctx->state |= GSC_CTX_STOP_REQ;

	/* The IRQ handler clears GSC_CTX_STOP_REQ and wakes us up. */
	remaining = wait_event_timeout(gsc->irq_queue,
			!gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
			GSC_SHUTDOWN_TIMEOUT);

	return remaining ? remaining : -EBUSY;
}
Пример #11
0
/*
 * bdisp_irq_timeout - watchdog work: the BDISP interrupt never arrived.
 *
 * Clears the running state, resets the hardware and fails the current
 * job.
 *
 * Fix: v4l2_m2m_get_curr_priv() may return NULL when no context is
 * current, but the original dereferenced ctx before any check.  The
 * device is still reset in that case; only the job completion is
 * skipped.
 */
static void bdisp_irq_timeout(struct work_struct *ptr)
{
	struct delayed_work *twork = to_delayed_work(ptr);
	struct bdisp_dev *bdisp = container_of(twork, struct bdisp_dev,
			timeout_work);
	struct bdisp_ctx *ctx;

	ctx = v4l2_m2m_get_curr_priv(bdisp->m2m.m2m_dev);

	dev_err(bdisp->dev, "Device work timeout\n");

	spin_lock(&bdisp->slock);
	clear_bit(ST_M2M_RUNNING, &bdisp->state);
	spin_unlock(&bdisp->slock);

	bdisp_hw_reset(bdisp);

	if (ctx)
		bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
Пример #12
0
/*
 * msm_jpegdma_isr_processing_done - Invoked by dma_hw when processing is done.
 * @dma: Pointer dma device.
 *
 * If every plane of the current format has been processed, complete the
 * buffers and finish the m2m job; otherwise kick processing of the next
 * plane.  Both dma->lock and ctx->lock are held while touching the
 * context state.
 *
 * Improvement: the duplicated unlock/return sequences on the error
 * paths are replaced with the idiomatic goto-based cleanup, so each
 * mutex is released in exactly one place.
 */
void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
{
	struct vb2_buffer *src_buf;
	struct vb2_buffer *dst_buf;
	struct jpegdma_ctx *ctx;

	mutex_lock(&dma->lock);
	ctx = v4l2_m2m_get_curr_priv(dma->m2m_dev);
	if (!ctx)
		goto out_unlock_dma;

	mutex_lock(&ctx->lock);
	ctx->plane_idx++;
	if (ctx->plane_idx >= formats[ctx->format_idx].num_planes) {
		/* All planes done: complete the buffers and the job. */
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
		if (!src_buf || !dst_buf) {
			dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
			goto out_unlock_ctx;
		}
		complete_all(&ctx->completion);
		ctx->plane_idx = 0;

		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
		v4l2_m2m_job_finish(ctx->jdma_device->m2m_dev,
			ctx->m2m_ctx);
	} else {
		/* More planes to go: process the next one in place. */
		dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
		src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
		if (!src_buf || !dst_buf) {
			dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
			goto out_unlock_ctx;
		}
		msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
	}

out_unlock_ctx:
	mutex_unlock(&ctx->lock);
out_unlock_dma:
	mutex_unlock(&dma->lock);
}
Пример #13
0
/*
 * gsc_ctx_stop_req - ask the running job for @ctx to stop.
 *
 * The stop-request flag is set under ctx->slock.  Returns 0 when @ctx
 * is not currently running, -EBUSY on timeout, otherwise the positive
 * number of jiffies remaining (success).
 */
static int gsc_ctx_stop_req(struct gsc_ctx *ctx)
{
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_ctx *active;
	unsigned long flags;
	int remaining;

	active = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	if (active != ctx || !gsc_m2m_run(gsc))
		return 0;

	spin_lock_irqsave(&ctx->slock, flags);
	ctx->state |= GSC_CTX_STOP_REQ;
	spin_unlock_irqrestore(&ctx->slock, flags);

	/* The IRQ handler clears GSC_CTX_STOP_REQ and wakes us up. */
	remaining = wait_event_timeout(gsc->irq_queue,
			!gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
			GSC_SHUTDOWN_TIMEOUT);

	return remaining ? remaining : -EBUSY;
}
Пример #14
0
/*
 * bdisp_irq_thread - threaded IRQ handler: complete the finished job.
 *
 * Cancels the timeout watchdog, handles a pending suspend handshake,
 * and otherwise finishes the current m2m job with VB2_BUF_STATE_DONE.
 * Lock discipline is asymmetric: the success path releases
 * bdisp->slock before calling bdisp_job_finish() and returns directly,
 * while all bail-out paths exit through isr_unlock.
 */
static irqreturn_t bdisp_irq_thread(int irq, void *priv)
{
	struct bdisp_dev *bdisp = priv;
	struct bdisp_ctx *ctx;

	spin_lock(&bdisp->slock);

	bdisp_dbg_perf_end(bdisp);

	/* The job completed, so the timeout watchdog is no longer needed. */
	cancel_delayed_work(&bdisp->timeout_work);

	/* Spurious interrupt: nothing was running. */
	if (!test_and_clear_bit(ST_M2M_RUNNING, &bdisp->state))
		goto isr_unlock;

	/* Suspend handshake: mark suspended and wake the waiter. */
	if (test_and_clear_bit(ST_M2M_SUSPENDING, &bdisp->state)) {
		set_bit(ST_M2M_SUSPENDED, &bdisp->state);
		wake_up(&bdisp->irq_queue);
		goto isr_unlock;
	}

	ctx = v4l2_m2m_get_curr_priv(bdisp->m2m.m2m_dev);
	if (!ctx || !ctx->fh.m2m_ctx)
		goto isr_unlock;

	/* Drop the lock first -- presumably bdisp_job_finish() must not
	 * run with it held; confirm against the callee. */
	spin_unlock(&bdisp->slock);

	bdisp_job_finish(ctx, VB2_BUF_STATE_DONE);

	/* A stop request was waiting for this job: clear it and wake up. */
	if (bdisp_ctx_state_is_set(BDISP_CTX_STOP_REQ, ctx)) {
		bdisp_ctx_state_lock_clear(BDISP_CTX_STOP_REQ, ctx);
		wake_up(&bdisp->irq_queue);
	}

	return IRQ_HANDLED;

isr_unlock:
	spin_unlock(&bdisp->slock);

	return IRQ_HANDLED;
}
/*
 * fimc_isr - FIMC interrupt handler for the m2m and capture paths.
 *
 * The m2m completion path runs without fimc->slock; per-context state
 * is protected by ctx->slock instead.  The capture path is protected
 * by fimc->slock.
 */
static irqreturn_t fimc_isr(int irq, void *priv)
{
	struct fimc_dev *fimc = priv;
	struct fimc_vid_cap *cap = &fimc->vid_cap;
	struct fimc_ctx *ctx;

	/* Acknowledge the interrupt in hardware before any processing. */
	fimc_hw_clear_irq(fimc);

	if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
		ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
		if (ctx != NULL) {
			fimc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);

			/* A stop request is waiting on this job: clear
			 * the flag under ctx->slock and wake it up. */
			spin_lock(&ctx->slock);
			if (ctx->state & FIMC_CTX_SHUT) {
				ctx->state &= ~FIMC_CTX_SHUT;
				wake_up(&fimc->irq_queue);
			}
			spin_unlock(&ctx->slock);
		}

		return IRQ_HANDLED;
	}

	spin_lock(&fimc->slock);

	if (test_bit(ST_CAPT_PEND, &fimc->state)) {
		fimc_capture_irq_handler(fimc);

		/* Only one active buffer left -- presumably capture must
		 * pause until more buffers are queued; confirm. */
		if (cap->active_buf_cnt == 1) {
			fimc_deactivate_capture(fimc);
			clear_bit(ST_CAPT_STREAM, &fimc->state);
		}
	}

	spin_unlock(&fimc->slock);
	return IRQ_HANDLED;
}
Пример #16
0
/*
 * rot_ctx_stop_req - request the running rotator job for @ctx to abort.
 *
 * Returns 0 when @ctx is not currently running, -EBUSY when the abort
 * was not acknowledged within ROT_TIMEOUT, otherwise the positive
 * number of jiffies remaining (success).
 */
static int rot_ctx_stop_req(struct rot_ctx *ctx)
{
	struct rot_dev *rot = ctx->rot_dev;
	struct rot_ctx *active;
	int remaining;

	active = v4l2_m2m_get_curr_priv(rot->m2m.m2m_dev);
	if (active != ctx || !test_bit(CTX_RUN, &ctx->flags))
		return 0;

	set_bit(CTX_ABORT, &ctx->flags);

	/* The IRQ handler clears CTX_RUN and wakes us up. */
	remaining = wait_event_timeout(rot->irq.wait,
			!test_bit(CTX_RUN, &ctx->flags), ROT_TIMEOUT);

	/* TODO: How to handle case of timeout event */
	if (remaining)
		return remaining;

	rot_err("device failed to stop request\n");
	return -EBUSY;
}
Пример #17
0
/*
 * rot_work - rotator watchdog workqueue handler.
 *
 * If the watchdog counter reached ROT_WDT_CNT the hardware is treated
 * as stuck: the in-flight buffers of the current context are failed,
 * the job is finished and the run state is cleared.  A runtime-PM
 * reference is dropped on every exit path.
 */
void rot_work(struct work_struct *work)
{
	struct rot_dev *rot = container_of(work, struct rot_dev, ws);
	struct rot_ctx *ctx;
	unsigned long flags;
	struct vb2_buffer *src_vb, *dst_vb;

	spin_lock_irqsave(&rot->slock, flags);

	if (atomic_read(&rot->wdt.cnt) >= ROT_WDT_CNT) {
		rot_dbg("wakeup blocked process\n");
		ctx = v4l2_m2m_get_curr_priv(rot->m2m.m2m_dev);
		if (!ctx || !ctx->m2m_ctx) {
			rot_err("current ctx is NULL\n");
			goto wq_unlock;
		}
		src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

		/* Fail both buffers and let the m2m core move on. */
		if (src_vb && dst_vb) {
			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);

			v4l2_m2m_job_finish(rot->m2m.m2m_dev, ctx->m2m_ctx);
		}
		rot->m2m.ctx = NULL;
		atomic_set(&rot->wdt.cnt, 0);
		clear_bit(DEV_RUN, &rot->state);
		clear_bit(CTX_RUN, &ctx->flags);
	}

wq_unlock:
	spin_unlock_irqrestore(&rot->slock, flags);

	/* Balances a reference presumably taken when the job was
	 * scheduled -- confirm against the submit path. */
	pm_runtime_put(&rot->pdev->dev);
}
Пример #18
0
/*
 * jpeg_hx_irq - JPEG codec interrupt handler.
 *
 * Decodes the interrupt status, completes the in-flight buffers with
 * DONE or ERROR and finishes the current m2m job.  A status of 8 in
 * decode mode restarts the hardware instead of completing the job.
 *
 * Fix: v4l2_m2m_{src,dst}_buf_remove() can return NULL, but the
 * original passed the results straight to vb2_set_plane_payload() and
 * v4l2_m2m_buf_done().  Buffer completion is now guarded; the job is
 * still finished so the m2m core can make progress.  Also replaced the
 * 'ctx == 0' pointer comparison with the idiomatic !ctx.
 */
static irqreturn_t jpeg_hx_irq(int irq, void *priv)
{
	unsigned int int_status;
	struct vb2_buffer *src_vb, *dst_vb;
	struct jpeg_dev *ctrl = priv;
	struct jpeg_ctx *ctx;
	unsigned long payload_size = 0;

	if (ctrl->mode == ENCODING)
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_enc);
	else
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_dec);

	if (!ctx) {
		printk(KERN_ERR "ctx is null.\n");
		jpeg_hx_sw_reset(ctrl->reg_base);
		goto ctx_err;
	}

	spin_lock(&ctx->slock);
	int_status = jpeg_hx_int_pending(ctrl);

	jpeg_hx_clear_int_status(ctrl->reg_base, int_status);

	/* Decode restart request: resume the hardware, keep the job. */
	if (int_status == 8 && ctrl->mode == DECODING) {
		jpeg_hx_re_start(ctrl->reg_base);
		spin_unlock(&ctx->slock);
		return IRQ_HANDLED;
	}

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (int_status) {
		switch (int_status & 0xfff) {
		case 0xe20:
			ctrl->irq_ret = OK_ENC_OR_DEC;
			break;
		default:
			ctrl->irq_ret = ERR_UNKNOWN;
			break;
		}
	} else {
		ctrl->irq_ret = ERR_UNKNOWN;
	}

	if (!src_vb || !dst_vb) {
		printk(KERN_ERR "no buffer to complete\n");
	} else if (ctrl->irq_ret == OK_ENC_OR_DEC) {
		if (ctrl->mode == ENCODING) {
			/* Report the actual compressed stream size. */
			payload_size = jpeg_hx_get_stream_size(ctrl->reg_base);
			vb2_set_plane_payload(dst_vb, 0, payload_size);
			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
		} else if (int_status != 8 && ctrl->mode == DECODING) {
			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
		}
	} else {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}

	if (ctrl->mode == ENCODING)
		v4l2_m2m_job_finish(ctrl->m2m_dev_enc, ctx->m2m_ctx);
	else
		v4l2_m2m_job_finish(ctrl->m2m_dev_dec, ctx->m2m_ctx);

	spin_unlock(&ctx->slock);
ctx_err:
	return IRQ_HANDLED;
}
Пример #19
0
static irqreturn_t jpeg_irq(int irq, void *priv)
{
	unsigned int int_status;
	struct vb2_buffer *src_vb, *dst_vb;
	struct jpeg_dev *ctrl = priv;
	struct jpeg_ctx *ctx;

	spin_lock(&ctrl->slock);

	if (ctrl->mode == ENCODING)
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_enc);
	else
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_dec);

	if (ctx == 0) {
		printk(KERN_ERR "ctx is null.\n");
		int_status = jpeg_int_pending(ctrl);
		jpeg_sw_reset(ctrl->reg_base);
		goto ctx_err;
	}

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	int_status = jpeg_int_pending(ctrl);

	if (int_status) {
		switch (int_status & 0x1f) {
		case 0x1:
			ctrl->irq_ret = ERR_PROT;
			break;
		case 0x2:
			ctrl->irq_ret = OK_ENC_OR_DEC;
			break;
		case 0x4:
			ctrl->irq_ret = ERR_DEC_INVALID_FORMAT;
			break;
		case 0x8:
			ctrl->irq_ret = ERR_MULTI_SCAN;
			break;
		case 0x10:
			ctrl->irq_ret = ERR_FRAME;
			break;
		default:
			ctrl->irq_ret = ERR_UNKNOWN;
			break;
		}
	} else {
		ctrl->irq_ret = ERR_UNKNOWN;
	}

	if (ctrl->irq_ret == OK_ENC_OR_DEC) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
	} else {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}

	clear_bit(0, &ctx->dev->hw_run);
	if (ctrl->mode == ENCODING)
		v4l2_m2m_job_finish(ctrl->m2m_dev_enc, ctx->m2m_ctx);
	else
		v4l2_m2m_job_finish(ctrl->m2m_dev_dec, ctx->m2m_ctx);
ctx_err:
	spin_unlock(&ctrl->slock);
	return IRQ_HANDLED;
}
Пример #20
0
/*
 * rot_irq_handler - rotator interrupt handler.
 *
 * Stops the watchdog timer, decodes and clears the interrupt source,
 * then completes the in-flight buffers of the current context with
 * VB2_BUF_STATE_DONE and finishes (or defers, under suspend) the m2m
 * job.  Waiters blocked in suspend or abort paths are woken up.
 */
static irqreturn_t rot_irq_handler(int irq, void *priv)
{
	struct rot_dev *rot = priv;
	struct rot_ctx *ctx;
	struct vb2_buffer *src_vb, *dst_vb;
	unsigned int irq_src;

	spin_lock(&rot->slock);

	/* The hardware finished; disarm the watchdog timer. */
	clear_bit(DEV_RUN, &rot->state);
	if (timer_pending(&rot->wdt.timer))
		del_timer(&rot->wdt.timer);

	rot_hwget_irq_src(rot, &irq_src);
	rot_hwset_irq_clear(rot, &irq_src);

	/* Unexpected interrupt source: log loudly but keep going. */
	if (irq_src != ISR_PEND_DONE) {
		rot_err("####################\n");
		rot_err("set SFR illegally\n");
		rot_err("maybe the result is wrong\n");
		rot_err("####################\n");
		rot_dump_register(rot);
	}

	ctx = v4l2_m2m_get_curr_priv(rot->m2m.m2m_dev);
	if (!ctx || !ctx->m2m_ctx) {
		rot_err("current ctx is NULL\n");
		goto isr_unlock;
	}

	clear_bit(CTX_RUN, &ctx->flags);
	rot->m2m.ctx = NULL;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);

		/* Under suspend, don't start the next job -- just wake
		 * the suspend waiter. */
		if (test_bit(DEV_SUSPEND, &rot->state)) {
			rot_dbg("wake up blocked process by suspend\n");
			wake_up(&rot->irq.wait);
		} else {
			v4l2_m2m_job_finish(rot->m2m.m2m_dev, ctx->m2m_ctx);
		}

		/* Wake up from CTX_ABORT state */
		if (test_and_clear_bit(CTX_ABORT, &ctx->flags))
			wake_up(&rot->irq.wait);

		queue_work(rot->wq, &rot->ws);
	} else {
		rot_err("failed to get the buffer done\n");
	}

isr_unlock:
	spin_unlock(&rot->slock);

	return IRQ_HANDLED;
}