Example #1
File: g2d.c Project: 7799/linux
static irqreturn_t g2d_isr(int irq, void *prv)
{
	struct g2d_dev *dev = prv;
	struct g2d_ctx *ctx = dev->curr;
	struct vb2_buffer *src, *dst;

	g2d_clear_int(dev);
	clk_disable(dev->gate);

	BUG_ON(ctx == NULL);

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	BUG_ON(src == NULL);
	BUG_ON(dst == NULL);

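	/* The destination buffer inherits the timecode, timestamp and
	 * timestamp-source flags of the source buffer. */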
	dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
	dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
	dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->v4l2_buf.flags |=
		src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);

	dev->curr = NULL;
	wake_up(&dev->irq_queue);
	return IRQ_HANDLED;
}
Example #2
static irqreturn_t g2d_isr(int irq, void *prv)
{
    struct g2d_dev *dev = prv;
    struct g2d_ctx *ctx = dev->curr;
    struct vb2_buffer *src, *dst;

    g2d_clear_int(dev);
    clk_disable(dev->gate);

    BUG_ON(ctx == NULL);

    src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
    dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

    BUG_ON(src == NULL);
    BUG_ON(dst == NULL);

    v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
    v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
    v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);

    dev->curr = NULL;
    wake_up(&dev->irq_queue);
    return IRQ_HANDLED;
}
Example #3
void gsc_op_timer_handler(unsigned long arg)
{
	struct gsc_dev *gsc = (struct gsc_dev *)arg;
	struct gsc_ctx *ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	struct vb2_buffer *src_vb, *dst_vb;

	if (!test_bit(ST_M2M_RUN, &gsc->state)) {
		gsc_warn("gsc state is 0x%lx", gsc->state);
		return;
	}

	gsc_dump_registers(gsc);

	clear_bit(ST_M2M_RUN, &gsc->state);
	pm_runtime_put(&gsc->pdev->dev);

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}
	gsc_err("GSCALER[%d] interrupt hasn't been triggered", gsc->id);
	gsc_err("erro ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
}
Example #4
static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	if (WARN(!ctx || !ctx->fh.m2m_ctx, "Null hardware context\n"))
		return;

	dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);

	src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	if (src_vb && dst_vb) {
		dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
		dst_vb->timecode = src_vb->timecode;
		dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		dst_vb->flags |= src_vb->flags &
					  V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->bdisp_dev->m2m.m2m_dev,
				    ctx->fh.m2m_ctx);
	}
}
Example #5
static void jpeg_watchdog_worker(struct work_struct *work)
{
	struct jpeg_dev *dev;
	struct jpeg_ctx *ctx;
	unsigned long flags;
	struct vb2_buffer *src_vb, *dst_vb;

	printk(KERN_DEBUG "jpeg_watchdog_worker\n");
	dev = container_of(work, struct jpeg_dev, watchdog_work);

	spin_lock_irqsave(&ctx->slock, flags);
	clear_bit(0, &dev->hw_run);
	if (dev->mode == ENCODING)
		ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev_enc);
	else
		ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev_dec);

	if (ctx) {
		src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
		if (dev->mode == ENCODING)
			v4l2_m2m_job_finish(dev->m2m_dev_enc, ctx->m2m_ctx);
		else
			v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
	} else {
		printk(KERN_ERR "watchdog_ctx is NULL\n");
	}

	spin_unlock_irqrestore(&ctx->slock, flags);
}
Example #6
void gsc_op_timer_handler(unsigned long arg)
{
	struct gsc_dev *gsc = (struct gsc_dev *)arg;
	struct gsc_ctx *ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	struct vb2_buffer *src_vb, *dst_vb;
#ifdef GSC_PERF
	gsc->end_time = sched_clock();
	gsc_err("expire time: %llu\n", gsc->end_time - gsc->start_time);
#endif
	gsc_dump_registers(gsc);
	exynos_iommu_dump_status(&gsc->pdev->dev);

	clear_bit(ST_M2M_RUN, &gsc->state);
	pm_runtime_put(&gsc->pdev->dev);
	gsc->runtime_put_cnt++;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}
	gsc_err("GSCALER[%d] interrupt hasn't been triggered", gsc->id);
	gsc_err("erro ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
}
Example #7
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	if (!ctx || !ctx->m2m_ctx)
		return;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
		dst_vb->timecode = src_vb->timecode;
		dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		dst_vb->flags |=
			src_vb->flags
			& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
				    ctx->m2m_ctx);
	}
}
Example #8
static void rockchip_vpu_job_finish(struct rockchip_vpu_dev *vpu,
				    struct rockchip_vpu_ctx *ctx,
				    unsigned int bytesused,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src, *dst;
	size_t avail_size;

	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);
	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	if (WARN_ON(!src))
		return;
	if (WARN_ON(!dst))
		return;

	src->sequence = ctx->sequence_out++;
	dst->sequence = ctx->sequence_cap++;

	dst->field = src->field;
	if (src->flags & V4L2_BUF_FLAG_TIMECODE)
		dst->timecode = src->timecode;
	dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
	dst->flags &= ~(V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
			V4L2_BUF_FLAG_TIMECODE);
	dst->flags |= src->flags & (V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
				    V4L2_BUF_FLAG_TIMECODE);

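	/* Copy the encoded payload from the bounce buffer (if one is used)
	 * right after the pre-written header, or flag the result as an error
	 * if the payload does not fit into the destination plane. */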
	avail_size = vb2_plane_size(&dst->vb2_buf, 0) -
		     ctx->vpu_dst_fmt->header_size;
	if (bytesused <= avail_size) {
		if (ctx->bounce_buf) {
			memcpy(vb2_plane_vaddr(&dst->vb2_buf, 0) +
			       ctx->vpu_dst_fmt->header_size,
			       ctx->bounce_buf, bytesused);
		}
		dst->vb2_buf.planes[0].bytesused =
			ctx->vpu_dst_fmt->header_size + bytesused;
	} else {
		result = VB2_BUF_STATE_ERROR;
	}

	v4l2_m2m_buf_done(src, result);
	v4l2_m2m_buf_done(dst, result);

	v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
}
Example #9
static int mtk_venc_encode_header(void *priv)
{
	struct mtk_vcodec_ctx *ctx = priv;
	int ret;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	struct mtk_vcodec_mem bs_buf;
	struct venc_done_result enc_result;

	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (!dst_buf) {
		mtk_v4l2_debug(1, "No dst buffer");
		return -EINVAL;
	}

	bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
	bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
	bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;

	mtk_v4l2_debug(1,
			"[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
			ctx->id,
			dst_buf->vb2_buf.index, bs_buf.va,
			(u64)bs_buf.dma_addr,
			bs_buf.size);

	ret = venc_if_encode(ctx,
			VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
			NULL, &bs_buf, &enc_result);

	if (ret) {
		dst_buf->vb2_buf.planes[0].bytesused = 0;
		ctx->state = MTK_STATE_ABORT;
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
		mtk_v4l2_err("venc_if_encode failed=%d", ret);
		return -EINVAL;
	}
	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (src_buf) {
		dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
		dst_buf->timecode = src_buf->timecode;
	} else {
		mtk_v4l2_err("No timestamp for the header buffer.");
	}

	ctx->state = MTK_STATE_HEADER;
	dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);

	return 0;
}
Example #10
static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
		src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
	}

	while (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0) {
		dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}
}
Example #11
static void hva_run_work(struct work_struct *work)
{
	struct hva_ctx *ctx = container_of(work, struct hva_ctx, run_work);
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	const struct hva_enc *enc = ctx->enc;
	struct hva_frame *frame;
	struct hva_stream *stream;
	int ret;

	/* protect instance against reentrancy */
	mutex_lock(&ctx->lock);

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
	hva_dbg_perf_begin(ctx);
#endif

	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	frame = to_hva_frame(src_buf);
	stream = to_hva_stream(dst_buf);
	frame->vbuf.sequence = ctx->frame_num++;

	ret = enc->encode(ctx, frame, stream);

	vb2_set_plane_payload(&dst_buf->vb2_buf, 0, stream->bytesused);
	if (ret) {
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
	} else {
		/* propagate frame timestamp */
		dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
		dst_buf->field = V4L2_FIELD_NONE;
		dst_buf->sequence = ctx->stream_num - 1;

		ctx->encoded_frames++;

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
		hva_dbg_perf_end(ctx, stream);
#endif

		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
	}

	mutex_unlock(&ctx->lock);

	v4l2_m2m_job_finish(ctx->hva_dev->m2m_dev, ctx->fh.m2m_ctx);
}
Example #12
static void hva_stop_streaming(struct vb2_queue *vq)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = ctx_to_dev(ctx);
	const struct hva_enc *enc = ctx->enc;
	struct vb2_v4l2_buffer *vbuf;

	dev_dbg(dev, "%s %s stop streaming\n", ctx->name,
		to_type_str(vq->type));

	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		/* return of all pending buffers to vb2 (in error state) */
		ctx->frame_num = 0;
		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
	} else {
		/* return of all pending buffers to vb2 (in error state) */
		ctx->stream_num = 0;
		while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
	}

	if ((V4L2_TYPE_IS_OUTPUT(vq->type) &&
	     vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) ||
	    (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	     vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) {
		dev_dbg(dev, "%s %s out=%d cap=%d\n",
			ctx->name, to_type_str(vq->type),
			vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q),
			vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q));
		return;
	}

	/* close encoder when both stop_streaming have been called */
	if (enc) {
		dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
		enc->close(ctx);
		ctx->enc = NULL;

		/* clear instance context in instances array */
		hva->instances[ctx->id] = NULL;
		hva->nb_of_instances--;
	}

	ctx->aborting = false;
}
Example #13
void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
{
    struct vb2_buffer *src_vb, *dst_vb;

    if (!ctx || !ctx->m2m_ctx)
        return;

    src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
    dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

    if (src_vb && dst_vb) {
        v4l2_m2m_buf_done(src_vb, vb_state);
        v4l2_m2m_buf_done(dst_vb, vb_state);
        v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
                            ctx->m2m_ctx);
    }
}
Example #14
void gsc_op_timer_handler(unsigned long arg)
{
	struct gsc_ctx *ctx = (struct gsc_ctx *)arg;
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct vb2_buffer *src_vb, *dst_vb;

	clear_bit(ST_M2M_RUN, &gsc->state);
	pm_runtime_put_sync(&gsc->pdev->dev);

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}
	gsc_err("GSCALER[%d] interrupt hasn't been triggered", gsc->id);
	gsc_err("erro ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
}
Example #15
/*
 * msm_jpegdma_isr_processing_done - Invoked by dma_hw when processing is done.
 * @dma: Pointer to the dma device.
 */
void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
{
	struct vb2_buffer *src_buf;
	struct vb2_buffer *dst_buf;
	struct jpegdma_ctx *ctx;

	mutex_lock(&dma->lock);
	ctx = v4l2_m2m_get_curr_priv(dma->m2m_dev);
	if (ctx) {
		mutex_lock(&ctx->lock);
		ctx->plane_idx++;
		if (ctx->plane_idx >= formats[ctx->format_idx].num_planes) {
			src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
			dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
			if (src_buf == NULL || dst_buf == NULL) {
				dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
				mutex_unlock(&ctx->lock);
				mutex_unlock(&dma->lock);
				return;
			}
			complete_all(&ctx->completion);
			ctx->plane_idx = 0;

			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
			v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
			v4l2_m2m_job_finish(ctx->jdma_device->m2m_dev,
				ctx->m2m_ctx);
		} else {
			dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
			src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
			if (src_buf == NULL || dst_buf == NULL) {
				dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
				mutex_unlock(&ctx->lock);
				mutex_unlock(&dma->lock);
				return;
			}
			msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
		}
		mutex_unlock(&ctx->lock);
	}
	mutex_unlock(&dma->lock);
}
Example #16
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
	struct vb2_buffer *src_vb, *dst_vb;

	if (!ctx || !ctx->m2m_ctx)
		return;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;
		dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
				    ctx->m2m_ctx);
	}
}
Example #17
static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct bdisp_ctx *ctx = q->drv_priv;
	struct vb2_v4l2_buffer *buf;
	int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev);

	if (ret < 0) {
		dev_err(ctx->bdisp_dev->dev, "failed to set runtime PM\n");

		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
			while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
				v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
		} else {
			while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
				v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
		}

		return ret;
	}

	return 0;
}
Example #18
static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
{
	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	int ret;

	mtk_v4l2_debug(2, "[%d]-> type=%d", ctx->id, q->type);

	if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
			dst_buf->vb2_buf.planes[0].bytesused = 0;
			v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
		}
	} else {
		while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
	}

	if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
	     vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q)) ||
	    (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
	     vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q))) {
		mtk_v4l2_debug(1, "[%d]-> q type %d out=%d cap=%d",
			       ctx->id, q->type,
			       vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q),
			       vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q));
		return;
	}

	/* Release the encoder if both streams are stopped. */
	ret = venc_if_deinit(ctx);
	if (ret)
		mtk_v4l2_err("venc_if_deinit failed=%d", ret);

	ctx->state = MTK_STATE_FREE;
}
Example #19
void rot_work(struct work_struct *work)
{
	struct rot_dev *rot = container_of(work, struct rot_dev, ws);
	struct rot_ctx *ctx;
	unsigned long flags;
	struct vb2_buffer *src_vb, *dst_vb;

	spin_lock_irqsave(&rot->slock, flags);

	if (atomic_read(&rot->wdt.cnt) >= ROT_WDT_CNT) {
		rot_dbg("wakeup blocked process\n");
		ctx = v4l2_m2m_get_curr_priv(rot->m2m.m2m_dev);
		if (!ctx || !ctx->m2m_ctx) {
			rot_err("current ctx is NULL\n");
			goto wq_unlock;
		}
		src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

		if (src_vb && dst_vb) {
			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);

			v4l2_m2m_job_finish(rot->m2m.m2m_dev, ctx->m2m_ctx);
		}
		rot->m2m.ctx = NULL;
		atomic_set(&rot->wdt.cnt, 0);
		clear_bit(DEV_RUN, &rot->state);
		clear_bit(CTX_RUN, &ctx->flags);
	}

wq_unlock:
	spin_unlock_irqrestore(&rot->slock, flags);

	pm_runtime_put(&rot->pdev->dev);
}
Example #20
/*
 * This function tries to clean all display buffers; the buffers will be
 * returned in display order.
 * Note that the buffers returned from the codec driver may still be in the
 * driver's reference list.
 */
static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
{
	struct vdec_fb *disp_frame_buffer = NULL;
	struct mtk_video_dec_buf *dstbuf;

	mtk_v4l2_debug(3, "[%d]", ctx->id);
	if (vdec_if_get_param(ctx,
			GET_PARAM_DISP_FRAME_BUFFER,
			&disp_frame_buffer)) {
		mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER",
			ctx->id);
		return NULL;
	}

	if (disp_frame_buffer == NULL) {
		mtk_v4l2_debug(3, "No display frame buffer");
		return NULL;
	}

	dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
				frame_buffer);
	mutex_lock(&ctx->lock);
	if (dstbuf->used) {
		vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0,
					ctx->picinfo.y_bs_sz);
		vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
					ctx->picinfo.c_bs_sz);

		dstbuf->ready_to_display = true;

		mtk_v4l2_debug(2,
				"[%d]status=%x queue id=%d to done_list %d",
				ctx->id, disp_frame_buffer->status,
				dstbuf->vb.vb2_buf.index,
				dstbuf->queued_in_vb2);

		v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
		ctx->decoded_frame_cnt++;
	}
	mutex_unlock(&ctx->lock);
	return &dstbuf->vb.vb2_buf;
}
Example #21
static irqreturn_t jpeg_hx_irq(int irq, void *priv)
{
	unsigned int int_status;
	struct vb2_buffer *src_vb, *dst_vb;
	struct jpeg_dev *ctrl = priv;
	struct jpeg_ctx *ctx;
	unsigned long payload_size = 0;

	if (ctrl->mode == ENCODING)
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_enc);
	else
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_dec);

	if (ctx == 0) {
		printk(KERN_ERR "ctx is null.\n");
		jpeg_hx_sw_reset(ctrl->reg_base);
		goto ctx_err;
	}

	spin_lock(&ctx->slock);
	int_status = jpeg_hx_int_pending(ctrl);

	jpeg_hx_clear_int_status(ctrl->reg_base, int_status);

	if (int_status == 8 && ctrl->mode == DECODING) {
		jpeg_hx_re_start(ctrl->reg_base);
		spin_unlock(&ctx->slock);
		return IRQ_HANDLED;
	}

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (int_status) {
		switch (int_status & 0xfff) {
		case 0xe20:
			ctrl->irq_ret = OK_ENC_OR_DEC;
			break;
		default:
			ctrl->irq_ret = ERR_UNKNOWN;
			break;
		}
	} else {
		ctrl->irq_ret = ERR_UNKNOWN;
	}

	if (ctrl->irq_ret == OK_ENC_OR_DEC) {
		if (ctrl->mode == ENCODING) {
			payload_size = jpeg_hx_get_stream_size(ctrl->reg_base);
			vb2_set_plane_payload(dst_vb, 0, payload_size);
			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
		} else if (int_status != 8 && ctrl->mode == DECODING) {
			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
		}
	} else {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}

	if (ctrl->mode == ENCODING)
		v4l2_m2m_job_finish(ctrl->m2m_dev_enc, ctx->m2m_ctx);
	else
		v4l2_m2m_job_finish(ctrl->m2m_dev_dec, ctx->m2m_ctx);

	spin_unlock(&ctx->slock);
ctx_err:
	return IRQ_HANDLED;
}
Example #22
static int hva_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = ctx_to_dev(ctx);
	struct vb2_v4l2_buffer *vbuf;
	int ret;
	unsigned int i;
	bool found = false;

	dev_dbg(dev, "%s %s start streaming\n", ctx->name,
		to_type_str(vq->type));

	/* open encoder when both start_streaming have been called */
	if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->cap_q_ctx.q))
			return 0;
	} else {
		if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->out_q_ctx.q))
			return 0;
	}

	/* store the instance context in the instances array */
	for (i = 0; i < HVA_MAX_INSTANCES; i++) {
		if (!hva->instances[i]) {
			hva->instances[i] = ctx;
			/* save the context identifier in the context */
			ctx->id = i;
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(dev, "%s maximum instances reached\n", ctx->name);
		ret = -ENOMEM;
		goto err;
	}

	hva->nb_of_instances++;

	if (!ctx->enc) {
		ret = hva_open_encoder(ctx,
				       ctx->streaminfo.streamformat,
				       ctx->frameinfo.pixelformat,
				       &ctx->enc);
		if (ret < 0)
			goto err_ctx;
	}

	return 0;

err_ctx:
	hva->instances[ctx->id] = NULL;
	hva->nb_of_instances--;
err:
	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		/* return of all pending buffers to vb2 (in queued state) */
		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
	} else {
		/* return of all pending buffers to vb2 (in queued state) */
		while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
	}

	return ret;
}
Example #23
static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
	struct venc_enc_param param;
	int ret;
	int i;

	/* Once the state turns into MTK_STATE_ABORT, we need stop_streaming
	 * to clear it.
	 */
	if ((ctx->state == MTK_STATE_ABORT) || (ctx->state == MTK_STATE_FREE)) {
		ret = -EIO;
		goto err_set_param;
	}

	/* Do the initialization when both start_streaming have been called */
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		if (!vb2_start_streaming_called(&ctx->m2m_ctx->cap_q_ctx.q))
			return 0;
	} else {
		if (!vb2_start_streaming_called(&ctx->m2m_ctx->out_q_ctx.q))
			return 0;
	}

	mtk_venc_set_param(ctx, &param);
	ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, &param);
	if (ret) {
		mtk_v4l2_err("venc_if_set_param failed=%d", ret);
		ctx->state = MTK_STATE_ABORT;
		goto err_set_param;
	}
	ctx->param_change = MTK_ENCODE_PARAM_NONE;

	if ((ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_H264) &&
	    (ctx->enc_params.seq_hdr_mode !=
				V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)) {
		ret = venc_if_set_param(ctx,
					VENC_SET_PARAM_PREPEND_HEADER,
					NULL);
		if (ret) {
			mtk_v4l2_err("venc_if_set_param failed=%d", ret);
			ctx->state = MTK_STATE_ABORT;
			goto err_set_param;
		}
		ctx->state = MTK_STATE_HEADER;
	}

	return 0;

err_set_param:
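	/* On failure, hand any buffers already owned by the driver back to
	 * vb2 in the QUEUED state. */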
	for (i = 0; i < q->num_buffers; ++i) {
		if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
			mtk_v4l2_debug(0, "[%d] id=%d, type=%d, %d -> VB2_BUF_STATE_QUEUED",
					ctx->id, i, q->type,
					(int)q->bufs[i]->state);
			v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
					VB2_BUF_STATE_QUEUED);
		}
	}

	return ret;
}
Example #24
/*
 * v4l2_m2m_streamoff() holds dev_mutex and waits for mtk_venc_worker()
 * to call v4l2_m2m_job_finish().
 * If mtk_venc_worker() tries to acquire dev_mutex, it will deadlock.
 * So this function must not try to acquire dev->dev_mutex.
 * This means v4l2 ioctls and mtk_venc_worker() can run at the same time.
 * mtk_venc_worker() should be carefully implemented to avoid bugs.
 */
static void mtk_venc_worker(struct work_struct *work)
{
	struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
				    encode_work);
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	struct venc_frm_buf frm_buf;
	struct mtk_vcodec_mem bs_buf;
	struct venc_done_result enc_result;
	int ret, i;

	/* Check dst_buf: it may have been removed in device_run to store the
	 * encode header, so we need to check dst_buf and call job_finish here
	 * to prevent recursion.
	 */
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (!dst_buf) {
		v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
		return;
	}

	src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	memset(&frm_buf, 0, sizeof(frm_buf));
	for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) {
		frm_buf.fb_addr[i].dma_addr =
				vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, i);
		frm_buf.fb_addr[i].size =
				(size_t)src_buf->vb2_buf.planes[i].length;
	}
	bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
	bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
	bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;

	mtk_v4l2_debug(2,
			"Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu",
			(u64)frm_buf.fb_addr[0].dma_addr,
			frm_buf.fb_addr[0].size,
			(u64)frm_buf.fb_addr[1].dma_addr,
			frm_buf.fb_addr[1].size,
			(u64)frm_buf.fb_addr[2].dma_addr,
			frm_buf.fb_addr[2].size);

	ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
			     &frm_buf, &bs_buf, &enc_result);

	dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
	dst_buf->timecode = src_buf->timecode;

	if (enc_result.is_key_frm)
		dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;

	if (ret) {
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
		dst_buf->vb2_buf.planes[0].bytesused = 0;
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
		mtk_v4l2_err("venc_if_encode failed=%d", ret);
	} else {
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
		dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
		mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
				 enc_result.bs_size);
	}

	v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);

	mtk_v4l2_debug(1, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>",
			src_buf->vb2_buf.index, dst_buf->vb2_buf.index, ret,
			enc_result.bs_size);
}
Example #25
static irqreturn_t jpeg_irq(int irq, void *priv)
{
	unsigned int int_status;
	struct vb2_buffer *src_vb, *dst_vb;
	struct jpeg_dev *ctrl = priv;
	struct jpeg_ctx *ctx;

	spin_lock(&ctrl->slock);

	if (ctrl->mode == ENCODING)
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_enc);
	else
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_dec);

	if (ctx == 0) {
		printk(KERN_ERR "ctx is null.\n");
		int_status = jpeg_int_pending(ctrl);
		jpeg_sw_reset(ctrl->reg_base);
		goto ctx_err;
	}

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	int_status = jpeg_int_pending(ctrl);

	if (int_status) {
		switch (int_status & 0x1f) {
		case 0x1:
			ctrl->irq_ret = ERR_PROT;
			break;
		case 0x2:
			ctrl->irq_ret = OK_ENC_OR_DEC;
			break;
		case 0x4:
			ctrl->irq_ret = ERR_DEC_INVALID_FORMAT;
			break;
		case 0x8:
			ctrl->irq_ret = ERR_MULTI_SCAN;
			break;
		case 0x10:
			ctrl->irq_ret = ERR_FRAME;
			break;
		default:
			ctrl->irq_ret = ERR_UNKNOWN;
			break;
		}
	} else {
		ctrl->irq_ret = ERR_UNKNOWN;
	}

	if (ctrl->irq_ret == OK_ENC_OR_DEC) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
	} else {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}

	clear_bit(0, &ctx->dev->hw_run);
	if (ctrl->mode == ENCODING)
		v4l2_m2m_job_finish(ctrl->m2m_dev_enc, ctx->m2m_ctx);
	else
		v4l2_m2m_job_finish(ctrl->m2m_dev_dec, ctx->m2m_ctx);
ctx_err:
	spin_unlock(&ctrl->slock);
	return IRQ_HANDLED;
}
Example #26
static irqreturn_t rot_irq_handler(int irq, void *priv)
{
	struct rot_dev *rot = priv;
	struct rot_ctx *ctx;
	struct vb2_buffer *src_vb, *dst_vb;
	unsigned int irq_src;

	spin_lock(&rot->slock);

	clear_bit(DEV_RUN, &rot->state);
	if (timer_pending(&rot->wdt.timer))
		del_timer(&rot->wdt.timer);

	rot_hwget_irq_src(rot, &irq_src);
	rot_hwset_irq_clear(rot, &irq_src);

	if (irq_src != ISR_PEND_DONE) {
		rot_err("####################\n");
		rot_err("set SFR illegally\n");
		rot_err("maybe the result is wrong\n");
		rot_err("####################\n");
		rot_dump_register(rot);
	}

	ctx = v4l2_m2m_get_curr_priv(rot->m2m.m2m_dev);
	if (!ctx || !ctx->m2m_ctx) {
		rot_err("current ctx is NULL\n");
		goto isr_unlock;
	}

	clear_bit(CTX_RUN, &ctx->flags);
	rot->m2m.ctx = NULL;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);

		if (test_bit(DEV_SUSPEND, &rot->state)) {
			rot_dbg("wake up blocked process by suspend\n");
			wake_up(&rot->irq.wait);
		} else {
			v4l2_m2m_job_finish(rot->m2m.m2m_dev, ctx->m2m_ctx);
		}

		/* Wake up from CTX_ABORT state */
		if (test_and_clear_bit(CTX_ABORT, &ctx->flags))
			wake_up(&rot->irq.wait);

		queue_work(rot->wq, &rot->ws);
	} else {
		rot_err("failed to get the buffer done\n");
	}

isr_unlock:
	spin_unlock(&rot->slock);

	return IRQ_HANDLED;
}
Example #27
static void mtk_vdec_worker(struct work_struct *work)
{
	struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
				decode_work);
	struct mtk_vcodec_dev *dev = ctx->dev;
	struct vb2_buffer *src_buf, *dst_buf;
	struct mtk_vcodec_mem buf;
	struct vdec_fb *pfb;
	bool res_chg = false;
	int ret;
	struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
	struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;

	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (src_buf == NULL) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
		return;
	}

	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	if (dst_buf == NULL) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
		return;
	}

	src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
	src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb);

	dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
	dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);

	pfb = &dst_buf_info->frame_buffer;
	pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
	pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
	pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;

	pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
	pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
	pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
	pfb->status = 0;
	mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);

	mtk_v4l2_debug(3,
			"id=%d Framebuf  pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
			dst_buf->index, pfb,
			pfb->base_y.va, &pfb->base_y.dma_addr,
			&pfb->base_c.dma_addr, pfb->base_y.size);

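	/*
	 * An empty "lastframe" buffer flushes the decoder; the capture buffer
	 * is returned with V4L2_BUF_FLAG_LAST set to signal the end of stream.
	 */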
	if (src_buf_info->lastframe) {
		mtk_v4l2_debug(1, "Got empty flush input buffer.");
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);

		/* update dst buf status */
		dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
		mutex_lock(&ctx->lock);
		dst_buf_info->used = false;
		mutex_unlock(&ctx->lock);

		vdec_if_decode(ctx, NULL, NULL, &res_chg);
		clean_display_buffer(ctx);
		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
		dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_LAST;
		v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
		clean_free_buffer(ctx);
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		return;
	}
	buf.va = vb2_plane_vaddr(src_buf, 0);
	buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
	buf.size = (size_t)src_buf->planes[0].bytesused;
	if (!buf.va) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
				ctx->id, src_buf->index);
		return;
	}
	mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
			ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
	dst_buf_info->vb.vb2_buf.timestamp
			= src_buf_info->vb.vb2_buf.timestamp;
	dst_buf_info->vb.timecode
			= src_buf_info->vb.timecode;
	mutex_lock(&ctx->lock);
	dst_buf_info->used = true;
	mutex_unlock(&ctx->lock);
	src_buf_info->used = true;

	ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);

	if (ret) {
		mtk_v4l2_err(
			" <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
			ctx->id,
			src_buf->index,
			buf.size,
			src_buf_info->vb.vb2_buf.timestamp,
			dst_buf->index,
			ret, res_chg);
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
	} else if (res_chg == false) {
		/*
		 * We only return the src buffer with VB2_BUF_STATE_DONE
		 * when decoding succeeds without a resolution change.
		 */
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
	}

	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	clean_display_buffer(ctx);
	clean_free_buffer(ctx);

	if (!ret && res_chg) {
		mtk_vdec_pic_info_update(ctx);
		/*
		 * On encountering a resolution change in the stream,
		 * the driver must first process and decode all
		 * remaining buffers from before the resolution change
		 * point, so flush the decoder here.
		 */
		mtk_vdec_flush_decoder(ctx);
		/*
		 * After all buffers containing decoded frames from
		 * before the resolution change point are ready to be
		 * dequeued on the CAPTURE queue, the driver sends a
		 * V4L2_EVENT_SOURCE_CHANGE event for source change
		 * type V4L2_EVENT_SRC_CH_RESOLUTION.
		 */
		mtk_vdec_queue_res_chg_event(ctx);
	}
	v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
}