Example #1
static void jpeg_watchdog_worker(struct work_struct *work)
{
	struct jpeg_dev *dev;
	struct jpeg_ctx *ctx;
	unsigned long flags;
	struct vb2_buffer *src_vb, *dst_vb;

	printk(KERN_DEBUG "jpeg_watchdog_worker\n");
	dev = container_of(work, struct jpeg_dev, watchdog_work);

	clear_bit(0, &dev->hw_run);
	if (dev->mode == ENCODING)
		ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev_enc);
	else
		ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev_dec);

	if (!ctx) {
		printk(KERN_ERR "watchdog_ctx is NULL\n");
		return;
	}

	/* take the per-context lock only after ctx is known to be valid */
	spin_lock_irqsave(&ctx->slock, flags);

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb)
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
	if (dst_vb)
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);

	if (dev->mode == ENCODING)
		v4l2_m2m_job_finish(dev->m2m_dev_enc, ctx->m2m_ctx);
	else
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);

	spin_unlock_irqrestore(&ctx->slock, flags);
}
Example #2
static irqreturn_t fimc_isr(int irq, void *priv)
{
    struct fimc_vid_buffer *src_buf, *dst_buf;
    struct fimc_dev *fimc = (struct fimc_dev *)priv;
    struct fimc_ctx *ctx;

    BUG_ON(!fimc);
    fimc_hw_clear_irq(fimc);

    spin_lock(&fimc->slock);

    if (test_and_clear_bit(ST_M2M_PEND, &fimc->state)) {
        ctx = v4l2_m2m_get_curr_priv(fimc->m2m.m2m_dev);
        if (!ctx || !ctx->m2m_ctx)
            goto isr_unlock;
        src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
        dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
        if (src_buf && dst_buf) {
            spin_lock(&fimc->irqlock);
            src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
            wake_up(&src_buf->vb.done);
            wake_up(&dst_buf->vb.done);
            spin_unlock(&fimc->irqlock);
            v4l2_m2m_job_finish(fimc->m2m.m2m_dev, ctx->m2m_ctx);
        }
    }

isr_unlock:
    spin_unlock(&fimc->slock);
    return IRQ_HANDLED;
}
Example #3
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	if (!ctx || !ctx->m2m_ctx)
		return;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
		dst_vb->timecode = src_vb->timecode;
		dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		dst_vb->flags |=
			src_vb->flags
			& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
				    ctx->m2m_ctx);
	}
}
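All of the examples in this collection follow the same completion pattern: remove the source and destination buffers from the mem2mem context, propagate the timestamp metadata from the source to the destination buffer, mark both buffers done (or in error), and finally call v4l2_m2m_job_finish() so the framework can schedule the next queued job. The sketch below condenses that pattern into a single helper. It is only an illustration under stated assumptions, not code from any of the listed drivers: struct my_dev, struct my_ctx and my_job_finish() are hypothetical placeholders, and only v4l2-mem2mem calls that already appear in the examples are used.

#include <linux/videodev2.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

/* Hypothetical driver structures, standing in for jpeg_dev/gsc_dev/etc. */
struct my_dev {
	struct v4l2_m2m_dev *m2m_dev;
};

struct my_ctx {
	struct v4l2_fh fh;	/* fh.m2m_ctx assumed to be set up at open() time */
	struct my_dev *dev;
};

static void my_job_finish(struct my_ctx *ctx, enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src, *dst;

	if (!ctx || !ctx->fh.m2m_ctx)
		return;

	/* Take the buffers the finished job was operating on off the queues. */
	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	if (!src || !dst)
		return;

	/* Propagate the timestamp, timecode and timestamp-source flags. */
	dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
	dst->timecode = src->timecode;
	dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	/* Return both buffers to userspace in the requested state... */
	v4l2_m2m_buf_done(src, state);
	v4l2_m2m_buf_done(dst, state);

	/* ...and let the m2m core schedule the next queued job. */
	v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
}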
Example #4
/*
 * msm_jpegdma_job_abort - Abort the DMA job.
 * @priv: Pointer to the dma context.
 */
static void msm_jpegdma_job_abort(void *priv)
{
	struct jpegdma_ctx *ctx = priv;

	msm_jpegdma_hw_abort(ctx->jdma_device);
	v4l2_m2m_job_finish(ctx->jdma_device->m2m_dev, ctx->m2m_ctx);
}
Example #5
static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	if (WARN(!ctx || !ctx->fh.m2m_ctx, "Null hardware context\n"))
		return;

	dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);

	src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	if (src_vb && dst_vb) {
		dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
		dst_vb->timecode = src_vb->timecode;
		dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		dst_vb->flags |= src_vb->flags &
					  V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->bdisp_dev->m2m.m2m_dev,
				    ctx->fh.m2m_ctx);
	}
}
Example #6
static irqreturn_t g2d_isr(int irq, void *prv)
{
    struct g2d_dev *dev = prv;
    struct g2d_ctx *ctx = dev->curr;
    struct vb2_buffer *src, *dst;

    g2d_clear_int(dev);
    clk_disable(dev->gate);

    BUG_ON(ctx == NULL);

    src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
    dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

    BUG_ON(src == NULL);
    BUG_ON(dst == NULL);

    v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
    v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
    v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);

    dev->curr = NULL;
    wake_up(&dev->irq_queue);
    return IRQ_HANDLED;
}
Example #7
File: g2d.c  Project: 7799/linux
static irqreturn_t g2d_isr(int irq, void *prv)
{
	struct g2d_dev *dev = prv;
	struct g2d_ctx *ctx = dev->curr;
	struct vb2_buffer *src, *dst;

	g2d_clear_int(dev);
	clk_disable(dev->gate);

	BUG_ON(ctx == NULL);

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	BUG_ON(src == NULL);
	BUG_ON(dst == NULL);

	dst->v4l2_buf.timecode = src->v4l2_buf.timecode;
	dst->v4l2_buf.timestamp = src->v4l2_buf.timestamp;
	dst->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->v4l2_buf.flags |=
		src->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);

	dev->curr = NULL;
	wake_up(&dev->irq_queue);
	return IRQ_HANDLED;
}
Example #8
static void rockchip_vpu_job_finish(struct rockchip_vpu_dev *vpu,
				    struct rockchip_vpu_ctx *ctx,
				    unsigned int bytesused,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src, *dst;
	size_t avail_size;

	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);
	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	if (WARN_ON(!src))
		return;
	if (WARN_ON(!dst))
		return;

	src->sequence = ctx->sequence_out++;
	dst->sequence = ctx->sequence_cap++;

	dst->field = src->field;
	if (src->flags & V4L2_BUF_FLAG_TIMECODE)
		dst->timecode = src->timecode;
	dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
	dst->flags &= ~(V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
			V4L2_BUF_FLAG_TIMECODE);
	dst->flags |= src->flags & (V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
				    V4L2_BUF_FLAG_TIMECODE);

	avail_size = vb2_plane_size(&dst->vb2_buf, 0) -
		     ctx->vpu_dst_fmt->header_size;
	if (bytesused <= avail_size) {
		if (ctx->bounce_buf) {
			memcpy(vb2_plane_vaddr(&dst->vb2_buf, 0) +
			       ctx->vpu_dst_fmt->header_size,
			       ctx->bounce_buf, bytesused);
		}
		dst->vb2_buf.planes[0].bytesused =
			ctx->vpu_dst_fmt->header_size + bytesused;
	} else {
		result = VB2_BUF_STATE_ERROR;
	}

	v4l2_m2m_buf_done(src, result);
	v4l2_m2m_buf_done(dst, result);

	v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
}
Example #9
static void hva_run_work(struct work_struct *work)
{
	struct hva_ctx *ctx = container_of(work, struct hva_ctx, run_work);
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	const struct hva_enc *enc = ctx->enc;
	struct hva_frame *frame;
	struct hva_stream *stream;
	int ret;

	/* protect instance against reentrancy */
	mutex_lock(&ctx->lock);

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
	hva_dbg_perf_begin(ctx);
#endif

	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	frame = to_hva_frame(src_buf);
	stream = to_hva_stream(dst_buf);
	frame->vbuf.sequence = ctx->frame_num++;

	ret = enc->encode(ctx, frame, stream);

	vb2_set_plane_payload(&dst_buf->vb2_buf, 0, stream->bytesused);
	if (ret) {
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
	} else {
		/* propagate frame timestamp */
		dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
		dst_buf->field = V4L2_FIELD_NONE;
		dst_buf->sequence = ctx->stream_num - 1;

		ctx->encoded_frames++;

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
		hva_dbg_perf_end(ctx, stream);
#endif

		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
	}

	mutex_unlock(&ctx->lock);

	v4l2_m2m_job_finish(ctx->hva_dev->m2m_dev, ctx->fh.m2m_ctx);
}
Example #10
void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
{
    struct vb2_buffer *src_vb, *dst_vb;

    if (!ctx || !ctx->m2m_ctx)
        return;

    src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
    dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

    if (src_vb && dst_vb) {
        v4l2_m2m_buf_done(src_vb, vb_state);
        v4l2_m2m_buf_done(dst_vb, vb_state);
        v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
                            ctx->m2m_ctx);
    }
}
Example #11
/*
 * msm_jpegdma_isr_processing_done - Invoked by dma_hw when processing is done.
 * @dma: Pointer to the dma device.
 */
void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
{
	struct vb2_buffer *src_buf;
	struct vb2_buffer *dst_buf;
	struct jpegdma_ctx *ctx;

	mutex_lock(&dma->lock);
	ctx = v4l2_m2m_get_curr_priv(dma->m2m_dev);
	if (ctx) {
		mutex_lock(&ctx->lock);
		ctx->plane_idx++;
		if (ctx->plane_idx >= formats[ctx->format_idx].num_planes) {
			src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
			dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
			if (src_buf == NULL || dst_buf == NULL) {
				dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
				mutex_unlock(&ctx->lock);
				mutex_unlock(&dma->lock);
				return;
			}
			complete_all(&ctx->completion);
			ctx->plane_idx = 0;

			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
			v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
			v4l2_m2m_job_finish(ctx->jdma_device->m2m_dev,
				ctx->m2m_ctx);
		} else {
			dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
			src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
			if (src_buf == NULL || dst_buf == NULL) {
				dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
				mutex_unlock(&ctx->lock);
				mutex_unlock(&dma->lock);
				return;
			}
			msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
		}
		mutex_unlock(&ctx->lock);
	}
	mutex_unlock(&dma->lock);
}
Example #12
static int rot_resume(struct device *dev)
{
	struct platform_device *pdev;
	struct rot_dev *rot;
	struct rot_ctx *ctx;

	pdev = to_platform_device(dev);
	rot = (struct rot_dev *)platform_get_drvdata(pdev);

	clear_bit(DEV_SUSPEND, &rot->state);

	ctx = rot->m2m.ctx;
	if (ctx != NULL) {
		clear_bit(CTX_SUSPEND, &ctx->flags);
		rot->m2m.ctx = NULL;
		v4l2_m2m_job_finish(rot->m2m.m2m_dev, ctx->m2m_ctx);
	}

	return 0;
}
Example #13
File: gsc-m2m.c  Project: 03199618/linux
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
	struct vb2_buffer *src_vb, *dst_vb;

	if (!ctx || !ctx->m2m_ctx)
		return;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		src_vb->v4l2_buf.timestamp = dst_vb->v4l2_buf.timestamp;
		src_vb->v4l2_buf.timecode = dst_vb->v4l2_buf.timecode;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
				    ctx->m2m_ctx);
	}
}
Example #14
void rot_work(struct work_struct *work)
{
	struct rot_dev *rot = container_of(work, struct rot_dev, ws);
	struct rot_ctx *ctx;
	unsigned long flags;
	struct vb2_buffer *src_vb, *dst_vb;

	spin_lock_irqsave(&rot->slock, flags);

	if (atomic_read(&rot->wdt.cnt) >= ROT_WDT_CNT) {
		rot_dbg("wakeup blocked process\n");
		ctx = v4l2_m2m_get_curr_priv(rot->m2m.m2m_dev);
		if (!ctx || !ctx->m2m_ctx) {
			rot_err("current ctx is NULL\n");
			goto wq_unlock;
		}
		src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

		if (src_vb && dst_vb) {
			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);

			v4l2_m2m_job_finish(rot->m2m.m2m_dev, ctx->m2m_ctx);
		}
		rot->m2m.ctx = NULL;
		atomic_set(&rot->wdt.cnt, 0);
		clear_bit(DEV_RUN, &rot->state);
		clear_bit(CTX_RUN, &ctx->flags);
	}

wq_unlock:
	spin_unlock_irqrestore(&rot->slock, flags);

	pm_runtime_put(&rot->pdev->dev);
}
Example #15
/*
 * v4l2_m2m_streamoff() holds dev_mutex and waits for mtk_venc_worker()
 * to call v4l2_m2m_job_finish().
 * If mtk_venc_worker() tries to acquire dev_mutex, it will deadlock.
 * So this function must not try to acquire dev->dev_mutex.
 * This means v4l2 ioctls and mtk_venc_worker() can run at the same time.
 * mtk_venc_worker() should be carefully implemented to avoid bugs.
 */
static void mtk_venc_worker(struct work_struct *work)
{
	struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
				    encode_work);
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	struct venc_frm_buf frm_buf;
	struct mtk_vcodec_mem bs_buf;
	struct venc_done_result enc_result;
	int ret, i;

	/* dst_buf may already have been removed in device_run()
	 * to store the encode header, so check dst_buf and call
	 * job_finish() here to prevent recursion
	 */
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (!dst_buf) {
		v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
		return;
	}

	src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	memset(&frm_buf, 0, sizeof(frm_buf));
	for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) {
		frm_buf.fb_addr[i].dma_addr =
				vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, i);
		frm_buf.fb_addr[i].size =
				(size_t)src_buf->vb2_buf.planes[i].length;
	}
	bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
	bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
	bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;

	mtk_v4l2_debug(2,
			"Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu",
			(u64)frm_buf.fb_addr[0].dma_addr,
			frm_buf.fb_addr[0].size,
			(u64)frm_buf.fb_addr[1].dma_addr,
			frm_buf.fb_addr[1].size,
			(u64)frm_buf.fb_addr[2].dma_addr,
			frm_buf.fb_addr[2].size);

	ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
			     &frm_buf, &bs_buf, &enc_result);

	dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
	dst_buf->timecode = src_buf->timecode;

	if (enc_result.is_key_frm)
		dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;

	if (ret) {
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
		dst_buf->vb2_buf.planes[0].bytesused = 0;
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
		mtk_v4l2_err("venc_if_encode failed=%d", ret);
	} else {
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
		dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
		mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
				 enc_result.bs_size);
	}

	v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);

	mtk_v4l2_debug(1, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>",
			src_buf->vb2_buf.index, dst_buf->vb2_buf.index, ret,
			enc_result.bs_size);
}
Example #16
static void mtk_vdec_worker(struct work_struct *work)
{
	struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
				decode_work);
	struct mtk_vcodec_dev *dev = ctx->dev;
	struct vb2_buffer *src_buf, *dst_buf;
	struct mtk_vcodec_mem buf;
	struct vdec_fb *pfb;
	bool res_chg = false;
	int ret;
	struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
	struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;

	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (src_buf == NULL) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
		return;
	}

	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	if (dst_buf == NULL) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
		return;
	}

	src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
	src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb);

	dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
	dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);

	pfb = &dst_buf_info->frame_buffer;
	pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
	pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
	pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;

	pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
	pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
	pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
	pfb->status = 0;
	mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);

	mtk_v4l2_debug(3,
			"id=%d Framebuf  pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
			dst_buf->index, pfb,
			pfb->base_y.va, &pfb->base_y.dma_addr,
			&pfb->base_c.dma_addr, pfb->base_y.size);

	if (src_buf_info->lastframe) {
		mtk_v4l2_debug(1, "Got empty flush input buffer.");
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);

		/* update dst buf status */
		dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
		mutex_lock(&ctx->lock);
		dst_buf_info->used = false;
		mutex_unlock(&ctx->lock);

		vdec_if_decode(ctx, NULL, NULL, &res_chg);
		clean_display_buffer(ctx);
		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
		dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_LAST;
		v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
		clean_free_buffer(ctx);
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		return;
	}
	buf.va = vb2_plane_vaddr(src_buf, 0);
	buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
	buf.size = (size_t)src_buf->planes[0].bytesused;
	if (!buf.va) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
				ctx->id, src_buf->index);
		return;
	}
	mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
			ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
	dst_buf_info->vb.vb2_buf.timestamp
			= src_buf_info->vb.vb2_buf.timestamp;
	dst_buf_info->vb.timecode
			= src_buf_info->vb.timecode;
	mutex_lock(&ctx->lock);
	dst_buf_info->used = true;
	mutex_unlock(&ctx->lock);
	src_buf_info->used = true;

	ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);

	if (ret) {
		mtk_v4l2_err(
			" <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
			ctx->id,
			src_buf->index,
			buf.size,
			src_buf_info->vb.vb2_buf.timestamp,
			dst_buf->index,
			ret, res_chg);
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
	} else if (res_chg == false) {
		/*
		 * We only return the src buffer with VB2_BUF_STATE_DONE
		 * when decoding succeeds without a resolution change
		 */
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
	}

	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	clean_display_buffer(ctx);
	clean_free_buffer(ctx);

	if (!ret && res_chg) {
		mtk_vdec_pic_info_update(ctx);
		/*
		 * On encountering a resolution change in the stream,
		 * the driver must first process and decode all
		 * remaining buffers from before the resolution change
		 * point, so flush the decoder here.
		 */
		mtk_vdec_flush_decoder(ctx);
		/*
		 * After all buffers containing decoded frames from
		 * before the resolution change point are ready to be
		 * dequeued on the CAPTURE queue, the driver sends a
		 * V4L2_EVENT_SOURCE_CHANGE event for source change
		 * type V4L2_EVENT_SRC_CH_RESOLUTION.
		 */
		mtk_vdec_queue_res_chg_event(ctx);
	}
	v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
}
Example #17
static irqreturn_t rot_irq_handler(int irq, void *priv)
{
	struct rot_dev *rot = priv;
	struct rot_ctx *ctx;
	struct vb2_buffer *src_vb, *dst_vb;
	unsigned int irq_src;

	spin_lock(&rot->slock);

	clear_bit(DEV_RUN, &rot->state);
	if (timer_pending(&rot->wdt.timer))
		del_timer(&rot->wdt.timer);

	rot_hwget_irq_src(rot, &irq_src);
	rot_hwset_irq_clear(rot, &irq_src);

	if (irq_src != ISR_PEND_DONE) {
		rot_err("####################\n");
		rot_err("set SFR illegally\n");
		rot_err("maybe the result is wrong\n");
		rot_err("####################\n");
		rot_dump_register(rot);
	}

	ctx = v4l2_m2m_get_curr_priv(rot->m2m.m2m_dev);
	if (!ctx || !ctx->m2m_ctx) {
		rot_err("current ctx is NULL\n");
		goto isr_unlock;
	}

	clear_bit(CTX_RUN, &ctx->flags);
	rot->m2m.ctx = NULL;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);

		if (test_bit(DEV_SUSPEND, &rot->state)) {
			rot_dbg("wake up blocked process by suspend\n");
			wake_up(&rot->irq.wait);
		} else {
			v4l2_m2m_job_finish(rot->m2m.m2m_dev, ctx->m2m_ctx);
		}

		/* Wake up from CTX_ABORT state */
		if (test_and_clear_bit(CTX_ABORT, &ctx->flags))
			wake_up(&rot->irq.wait);

		queue_work(rot->wq, &rot->ws);
	} else {
		rot_err("failed to get the buffer done\n");
	}

isr_unlock:
	spin_unlock(&rot->slock);

	return IRQ_HANDLED;
}
Example #18
static irqreturn_t jpeg_irq(int irq, void *priv)
{
	unsigned int int_status;
	struct vb2_buffer *src_vb, *dst_vb;
	struct jpeg_dev *ctrl = priv;
	struct jpeg_ctx *ctx;

	spin_lock(&ctrl->slock);

	if (ctrl->mode == ENCODING)
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_enc);
	else
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_dec);

	if (ctx == 0) {
		printk(KERN_ERR "ctx is null.\n");
		int_status = jpeg_int_pending(ctrl);
		jpeg_sw_reset(ctrl->reg_base);
		goto ctx_err;
	}

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	int_status = jpeg_int_pending(ctrl);

	if (int_status) {
		switch (int_status & 0x1f) {
		case 0x1:
			ctrl->irq_ret = ERR_PROT;
			break;
		case 0x2:
			ctrl->irq_ret = OK_ENC_OR_DEC;
			break;
		case 0x4:
			ctrl->irq_ret = ERR_DEC_INVALID_FORMAT;
			break;
		case 0x8:
			ctrl->irq_ret = ERR_MULTI_SCAN;
			break;
		case 0x10:
			ctrl->irq_ret = ERR_FRAME;
			break;
		default:
			ctrl->irq_ret = ERR_UNKNOWN;
			break;
		}
	} else {
		ctrl->irq_ret = ERR_UNKNOWN;
	}

	if (ctrl->irq_ret == OK_ENC_OR_DEC) {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
	} else {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}

	clear_bit(0, &ctx->dev->hw_run);
	if (ctrl->mode == ENCODING)
		v4l2_m2m_job_finish(ctrl->m2m_dev_enc, ctx->m2m_ctx);
	else
		v4l2_m2m_job_finish(ctrl->m2m_dev_dec, ctx->m2m_ctx);
ctx_err:
	spin_unlock(&ctrl->slock);
	return IRQ_HANDLED;
}
Example #19
static irqreturn_t jpeg_hx_irq(int irq, void *priv)
{
	unsigned int int_status;
	struct vb2_buffer *src_vb, *dst_vb;
	struct jpeg_dev *ctrl = priv;
	struct jpeg_ctx *ctx;
	unsigned long payload_size = 0;

	if (ctrl->mode == ENCODING)
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_enc);
	else
		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_dec);

	if (ctx == 0) {
		printk(KERN_ERR "ctx is null.\n");
		jpeg_hx_sw_reset(ctrl->reg_base);
		goto ctx_err;
	}

	spin_lock(&ctx->slock);
	int_status = jpeg_hx_int_pending(ctrl);

	jpeg_hx_clear_int_status(ctrl->reg_base, int_status);

	if (int_status == 8 && ctrl->mode == DECODING) {
		jpeg_hx_re_start(ctrl->reg_base);
		spin_unlock(&ctx->slock);
		return IRQ_HANDLED;
	}

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (int_status) {
		switch (int_status & 0xfff) {
		case 0xe20:
			ctrl->irq_ret = OK_ENC_OR_DEC;
			break;
		default:
			ctrl->irq_ret = ERR_UNKNOWN;
			break;
		}
	} else {
		ctrl->irq_ret = ERR_UNKNOWN;
	}

	if (ctrl->irq_ret == OK_ENC_OR_DEC) {
		if (ctrl->mode == ENCODING) {
			payload_size = jpeg_hx_get_stream_size(ctrl->reg_base);
			vb2_set_plane_payload(dst_vb, 0, payload_size);
			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
		} else if (int_status != 8 && ctrl->mode == DECODING) {
			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
		}
	} else {
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}

	if (ctrl->mode == ENCODING)
		v4l2_m2m_job_finish(ctrl->m2m_dev_enc, ctx->m2m_ctx);
	else
		v4l2_m2m_job_finish(ctrl->m2m_dev_dec, ctx->m2m_ctx);

	spin_unlock(&ctx->slock);
ctx_err:
	return IRQ_HANDLED;
}