Example #1
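rot_set_frame_addr() fetches the next source and destination buffers from the mem2mem context and programs their Y/Cb/Cr plane addresses into the rotator hardware.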
void rot_set_frame_addr(struct rot_ctx *ctx)
{
	struct vb2_buffer *vb;
	struct rot_frame *s_frame, *d_frame;
	struct rot_dev *rot = ctx->rot_dev;

	s_frame = &ctx->s_frame;
	d_frame = &ctx->d_frame;

	/* set source buffer address */
	vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	rot_get_bufaddr(rot, vb, s_frame, &s_frame->addr);

	rot_hwset_src_addr(rot, s_frame->addr.y, ROT_ADDR_Y);
	rot_hwset_src_addr(rot, s_frame->addr.cb, ROT_ADDR_CB);
	rot_hwset_src_addr(rot, s_frame->addr.cr, ROT_ADDR_CR);

	/* set destination buffer address */
	vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	rot_get_bufaddr(rot, vb, d_frame, &d_frame->addr);

	rot_hwset_dst_addr(rot, d_frame->addr.y, ROT_ADDR_Y);
	rot_hwset_dst_addr(rot, d_frame->addr.cb, ROT_ADDR_CB);
	rot_hwset_dst_addr(rot, d_frame->addr.cr, ROT_ADDR_CR);
}
Example #2
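device_run() for a G2D engine: it fetches the next source/destination buffers, programs sizes, DMA addresses, ROP4 and flip settings under a spinlock, enables stretching when the source and destination sizes differ, and starts the hardware.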
static void device_run(void *prv)
{
    struct g2d_ctx *ctx = prv;
    struct g2d_dev *dev = ctx->dev;
    struct vb2_buffer *src, *dst;
    unsigned long flags;
    u32 cmd = 0;

    dev->curr = ctx;

    src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
    dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);

    clk_enable(dev->gate);
    g2d_reset(dev);

    spin_lock_irqsave(&dev->ctrl_lock, flags);

    g2d_set_src_size(dev, &ctx->in);
    g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));

    g2d_set_dst_size(dev, &ctx->out);
    g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));

    g2d_set_rop4(dev, ctx->rop);
    g2d_set_flip(dev, ctx->flip);

    if (ctx->in.c_width != ctx->out.c_width ||
            ctx->in.c_height != ctx->out.c_height)
        cmd |= g2d_cmd_stretch(1);
    g2d_set_cmd(dev, cmd);
    g2d_start(dev);

    spin_unlock_irqrestore(&dev->ctrl_lock, flags);
}
Example #3
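gsc_fill_addr() checks that the queued source and destination buffers carry the plane count expected by the configured formats before preparing their DMA addresses.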
int gsc_fill_addr(struct gsc_ctx *ctx)
{
	struct gsc_frame *s_frame, *d_frame;
	struct vb2_buffer *vb = NULL;
	int ret = 0;

	s_frame = &ctx->s_frame;
	d_frame = &ctx->d_frame;

	vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (vb->num_planes != s_frame->fmt->num_planes) {
		gsc_err(" vb(%p) planes=%d s_frame(%p) planes=%d\n",
			vb, vb->num_planes, s_frame, s_frame->fmt->num_planes);
		return -EINVAL;
	}
	ret = gsc_prepare_addr(ctx, vb, s_frame, &s_frame->addr);
	if (ret)
		return ret;

	vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	if (vb->num_planes != d_frame->fmt->num_planes) {
		gsc_err("vb(%p) planes=%d d_frame(%p) planes=%d\n",
			vb, vb->num_planes, d_frame, d_frame->fmt->num_planes);
		return -EINVAL;
	}
	ret = gsc_prepare_addr(ctx, vb, d_frame, &d_frame->addr);

	return ret;
}
Example #4
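A JPEG decode run: after a software reset the driver programs output format, bitstream size, strides and interrupts, sets the bitstream (source) and frame (destination) buffer addresses from the next queued buffers, picks a 1/2 or 1/4 downscaling ratio when the dimensions call for it, and starts decoding.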
static void jpeg_hx_device_dec_run(void *priv)
{
	struct jpeg_ctx *ctx = priv;
	struct jpeg_dev *dev = ctx->dev;
	struct jpeg_dec_param dec_param;
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ctx->slock, flags);

	dev->mode = DECODING;
	dec_param = ctx->param.dec_param;

	jpeg_hx_sw_reset(dev->reg_base);
	jpeg_hx_set_dma_num(dev->reg_base);
	jpeg_hx_clk_on(dev->reg_base);
	jpeg_hx_clk_set(dev->reg_base, 1);
	jpeg_hx_set_dec_out_fmt(dev->reg_base, dec_param.out_fmt);
	jpeg_hx_set_enc_dec_mode(dev->reg_base, DECODING);
	jpeg_hx_set_dec_bitstream_size(dev->reg_base, dec_param.size);
	jpeg_hx_color_mode_select(dev->reg_base, dec_param.out_fmt); /* need to check */
	jpeg_hx_set_interrupt(dev->reg_base);
	jpeg_hx_set_stream_size(dev->reg_base,
		dec_param.out_width, dec_param.out_height);

	vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	jpeg_hx_set_stream_buf_address(dev->reg_base, dev->vb2->plane_addr(vb, 0));
	vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	jpeg_hx_set_frame_buf_address(dev->reg_base, dec_param.out_fmt,
		dev->vb2->plane_addr(vb, 0),
		dec_param.in_width, dec_param.in_height);

	jpeg_hx_set_dec_luma_stride(dev->reg_base, dec_param.out_width, dec_param.out_fmt);
	jpeg_hx_set_dec_cbcr_stride(dev->reg_base, dec_param.out_width, dec_param.out_fmt);

	if (dec_param.out_width > 0 && dec_param.out_height > 0) {
		if ((dec_param.out_width * 2 == dec_param.in_width) &&
			(dec_param.out_height * 2 == dec_param.in_height))
			jpeg_hx_set_dec_scaling(dev->reg_base, JPEG_SCALE_2);
		else if ((dec_param.out_width * 4 == dec_param.in_width) &&
			(dec_param.out_height * 4 == dec_param.in_height))
			jpeg_hx_set_dec_scaling(dev->reg_base, JPEG_SCALE_4);
		else
			jpeg_hx_set_dec_scaling(dev->reg_base, JPEG_SCALE_NORMAL);
	}

	jpeg_hx_set_timer(dev->reg_base, 0x10000000);
	jpeg_hx_start(dev->reg_base);

	spin_unlock_irqrestore(&ctx->slock, flags);
}
Example #5
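mtk_venc_encode_header() removes the next destination buffer, asks the encoder to emit the sequence header into it, copies the timestamp from the next source buffer when one is available, and marks the destination buffer done.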
static int mtk_venc_encode_header(void *priv)
{
	struct mtk_vcodec_ctx *ctx = priv;
	int ret;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	struct mtk_vcodec_mem bs_buf;
	struct venc_done_result enc_result;

	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (!dst_buf) {
		mtk_v4l2_debug(1, "No dst buffer");
		return -EINVAL;
	}

	bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
	bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
	bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;

	mtk_v4l2_debug(1,
			"[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
			ctx->id,
			dst_buf->vb2_buf.index, bs_buf.va,
			(u64)bs_buf.dma_addr,
			bs_buf.size);

	ret = venc_if_encode(ctx,
			VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
			NULL, &bs_buf, &enc_result);

	if (ret) {
		dst_buf->vb2_buf.planes[0].bytesused = 0;
		ctx->state = MTK_STATE_ABORT;
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
		mtk_v4l2_err("venc_if_encode failed=%d", ret);
		return -EINVAL;
	}
	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (src_buf) {
		dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
		dst_buf->timecode = src_buf->timecode;
	} else {
		mtk_v4l2_err("No timestamp for the header buffer.");
	}

	ctx->state = MTK_STATE_HEADER;
	dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);

	return 0;
}
Example #6
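rk3399_vpu_jpeg_enc_run() assembles the JPEG header into the destination buffer, programs image control, buffer addresses and quantization tables, then kicks the watchdog and starts the encoder.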
void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
{
	struct rockchip_vpu_dev *vpu = ctx->dev;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	struct rockchip_vpu_jpeg_ctx jpeg_ctx;
	u32 reg;

	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
	jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
	jpeg_ctx.width = ctx->dst_fmt.width;
	jpeg_ctx.height = ctx->dst_fmt.height;
	jpeg_ctx.quality = ctx->jpeg_quality;
	rockchip_vpu_jpeg_header_assemble(&jpeg_ctx);

	/* Switch to JPEG encoder mode before writing registers */
	vepu_write_relaxed(vpu, VEPU_REG_ENCODE_FORMAT_JPEG,
			   VEPU_REG_ENCODE_START);

	rk3399_vpu_set_src_img_ctrl(vpu, ctx);
	rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
	rk3399_vpu_jpeg_enc_set_qtable(vpu,
				       rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
				       rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));

	reg = VEPU_REG_OUTPUT_SWAP32
		| VEPU_REG_OUTPUT_SWAP16
		| VEPU_REG_OUTPUT_SWAP8
		| VEPU_REG_INPUT_SWAP8
		| VEPU_REG_INPUT_SWAP16
		| VEPU_REG_INPUT_SWAP32;
	/* Make sure that all registers are written at this point. */
	vepu_write(vpu, reg, VEPU_REG_DATA_ENDIAN);

	reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
	vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);

	reg = VEPU_REG_MB_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
		| VEPU_REG_MB_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
		| VEPU_REG_FRAME_TYPE_INTRA
		| VEPU_REG_ENCODE_FORMAT_JPEG
		| VEPU_REG_ENCODE_ENABLE;

	/* Kick the watchdog and start encoding */
	schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
	vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
}
Example #7
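A JPEG encode run: the driver programs the Huffman/quantization tables, stream size and input/output formats, sets the destination (bitstream) and source (frame) buffer addresses according to the number of input planes, and switches the block into encoding mode.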
static void jpeg_device_enc_run(void *priv)
{
	struct jpeg_ctx *ctx = priv;
	struct jpeg_dev *dev = ctx->dev;
	struct jpeg_enc_param enc_param;
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->slock, flags);

	dev->mode = ENCODING;
	enc_param = ctx->param.enc_param;

	jpeg_sw_reset(dev->reg_base);
	jpeg_set_interrupt(dev->reg_base);
	jpeg_set_huf_table_enable(dev->reg_base, 1);
	jpeg_set_enc_tbl(dev->reg_base);
	jpeg_set_encode_tbl_select(dev->reg_base, enc_param.quality);
	jpeg_set_stream_size(dev->reg_base,
		enc_param.in_width, enc_param.in_height);
	jpeg_set_enc_out_fmt(dev->reg_base, enc_param.out_fmt);
	jpeg_set_enc_in_fmt(dev->reg_base, enc_param.in_fmt);
	vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	jpeg_set_stream_buf_address(dev->reg_base, dev->vb2->plane_addr(vb, 0));

	vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (enc_param.in_plane == 1)
		jpeg_set_frame_buf_address(dev->reg_base,
			enc_param.in_fmt, dev->vb2->plane_addr(vb, 0), 0, 0);
	if (enc_param.in_plane == 2)
		jpeg_set_frame_buf_address(dev->reg_base,
			enc_param.in_fmt, dev->vb2->plane_addr(vb, 0),
			dev->vb2->plane_addr(vb, 1), 0);
	if (enc_param.in_plane == 3)
		jpeg_set_frame_buf_address(dev->reg_base,
			enc_param.in_fmt, dev->vb2->plane_addr(vb, 0),
			dev->vb2->plane_addr(vb, 1), dev->vb2->plane_addr(vb, 2));

	jpeg_set_encode_hoff_cnt(dev->reg_base, enc_param.out_fmt);

	jpeg_set_enc_dec_mode(dev->reg_base, ENCODING);

	spin_unlock_irqrestore(&ctx->dev->slock, flags);
}
Example #8
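Another JPEG encode run: after a software reset the driver programs tables, stream size, formats and strides, sets the bitstream and frame buffer addresses from the next queued buffers, arms a timeout timer, and starts the hardware.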
static void jpeg_hx_device_enc_run(void *priv)
{
	struct jpeg_ctx *ctx = priv;
	struct jpeg_dev *dev = ctx->dev;
	struct jpeg_enc_param enc_param;
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ctx->slock, flags);

	dev->mode = ENCODING;
	enc_param = ctx->param.enc_param;

	jpeg_hx_sw_reset(dev->reg_base);
	jpeg_hx_set_enc_dec_mode(dev->reg_base, ENCODING);
	jpeg_hx_set_dma_num(dev->reg_base);
	jpeg_hx_clk_on(dev->reg_base);
	jpeg_hx_clk_set(dev->reg_base, 1);
	jpeg_hx_set_interrupt(dev->reg_base);
	jpeg_hx_coef(dev->reg_base, 0);
	jpeg_hx_set_enc_tbl(dev->reg_base, enc_param.quality);
	jpeg_hx_set_encode_tbl_select(dev->reg_base, enc_param.quality);
	jpeg_hx_set_stream_size(dev->reg_base,
		enc_param.in_width, enc_param.in_height);

	vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	jpeg_hx_set_stream_buf_address(dev->reg_base, dev->vb2->plane_addr(vb, 0));
	vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	jpeg_hx_set_frame_buf_address(dev->reg_base, enc_param.in_fmt,
		dev->vb2->plane_addr(vb, 0),
		enc_param.in_width, enc_param.in_height);

	jpeg_hx_set_enc_out_fmt(dev->reg_base, enc_param.out_fmt);
	jpeg_hx_set_enc_in_fmt(dev->reg_base, enc_param.in_fmt);
	jpeg_hx_set_enc_luma_stride(dev->reg_base, enc_param.in_width, enc_param.in_fmt);
	jpeg_hx_set_enc_cbcr_stride(dev->reg_base, enc_param.in_width, enc_param.in_fmt);
	if (enc_param.in_fmt == RGB_565)
		jpeg_hx_set_y16(dev->reg_base);
	jpeg_hx_set_timer(dev->reg_base, 0x10000000);
	jpeg_hx_start(dev->reg_base);

	spin_unlock_irqrestore(&ctx->slock, flags);
}
Example #9
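An interrupt completion handler: once every plane of the current buffer pair has been processed it removes and completes both buffers and finishes the mem2mem job; otherwise it peeks at the same source/destination buffers again and processes the next plane.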
/*
 * msm_jpegdma_isr_processing_done - Invoked by dma_hw when processing is done.
 * @dma: Pointer to the DMA device.
 */
void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
{
	struct vb2_buffer *src_buf;
	struct vb2_buffer *dst_buf;
	struct jpegdma_ctx *ctx;

	mutex_lock(&dma->lock);
	ctx = v4l2_m2m_get_curr_priv(dma->m2m_dev);
	if (ctx) {
		mutex_lock(&ctx->lock);
		ctx->plane_idx++;
		if (ctx->plane_idx >= formats[ctx->format_idx].num_planes) {
			src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
			dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
			if (src_buf == NULL || dst_buf == NULL) {
				dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
				mutex_unlock(&ctx->lock);
				mutex_unlock(&dma->lock);
				return;
			}
			complete_all(&ctx->completion);
			ctx->plane_idx = 0;

			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
			v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
			v4l2_m2m_job_finish(ctx->jdma_device->m2m_dev,
				ctx->m2m_ctx);
		} else {
			dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
			src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
			if (src_buf == NULL || dst_buf == NULL) {
				dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
				mutex_unlock(&ctx->lock);
				mutex_unlock(&dma->lock);
				return;
			}
			msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
		}
		mutex_unlock(&ctx->lock);
	}
	mutex_unlock(&dma->lock);
}
Example #10
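bdisp_get_bufs() resolves DMA addresses for the next source and destination buffers and propagates the source timestamp to the destination.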
static int bdisp_get_bufs(struct bdisp_ctx *ctx)
{
	struct bdisp_frame *src, *dst;
	struct vb2_v4l2_buffer *src_vb, *dst_vb;
	int ret;

	src = &ctx->src;
	dst = &ctx->dst;

	src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	ret = bdisp_get_addr(ctx, &src_vb->vb2_buf, src, src->paddr);
	if (ret)
		return ret;

	dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
	ret = bdisp_get_addr(ctx, &dst_vb->vb2_buf, dst, dst->paddr);
	if (ret)
		return ret;

	dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;

	return 0;
}
Example #11
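An older variant of gsc_get_bufs() built on the pre-vb2_v4l2_buffer API: it prepares source and destination addresses and copies the timestamp through the embedded v4l2_buf.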
static int gsc_get_bufs(struct gsc_ctx *ctx)
{
	struct gsc_frame *s_frame, *d_frame;
	struct vb2_buffer *src_vb = NULL;
	struct vb2_buffer *dst_vb = NULL;
	int ret = 0;

	s_frame = &ctx->s_frame;
	d_frame = &ctx->d_frame;

	src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
	if (ret)
		return ret;

	dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);

	memcpy(&dst_vb->v4l2_buf.timestamp, &src_vb->v4l2_buf.timestamp,
	    sizeof(dst_vb->v4l2_buf.timestamp));

	return ret;
}
Example #12
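A newer gsc_get_bufs(): the mem2mem helpers return vb2_v4l2_buffer pointers and the timestamp is copied directly between the embedded vb2 buffers.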
static int gsc_get_bufs(struct gsc_ctx *ctx)
{
	struct gsc_frame *s_frame, *d_frame;
	struct vb2_v4l2_buffer *src_vb, *dst_vb;
	int ret;

	s_frame = &ctx->s_frame;
	d_frame = &ctx->d_frame;

	src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, &src_vb->vb2_buf, s_frame, &s_frame->addr);
	if (ret)
		return ret;

	dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, &dst_vb->vb2_buf, d_frame, &d_frame->addr);
	if (ret)
		return ret;

	dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;

	return 0;
}
Example #13
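msm_jpegdma_device_run() peeks at the next source/destination buffers, applies any pending configuration, and submits the buffer pair for processing, bailing out with an error message if either queue is empty.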
/*
 * msm_jpegdma_device_run - Dma device run.
 * @priv: Pointer to the DMA context.
 */
static void msm_jpegdma_device_run(void *priv)
{
	struct vb2_buffer *src_buf;
	struct vb2_buffer *dst_buf;
	struct jpegdma_ctx *ctx = priv;

	dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma device run E\n");

	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (src_buf == NULL || dst_buf == NULL) {
		dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
		return;
	}

	if (ctx->pending_config) {
		msm_jpegdma_schedule_next_config(ctx);
		ctx->pending_config = 0;
	}

	msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
	dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma device run X\n");
}
Example #14
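mtk_vdec_worker() is the decode work handler: it maps the next bitstream and frame buffers, handles the empty flush buffer that marks the end of the stream, calls vdec_if_decode(), returns buffers with DONE or ERROR status, and on a resolution change flushes the decoder and queues a V4L2_EVENT_SOURCE_CHANGE event.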
static void mtk_vdec_worker(struct work_struct *work)
{
	struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
				decode_work);
	struct mtk_vcodec_dev *dev = ctx->dev;
	struct vb2_buffer *src_buf, *dst_buf;
	struct mtk_vcodec_mem buf;
	struct vdec_fb *pfb;
	bool res_chg = false;
	int ret;
	struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
	struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;

	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (src_buf == NULL) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
		return;
	}

	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	if (dst_buf == NULL) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
		return;
	}

	src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
	src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb);

	dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
	dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);

	pfb = &dst_buf_info->frame_buffer;
	pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
	pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
	pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;

	pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
	pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
	pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
	pfb->status = 0;
	mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);

	mtk_v4l2_debug(3,
			"id=%d Framebuf  pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
			dst_buf->index, pfb,
			pfb->base_y.va, &pfb->base_y.dma_addr,
			&pfb->base_c.dma_addr, pfb->base_y.size);

	if (src_buf_info->lastframe) {
		mtk_v4l2_debug(1, "Got empty flush input buffer.");
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);

		/* update dst buf status */
		dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
		mutex_lock(&ctx->lock);
		dst_buf_info->used = false;
		mutex_unlock(&ctx->lock);

		vdec_if_decode(ctx, NULL, NULL, &res_chg);
		clean_display_buffer(ctx);
		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
		dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_LAST;
		v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
		clean_free_buffer(ctx);
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		return;
	}
	buf.va = vb2_plane_vaddr(src_buf, 0);
	buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
	buf.size = (size_t)src_buf->planes[0].bytesused;
	if (!buf.va) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
				ctx->id, src_buf->index);
		return;
	}
	mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
			ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
	dst_buf_info->vb.vb2_buf.timestamp = src_buf_info->vb.vb2_buf.timestamp;
	dst_buf_info->vb.timecode = src_buf_info->vb.timecode;
	mutex_lock(&ctx->lock);
	dst_buf_info->used = true;
	mutex_unlock(&ctx->lock);
	src_buf_info->used = true;

	ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);

	if (ret) {
		mtk_v4l2_err(
			" <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
			ctx->id,
			src_buf->index,
			buf.size,
			src_buf_info->vb.vb2_buf.timestamp,
			dst_buf->index,
			ret, res_chg);
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
	} else if (res_chg == false) {
		/*
		 * Only return the src buffer with VB2_BUF_STATE_DONE
		 * when decoding succeeds without a resolution change.
		 */
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
	}

	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	clean_display_buffer(ctx);
	clean_free_buffer(ctx);

	if (!ret && res_chg) {
		mtk_vdec_pic_info_update(ctx);
		/*
		 * On encountering a resolution change in the stream,
		 * the driver must first process and decode all
		 * remaining buffers from before the resolution change
		 * point, so flush the decoder here.
		 */
		mtk_vdec_flush_decoder(ctx);
		/*
		 * After all buffers containing decoded frames from
		 * before the resolution change point are ready to be
		 * dequeued on the CAPTURE queue, the driver sends a
		 * V4L2_EVENT_SOURCE_CHANGE event for source change
		 * type V4L2_EVENT_SRC_CH_RESOLUTION.
		 */
		mtk_vdec_queue_res_chg_event(ctx);
	}
	v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
}
Example #15
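A JPEG decode run with a watchdog timer: the driver programs the bitstream and frame buffer addresses for the next queued buffers, chooses 1/2, 1/4 or no downscaling from the input/output dimensions, and switches the block into decoding mode.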
static void jpeg_device_dec_run(void *priv)
{
	struct jpeg_ctx *ctx = priv;
	struct jpeg_dev *dev = ctx->dev;
	struct jpeg_dec_param dec_param;
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->slock, flags);

	printk(KERN_DEBUG "dec_run.\n");

	if (timer_pending(&ctx->dev->watchdog_timer) == 0) {
		ctx->dev->watchdog_timer.expires = jiffies +
					msecs_to_jiffies(JPEG_WATCHDOG_INTERVAL);
		add_timer(&ctx->dev->watchdog_timer);
	}

	set_bit(0, &ctx->dev->hw_run);

	dev->mode = DECODING;
	dec_param = ctx->param.dec_param;

	jpeg_sw_reset(dev->reg_base);
	jpeg_set_interrupt(dev->reg_base);

	jpeg_set_encode_tbl_select(dev->reg_base, 0);

	vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	jpeg_set_stream_buf_address(dev->reg_base, dev->vb2->plane_addr(vb, 0));

	vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	if (dec_param.out_plane == 1)
		jpeg_set_frame_buf_address(dev->reg_base,
			dec_param.out_fmt, dev->vb2->plane_addr(vb, 0), 0, 0);
	else if (dec_param.out_plane == 2)
		jpeg_set_frame_buf_address(dev->reg_base,
			dec_param.out_fmt, dev->vb2->plane_addr(vb, 0),
			dev->vb2->plane_addr(vb, 1), 0);
	else if (dec_param.out_plane == 3)
		jpeg_set_frame_buf_address(dev->reg_base,
			dec_param.out_fmt, dev->vb2->plane_addr(vb, 0),
			dev->vb2->plane_addr(vb, 1), dev->vb2->plane_addr(vb, 2));

	if (dec_param.out_width > 0 && dec_param.out_height > 0) {
		if ((dec_param.out_width * 2 == dec_param.in_width) &&
			(dec_param.out_height * 2 == dec_param.in_height))
			jpeg_set_dec_scaling(dev->reg_base, JPEG_SCALE_2, JPEG_SCALE_2);
		else if ((dec_param.out_width * 4 == dec_param.in_width) &&
			(dec_param.out_height * 4 == dec_param.in_height))
			jpeg_set_dec_scaling(dev->reg_base, JPEG_SCALE_4, JPEG_SCALE_4);
		else
			jpeg_set_dec_scaling(dev->reg_base, JPEG_SCALE_NORMAL, JPEG_SCALE_NORMAL);
	}

	jpeg_set_dec_out_fmt(dev->reg_base, dec_param.out_fmt);
	jpeg_set_dec_bitstream_size(dev->reg_base, dec_param.size);
	jpeg_set_enc_dec_mode(dev->reg_base, DECODING);

	spin_unlock_irqrestore(&ctx->dev->slock, flags);
}
Example #16
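mtk_venc_param_change() reads per-buffer encode parameter changes (bitrate, frame rate, GOP size, forced intra frame) from the next source buffer and applies them through venc_if_set_param() before the frame is encoded.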
static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
{
	struct venc_enc_param enc_prm;
	struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	struct mtk_video_enc_buf *mtk_buf =
			container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);

	int ret = 0;

	memset(&enc_prm, 0, sizeof(enc_prm));
	if (mtk_buf->param_change == MTK_ENCODE_PARAM_NONE)
		return 0;

	if (mtk_buf->param_change & MTK_ENCODE_PARAM_BITRATE) {
		enc_prm.bitrate = mtk_buf->enc_params.bitrate;
		mtk_v4l2_debug(1, "[%d] id=%d, change param br=%d",
				ctx->id,
				mtk_buf->vb.vb2_buf.index,
				enc_prm.bitrate);
		ret |= venc_if_set_param(ctx,
					 VENC_SET_PARAM_ADJUST_BITRATE,
					 &enc_prm);
	}
	if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FRAMERATE) {
		enc_prm.frm_rate = mtk_buf->enc_params.framerate_num /
				   mtk_buf->enc_params.framerate_denom;
		mtk_v4l2_debug(1, "[%d] id=%d, change param fr=%d",
			       ctx->id,
			       mtk_buf->vb.vb2_buf.index,
			       enc_prm.frm_rate);
		ret |= venc_if_set_param(ctx,
					 VENC_SET_PARAM_ADJUST_FRAMERATE,
					 &enc_prm);
	}
	if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_GOP_SIZE) {
		enc_prm.gop_size = mtk_buf->enc_params.gop_size;
		mtk_v4l2_debug(1, "change param intra period=%d",
			       enc_prm.gop_size);
		ret |= venc_if_set_param(ctx,
					 VENC_SET_PARAM_GOP_SIZE,
					 &enc_prm);
	}
	if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FORCE_INTRA) {
		mtk_v4l2_debug(1, "[%d] id=%d, change param force I=%d",
				ctx->id,
				mtk_buf->vb.vb2_buf.index,
				mtk_buf->enc_params.force_intra);
		if (mtk_buf->enc_params.force_intra)
			ret |= venc_if_set_param(ctx,
						 VENC_SET_PARAM_FORCE_INTRA,
						 NULL);
	}

	mtk_buf->param_change = MTK_ENCODE_PARAM_NONE;

	if (ret) {
		ctx->state = MTK_STATE_ABORT;
		mtk_v4l2_err("venc_if_set_param %d failed=%d",
				mtk_buf->param_change, ret);
		return -1;
	}

	return 0;
}
Example #17
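fimc_device_run() prepares source/destination DMA addresses, propagates the timestamp, reprograms the scaler/rotation/DMA pipeline when the context has changed, and activates capture and input DMA.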
static void fimc_device_run(void *priv)
{
    struct vb2_buffer *src_vb, *dst_vb;
    struct fimc_ctx *ctx = priv;
    struct fimc_frame *sf, *df;
    struct fimc_dev *fimc;
    unsigned long flags;
    int ret;

    if (WARN(!ctx, "Null context\n"))
        return;

    fimc = ctx->fimc_dev;
    spin_lock_irqsave(&fimc->slock, flags);

    set_bit(ST_M2M_PEND, &fimc->state);
    sf = &ctx->s_frame;
    df = &ctx->d_frame;

    if (ctx->state & FIMC_PARAMS) {
        /* Prepare the DMA offsets for scaler */
        fimc_prepare_dma_offset(ctx, sf);
        fimc_prepare_dma_offset(ctx, df);
    }

    src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
    ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr);
    if (ret)
        goto dma_unlock;

    dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
    ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr);
    if (ret)
        goto dma_unlock;

    dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;

    /* Reconfigure hardware if the context has changed. */
    if (fimc->m2m.ctx != ctx) {
        ctx->state |= FIMC_PARAMS;
        fimc->m2m.ctx = ctx;
    }

    if (ctx->state & FIMC_PARAMS) {
        fimc_set_yuv_order(ctx);
        fimc_hw_set_input_path(ctx);
        fimc_hw_set_in_dma(ctx);
        ret = fimc_set_scaler_info(ctx);
        if (ret)
            goto dma_unlock;
        fimc_hw_set_prescaler(ctx);
        fimc_hw_set_mainscaler(ctx);
        fimc_hw_set_target_format(ctx);
        fimc_hw_set_rotation(ctx);
        fimc_hw_set_effect(ctx);
        fimc_hw_set_out_dma(ctx);
        if (fimc->drv_data->alpha_color)
            fimc_hw_set_rgb_alpha(ctx);
        fimc_hw_set_output_path(ctx);
    }
    fimc_hw_set_input_addr(fimc, &sf->paddr);
    fimc_hw_set_output_addr(fimc, &df->paddr, -1);

    fimc_activate_capture(ctx);
    ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
    fimc_hw_activate_input_dma(fimc, true);

dma_unlock:
    spin_unlock_irqrestore(&fimc->slock, flags);
}