Example #1
static int hva_buf_prepare(struct vb2_buffer *vb)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = ctx_to_dev(ctx);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		struct hva_frame *frame = to_hva_frame(vbuf);

		if (vbuf->field == V4L2_FIELD_ANY)
			vbuf->field = V4L2_FIELD_NONE;
		if (vbuf->field != V4L2_FIELD_NONE) {
			dev_dbg(dev,
				"%s frame[%d] prepare: %d field not supported\n",
				ctx->name, vb->index, vbuf->field);
			return -EINVAL;
		}

		if (!frame->prepared) {
			/* get memory addresses */
			frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
			frame->paddr = vb2_dma_contig_plane_dma_addr(
					&vbuf->vb2_buf, 0);
			frame->info = ctx->frameinfo;
			frame->prepared = true;

			dev_dbg(dev,
				"%s frame[%d] prepared; virt=%p, phy=%pad\n",
				ctx->name, vb->index,
				frame->vaddr, &frame->paddr);
		}
	} else {
		struct hva_stream *stream = to_hva_stream(vbuf);

		if (!stream->prepared) {
			/* get memory addresses */
			stream->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
			stream->paddr = vb2_dma_contig_plane_dma_addr(
					&vbuf->vb2_buf, 0);
			stream->size = vb2_plane_size(&vbuf->vb2_buf, 0);
			stream->prepared = true;

			dev_dbg(dev,
				"%s stream[%d] prepared; virt=%p, phy=%pad\n",
				ctx->name, vb->index,
				stream->vaddr, &stream->paddr);
		}
	}

	return 0;
}
Example #2
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

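	/*
	 * For OUTPUT buffers the payload (bytesused) is set by userspace,
	 * so reject a payload larger than the plane size.
	 */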
	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		return -EINVAL;
	}

	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	buf->state = UVC_BUF_STATE_QUEUED;
	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);
	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		buf->bytesused = 0;
	else
		buf->bytesused = vb2_get_plane_payload(vb, 0);

	return 0;
}
Example #3
static void buffer_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct smi2021 *smi2021 = vb2_get_drv_priv(vb->vb2_queue);
	struct smi2021_buf *buf = container_of(vb, struct smi2021_buf, vb);

	if (smi2021->udev == NULL) {
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
		return;
	}

	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);

	buf->pos = 0;
	buf->trc_av = 0;
	buf->in_blank = true;
	buf->second_field = false;

	spin_lock_irqsave(&smi2021->buf_lock, flags);
	if (buf->length < smi2021->cur_height * SMI2021_BYTES_PER_LINE)
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	else
		list_add_tail(&buf->list, &smi2021->bufs);
	spin_unlock_irqrestore(&smi2021->buf_lock, flags);
}
static void unicam_videobuf_queue(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct unicam_camera_dev *unicam_dev = ici->priv;
	struct unicam_camera_buffer *buf = to_unicam_camera_vb(vb);
	unsigned long flags;

	dprintk("-enter");
	dprintk("vb=0x%p vbuf=0x%p pbuf=0x%p size=%lu", vb,
		vb2_plane_vaddr(vb, 0),
		(void *)vb2_dma_contig_plane_dma_addr(vb, 0),
		vb2_get_plane_payload(vb, 0));

	spin_lock_irqsave(&unicam_dev->lock, flags);
	list_add_tail(&buf->queue, &unicam_dev->capture);

	if (!unicam_dev->active) {
		unicam_dev->active = vb;
		/* use this buffer to trigger capture */
		/* Configure the HW only if streamon has been done;
		 * otherwise only update active and the HW will be
		 * configured by streamon. */
		if (unicam_dev->streaming) {
			unicam_camera_update_buf(unicam_dev);
			if (unicam_dev->if_params.if_mode ==
				V4L2_SUBDEV_SENSOR_MODE_SERIAL_CSI2)
				unicam_camera_capture(unicam_dev);
		}
	}
	spin_unlock_irqrestore(&unicam_dev->lock, flags);
	dprintk("-exit");
}
static int unicam_videobuf_prepare(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
						     icd->current_fmt->host_fmt);
	unsigned long size;

	dprintk("-enter");
	if (bytes_per_line < 0)
		return bytes_per_line;

	dprintk("vb=0x%p vbuf=0x%p pbuf=0x%p, size=%lu", vb,
		vb2_plane_vaddr(vb, 0),
		(void *)vb2_dma_contig_plane_dma_addr(vb, 0),
		vb2_get_plane_payload(vb, 0));

	size = icd->user_height * bytes_per_line;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(icd->dev.parent, "Buffer too small (%lu < %lu)\n",
			vb2_plane_size(vb, 0), size);
		return -ENOBUFS;
	}
	vb2_set_plane_payload(vb, 0, size);

	dprintk("-exit");
	return 0;
}
Example #6
static u32 get_frame_type_flag(struct go7007_buffer *vb, int format)
{
	u8 *ptr = vb2_plane_vaddr(&vb->vb, 0);

	switch (format) {
	case V4L2_PIX_FMT_MJPEG:
		return V4L2_BUF_FLAG_KEYFRAME;
	case V4L2_PIX_FMT_MPEG4:
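		/*
		 * MPEG-4 part 2: the two bits following the VOP start code
		 * hold vop_coding_type (0 = I, 1 = P, 2 = B).
		 */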
		switch ((ptr[vb->frame_offset + 4] >> 6) & 0x3) {
		case 0:
			return V4L2_BUF_FLAG_KEYFRAME;
		case 1:
			return V4L2_BUF_FLAG_PFRAME;
		case 2:
			return V4L2_BUF_FLAG_BFRAME;
		default:
			return 0;
		}
	case V4L2_PIX_FMT_MPEG1:
	case V4L2_PIX_FMT_MPEG2:
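		/*
		 * MPEG-1/2 picture header: picture_coding_type (1 = I, 2 = P,
		 * 3 = B) follows the 10-bit temporal_reference.
		 */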
		switch ((ptr[vb->frame_offset + 5] >> 3) & 0x7) {
		case 1:
			return V4L2_BUF_FLAG_KEYFRAME;
		case 2:
			return V4L2_BUF_FLAG_PFRAME;
		case 3:
			return V4L2_BUF_FLAG_BFRAME;
		default:
			return 0;
		}
	}

	return 0;
}
Example #7
static void buffer_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct stk1160 *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct stk1160_buffer *buf =
		container_of(vb, struct stk1160_buffer, vb);

	spin_lock_irqsave(&dev->buf_lock, flags);
	if (!dev->udev) {
		/*
		 * If the device is disconnected return the buffer to userspace
		 * directly. The next QBUF call will fail with -ENODEV.
		 */
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	} else {

		buf->mem = vb2_plane_vaddr(vb, 0);
		buf->length = vb2_plane_size(vb, 0);
		buf->bytesused = 0;
		buf->pos = 0;

		/*
		 * If the buffer length is less than expected then we return
		 * the buffer to userspace directly.
		 */
		if (buf->length < dev->width * dev->height * 2)
			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
		else
			list_add_tail(&buf->list, &dev->avail_bufs);

	}
	spin_unlock_irqrestore(&dev->buf_lock, flags);
}
Example #8
/*
 * Draw the image into the overlay buffer.
 * Note that the combination of overlay and multiplanar is not supported.
 */
static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
{
	struct tpg_data *tpg = &dev->tpg;
	unsigned pixsize = tpg_g_twopixelsize(tpg, 0) / 2;
	void *vbase = dev->fb_vbase_cap;
	void *vbuf = vb2_plane_vaddr(&buf->vb, 0);
	unsigned img_width = dev->compose_cap.width;
	unsigned img_height = dev->compose_cap.height;
	unsigned stride = tpg->bytesperline[0];
	/* if quick is true, then valid_pix() doesn't have to be called */
	bool quick = dev->bitmap_cap == NULL && dev->clipcount_cap == 0;
	int x, y, w, out_x = 0;

	if ((dev->overlay_cap_field == V4L2_FIELD_TOP ||
	     dev->overlay_cap_field == V4L2_FIELD_BOTTOM) &&
	    dev->overlay_cap_field != buf->vb.v4l2_buf.field)
		return;

	vbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride;
	x = dev->overlay_cap_left;
	w = img_width;
	if (x < 0) {
		out_x = -x;
		w = w - out_x;
		x = 0;
	} else {
		w = dev->fb_cap.fmt.width - x;
		if (w > img_width)
			w = img_width;
	}
	if (w <= 0)
		return;
	if (dev->overlay_cap_top >= 0)
		vbase += dev->overlay_cap_top * dev->fb_cap.fmt.bytesperline;
	for (y = dev->overlay_cap_top;
	     y < dev->overlay_cap_top + (int)img_height;
	     y++, vbuf += stride) {
		int px;

		if (y < 0 || y >= dev->fb_cap.fmt.height)
			continue;
		if (quick) {
			memcpy(vbase + x * pixsize,
			       vbuf + out_x * pixsize, w * pixsize);
			vbase += dev->fb_cap.fmt.bytesperline;
			continue;
		}
		for (px = 0; px < w; px++) {
			if (!valid_pix(dev, y - dev->overlay_cap_top,
				       px + out_x, y, px + x))
				continue;
			memcpy(vbase + (px + x) * pixsize,
			       vbuf + (px + out_x) * pixsize,
			       pixsize);
		}
		vbase += dev->fb_cap.fmt.bytesperline;
	}
}
Example #9
/* Called for each 256-byte image chunk.
 * First word identifies the chunk, followed by 240 words of image
 * data and padding. */
static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
{
	int frame_id, odd, chunk_no;
	u32 *frame;
	struct usbtv_buf *buf;
	unsigned long flags;

	/* Ignore corrupted lines. */
	if (!USBTV_MAGIC_OK(chunk))
		return;
	frame_id = USBTV_FRAME_ID(chunk);
	odd = USBTV_ODD(chunk);
	chunk_no = USBTV_CHUNK_NO(chunk);
	if (chunk_no >= usbtv->n_chunks)
		return;

	/* Beginning of a frame. */
	if (chunk_no == 0) {
		usbtv->frame_id = frame_id;
		usbtv->chunks_done = 0;
	}

	if (usbtv->frame_id != frame_id)
		return;

	spin_lock_irqsave(&usbtv->buflock, flags);
	if (list_empty(&usbtv->bufs)) {
		/* No free buffers. Userspace likely too slow. */
		spin_unlock_irqrestore(&usbtv->buflock, flags);
		return;
	}

	/* First available buffer. */
	buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
	frame = vb2_plane_vaddr(&buf->vb, 0);

	/* Copy the chunk data. */
	usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
	usbtv->chunks_done++;

	/* Last chunk in a frame, signalling an end */
	if (odd && chunk_no == usbtv->n_chunks-1) {
		int size = vb2_plane_size(&buf->vb, 0);
		enum vb2_buffer_state state = usbtv->chunks_done ==
						usbtv->n_chunks ?
						VB2_BUF_STATE_DONE :
						VB2_BUF_STATE_ERROR;

		buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
		buf->vb.v4l2_buf.sequence = usbtv->sequence++;
		v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
		vb2_set_plane_payload(&buf->vb, 0, size);
		vb2_buffer_done(&buf->vb, state);
		list_del(&buf->list);
	}

	spin_unlock_irqrestore(&usbtv->buflock, flags);
}
Example #10
static int alsa_pb_fnc(struct vb2_buffer *vb, void *priv)
{
	struct cobalt_stream *s = priv;

	if (s->alsa->alsa_pb_channel)
		cobalt_alsa_pb_pcm_data(s->alsa,
				vb2_plane_vaddr(vb, 0),
				8 * 4,
				vb2_get_plane_payload(vb, 0) / (8 * 4));
	return 0;
}
Example #11
static int alsa_fnc(struct vb2_buffer *vb, void *priv)
{
	struct cobalt_stream *s = priv;
	unsigned char *p = vb2_plane_vaddr(vb, 0);
	int i;

	if (pcm_debug) {
		pr_info("alsa: ");
		for (i = 0; i < 8 * 4; i++) {
			if (!(i & 3))
				pr_cont(" ");
			pr_cont("%02x", p[i]);
		}
		pr_cont("\n");
	}
	cobalt_alsa_announce_pcm_data(s->alsa,
			vb2_plane_vaddr(vb, 0),
			8 * 4,
			vb2_get_plane_payload(vb, 0) / (8 * 4));
	return 0;
}
Example #12
static void rockchip_vpu_job_finish(struct rockchip_vpu_dev *vpu,
				    struct rockchip_vpu_ctx *ctx,
				    unsigned int bytesused,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src, *dst;
	size_t avail_size;

	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);
	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	if (WARN_ON(!src))
		return;
	if (WARN_ON(!dst))
		return;

	src->sequence = ctx->sequence_out++;
	dst->sequence = ctx->sequence_cap++;

	dst->field = src->field;
	if (src->flags & V4L2_BUF_FLAG_TIMECODE)
		dst->timecode = src->timecode;
	dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
	dst->flags &= ~(V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
			V4L2_BUF_FLAG_TIMECODE);
	dst->flags |= src->flags & (V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
				    V4L2_BUF_FLAG_TIMECODE);

	avail_size = vb2_plane_size(&dst->vb2_buf, 0) -
		     ctx->vpu_dst_fmt->header_size;
	if (bytesused <= avail_size) {
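		/*
		 * If the encoder wrote the bitstream into a bounce buffer,
		 * copy it into the capture buffer just after the header.
		 */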
		if (ctx->bounce_buf) {
			memcpy(vb2_plane_vaddr(&dst->vb2_buf, 0) +
			       ctx->vpu_dst_fmt->header_size,
			       ctx->bounce_buf, bytesused);
		}
		dst->vb2_buf.planes[0].bytesused =
			ctx->vpu_dst_fmt->header_size + bytesused;
	} else {
		result = VB2_BUF_STATE_ERROR;
	}

	v4l2_m2m_buf_done(src, result);
	v4l2_m2m_buf_done(dst, result);

	v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
}
Example #13
static int mtk_venc_encode_header(void *priv)
{
	struct mtk_vcodec_ctx *ctx = priv;
	int ret;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	struct mtk_vcodec_mem bs_buf;
	struct venc_done_result enc_result;

	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (!dst_buf) {
		mtk_v4l2_debug(1, "No dst buffer");
		return -EINVAL;
	}

	bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
	bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
	bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;

	mtk_v4l2_debug(1,
			"[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
			ctx->id,
			dst_buf->vb2_buf.index, bs_buf.va,
			(u64)bs_buf.dma_addr,
			bs_buf.size);

	ret = venc_if_encode(ctx,
			VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
			NULL, &bs_buf, &enc_result);

	if (ret) {
		dst_buf->vb2_buf.planes[0].bytesused = 0;
		ctx->state = MTK_STATE_ABORT;
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
		mtk_v4l2_err("venc_if_encode failed=%d", ret);
		return -EINVAL;
	}
	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (src_buf) {
		dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
		dst_buf->timecode = src_buf->timecode;
	} else {
		mtk_v4l2_err("No timestamp for the header buffer.");
	}

	ctx->state = MTK_STATE_HEADER;
	dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);

	return 0;
}
void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
{
	struct rockchip_vpu_dev *vpu = ctx->dev;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	struct rockchip_vpu_jpeg_ctx jpeg_ctx;
	u32 reg;

	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
	jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
	jpeg_ctx.width = ctx->dst_fmt.width;
	jpeg_ctx.height = ctx->dst_fmt.height;
	jpeg_ctx.quality = ctx->jpeg_quality;
	rockchip_vpu_jpeg_header_assemble(&jpeg_ctx);

	/* Switch to JPEG encoder mode before writing registers */
	vepu_write_relaxed(vpu, VEPU_REG_ENCODE_FORMAT_JPEG,
			   VEPU_REG_ENCODE_START);

	rk3399_vpu_set_src_img_ctrl(vpu, ctx);
	rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
	rk3399_vpu_jpeg_enc_set_qtable(vpu,
				       rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
				       rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));

	reg = VEPU_REG_OUTPUT_SWAP32
		| VEPU_REG_OUTPUT_SWAP16
		| VEPU_REG_OUTPUT_SWAP8
		| VEPU_REG_INPUT_SWAP8
		| VEPU_REG_INPUT_SWAP16
		| VEPU_REG_INPUT_SWAP32;
	/* Make sure that all registers are written at this point. */
	vepu_write(vpu, reg, VEPU_REG_DATA_ENDIAN);

	reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
	vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);

	reg = VEPU_REG_MB_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
		| VEPU_REG_MB_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
		| VEPU_REG_FRAME_TYPE_INTRA
		| VEPU_REG_ENCODE_FORMAT_JPEG
		| VEPU_REG_ENCODE_ENABLE;

	/* Kick the watchdog and start encoding */
	schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
	vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
}
Example #15
static void
vbi_buffer_queue(struct vb2_buffer *vb)
{
	struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb);
	struct em28xx_dmaqueue *vbiq = &dev->vbiq;
	unsigned long flags = 0;

	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);

	spin_lock_irqsave(&dev->slock, flags);
	list_add_tail(&buf->list, &vbiq->active);
	spin_unlock_irqrestore(&dev->slock, flags);
}
Example #16
static int histo_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);

	if (vb->num_planes != 1)
		return -EINVAL;

	if (vb2_plane_size(vb, 0) < histo->data_size)
		return -EINVAL;

	buf->addr = vb2_plane_vaddr(vb, 0);

	return 0;
}
Example #17
static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct rcar_vin_priv *priv = ici->priv;
	unsigned long size;

	size = icd->sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
			vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
		goto error;
	}

	vb2_set_plane_payload(vb, 0, size);

	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));

	spin_lock_irq(&priv->lock);

	list_add_tail(to_buf_list(vb), &priv->capture);
	rcar_vin_fill_hw_slot(priv);

	/* If we weren't running, and have enough buffers, start capturing! */
	if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) {
		if (rcar_vin_setup(priv)) {
			/* Submit error */
			list_del_init(to_buf_list(vb));
			spin_unlock_irq(&priv->lock);
			goto error;
		}
		priv->request_to_stop = false;
		init_completion(&priv->capture_stop);
		priv->state = RUNNING;
		rcar_vin_capture(priv);
	}

	spin_unlock_irq(&priv->lock);

	return;

error:
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
Example #18
int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb)
{
	const u8 *buf = vb2_plane_vaddr(vb, 0);
	const u8 *end = buf + vb2_get_plane_payload(vb, 0);

	/* Find SPS header */
	do {
		buf = coda_find_nal_header(buf, end);
		if (!buf)
			return -EINVAL;
	} while ((*buf++ & 0x1f) != 0x7);

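	/* The SPS payload starts with profile_idc, one byte of
	 * constraint_set flags, then level_idc (hence buf[0] and buf[2]). */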
	ctx->params.h264_profile_idc = buf[0];
	ctx->params.h264_level_idc = buf[2];

	return 0;
}
Example #19
/*
 * The hardware takes care only of ext hdr and dct partition. The software
 * must take care of frame header.
 *
 * Buffer layout as received from hardware:
 *   |<--gap-->|<--ext hdr-->|<-gap->|<---dct part---
 *   |<-------dct part offset------->|
 *
 * Required buffer layout:
 *   |<--hdr-->|<--ext hdr-->|<---dct part---
 */
void rk3288_vpu_vp8e_assemble_bitstream(struct rk3288_vpu_ctx *ctx,
					struct rk3288_vpu_buf *dst_buf)
{
	size_t ext_hdr_size = dst_buf->vp8e.ext_hdr_size;
	size_t dct_size = dst_buf->vp8e.dct_size;
	size_t hdr_size = dst_buf->vp8e.hdr_size;
	size_t dst_size;
	size_t tag_size;
	void *dst;
	u32 *tag;

	dst_size = vb2_plane_size(&dst_buf->b, 0);
	dst = vb2_plane_vaddr(&dst_buf->b, 0);
	tag = dst; /* To access frame tag words. */

	if (WARN_ON(hdr_size + ext_hdr_size + dct_size > dst_size))
		return;
	if (WARN_ON(dst_buf->vp8e.dct_offset + dct_size > dst_size))
		return;

	vpu_debug(1, "%s: hdr_size = %u, ext_hdr_size = %u, dct_size = %u\n",
			__func__, hdr_size, ext_hdr_size, dct_size);

	memmove(dst + hdr_size + ext_hdr_size,
		dst + dst_buf->vp8e.dct_offset, dct_size);
	memcpy(dst, dst_buf->vp8e.header, hdr_size);

	/* Patch frame tag at first 32-bit word of the frame. */
	if (dst_buf->b.v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
		tag_size = VP8_KEY_FRAME_HDR_SIZE;
		tag[0] &= ~VP8_FRAME_TAG_KEY_FRAME_BIT;
	} else {
		tag_size = VP8_INTER_FRAME_HDR_SIZE;
		tag[0] |= VP8_FRAME_TAG_KEY_FRAME_BIT;
	}

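	/*
	 * The length field of the frame tag holds the size of the first
	 * partition, i.e. the assembled headers minus the fixed-size tag.
	 */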
	tag[0] &= ~VP8_FRAME_TAG_LENGTH_MASK;
	tag[0] |= (hdr_size + ext_hdr_size - tag_size)
						<< VP8_FRAME_TAG_LENGTH_SHIFT;

	vb2_set_plane_payload(&dst_buf->b, 0,
				hdr_size + ext_hdr_size + dct_size);
}
Example #20
static void solo_fillbuf(struct solo_dev *solo_dev,
			 struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	dma_addr_t addr;
	unsigned int fdma_addr;
	int error = -1;
	int i;

	addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	if (!addr)
		goto finish_buf;

	if (erase_off(solo_dev)) {
		void *p = vb2_plane_vaddr(vb, 0);
		int image_size = solo_image_size(solo_dev);

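		/*
		 * Blank the frame to black, assuming a UYVY-style 4:2:2
		 * layout: neutral chroma (0x80) and zero luma.
		 */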
		for (i = 0; i < image_size; i += 2) {
			((u8 *)p)[i] = 0x80;
			((u8 *)p)[i + 1] = 0x00;
		}
		error = 0;
	} else {
		fdma_addr = SOLO_DISP_EXT_ADDR + (solo_dev->old_write *
				(SOLO_HW_BPL * solo_vlines(solo_dev)));

		error = solo_p2m_dma_t(solo_dev, 0, addr, fdma_addr,
				       solo_bytesperline(solo_dev),
				       solo_vlines(solo_dev), SOLO_HW_BPL);
	}

finish_buf:
	if (!error) {
		vb2_set_plane_payload(vb, 0,
			solo_vlines(solo_dev) * solo_bytesperline(solo_dev));
		vbuf->sequence = solo_dev->sequence++;
		vb->timestamp = ktime_get_ns();
	}

	vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
Example #21
static void buffer_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct smi2021 *smi2021 = vb2_get_drv_priv(vb->vb2_queue);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
	struct smi2021_buf *buf = container_of(vb, struct smi2021_buf, vb);
#else
	struct smi2021_buf *buf = container_of(vb, struct smi2021_buf, vb.vb2_buf);
#endif
	spin_lock_irqsave(&smi2021->buf_lock, flags);
	if (!smi2021->udev) {
		/*
		 * If the device is disconnected return the buffer to userspace
		 * directly. The next QBUF call will fail with -ENODEV.
		 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
#else
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
#endif
	} else {
		buf->mem = vb2_plane_vaddr(vb, 0);
		buf->length = vb2_plane_size(vb, 0);
		buf->pos = 0;

		buf->in_blank = true;
		buf->odd = false;

		/*
		 * If the buffer length is less than expected,
		 * we return the buffer to userspace directly.
		 */
		if (buf->length < SMI2021_BYTES_PER_LINE * smi2021->cur_height)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
#else
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
#endif
		else
			list_add_tail(&buf->list, &smi2021->avail_bufs);
	}
	spin_unlock_irqrestore(&smi2021->buf_lock, flags);
}
static void unicam_videobuf_release(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct unicam_camera_dev *unicam_dev = ici->priv;
	struct unicam_camera_buffer *buf = to_unicam_camera_vb(vb);
	unsigned long flags;

	dprintk("-enter");

	dprintk("vb=0x%p vbuf=0x%p pbuf=0x%p size=%lu", vb,
		vb2_plane_vaddr(vb, 0),
		(void *)vb2_dma_contig_plane_dma_addr(vb, 0),
		vb2_get_plane_payload(vb, 0));
	spin_lock_irqsave(&unicam_dev->lock, flags);

	if (buf->magic == UNICAM_BUF_MAGIC)
		list_del_init(&buf->queue);
	spin_unlock_irqrestore(&unicam_dev->lock, flags);

	dprintk("-exit");
}
Example #23
static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
{
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	unsigned line_height = 16 / factor;
	bool is_tv = vivid_is_sdtv_cap(dev);
	bool is_60hz = is_tv && (dev->std_cap & V4L2_STD_525_60);
	unsigned p;
	int line = 1;
	u8 *basep[TPG_MAX_PLANES][2];
	unsigned ms;
	char str[100];
	s32 gain;
	bool is_loop = false;

	if (dev->loop_video && dev->can_loop_video &&
	    ((vivid_is_svid_cap(dev) && !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
	     (vivid_is_hdmi_cap(dev) && !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
		is_loop = true;

	buf->vb.v4l2_buf.sequence = dev->vid_cap_seq_count;
	/*
	 * Take the timestamp now if the timestamp source is set to
	 * "Start of Exposure".
	 */
	if (dev->tstamp_src_is_soe)
		v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * 60 Hz standards start with the bottom field, 50 Hz standards
		 * with the top field. So if the 0-based seq_count is even,
		 * then the field is TOP for 50 Hz and BOTTOM for 60 Hz
		 * standards.
		 */
		buf->vb.v4l2_buf.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
			V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
		/*
		 * The sequence counter counts frames, not fields. So divide
		 * by two.
		 */
		buf->vb.v4l2_buf.sequence /= 2;
	} else {
		buf->vb.v4l2_buf.field = dev->field_cap;
	}
	tpg_s_field(&dev->tpg, buf->vb.v4l2_buf.field);
	tpg_s_perc_fill_blank(&dev->tpg, dev->must_blank[buf->vb.v4l2_buf.index]);

	vivid_precalc_copy_rects(dev);

	for (p = 0; p < tpg_g_planes(&dev->tpg); p++) {
		void *vbuf = vb2_plane_vaddr(&buf->vb, p);

		/*
		 * The first plane of a multiplanar format has a non-zero
		 * data_offset. This helps testing whether the application
		 * correctly supports non-zero data offsets.
		 */
		if (dev->fmt_cap->data_offset[p]) {
			memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff,
			       dev->fmt_cap->data_offset[p]);
			vbuf += dev->fmt_cap->data_offset[p];
		}
		tpg_calc_text_basep(&dev->tpg, basep, p, vbuf);
		if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
			tpg_fillbuffer(&dev->tpg, vivid_get_std_cap(dev), p, vbuf);
	}
	dev->must_blank[buf->vb.v4l2_buf.index] = false;

	/* Update the stream time, but only at the start of a new frame. */
	if (dev->field_cap != V4L2_FIELD_ALTERNATE || (buf->vb.v4l2_buf.sequence & 1) == 0)
		dev->ms_vid_cap = jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);

	ms = dev->ms_vid_cap;
	if (dev->osd_mode <= 1) {
		snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d %u%s",
				(ms / (60 * 60 * 1000)) % 24,
				(ms / (60 * 1000)) % 60,
				(ms / 1000) % 60,
				ms % 1000,
				buf->vb.v4l2_buf.sequence,
				(dev->field_cap == V4L2_FIELD_ALTERNATE) ?
					(buf->vb.v4l2_buf.field == V4L2_FIELD_TOP ?
					 " top" : " bottom") : "");
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
	}
	if (dev->osd_mode == 0) {
		snprintf(str, sizeof(str), " %dx%d, input %d ",
				dev->src_rect.width, dev->src_rect.height, dev->input);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);

		gain = v4l2_ctrl_g_ctrl(dev->gain);
		mutex_lock(dev->ctrl_hdl_user_vid.lock);
		snprintf(str, sizeof(str),
			" brightness %3d, contrast %3d, saturation %3d, hue %d ",
			dev->brightness->cur.val,
			dev->contrast->cur.val,
			dev->saturation->cur.val,
			dev->hue->cur.val);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str),
			" autogain %d, gain %3d, alpha 0x%02x ",
			dev->autogain->cur.val, gain, dev->alpha->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_vid.lock);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_aud.lock);
		snprintf(str, sizeof(str),
			" volume %3d, mute %d ",
			dev->volume->cur.val, dev->mute->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_aud.lock);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_gen.lock);
		snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
			dev->int32->cur.val,
			*dev->int64->p_cur.p_s64,
			dev->bitmask->cur.val);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
			dev->boolean->cur.val,
			dev->menu->qmenu[dev->menu->cur.val],
			dev->string->p_cur.p_char);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
			dev->int_menu->qmenu_int[dev->int_menu->cur.val],
			dev->int_menu->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_gen.lock);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		if (dev->button_pressed) {
			dev->button_pressed--;
			snprintf(str, sizeof(str), " button pressed!");
			tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		}
	}

	/*
	 * If "End of Frame" is specified at the timestamp source, then take
	 * the timestamp now.
	 */
	if (!dev->tstamp_src_is_soe)
		v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
	buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
}
Example #24
static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
		struct vivid_buffer *vid_cap_buf)
{
	bool blank = dev->must_blank[vid_cap_buf->vb.v4l2_buf.index];
	struct tpg_data *tpg = &dev->tpg;
	struct vivid_buffer *vid_out_buf = NULL;
	unsigned pixsize = tpg_g_twopixelsize(tpg, p) / 2;
	unsigned img_width = dev->compose_cap.width;
	unsigned img_height = dev->compose_cap.height;
	unsigned stride_cap = tpg->bytesperline[p];
	unsigned stride_out = dev->bytesperline_out[p];
	unsigned stride_osd = dev->display_byte_stride;
	unsigned hmax = (img_height * tpg->perc_fill) / 100;
	u8 *voutbuf;
	u8 *vosdbuf = NULL;
	unsigned y;
	bool blend = dev->bitmap_out || dev->clipcount_out || dev->fbuf_out_flags;
	/* Coarse scaling with Bresenham */
	unsigned vid_out_int_part;
	unsigned vid_out_fract_part;
	unsigned vid_out_y = 0;
	unsigned vid_out_error = 0;
	unsigned vid_overlay_int_part = 0;
	unsigned vid_overlay_fract_part = 0;
	unsigned vid_overlay_y = 0;
	unsigned vid_overlay_error = 0;
	unsigned vid_cap_right;
	bool quick;

	vid_out_int_part = dev->loop_vid_out.height / dev->loop_vid_cap.height;
	vid_out_fract_part = dev->loop_vid_out.height % dev->loop_vid_cap.height;

	if (!list_empty(&dev->vid_out_active))
		vid_out_buf = list_entry(dev->vid_out_active.next,
					 struct vivid_buffer, list);
	if (vid_out_buf == NULL)
		return -ENODATA;

	vid_cap_buf->vb.v4l2_buf.field = vid_out_buf->vb.v4l2_buf.field;

	voutbuf = vb2_plane_vaddr(&vid_out_buf->vb, p) +
				  vid_out_buf->vb.v4l2_planes[p].data_offset;
	voutbuf += dev->loop_vid_out.left * pixsize + dev->loop_vid_out.top * stride_out;
	vcapbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride_cap;

	if (dev->loop_vid_copy.width == 0 || dev->loop_vid_copy.height == 0) {
		/*
		 * If there is nothing to copy, then just fill the capture window
		 * with black.
		 */
		for (y = 0; y < hmax; y++, vcapbuf += stride_cap)
			memcpy(vcapbuf, tpg->black_line[p], img_width * pixsize);
		return 0;
	}

	if (dev->overlay_out_enabled &&
	    dev->loop_vid_overlay.width && dev->loop_vid_overlay.height) {
		vosdbuf = dev->video_vbase;
		vosdbuf += dev->loop_fb_copy.left * pixsize +
			   dev->loop_fb_copy.top * stride_osd;
		vid_overlay_int_part = dev->loop_vid_overlay.height /
				       dev->loop_vid_overlay_cap.height;
		vid_overlay_fract_part = dev->loop_vid_overlay.height %
					 dev->loop_vid_overlay_cap.height;
	}

	vid_cap_right = dev->loop_vid_cap.left + dev->loop_vid_cap.width;
	/* quick is true if no video scaling is needed */
	quick = dev->loop_vid_out.width == dev->loop_vid_cap.width;

	dev->cur_scaled_line = dev->loop_vid_out.height;
	for (y = 0; y < hmax; y++, vcapbuf += stride_cap) {
		/* osdline is true if this line requires overlay blending */
		bool osdline = vosdbuf && y >= dev->loop_vid_overlay_cap.top &&
			  y < dev->loop_vid_overlay_cap.top + dev->loop_vid_overlay_cap.height;

		/*
		 * If this line of the capture buffer doesn't get any video, then
		 * just fill with black.
		 */
		if (y < dev->loop_vid_cap.top ||
		    y >= dev->loop_vid_cap.top + dev->loop_vid_cap.height) {
			memcpy(vcapbuf, tpg->black_line[p], img_width * pixsize);
			continue;
		}

		/* fill the left border with black */
		if (dev->loop_vid_cap.left)
			memcpy(vcapbuf, tpg->black_line[p], dev->loop_vid_cap.left * pixsize);

		/* fill the right border with black */
		if (vid_cap_right < img_width)
			memcpy(vcapbuf + vid_cap_right * pixsize,
				tpg->black_line[p], (img_width - vid_cap_right) * pixsize);

		if (quick && !osdline) {
			memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
			       voutbuf + vid_out_y * stride_out,
			       dev->loop_vid_cap.width * pixsize);
			goto update_vid_out_y;
		}
		if (dev->cur_scaled_line == vid_out_y) {
			memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
			       dev->scaled_line,
			       dev->loop_vid_cap.width * pixsize);
			goto update_vid_out_y;
		}
		if (!osdline) {
			scale_line(voutbuf + vid_out_y * stride_out, dev->scaled_line,
				dev->loop_vid_out.width, dev->loop_vid_cap.width,
				tpg_g_twopixelsize(tpg, p));
		} else {
			/*
			 * Offset in bytes within loop_vid_copy to the start of the
			 * loop_vid_overlay rectangle.
			 */
			unsigned offset =
				(dev->loop_vid_overlay.left - dev->loop_vid_copy.left) * pixsize;
			u8 *osd = vosdbuf + vid_overlay_y * stride_osd;

			scale_line(voutbuf + vid_out_y * stride_out, dev->blended_line,
				dev->loop_vid_out.width, dev->loop_vid_copy.width,
				tpg_g_twopixelsize(tpg, p));
			if (blend)
				blend_line(dev, vid_overlay_y + dev->loop_vid_overlay.top,
					   dev->loop_vid_overlay.left,
					   dev->blended_line + offset, osd,
					   dev->loop_vid_overlay.width, pixsize);
			else
				memcpy(dev->blended_line + offset,
				       osd, dev->loop_vid_overlay.width * pixsize);
			scale_line(dev->blended_line, dev->scaled_line,
					dev->loop_vid_copy.width, dev->loop_vid_cap.width,
					tpg_g_twopixelsize(tpg, p));
		}
		dev->cur_scaled_line = vid_out_y;
		memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
		       dev->scaled_line,
		       dev->loop_vid_cap.width * pixsize);

update_vid_out_y:
		if (osdline) {
			vid_overlay_y += vid_overlay_int_part;
			vid_overlay_error += vid_overlay_fract_part;
			if (vid_overlay_error >= dev->loop_vid_overlay_cap.height) {
				vid_overlay_error -= dev->loop_vid_overlay_cap.height;
				vid_overlay_y++;
			}
		}
		vid_out_y += vid_out_int_part;
		vid_out_error += vid_out_fract_part;
		if (vid_out_error >= dev->loop_vid_cap.height) {
			vid_out_error -= dev->loop_vid_cap.height;
			vid_out_y++;
		}
	}

	if (!blank)
		return 0;
	for (; y < img_height; y++, vcapbuf += stride_cap)
		memcpy(vcapbuf, tpg->contrast_line[p], img_width * pixsize);
	return 0;
}
Example #25
/*
 * v4l2_m2m_streamoff() holds dev_mutex and waits mtk_venc_worker()
 * to call v4l2_m2m_job_finish().
 * If mtk_venc_worker() tries to acquire dev_mutex, it will deadlock.
 * So this function must not try to acquire dev->dev_mutex.
 * This means v4l2 ioctls and mtk_venc_worker() can run at the same time.
 * mtk_venc_worker() should be carefully implemented to avoid bugs.
 */
static void mtk_venc_worker(struct work_struct *work)
{
	struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
				    encode_work);
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	struct venc_frm_buf frm_buf;
	struct mtk_vcodec_mem bs_buf;
	struct venc_done_result enc_result;
	int ret, i;

	/* Check dst_buf: dst_buf may have been removed in device_run
	 * to store the encode header, so we need to check dst_buf and
	 * call job_finish here to prevent recursion.
	 */
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	if (!dst_buf) {
		v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
		return;
	}

	src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	memset(&frm_buf, 0, sizeof(frm_buf));
	for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) {
		frm_buf.fb_addr[i].dma_addr =
				vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, i);
		frm_buf.fb_addr[i].size =
				(size_t)src_buf->vb2_buf.planes[i].length;
	}
	bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
	bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
	bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;

	mtk_v4l2_debug(2,
			"Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu",
			(u64)frm_buf.fb_addr[0].dma_addr,
			frm_buf.fb_addr[0].size,
			(u64)frm_buf.fb_addr[1].dma_addr,
			frm_buf.fb_addr[1].size,
			(u64)frm_buf.fb_addr[2].dma_addr,
			frm_buf.fb_addr[2].size);

	ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
			     &frm_buf, &bs_buf, &enc_result);

	dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
	dst_buf->timecode = src_buf->timecode;

	if (enc_result.is_key_frm)
		dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;

	if (ret) {
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
		dst_buf->vb2_buf.planes[0].bytesused = 0;
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
		mtk_v4l2_err("venc_if_encode failed=%d", ret);
	} else {
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
		dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
		mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
				 enc_result.bs_size);
	}

	v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);

	mtk_v4l2_debug(1, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>",
			src_buf->vb2_buf.index, dst_buf->vb2_buf.index, ret,
			enc_result.bs_size);
}
/**
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if ((read && !(q->io_modes & VB2_READ)) ||
	   (!read && !(q->io_modes & VB2_WRITE)))
		BUG();

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check if streaming api has not been already activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
		(read) ? "read" : "write", count, q->io_flags);

	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->flags = q->io_flags;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->req.count = count;
	fileio->req.memory = V4L2_MEMORY_MMAP;
	fileio->req.type = q->type;
	ret = vb2_reqbufs(q, &fileio->req);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		fileio->req.count = 0;
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL)
			goto err_reqbufs;
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			struct v4l2_buffer *b = &fileio->b;
			memset(b, 0, sizeof(*b));
			b->type = q->type;
			b->memory = q->memory;
			b->index = i;
			ret = vb2_qbuf(q, b);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}

		/*
		 * Start streaming.
		 */
		ret = vb2_streamon(q, q->type);
		if (ret)
			goto err_reqbufs;
	}

	q->fileio = fileio;

	return ret;

err_reqbufs:
	vb2_reqbufs(q, &fileio->req);

err_kfree:
	kfree(fileio);
	return ret;
}
Example #27
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	if ((read && !(q->io_modes & VB2_READ)) ||
	   (!read && !(q->io_modes & VB2_WRITE)))
		BUG();

	if (!q->mem_ops->vaddr)
		return -EBUSY;

	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
		(read) ? "read" : "write", count, q->io_flags);

	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->flags = q->io_flags;

	fileio->req.count = count;
	fileio->req.memory = V4L2_MEMORY_MMAP;
	fileio->req.type = q->type;
	ret = vb2_reqbufs(q, &fileio->req);
	if (ret)
		goto err_kfree;

	if (q->bufs[0]->num_planes != 1) {
		fileio->req.count = 0;
		ret = -EBUSY;
		goto err_reqbufs;
	}

	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL)
			goto err_reqbufs;
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	if (read) {
		for (i = 0; i < q->num_buffers; i++) {
			struct v4l2_buffer *b = &fileio->b;
			memset(b, 0, sizeof(*b));
			b->type = q->type;
			b->memory = q->memory;
			b->index = i;
			ret = vb2_qbuf(q, b);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}

		ret = vb2_streamon(q, q->type);
		if (ret)
			goto err_reqbufs;
	}

	q->fileio = fileio;

	return ret;

err_reqbufs:
	vb2_reqbufs(q, &fileio->req);

err_kfree:
	kfree(fileio);
	return ret;
}
Example #28
int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
			const unsigned char *src, int len,
			enum dmx_buffer_flags *buffer_flags)
{
	unsigned long flags = 0;
	void *vbuf = NULL;
	int todo = len;
	unsigned char *psrc = (unsigned char *)src;
	int ll = 0;

	/*
	 * Normal case: this function is called twice by the demux driver,
	 * once with a valid src pointer and a second time with a NULL pointer.
	 */
	if (!src || !len)
		return 0;
	spin_lock_irqsave(&ctx->slock, flags);
	if (buffer_flags && *buffer_flags) {
		ctx->flags |= *buffer_flags;
		*buffer_flags = 0;
	}
	while (todo) {
		if (!ctx->buf) {
			if (list_empty(&ctx->dvb_q)) {
				dprintk(3, "[%s] Buffer overflow!!!\n",
					ctx->name);
				break;
			}

			ctx->buf = list_entry(ctx->dvb_q.next,
					      struct dvb_buffer, list);
			ctx->remain = vb2_plane_size(&ctx->buf->vb, 0);
			ctx->offset = 0;
		}

		if (!dvb_vb2_is_streaming(ctx)) {
			vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_ERROR);
			list_del(&ctx->buf->list);
			ctx->buf = NULL;
			break;
		}

		/* Fill buffer */
		ll = min(todo, ctx->remain);
		vbuf = vb2_plane_vaddr(&ctx->buf->vb, 0);
		memcpy(vbuf + ctx->offset, psrc, ll);
		todo -= ll;
		psrc += ll;

		ctx->remain -= ll;
		ctx->offset += ll;

		if (ctx->remain == 0) {
			vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_DONE);
			list_del(&ctx->buf->list);
			ctx->buf = NULL;
		}
	}

	if (ctx->nonblocking && ctx->buf) {
		vb2_set_plane_payload(&ctx->buf->vb, 0, ll);
		vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_DONE);
		list_del(&ctx->buf->list);
		ctx->buf = NULL;
	}
	spin_unlock_irqrestore(&ctx->slock, flags);

	if (todo)
		dprintk(1, "[%s] %d bytes are dropped.\n", ctx->name, todo);
	else
		dprintk(3, "[%s]\n", ctx->name);

	dprintk(3, "[%s] %d bytes are copied\n", ctx->name, len - todo);
	return (len - todo);
}
Example #29
static void mtk_vdec_worker(struct work_struct *work)
{
	struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
				decode_work);
	struct mtk_vcodec_dev *dev = ctx->dev;
	struct vb2_buffer *src_buf, *dst_buf;
	struct mtk_vcodec_mem buf;
	struct vdec_fb *pfb;
	bool res_chg = false;
	int ret;
	struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
	struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;

	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	if (src_buf == NULL) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
		return;
	}

	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	if (dst_buf == NULL) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
		return;
	}

	src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
	src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb);

	dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
	dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);

	pfb = &dst_buf_info->frame_buffer;
	pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
	pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
	pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;

	pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
	pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
	pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
	pfb->status = 0;
	mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);

	mtk_v4l2_debug(3,
			"id=%d Framebuf  pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
			dst_buf->index, pfb,
			pfb->base_y.va, &pfb->base_y.dma_addr,
			&pfb->base_c.dma_addr, pfb->base_y.size);

	if (src_buf_info->lastframe) {
		mtk_v4l2_debug(1, "Got empty flush input buffer.");
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);

		/* update dst buf status */
		dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
		mutex_lock(&ctx->lock);
		dst_buf_info->used = false;
		mutex_unlock(&ctx->lock);

		vdec_if_decode(ctx, NULL, NULL, &res_chg);
		clean_display_buffer(ctx);
		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
		dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_LAST;
		v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
		clean_free_buffer(ctx);
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		return;
	}
	buf.va = vb2_plane_vaddr(src_buf, 0);
	buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
	buf.size = (size_t)src_buf->planes[0].bytesused;
	if (!buf.va) {
		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
		mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
				ctx->id, src_buf->index);
		return;
	}
	mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
			ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
	dst_buf_info->vb.vb2_buf.timestamp =
			src_buf_info->vb.vb2_buf.timestamp;
	dst_buf_info->vb.timecode = src_buf_info->vb.timecode;
	mutex_lock(&ctx->lock);
	dst_buf_info->used = true;
	mutex_unlock(&ctx->lock);
	src_buf_info->used = true;

	ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);

	if (ret) {
		mtk_v4l2_err(
			" <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
			ctx->id,
			src_buf->index,
			buf.size,
			src_buf_info->vb.vb2_buf.timestamp,
			dst_buf->index,
			ret, res_chg);
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
	} else if (res_chg == false) {
		/*
		 * we only return src buffer with VB2_BUF_STATE_DONE
		 * when decode success without resolution change
		 */
		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
	}

	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	clean_display_buffer(ctx);
	clean_free_buffer(ctx);

	if (!ret && res_chg) {
		mtk_vdec_pic_info_update(ctx);
		/*
		 * On encountering a resolution change in the stream,
		 * the driver must first process and decode all
		 * remaining buffers from before the resolution change
		 * point, so call flush decode here.
		 */
		mtk_vdec_flush_decoder(ctx);
		/*
		 * After all buffers containing decoded frames from
		 * before the resolution change point are ready to be
		 * dequeued on the CAPTURE queue, the driver sends a
		 * V4L2_EVENT_SOURCE_CHANGE event for source change
		 * type V4L2_EVENT_SRC_CH_RESOLUTION.
		 */
		mtk_vdec_queue_res_chg_event(ctx);
	}
	v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
}
/* Set decoding frame buffer */
int hevc_set_dec_frame_buffer(struct hevc_ctx *ctx)
{
	struct hevc_dev *dev;
	struct hevc_dec *dec;
	unsigned int i, frame_size_mv;
	size_t buf_addr1;
	int buf_size1;
	int align_gap;
	struct hevc_buf *buf;
	struct hevc_raw_info *raw;
	struct list_head *buf_queue;
	unsigned char *dpb_vir;
	int j;

	if (!ctx) {
		hevc_err("no hevc context to run\n");
		return -EINVAL;
	}
	dev = ctx->dev;
	if (!dev) {
		hevc_err("no hevc device to run\n");
		return -EINVAL;
	}
	dec = ctx->dec_priv;
	if (!dec) {
		hevc_err("no hevc decoder to run\n");
		return -EINVAL;
	}

	raw = &ctx->raw_buf;
	buf_addr1 = ctx->port_a_phys;
	buf_size1 = ctx->port_a_size;

	hevc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
	hevc_info("Total DPB COUNT: %d\n", dec->total_dpb_count);
	hevc_debug(2, "Setting display delay to %d\n", dec->display_delay);
	hevc_debug(2, "ctx->scratch_buf_size %d\n", ctx->scratch_buf_size);

	WRITEL(dec->total_dpb_count, HEVC_D_NUM_DPB);

	hevc_debug(2, "raw->num_planes %d\n", raw->num_planes);
	for (i = 0; i < raw->num_planes; i++) {
		hevc_debug(2, "raw->plane_size[%d]= %d\n", i, raw->plane_size[i]);
		WRITEL(raw->plane_size[i], HEVC_D_FIRST_PLANE_DPB_SIZE + i*4);
	}

	if (dec->is_dynamic_dpb)
		WRITEL((0x1 << HEVC_D_OPT_DYNAMIC_DPB_SET_SHIFT), HEVC_D_INIT_BUFFER_OPTIONS);

	WRITEL(buf_addr1, HEVC_D_SCRATCH_BUFFER_ADDR);
	WRITEL(ctx->scratch_buf_size, HEVC_D_SCRATCH_BUFFER_SIZE);
	buf_addr1 += ctx->scratch_buf_size;
	buf_size1 -= ctx->scratch_buf_size;

	WRITEL(ctx->mv_size, HEVC_D_MV_BUFFER_SIZE);

	frame_size_mv = ctx->mv_size;
	hevc_debug(2, "Frame size: %d, %d, %d, mv: %d\n",
			raw->plane_size[0], raw->plane_size[1],
			raw->plane_size[2], frame_size_mv);

	if (dec->dst_memtype == V4L2_MEMORY_USERPTR || dec->dst_memtype == V4L2_MEMORY_DMABUF)
		buf_queue = &ctx->dst_queue;
	else
		buf_queue = &dec->dpb_queue;

	i = 0;
	list_for_each_entry(buf, buf_queue, list) {
		/* Do not set the DPB */
		if (dec->is_dynamic_dpb)
			break;
		for (j = 0; j < raw->num_planes; j++) {
			hevc_debug(2, "buf->planes.raw[%d] = 0x%x\n", j, buf->planes.raw[j]);
			WRITEL(buf->planes.raw[j], HEVC_D_FIRST_PLANE_DPB0 + (j*0x100 + i*4));
		}

		if ((i == 0) && (!ctx->is_drm)) {
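			/*
			 * Clear the first DPB to black: zero luma and
			 * neutral (0x80) chroma planes.
			 */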
			int j, color[3] = { 0x0, 0x80, 0x80 };
			for (j = 0; j < raw->num_planes; j++) {
				dpb_vir = vb2_plane_vaddr(&buf->vb, j);
				if (dpb_vir)
					memset(dpb_vir, color[j],
							raw->plane_size[j]);
			}
			hevc_mem_clean_vb(&buf->vb, j);
		}
		i++;
	}

	hevc_set_dec_stride_buffer(ctx, buf_queue);

	WRITEL(dec->mv_count, HEVC_D_NUM_MV);

	for (i = 0; i < dec->mv_count; i++) {
		/* To test alignment */
		align_gap = buf_addr1;
		buf_addr1 = ALIGN(buf_addr1, 16);
		align_gap = buf_addr1 - align_gap;
		buf_size1 -= align_gap;

		hevc_debug(2, "\tBuf1: %x, size: %d\n", buf_addr1, buf_size1);
		WRITEL(buf_addr1, HEVC_D_MV_BUFFER0 + i * 4);
		buf_addr1 += frame_size_mv;
		buf_size1 -= frame_size_mv;
	}

	hevc_debug(2, "Buf1: %u, buf_size1: %d (frames %d)\n",
			buf_addr1, buf_size1, dec->total_dpb_count);
	if (buf_size1 < 0) {
		hevc_debug(2, "Not enough memory has been allocated.\n");
		return -ENOMEM;
	}

	WRITEL(ctx->inst_no, HEVC_INSTANCE_ID);
	hevc_cmd_host2risc(HEVC_CH_INIT_BUFS, NULL);

	hevc_debug(2, "After setting buffers.\n");

	return 0;
}