/*
 * vb2 .buf_prepare handler: validate the payload of an OUTPUT buffer,
 * refuse queuing after a disconnect, and cache the plane mapping in the
 * driver's private buffer structure.
 */
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct uvc_video_queue *vidq = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *ubuf = container_of(vb, struct uvc_buffer, buf);

	/* OUTPUT buffers must not claim more payload than the plane holds. */
	if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		return -EINVAL;
	}

	if (unlikely(vidq->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	/* Record kernel mapping, size and initial payload for plane 0. */
	ubuf->state = UVC_BUF_STATE_QUEUED;
	ubuf->mem = vb2_plane_vaddr(vb, 0);
	ubuf->length = vb2_plane_size(vb, 0);
	ubuf->bytesused = (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
			0 : vb2_get_plane_payload(vb, 0);

	return 0;
}
Esempio n. 2
0
/*
 * vb2 .queue_setup handler for FIMC-IS capture: bound the buffer count,
 * and either verify a caller-provided plane layout or compute per-plane
 * sizes from the current multiplanar pixel format.
 */
static int isp_video_capture_queue_setup(struct vb2_queue *vq,
			unsigned int *num_buffers, unsigned int *num_planes,
			unsigned int sizes[], void *allocators[])
{
	struct fimc_isp *isp = vb2_get_drv_priv(vq);
	struct v4l2_pix_format_mplane *pixm = &isp->video_capture.pixfmt;
	const struct fimc_fmt *fmt = isp->video_capture.format;
	unsigned int pix_count, plane;

	if (fmt == NULL)
		return -EINVAL;

	pix_count = pixm->width * pixm->height;

	*num_buffers = clamp_t(u32, *num_buffers, FIMC_ISP_REQ_BUFS_MIN,
						FIMC_ISP_REQ_BUFS_MAX);

	if (*num_planes) {
		/* Caller supplied a layout: check it against the format. */
		if (*num_planes != fmt->memplanes)
			return -EINVAL;
		for (plane = 0; plane < *num_planes; plane++) {
			if (sizes[plane] < (pix_count * fmt->depth[plane]) / 8)
				return -EINVAL;
			allocators[plane] = isp->alloc_ctx;
		}
		return 0;
	}

	/* No layout given: derive plane count and sizes from the format. */
	*num_planes = fmt->memplanes;

	for (plane = 0; plane < fmt->memplanes; plane++) {
		sizes[plane] = (pix_count * fmt->depth[plane]) / 8;
		allocators[plane] = isp->alloc_ctx;
	}

	return 0;
}
Esempio n. 3
0
/*
 * vb2 .queue_setup handler for the JPEG decoder.
 *
 * For the OUTPUT (compressed input) queue every plane gets the fixed
 * bitstream memory size; for the CAPTURE (decoded output) queue the
 * per-plane size is width * height * depth / 8.
 *
 * Fix: the visible body previously fell off the end of a non-void
 * function without returning (and without closing the function), which
 * is undefined behavior — it now returns 0 on success, matching the
 * other queue_setup handlers in this file.
 */
static int jpeg_dec_queue_setup(struct vb2_queue *vq,
					const struct v4l2_format *fmt, unsigned int *num_buffers,
					unsigned int *num_planes, unsigned int sizes[],
					void *allocators[])
{
	struct jpeg_ctx *ctx = vb2_get_drv_priv(vq);
	int i;

	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/* Compressed input side: fixed-size bitstream planes. */
		*num_planes = ctx->param.dec_param.in_plane;
		for (i = 0; i < ctx->param.dec_param.in_plane; i++) {
			sizes[i] = ctx->param.dec_param.mem_size;
			allocators[i] = ctx->dev->alloc_ctx;
		}
	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		/* Decoded output side: size from dimensions and bit depth. */
		*num_planes = ctx->param.dec_param.out_plane;
		for (i = 0; i < ctx->param.dec_param.out_plane; i++) {
			sizes[i] = (ctx->param.dec_param.out_width *
				ctx->param.dec_param.out_height *
				ctx->param.dec_param.out_depth[i]) >> 3;
			allocators[i] = ctx->dev->alloc_ctx;
		}
	}

	return 0;
}
Esempio n. 4
0
/*
 * vb2 .buf_queue handler for FIMC-LITE capture.  The buffer is handed
 * straight to the hardware when the device is idle with no active
 * buffer, otherwise it is parked on the pending queue; capture is
 * kicked off on the first buffer queued while streaming is enabled.
 */
static void buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct flite_buffer *buf
		= container_of(vbuf, struct flite_buffer, vb);
	struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long flags;

	spin_lock_irqsave(&fimc->slock, flags);
	buf->paddr = vb2_dma_contig_plane_dma_addr(vb, 0);

	/* Assign a DMA slot index, wrapping around at reqbufs_count. */
	buf->index = fimc->buf_index++;
	if (fimc->buf_index >= fimc->reqbufs_count)
		fimc->buf_index = 0;

	/*
	 * Device idle (not suspended, not streaming) and no active buffer:
	 * program the DMA address now and mark the buffer active;
	 * otherwise queue it as pending.
	 */
	if (!test_bit(ST_FLITE_SUSPENDED, &fimc->state) &&
	    !test_bit(ST_FLITE_STREAM, &fimc->state) &&
	    list_empty(&fimc->active_buf_q)) {
		flite_hw_set_dma_buffer(fimc, buf);
		fimc_lite_active_queue_add(fimc, buf);
	} else {
		fimc_lite_pending_queue_add(fimc, buf);
	}

	/*
	 * First pending buffer after STREAMON: start capture.  The spinlock
	 * is dropped before calling into the sensor pipeline, which may
	 * sleep.  ST_FLITE_STREAM / ST_SENSOR_STREAM bits ensure each start
	 * happens only once.
	 */
	if (vb2_is_streaming(&fimc->vb_queue) &&
	    !list_empty(&fimc->pending_buf_q) &&
	    !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
		flite_hw_capture_start(fimc);
		spin_unlock_irqrestore(&fimc->slock, flags);

		if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
			fimc_pipeline_call(&fimc->ve, set_stream, 1);
		return;
	}
	spin_unlock_irqrestore(&fimc->slock, flags);
}
Esempio n. 5
0
/* vb2 .buf_queue handler: hand the buffer over to the m2m framework. */
static void fimc_buf_queue(struct vb2_buffer *vb)
{
	struct fimc_ctx *fimc_ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(fimc_ctx->fh.m2m_ctx, vb);
}
Esempio n. 6
0
/* abort streaming and wait for last buffer */
static int stop_streaming(struct vb2_queue *vq)
{
	/* Forward to the stk1160 core and propagate its status. */
	return stk1160_stop_streaming(vb2_get_drv_priv(vq));
}
Esempio n. 7
0
/* vb2 .start_streaming handler: delegate to the stk1160 core. */
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	/* The queued-buffer count is unused; the core drives capture. */
	return stk1160_start_streaming(vb2_get_drv_priv(vq));
}
Esempio n. 8
0
static void myvivi_buffer_cleanup(struct vb2_buffer *vb)
{
	/*
	 * No per-buffer resources to release.  The driver private data is
	 * fetched but its result is discarded — NOTE(review): this call has
	 * no visible side effect here, so the function is effectively a
	 * stub kept to satisfy the vb2_ops interface.
	 */
	vb2_get_drv_priv(vb->vb2_queue);
}
Esempio n. 9
0
/* vb2 .buf_queue handler: pass the prepared buffer to the m2m core. */
static void g2d_buf_queue(struct vb2_buffer *vb)
{
	struct g2d_ctx *g2d = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(g2d->m2m_ctx, vb);
}
Esempio n. 10
0
/* vb2 .wait_finish handler: re-acquire the lock dropped by wait_prepare. */
static void xvip_dma_wait_finish(struct vb2_queue *vq)
{
	struct xvip_dma *xdma = vb2_get_drv_priv(vq);

	mutex_lock(&xdma->lock);
}
Esempio n. 11
0
/*
 * vb2 .start_streaming handler for the MTK video encoder.  Configures
 * the encoder once both the output and capture queues have been started;
 * on failure every active buffer is returned to vb2 in the QUEUED state.
 */
static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
	struct venc_enc_param param;
	int ret;
	int i;

	/*
	 * Once the state turns into MTK_STATE_ABORT, stop_streaming is
	 * needed to clear it, so refuse to start in that state (or before
	 * the context is initialized).
	 */
	if ((ctx->state == MTK_STATE_ABORT) || (ctx->state == MTK_STATE_FREE)) {
		ret = -EIO;
		goto err_set_param;
	}

	/* Do the initialization only when both queues have been started. */
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		if (!vb2_start_streaming_called(&ctx->m2m_ctx->cap_q_ctx.q))
			return 0;
	} else {
		if (!vb2_start_streaming_called(&ctx->m2m_ctx->out_q_ctx.q))
			return 0;
	}

	/* param is filled in by mtk_venc_set_param() before being used. */
	mtk_venc_set_param(ctx, &param);
	ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, &param);
	if (ret) {
		mtk_v4l2_err("venc_if_set_param failed=%d", ret);
		ctx->state = MTK_STATE_ABORT;
		goto err_set_param;
	}
	ctx->param_change = MTK_ENCODE_PARAM_NONE;

	/*
	 * For H.264 output when the sequence header mode is not SEPARATE,
	 * request the prepend-header behavior from the encoder interface.
	 * NOTE(review): exact header semantics inferred from the parameter
	 * name — confirm against the venc_if documentation.
	 */
	if ((ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_H264) &&
	    (ctx->enc_params.seq_hdr_mode !=
				V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)) {
		ret = venc_if_set_param(ctx,
					VENC_SET_PARAM_PREPEND_HEADER,
					NULL);
		if (ret) {
			mtk_v4l2_err("venc_if_set_param failed=%d", ret);
			ctx->state = MTK_STATE_ABORT;
			goto err_set_param;
		}
		ctx->state = MTK_STATE_HEADER;
	}

	return 0;

err_set_param:
	/* Hand every active buffer back to vb2 in the QUEUED state. */
	for (i = 0; i < q->num_buffers; ++i) {
		if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
			mtk_v4l2_debug(0, "[%d] id=%d, type=%d, %d -> VB2_BUF_STATE_QUEUED",
					ctx->id, i, q->type,
					(int)q->bufs[i]->state);
			v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
					VB2_BUF_STATE_QUEUED);
		}
	}

	return ret;
}
Esempio n. 12
0
/* vb2 unlock callback: release the device-wide JPEG lock. */
static void jpeg_enc_unlock(struct vb2_queue *vq)
{
	struct jpeg_ctx *jctx = vb2_get_drv_priv(vq);

	mutex_unlock(&jctx->dev->lock);
}
Esempio n. 13
0
/* vb2 unlock callback: drop the FIMC-IS device mutex for this node. */
static void fimc_is_isp_unlock(struct vb2_queue *q)
{
	struct fimc_is_video_dev *vdev = vb2_get_drv_priv(q);

	mutex_unlock(&vdev->dev->lock);
}
Esempio n. 14
0
/*
 * vb2 .start_streaming handler for the HVA encoder.  Registers the
 * context in the device's instance table and opens the encoder once both
 * queues are streaming; on error all pending buffers are returned to vb2
 * in the QUEUED state.
 */
static int hva_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = ctx_to_dev(ctx);
	struct vb2_v4l2_buffer *vbuf;
	int ret;
	unsigned int i;
	bool found = false;

	dev_dbg(dev, "%s %s start streaming\n", ctx->name,
		to_type_str(vq->type));

	/* open encoder when both start_streaming have been called */
	if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->cap_q_ctx.q))
			return 0;
	} else {
		if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->out_q_ctx.q))
			return 0;
	}

	/* store the instance context in the first free slot of the array */
	for (i = 0; i < HVA_MAX_INSTANCES; i++) {
		if (!hva->instances[i]) {
			hva->instances[i] = ctx;
			/* save the context identifier in the context */
			ctx->id = i;
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(dev, "%s maximum instances reached\n", ctx->name);
		ret = -ENOMEM;
		goto err;
	}

	hva->nb_of_instances++;

	/* lazily open the encoder for this context's stream/pixel format */
	if (!ctx->enc) {
		ret = hva_open_encoder(ctx,
				       ctx->streaminfo.streamformat,
				       ctx->frameinfo.pixelformat,
				       &ctx->enc);
		if (ret < 0)
			goto err_ctx;
	}

	return 0;

err_ctx:
	/* roll back the instance-table registration done above */
	hva->instances[ctx->id] = NULL;
	hva->nb_of_instances--;
err:
	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		/* return of all pending buffers to vb2 (in queued state) */
		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
	} else {
		/* return of all pending buffers to vb2 (in queued state) */
		while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
	}

	return ret;
}
Esempio n. 15
0
/* abort streaming and wait for last buffer */
static void stop_streaming(struct vb2_queue *vq)
{
	/* Delegate teardown to the stk1160 core; its status is discarded. */
	struct stk1160 *stk = vb2_get_drv_priv(vq);

	stk1160_stop_streaming(stk);
}
/*
 * vb2 .buf_queue handler for G-Scaler writeback capture.  Waits for the
 * buffer's acquire fence, programs the output address, and on the first
 * run (ST_CAPT_RUN not yet set) performs the full scaler configuration
 * and starts the hardware.
 */
static void gsc_capture_buf_queue(struct vb2_buffer *vb)
{
	struct gsc_input_buf *buf
		= container_of(vb, struct gsc_input_buf, vb);
	struct vb2_queue *q = vb->vb2_queue;
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	/* Wait (up to 100 ms) for the producer's sync fence, then drop it. */
	if (vb->acquire_fence) {
		gsc_dbg("acquire fence has..");
		ret = sync_fence_wait(vb->acquire_fence, 100);
		sync_fence_put(vb->acquire_fence);
		vb->acquire_fence = NULL;
		if (ret < 0) {
			gsc_err("synce_fence_wait() timeout");
			return;
		}
	}

	/* First buffer before STREAMON: init the queue and start the subdev. */
	if (!q->streaming) {
		gsc_info("gsc-wb initialize");
		INIT_LIST_HEAD(&gsc->cap.active_buf_q);
		ret = v4l2_subdev_call(gsc->cap.sd, video,
						s_stream, 1);
		if (ret) {
			gsc_err("gsc s_stream failed");
			return;
		}
	}

	ret = gsc_cap_set_addr(gsc, ctx, buf, vb->v4l2_buf.index);
	if (ret) {
		gsc_err("Failed to prepare output addr");
		return;
	}

	/* One-time hardware setup guarded by the ST_CAPT_RUN state bit. */
	if (!test_and_set_bit(ST_CAPT_RUN, &gsc->state)) {
		ret = gsc_set_scaler_info(ctx);
		if (ret) {
			gsc_err("Scaler setup error");
			return;
		}
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_h_coef(ctx);
		gsc_hw_set_v_coef(ctx);

		gsc_hw_set_output_rotation(ctx);

		gsc_hw_set_global_alpha(ctx);
		/*
		 * NOTE(review): is_rotation is not defined anywhere in this
		 * file view — presumably a global or macro; confirm it exists
		 * and is per-context rather than global state.
		 */
		if (is_rotation) {
			ret = gsc_check_rotation_size(ctx);
			if (ret < 0) {
				gsc_err("Scaler setup error");
				return;
			}
		}

		gsc_hw_set_sfr_update(ctx);
		gsc_hw_enable_control(gsc, true);
		ret = gsc_wait_operating(gsc);
		if (ret < 0) {
			gsc_err("gscaler wait operating timeout");
			return;
		}
		gsc_dbg("gsc-wb start");
	} else {
		gsc_err();
	}
}
Esempio n. 17
0
/* vb2 unlock callback: release the FIMC device-wide mutex. */
static void fimc_unlock(struct vb2_queue *vq)
{
	struct fimc_ctx *fctx = vb2_get_drv_priv(vq);

	mutex_unlock(&fctx->fimc_dev->lock);
}
Esempio n. 18
0
/* vb2 .wait_prepare handler: drop the queue mutex while vb2 sleeps. */
static void uvc_wait_prepare(struct vb2_queue *vq)
{
	struct uvc_video_queue *uvq = vb2_get_drv_priv(vq);

	mutex_unlock(&uvq->mutex);
}
Esempio n. 19
0
/* vb2 unlock callback: release the FIMC-LITE device lock. */
static void fimc_unlock(struct vb2_queue *vq)
{
	struct fimc_lite *flite = vb2_get_drv_priv(vq);

	mutex_unlock(&flite->lock);
}
Esempio n. 20
0
/* vb2 unlock callback: release the per-device output-queue mutex. */
void vc_outbuf_unlock(struct vb2_queue *vq)
{
	struct vc_device *vcdev = vb2_get_drv_priv(vq);

	mutex_unlock(&vcdev->vc_mutex);
}
/* vb2 .start_streaming handler: kick off VCAP capture for the client. */
static int capture_start_streaming(struct vb2_queue *vq)
{
	struct vcap_client_data *client = vb2_get_drv_priv(vq);

	dprintk(2, "VC start streaming\n");
	return vc_start_capture(client);
}
Esempio n. 22
0
/* vb2 unlock callback: release the rotator device lock. */
static void rot_vb2_unlock(struct vb2_queue *vq)
{
	struct rot_ctx *rctx = vb2_get_drv_priv(vq);

	mutex_unlock(&rctx->rot_dev->lock);
}
Esempio n. 23
0
/* vb2 .wait_finish handler: re-take the mutex dropped in wait_prepare. */
static void uvc_wait_finish(struct vb2_queue *vq)
{
	struct uvc_video_queue *uvq = vb2_get_drv_priv(vq);

	mutex_lock(&uvq->mutex);
}
Esempio n. 24
0
/* vb2 .wait_prepare handler: drop the channel lock while vb2 blocks. */
static void xvip_dma_wait_prepare(struct vb2_queue *vq)
{
	struct xvip_dma *xdma = vb2_get_drv_priv(vq);

	mutex_unlock(&xdma->lock);
}