Example No. 1
/* Set format */
static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct s5p_mfc_dev *dev = video_drvdata(file);
	struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
	int ret = 0;
	struct v4l2_pix_format_mplane *pix_mp;
	struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;

	mfc_debug_enter();
	ret = vidioc_try_fmt(file, priv, f);
	pix_mp = &f->fmt.pix_mp;
	if (ret)
		return ret;
	if (vb2_is_streaming(&ctx->vq_src) || vb2_is_streaming(&ctx->vq_dst)) {
		v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
		ret = -EBUSY;
		goto out;
	}
	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		/* dst_fmt is validated by call to vidioc_try_fmt */
		ctx->dst_fmt = find_format(f, MFC_FMT_RAW);
		ret = 0;
		goto out;
	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/* src_fmt is validated by call to vidioc_try_fmt */
		ctx->src_fmt = find_format(f, MFC_FMT_DEC);
		ctx->codec_mode = ctx->src_fmt->codec_mode;
		mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
		pix_mp->height = 0;
		pix_mp->width = 0;
		if (pix_mp->plane_fmt[0].sizeimage == 0)
			pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
								DEF_CPB_SIZE;
		else if (pix_mp->plane_fmt[0].sizeimage > buf_size->cpb)
			ctx->dec_src_buf_size = buf_size->cpb;
		else
			ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
		pix_mp->plane_fmt[0].bytesperline = 0;
		ctx->state = MFCINST_INIT;
		ret = 0;
		goto out;
	} else {
		mfc_err("Wrong type error for S_FMT : %d", f->type);
		ret = -EINVAL;
		goto out;
	}

out:
	mfc_debug_leave();
	return ret;
}
Example No. 2
static int gsc_capture_s_fmt_mplane(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct gsc_dev *gsc = video_drvdata(file);
	struct gsc_ctx *ctx = gsc->cap.ctx;
	struct gsc_frame *frame;
	struct v4l2_pix_format_mplane *pix;
	int i, ret = 0;

	ret = gsc_capture_try_fmt_mplane(file, fh, f);
	if (ret)
		return ret;

	if (vb2_is_streaming(&gsc->cap.vbq)) {
		gsc_err("queue (%d) busy", f->type);
		return -EBUSY;
	}

	frame = &ctx->d_frame;

	pix = &f->fmt.pix_mp;
	frame->fmt = find_format(&pix->pixelformat, NULL, 0);
	if (!frame->fmt)
		return -EINVAL;

	for (i = 0; i < frame->fmt->nr_comp; i++)
		frame->payload[i] =
			pix->plane_fmt[i].bytesperline * pix->height;

	gsc_set_frame_size(frame, pix->width, pix->height);

	gsc_dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);

	return 0;
}
Example No. 3
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dir;
	u32 flags;

	flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		dir = DMA_DEV_TO_MEM;
	else
		dir = DMA_MEM_TO_DEV;

	desc = dmaengine_prep_slave_single(dma->dma, buf->addr, buf->length,
					   dir, flags);
	if (!desc) {
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
		return;
	}
	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	dmaengine_submit(desc);

	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
}
Example No. 4
static void buffer_queue(struct vb2_buffer *vb)
{
	struct flite_buffer *buf
		= container_of(vb, struct flite_buffer, vb);
	struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long flags;

	spin_lock_irqsave(&fimc->slock, flags);
	buf->paddr = vb2_dma_contig_plane_dma_addr(vb, 0);

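	/*
	 * If the device is neither suspended nor streaming and no buffer is
	 * active, program the buffer address directly; otherwise add the
	 * buffer to the pending queue.
	 */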
	if (!test_bit(ST_FLITE_SUSPENDED, &fimc->state) &&
	    !test_bit(ST_FLITE_STREAM, &fimc->state) &&
	    list_empty(&fimc->active_buf_q)) {
		flite_hw_set_output_addr(fimc, buf->paddr);
		fimc_lite_active_queue_add(fimc, buf);
	} else {
		fimc_lite_pending_queue_add(fimc, buf);
	}

	if (vb2_is_streaming(&fimc->vb_queue) &&
	    !list_empty(&fimc->pending_buf_q) &&
	    !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
		flite_hw_capture_start(fimc);
		spin_unlock_irqrestore(&fimc->slock, flags);

		if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
			fimc_pipeline_call(fimc, set_stream,
					   &fimc->pipeline, 1);
		return;
	}
	spin_unlock_irqrestore(&fimc->slock, flags);
}
Example No. 5
static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct device *dev = ctx_to_dev(ctx);
	struct vb2_queue *vq;
	int ret;

	ret = hva_try_fmt_stream(file, fh, f);
	if (ret) {
		dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): unsupported format %.4s\n",
			ctx->name, (char *)&f->fmt.pix.pixelformat);
		return ret;
	}

	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
	if (vb2_is_streaming(vq)) {
		dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): queue busy\n",
			ctx->name);
		return -EBUSY;
	}

	ctx->max_stream_size = f->fmt.pix.sizeimage;
	ctx->streaminfo.width = f->fmt.pix.width;
	ctx->streaminfo.height = f->fmt.pix.height;
	ctx->streaminfo.streamformat = f->fmt.pix.pixelformat;
	ctx->flags |= HVA_FLAG_STREAMINFO;

	return 0;
}
Example No. 6
static void hva_stop_streaming(struct vb2_queue *vq)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vq);
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = ctx_to_dev(ctx);
	const struct hva_enc *enc = ctx->enc;
	struct vb2_v4l2_buffer *vbuf;

	dev_dbg(dev, "%s %s stop streaming\n", ctx->name,
		to_type_str(vq->type));

	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		/* return all pending buffers to vb2 (in error state) */
		ctx->frame_num = 0;
		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
	} else {
		/* return all pending buffers to vb2 (in error state) */
		ctx->stream_num = 0;
		while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
	}

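	/* Defer closing the encoder while the other queue is still streaming. */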
	if ((V4L2_TYPE_IS_OUTPUT(vq->type) &&
	     vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) ||
	    (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	     vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) {
		dev_dbg(dev, "%s %s out=%d cap=%d\n",
			ctx->name, to_type_str(vq->type),
			vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q),
			vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q));
		return;
	}

	/* close encoder when both stop_streaming have been called */
	if (enc) {
		dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
		enc->close(ctx);
		ctx->enc = NULL;

		/* clear instance context in instances array */
		hva->instances[ctx->id] = NULL;
		hva->nb_of_instances--;
	}

	ctx->aborting = false;
}
Example No. 7
static int vidioc_decoder_cmd(struct file *file, void *priv,
				struct v4l2_decoder_cmd *cmd)
{
	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
	struct vb2_queue *src_vq, *dst_vq;
	int ret;

	ret = vidioc_try_decoder_cmd(file, priv, cmd);
	if (ret)
		return ret;

	mtk_v4l2_debug(1, "decoder cmd=%u", cmd->cmd);
	dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
				V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
	switch (cmd->cmd) {
	case V4L2_DEC_CMD_STOP:
		src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
		if (!vb2_is_streaming(src_vq)) {
			mtk_v4l2_debug(1, "Output stream is off. No need to flush.");
			return 0;
		}
		if (!vb2_is_streaming(dst_vq)) {
			mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
			return 0;
		}
		v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf->vb);
		v4l2_m2m_try_schedule(ctx->m2m_ctx);
		break;

	case V4L2_DEC_CMD_START:
		vb2_clear_last_buffer_dequeued(dst_vq);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
Example No. 8
static int bdisp_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct bdisp_ctx *ctx = fh_to_ctx(fh);
	struct vb2_queue *vq;
	struct bdisp_frame *frame;
	struct v4l2_pix_format *pix;
	int ret;
	u32 state;

	ret = bdisp_try_fmt(file, fh, f);
	if (ret) {
		dev_err(ctx->bdisp_dev->dev, "Cannot set format\n");
		return ret;
	}

	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
	if (vb2_is_streaming(vq)) {
		dev_err(ctx->bdisp_dev->dev, "queue (%d) busy\n", f->type);
		return -EBUSY;
	}

	frame = (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
			&ctx->src : &ctx->dst;
	pix = &f->fmt.pix;
	frame->fmt = bdisp_find_fmt(pix->pixelformat);
	if (!frame->fmt) {
		dev_err(ctx->bdisp_dev->dev, "Unknown format 0x%x\n",
			pix->pixelformat);
		return -EINVAL;
	}

	frame->width = pix->width;
	frame->height = pix->height;
	frame->bytesperline = pix->bytesperline;
	frame->sizeimage = pix->sizeimage;
	frame->field = pix->field;
	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		frame->colorspace = pix->colorspace;

	frame->crop.width = frame->width;
	frame->crop.height = frame->height;
	frame->crop.left = 0;
	frame->crop.top = 0;

	state = BDISP_PARAMS;
	state |= (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
			BDISP_DST_FMT : BDISP_SRC_FMT;
	bdisp_ctx_state_lock_set(state, ctx);

	return 0;
}
Example No. 9
static int rot_v4l2_s_fmt_mplane(struct file *file, void *priv,
				 struct v4l2_format *f)
{
	struct rot_ctx *ctx = priv;
	struct vb2_queue *vq;
	struct rot_frame *frame;
	struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
	int i, ret = 0;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);

	if (vb2_is_streaming(vq)) {
		rot_err("device is busy\n");
		return -EBUSY;
	}

	ret = rot_v4l2_try_fmt_mplane(file, priv, f);
	if (ret < 0)
		return ret;

	frame = ctx_get_frame(ctx, f->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	set_bit(CTX_PARAMS, &ctx->flags);

	frame->rot_fmt = rot_find_format(f);
	if (!frame->rot_fmt) {
		rot_err("not supported format values\n");
		return -EINVAL;
	}

	rot_adjust_pixminfo(ctx, frame, pixm);

	frame->pix_mp.pixelformat = pixm->pixelformat;
	frame->pix_mp.width	= pixm->width;
	frame->pix_mp.height	= pixm->height;

	/*
	 * s_crop or g_crop should not be called before g_fmt or s_fmt;
	 * assume that this ordering is respected here.
	 */
	frame->crop.width	= pixm->width;
	frame->crop.height	= pixm->height;

	for (i = 0; i < frame->rot_fmt->num_planes; ++i)
		frame->bytesused[i] = (pixm->width * pixm->height *
				frame->rot_fmt->bitperpixel[i]) >> 3;

	return 0;
}
Example No. 10
static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
{
	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	int ret;

	mtk_v4l2_debug(2, "[%d]-> type=%d", ctx->id, q->type);

	if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
			dst_buf->vb2_buf.planes[0].bytesused = 0;
			v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
		}
	} else {
		while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
	}

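	/* Defer releasing the encoder until the other queue stops streaming. */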
	if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
	     vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q)) ||
	    (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
	     vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q))) {
		mtk_v4l2_debug(1, "[%d]-> q type %d out=%d cap=%d",
			       ctx->id, q->type,
			       vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q),
			       vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q));
		return;
	}

	/* Release the encoder if both streams are stopped. */
	ret = venc_if_deinit(ctx);
	if (ret)
		mtk_v4l2_err("venc_if_deinit failed=%d", ret);

	ctx->state = MTK_STATE_FREE;
}
Example No. 11
static void buffer_queue(struct vb2_buffer *vb)
{
	struct fimc_vid_buffer *buf
		= container_of(vb, struct fimc_vid_buffer, vb);
	struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct fimc_dev *fimc = ctx->fimc_dev;
	struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
	unsigned long flags;
	int min_bufs;

	spin_lock_irqsave(&fimc->slock, flags);
	fimc_prepare_addr(ctx, &buf->vb, &ctx->d_frame, &buf->paddr);

	if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) &&
	    !test_bit(ST_CAPT_STREAM, &fimc->state) &&
	    vid_cap->active_buf_cnt < FIMC_MAX_OUT_BUFS) {
		/* Setup the buffer directly for processing. */
		int buf_id = (vid_cap->reqbufs_count == 1) ? -1 :
				vid_cap->buf_index;

		fimc_hw_set_output_addr(fimc, &buf->paddr, buf_id);
		buf->index = vid_cap->buf_index;
		fimc_active_queue_add(vid_cap, buf);

		if (++vid_cap->buf_index >= FIMC_MAX_OUT_BUFS)
			vid_cap->buf_index = 0;
	} else {
		fimc_pending_queue_add(vid_cap, buf);
	}

	min_bufs = vid_cap->reqbufs_count > 1 ? 2 : 1;

	if (vb2_is_streaming(&vid_cap->vbq) &&
	    vid_cap->active_buf_cnt >= min_bufs &&
	    !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) {
		fimc_activate_capture(ctx);
		spin_unlock_irqrestore(&fimc->slock, flags);

		if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
			fimc_pipeline_call(fimc, set_stream,
					   &fimc->pipeline, 1);
		return;
	}
	spin_unlock_irqrestore(&fimc->slock, flags);
}
Example No. 12
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct vb2_queue *vq;
	struct gsc_frame *frame;
	struct v4l2_pix_format_mplane *pix;
	int i, ret = 0;

	ret = gsc_m2m_try_fmt_mplane(file, fh, f);
	if (ret)
		return ret;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);

	if (vb2_is_streaming(vq)) {
		pr_err("queue (%d) busy", f->type);
		return -EBUSY;
	}

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		frame = &ctx->s_frame;
	else
		frame = &ctx->d_frame;

	pix = &f->fmt.pix_mp;
	frame->fmt = find_fmt(&pix->pixelformat, NULL, 0);
	frame->colorspace = pix->colorspace;
	if (!frame->fmt)
		return -EINVAL;

	for (i = 0; i < frame->fmt->num_planes; i++)
		frame->payload[i] = pix->plane_fmt[i].sizeimage;

	gsc_set_frame_size(frame, pix->width, pix->height);

	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
	else
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);

	pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);

	return 0;
}
Example No. 13
static int
iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);
	struct iss_video *video = video_drvdata(file);
	struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
	enum iss_pipeline_state state;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	if (!vb2_is_streaming(&vfh->queue))
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISS_PIPELINE_STREAM_OUTPUT
		      | ISS_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISS_PIPELINE_STREAM_INPUT
		      | ISS_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap4iss_pipeline_set_stream(pipe, ISS_PIPELINE_STREAM_STOPPED);
	vb2_streamoff(&vfh->queue, type);
	video->queue = NULL;

	if (video->iss->pdata->set_constraints)
		video->iss->pdata->set_constraints(video->iss, false);
	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}
Example No. 14
static void gsc_capture_buf_queue(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_capture_device *cap = &gsc->cap;
	struct exynos_md *mdev = gsc->mdev[MDEV_CAPTURE];
	int min_bufs, ret;
	unsigned long flags;

	spin_lock_irqsave(&gsc->slock, flags);
	ret = gsc_capture_set_addr(vb);
	if (ret)
		gsc_err("Failed to prepare output addr");

	gsc_hw_set_output_buf_masking(gsc, vb->v4l2_buf.index, 0);

	min_bufs = cap->reqbufs_cnt > 1 ? 2 : 1;

	if (vb2_is_streaming(&cap->vbq) &&
		(gsc_hw_get_nr_unmask_bits(gsc) >= min_bufs) &&
		!test_bit(ST_CAPT_STREAM, &gsc->state)) {
		if (!test_and_set_bit(ST_CAPT_PIPE_STREAM, &gsc->state)) {
			spin_unlock_irqrestore(&gsc->slock, flags);
			if (!mdev->is_flite_on)
				gsc_cap_pipeline_s_stream(gsc, 1);
			else
				v4l2_subdev_call(gsc->cap.sd_cap, video,
							s_stream, 1);
			return;
		}

		if (!test_bit(ST_CAPT_STREAM, &gsc->state)) {
			gsc_dbg("G-Scaler h/w enable control");
			gsc_hw_enable_control(gsc, true);
			set_bit(ST_CAPT_STREAM, &gsc->state);
		}
	}
	spin_unlock_irqrestore(&gsc->slock, flags);

	return;
}
Example No. 15
static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	const struct xvip_video_format *info;
	struct xilinx_vdma_config config;
	int ret;

	__xvip_dma_try_format(dma, &format->fmt.pix, &info);

	mutex_lock(&dma->lock);

	if (vb2_is_streaming(&dma->queue)) {
		ret = -EBUSY;
		goto done;
	}

	dma->format = format->fmt.pix;
	dma->fmtinfo = info;

	/* Configure the DMA engine. */
	memset(&config, 0, sizeof(config));

	config.park = 1;
	config.park_frm = 0;
	config.vsize = dma->format.height;
	config.hsize = dma->format.width * info->bpp;
	config.stride = dma->format.bytesperline;
	config.ext_fsync = 2;

	dmaengine_device_control(dma->dma, DMA_SLAVE_CONFIG,
				 (unsigned long)&config);

	ret = 0;

done:
	mutex_unlock(&dma->lock);
	return ret;
}
Example No. 16
static void gsc_capture_buf_queue(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_capture_device *cap = &gsc->cap;
	int min_bufs, ret;
	unsigned long flags;

	spin_lock_irqsave(&gsc->slock, flags);
	ret = gsc_capture_set_addr(vb);
	if (ret)
		gsc_err("Failed to prepare output addr");

	if (!test_bit(ST_CAPT_SUSPENDED, &gsc->state)) {
		gsc_info("buf_index : %d", vb->v4l2_buf.index);
		gsc_hw_set_output_buf_masking(gsc, vb->v4l2_buf.index, 0);
	}

	min_bufs = cap->reqbufs_cnt > 1 ? 2 : 1;

	if (vb2_is_streaming(&cap->vbq) &&
		(gsc_hw_get_nr_unmask_bits(gsc) >= min_bufs) &&
		!test_bit(ST_CAPT_STREAM, &gsc->state)) {
		if (!test_and_set_bit(ST_CAPT_PIPE_STREAM, &gsc->state)) {
			spin_unlock_irqrestore(&gsc->slock, flags);
			gsc_cap_pipeline_s_stream(gsc, 1);
			return;
		}

		if (!test_bit(ST_CAPT_STREAM, &gsc->state)) {
			gsc_info("G-Scaler h/w enable control");
			gsc_hw_enable_control(gsc, true);
			set_bit(ST_CAPT_STREAM, &gsc->state);
		}
	}
	spin_unlock_irqrestore(&gsc->slock, flags);

	return;
}
Example No. 17
static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
	struct hva_ctx *ctx = fh_to_ctx(file->private_data);
	struct device *dev = ctx_to_dev(ctx);
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct vb2_queue *vq;
	int ret;

	ret = hva_try_fmt_frame(file, fh, f);
	if (ret) {
		dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): unsupported format %.4s\n",
			ctx->name, (char *)&pix->pixelformat);
		return ret;
	}

	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
	if (vb2_is_streaming(vq)) {
		dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): queue busy\n", ctx->name);
		return -EBUSY;
	}

	ctx->colorspace = pix->colorspace;
	ctx->xfer_func = pix->xfer_func;
	ctx->ycbcr_enc = pix->ycbcr_enc;
	ctx->quantization = pix->quantization;

	ctx->frameinfo.aligned_width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
	ctx->frameinfo.aligned_height = ALIGN(pix->height,
					      HVA_HEIGHT_ALIGNMENT);
	ctx->frameinfo.size = pix->sizeimage;
	ctx->frameinfo.pixelformat = pix->pixelformat;
	ctx->frameinfo.width = pix->width;
	ctx->frameinfo.height = pix->height;
	ctx->flags |= HVA_FLAG_FRAMEINFO;

	return 0;
}
Example No. 18
/**
 * vb2_poll() - implements poll userspace operation
 * @q:		videobuf2 queue
 * @file:	file argument passed to the poll file operation handler
 * @wait:	wait argument passed to the poll file operation handler
 *
 * This function implements the poll file operation handler for a driver.
 * For CAPTURE queues, if a buffer is ready to be dequeued, userspace will
 * be informed that the file descriptor of the video device is available for
 * reading.
 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
 * will be reported as available for writing.
 *
 * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
 * pending events.
 *
 * The return value of this function is intended to be returned directly
 * from the driver's poll handler.
 */
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned int res = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			res = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
	}

	if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM)))
		return res;
	if (q->is_output && !(req_events & (POLLOUT | POLLWRNORM)))
		return res;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return res | POLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return res | POLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return res | POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if the queue isn't streaming, or if the
	 * error flag is set.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return res | POLLERR;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return POLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	if (q->waiting_for_buffers)
		return res | POLLERR;

	/*
	 * For output streams you can call write() as long as there are fewer
	 * buffers queued than there are buffers available.
	 */
	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
		return res | POLLOUT | POLLWRNORM;

	if (list_empty(&q->done_list)) {
		/*
		 * If the last buffer was dequeued from a capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (q->last_buffer_dequeued)
			return res | POLLIN | POLLRDNORM;

		poll_wait(file, &q->done_wq, wait);
	}

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (q->is_output) ?
				res | POLLOUT | POLLWRNORM :
				res | POLLIN | POLLRDNORM;
	}
	return res;
}
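Usage note for Example No. 18: the kernel-doc above states that the return value of vb2_poll() is meant to be returned directly from a driver's poll handler. The fragment below is a minimal sketch of that pattern; my_video_dev, my_video_poll and my_video_fops are hypothetical names, headers are omitted to match the excerpts above, and the sketch is not the implementation of any particular driver.
/* Hypothetical driver-private structure used only for this sketch. */
struct my_video_dev {
	struct video_device vdev;
	struct vb2_queue queue;
	struct mutex lock;	/* serializes queue operations */
};

/* Forward the poll file operation to vb2_poll() under the driver lock. */
static unsigned int my_video_poll(struct file *file, poll_table *wait)
{
	struct my_video_dev *dev = video_drvdata(file);
	unsigned int res;

	mutex_lock(&dev->lock);
	res = vb2_poll(&dev->queue, file, wait);
	mutex_unlock(&dev->lock);

	return res;
}

static const struct v4l2_file_operations my_video_fops = {
	.owner	= THIS_MODULE,
	.poll	= my_video_poll,
	/* open, release, unlocked_ioctl and mmap handlers omitted */
};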
Example No. 19
void msm_video_stop_streaming(struct camss_video *video)
{
	if (vb2_is_streaming(&video->vb2_q))
		vb2_queue_release(&video->vb2_q);
}
Example No. 20
irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
{
	struct cobalt *cobalt = (struct cobalt *)dev_id;
	u32 dma_interrupt =
		cobalt_read_bar0(cobalt, DMA_INTERRUPT_STATUS_REG) & 0xffff;
	u32 mask = cobalt_read_bar1(cobalt, COBALT_SYS_STAT_MASK);
	u32 edge = cobalt_read_bar1(cobalt, COBALT_SYS_STAT_EDGE);
	int i;

	/* Clear DMA interrupt */
	cobalt_write_bar0(cobalt, DMA_INTERRUPT_STATUS_REG, dma_interrupt);
	cobalt_write_bar1(cobalt, COBALT_SYS_STAT_MASK, mask & ~edge);
	cobalt_write_bar1(cobalt, COBALT_SYS_STAT_EDGE, edge);

	for (i = 0; i < COBALT_NUM_STREAMS; i++) {
		struct cobalt_stream *s = &cobalt->streams[i];
		unsigned dma_fifo_mask = s->dma_fifo_mask;

		if (dma_interrupt & (1 << s->dma_channel)) {
			cobalt->irq_dma[i]++;
			/* Give fresh buffer to user and chain newly
			 * queued buffers */
			cobalt_dma_stream_queue_handler(s);
			if (!s->is_audio) {
				edge &= ~dma_fifo_mask;
				cobalt_write_bar1(cobalt, COBALT_SYS_STAT_MASK,
						  mask & ~edge);
			}
		}
		if (s->is_audio)
			continue;
		if (edge & s->adv_irq_mask)
			set_bit(COBALT_STREAM_FL_ADV_IRQ, &s->flags);
		if ((edge & mask & dma_fifo_mask) && vb2_is_streaming(&s->q)) {
			cobalt_info("full rx FIFO %d\n", i);
			cobalt->irq_full_fifo++;
		}
	}

	queue_work(cobalt->irq_work_queues, &cobalt->irq_work_queue);

	if (edge & mask & (COBALT_SYSSTAT_VI0_INT1_MSK |
			   COBALT_SYSSTAT_VI1_INT1_MSK |
			   COBALT_SYSSTAT_VI2_INT1_MSK |
			   COBALT_SYSSTAT_VI3_INT1_MSK |
			   COBALT_SYSSTAT_VIHSMA_INT1_MSK |
			   COBALT_SYSSTAT_VOHSMA_INT1_MSK))
		cobalt->irq_adv1++;
	if (edge & mask & (COBALT_SYSSTAT_VI0_INT2_MSK |
			   COBALT_SYSSTAT_VI1_INT2_MSK |
			   COBALT_SYSSTAT_VI2_INT2_MSK |
			   COBALT_SYSSTAT_VI3_INT2_MSK |
			   COBALT_SYSSTAT_VIHSMA_INT2_MSK))
		cobalt->irq_adv2++;
	if (edge & mask & COBALT_SYSSTAT_VOHSMA_INT1_MSK)
		cobalt->irq_advout++;
	if (dma_interrupt)
		cobalt->irq_dma_tot++;
	if (!(edge & mask) && !dma_interrupt)
		cobalt->irq_none++;
	dma_interrupt = cobalt_read_bar0(cobalt, DMA_INTERRUPT_STATUS_REG);

	return IRQ_HANDLED;
}