static int gsc_capture_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct gsc_dev *gsc = video_drvdata(file); struct v4l2_subdev *sd; struct gsc_pipeline *p = &gsc->pipeline; int ret; if (p->disp) { gsc_pm_qos_ctrl(gsc, GSC_QOS_OFF, 0, 0); sd = gsc->pipeline.disp; } else if (p->sensor) { sd = gsc->pipeline.sensor; } else { gsc_err("Error pipeline"); return -EPIPE; } ret = vb2_streamoff(&gsc->cap.vbq, type); if (ret == 0) { if (p->disp) media_entity_pipeline_stop(&p->disp->entity); else if (p->sensor) media_entity_pipeline_stop(&p->sensor->entity); } return ret; }
/*
 * isp_video_streamon - start streaming on the FIMC-IS ISP capture node
 *
 * Claims the media pipeline, validates the current link configuration,
 * then starts the vb2 queue. Any failure after the pipeline has been
 * claimed releases it again before returning the error.
 */
static int isp_video_streamon(struct file *file, void *priv,
			      enum v4l2_buf_type type)
{
	struct fimc_isp *isp = video_drvdata(file);
	struct exynos_video_entity *ve = &isp->video_capture.ve;
	struct media_entity *me = &ve->vdev.entity;
	int err;

	err = media_entity_pipeline_start(me, &ve->pipe->mp);
	if (err < 0)
		return err;

	err = isp_video_pipeline_validate(isp);
	if (err < 0)
		goto err_stop;

	err = vb2_ioctl_streamon(file, priv, type);
	if (err < 0)
		goto err_stop;

	isp->video_capture.streaming = 1;
	return 0;

err_stop:
	media_entity_pipeline_stop(me);
	return err;
}
/*
 * isp_video_release - release the FIMC-IS ISP capture video node
 *
 * For the last open file handle this stops a still-running pipeline,
 * closes the pipeline and drops the media entity use count; the vb2
 * file-handle release and the runtime PM reference are handled
 * unconditionally. All of it runs under isp->video_lock.
 *
 * Always returns 0.
 */
static int isp_video_release(struct file *file)
{
	struct fimc_isp *isp = video_drvdata(file);
	struct fimc_is_video *ivc = &isp->video_capture;
	struct media_entity *entity = &ivc->ve.vdev.entity;
	struct media_device *mdev = entity->graph_obj.mdev;

	mutex_lock(&isp->video_lock);

	/* Last user and still streaming: stop the media pipeline first. */
	if (v4l2_fh_is_singular_file(file) && ivc->streaming) {
		media_entity_pipeline_stop(entity);
		ivc->streaming = 0;
	}

	vb2_fop_release(file);

	if (v4l2_fh_is_singular_file(file)) {
		fimc_pipeline_call(&ivc->ve, close);

		/* use_count is protected by the media graph mutex. */
		mutex_lock(&mdev->graph_mutex);
		entity->use_count--;
		mutex_unlock(&mdev->graph_mutex);
	}

	pm_runtime_put(&isp->pdev->dev);
	mutex_unlock(&isp->video_lock);
	return 0;
}
static int fimc_lite_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_lite *fimc = video_drvdata(file); struct media_entity *entity = &fimc->ve.vdev.entity; int ret; if (fimc_lite_active(fimc)) return -EBUSY; ret = media_entity_pipeline_start(entity, &fimc->ve.pipe->mp); if (ret < 0) return ret; ret = fimc_pipeline_validate(fimc); if (ret < 0) goto err_p_stop; fimc->sensor = fimc_find_remote_sensor(&fimc->subdev.entity); ret = vb2_ioctl_streamon(file, priv, type); if (!ret) { fimc->streaming = true; return ret; } err_p_stop: media_entity_pipeline_stop(entity); return 0; }
/*
 * fimc_lite_release - release the FIMC-LITE video node
 *
 * When the last file handle closes and the device is in DMA (memory)
 * output mode, streaming is stopped, capture aborted, the pipeline
 * closed and the entity use count dropped. The vb2 release, runtime PM
 * put and suspended-state clear happen for every close. Runs under
 * fimc->lock.
 *
 * Always returns 0.
 */
static int fimc_lite_release(struct file *file)
{
	struct fimc_lite *fimc = video_drvdata(file);
	struct media_entity *entity = &fimc->ve.vdev.entity;

	mutex_lock(&fimc->lock);

	if (v4l2_fh_is_singular_file(file) &&
	    atomic_read(&fimc->out_path) == FIMC_IO_DMA) {
		if (fimc->streaming) {
			media_entity_pipeline_stop(entity);
			fimc->streaming = false;
		}
		/* NOTE(review): second argument appears to be a suspend
		 * flag (false = full stop) — confirm against
		 * fimc_lite_stop_capture().
		 */
		fimc_lite_stop_capture(fimc, false);
		fimc_pipeline_call(&fimc->ve, close);
		clear_bit(ST_FLITE_IN_USE, &fimc->state);

		/* use_count is protected by the media graph mutex. */
		mutex_lock(&entity->parent->graph_mutex);
		entity->use_count--;
		mutex_unlock(&entity->parent->graph_mutex);
	}

	_vb2_fop_release(file, NULL);

	pm_runtime_put(&fimc->pdev->dev);
	clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
	mutex_unlock(&fimc->lock);
	return 0;
}
/*
 * fimc_lite_streamoff - stop streaming on the FIMC-LITE capture node
 *
 * Stops the vb2 queue and, if that succeeded, stops the media pipeline
 * at the sensor entity. Returns the vb2_streamoff() result.
 */
static int fimc_lite_streamoff(struct file *file, void *priv,
			       enum v4l2_buf_type type)
{
	struct fimc_lite *fimc = video_drvdata(file);
	struct v4l2_subdev *sensor = fimc->pipeline.subdevs[IDX_SENSOR];
	int err = vb2_streamoff(&fimc->vb_queue, type);

	if (!err)
		media_entity_pipeline_stop(&sensor->entity);
	return err;
}
/*
 * fimc_cap_streamoff - stop streaming on the FIMC capture node
 *
 * Stops the vb2 queue and, if that succeeded, stops the media pipeline
 * at the sensor entity. Returns the vb2_streamoff() result.
 */
static int fimc_cap_streamoff(struct file *file, void *priv,
			      enum v4l2_buf_type type)
{
	struct fimc_dev *fimc = video_drvdata(file);
	struct v4l2_subdev *sensor = fimc->pipeline.sensor;
	int err = vb2_streamoff(&fimc->vid_cap.vbq, type);

	if (!err)
		media_entity_pipeline_stop(&sensor->entity);
	return err;
}
/*
 * fimc_lite_streamoff - stop streaming on the FIMC-LITE video node
 *
 * Stops the vb2 queue first; only when that succeeds is the media
 * pipeline released and the streaming flag cleared.
 */
static int fimc_lite_streamoff(struct file *file, void *priv,
			       enum v4l2_buf_type type)
{
	struct fimc_lite *fimc = video_drvdata(file);
	int err = vb2_ioctl_streamoff(file, priv, type);

	if (err < 0)
		return err;

	media_entity_pipeline_stop(&fimc->ve.vdev.entity);
	fimc->streaming = false;
	return 0;
}
/*
 * isp_video_streamoff - stop streaming on the FIMC-IS ISP capture node
 *
 * Stops the vb2 queue first; only when that succeeds is the media
 * pipeline released and the streaming flag cleared.
 */
static int isp_video_streamoff(struct file *file, void *priv,
			       enum v4l2_buf_type type)
{
	struct fimc_isp *isp = video_drvdata(file);
	struct fimc_is_video *vc = &isp->video_capture;
	int err;

	err = vb2_ioctl_streamoff(file, priv, type);
	if (err < 0)
		return err;

	media_entity_pipeline_stop(&vc->ve.vdev.entity);
	vc->streaming = 0;
	return 0;
}
/*
 * xvip_dma_start_streaming - vb2 start_streaming handler for a Xilinx
 * video DMA channel
 *
 * Claims the media pipeline, verifies the configured format against the
 * connected subdev, prepares the pipeline, kicks the DMA engine and
 * finally starts the pipeline. On failure after the pipeline was
 * claimed, it is released again.
 *
 * Returns 0 on success or a negative error code.
 */
static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe;
	int ret;

	dma->sequence = 0;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 * streaming.
	 */
	pipe = dma->video.entity.pipe
	     ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

	ret = media_entity_pipeline_start(&dma->video.entity, &pipe->pipe);
	if (ret < 0)
		return ret;

	/* Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = xvip_dma_verify_format(dma);
	if (ret < 0)
		goto error;

	ret = xvip_pipeline_prepare(pipe, dma);
	if (ret < 0)
		goto error;

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	xvip_pipeline_set_stream(pipe, true);

	return 0;

error:
	media_entity_pipeline_stop(&dma->video.entity);
	return ret;
}
/*
 * iss_video_streamoff - stop streaming on an OMAP4 ISS video node
 *
 * Clears the stream/queue state bits for this node's direction, stops
 * the hardware pipeline, stops the vb2 queue, releases any platform
 * constraints and finally releases the media pipeline. A no-op (still
 * returning 0) when the queue is not streaming. Runs under
 * video->stream_lock; the pipeline state word is updated under the
 * irqsave spinlock pipe->lock.
 *
 * Returns 0, or -EINVAL for a buffer type mismatch.
 */
static int iss_video_streamoff(struct file *file, void *fh,
			       enum v4l2_buf_type type)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);
	struct iss_video *video = video_drvdata(file);
	struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
	enum iss_pipeline_state state;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	if (!vb2_is_streaming(&vfh->queue))
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISS_PIPELINE_STREAM_OUTPUT | ISS_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISS_PIPELINE_STREAM_INPUT | ISS_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap4iss_pipeline_set_stream(pipe, ISS_PIPELINE_STREAM_STOPPED);
	vb2_streamoff(&vfh->queue, type);
	video->queue = NULL;

	if (video->iss->pdata->set_constraints)
		video->iss->pdata->set_constraints(video->iss, false);

	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}
static int fimc_lite_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_lite *fimc = video_drvdata(file); struct v4l2_subdev *sensor = fimc->pipeline.subdevs[IDX_SENSOR]; struct fimc_pipeline *p = &fimc->pipeline; int ret; if (fimc_lite_active(fimc)) return -EBUSY; ret = media_entity_pipeline_start(&sensor->entity, p->m_pipeline); if (ret < 0) return ret; ret = fimc_pipeline_validate(fimc); if (ret) { media_entity_pipeline_stop(&sensor->entity); return ret; } return vb2_streamon(&fimc->vb_queue, type); }
/*
 * xvip_dma_stop_streaming - vb2 stop_streaming handler for a Xilinx
 * video DMA channel
 *
 * Stops the hardware pipeline, terminates and resets the DMA engine,
 * cleans up the pipeline object and releases the media pipeline.
 *
 * Always returns 0.
 */
static int xvip_dma_stop_streaming(struct vb2_queue *vq)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
	struct xilinx_vdma_config config;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_device_control(dma->dma, DMA_TERMINATE_ALL, 0);

	/* NOTE(review): only .reset is initialized; the rest of 'config'
	 * is uninitialized stack memory. Confirm the slave-config handler
	 * reads no other field when .reset is set.
	 */
	config.reset = 1;
	dmaengine_device_control(dma->dma, DMA_SLAVE_CONFIG,
				 (unsigned long)&config);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	media_entity_pipeline_stop(&dma->video.entity);

	return 0;
}
static int fimc_cap_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_pipeline *p = &fimc->pipeline; struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR]; int ret; if (fimc_capture_active(fimc)) return -EBUSY; ret = media_entity_pipeline_start(&sd->entity, p->m_pipeline); if (ret < 0) return ret; if (fimc->vid_cap.user_subdev_api) { ret = fimc_pipeline_validate(fimc); if (ret < 0) { media_entity_pipeline_stop(&sd->entity); return ret; } } return vb2_streamon(&fimc->vid_cap.vbq, type); }
/*
 * Stream management
 *
 * Every ISS pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISS video objects at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * to the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISS modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the videobuf queue callback with
 * the buffers queue spinlock held. The modules subdev set stream operation must
 * not sleep.
 */
static int iss_video_streamon(struct file *file, void *fh,
			      enum v4l2_buf_type type)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);
	struct iss_video *video = video_drvdata(file);
	struct media_entity_graph graph;
	struct media_entity *entity;
	enum iss_pipeline_state state;
	struct iss_pipeline *pipe;
	struct iss_video *far_end;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 * Reuse an already-claimed pipeline object if one exists, otherwise
	 * use the one embedded in this video node.
	 */
	pipe = video->video.entity.pipe
	     ? to_iss_pipeline(&video->video.entity) : &video->pipe;
	pipe->external = NULL;
	pipe->external_rate = 0;
	pipe->external_bpp = 0;
	pipe->entities = 0;

	if (video->iss->pdata->set_constraints)
		video->iss->pdata->set_constraints(video->iss, true);

	ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0)
		goto err_media_entity_pipeline_start;

	/* Record every entity reachable from this node as a bit mask. */
	entity = &video->video.entity;
	media_entity_graph_walk_start(&graph, entity);
	while ((entity = media_entity_graph_walk_next(&graph)))
		pipe->entities |= 1 << entity->id;

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = iss_video_check_format(video, vfh);
	if (ret < 0)
		goto err_iss_video_check_format;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	/* Find the ISS video node connected at the far end of the pipeline and
	 * update the pipeline.
	 */
	far_end = iss_video_far_end(video);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		/* Capture: far end (possibly NULL, i.e. a sensor) feeds us. */
		state = ISS_PIPELINE_STREAM_OUTPUT | ISS_PIPELINE_IDLE_OUTPUT;
		pipe->input = far_end;
		pipe->output = video;
	} else {
		/* Output: a far-end video node is mandatory. */
		if (far_end == NULL) {
			ret = -EPIPE;
			goto err_iss_video_check_format;
		}
		state = ISS_PIPELINE_STREAM_INPUT | ISS_PIPELINE_IDLE_INPUT;
		pipe->input = video;
		pipe->output = far_end;
	}

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISS_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the request limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	spin_lock_init(&video->qlock);
	video->error = false;
	atomic_set(&pipe->frame_number, -1);

	ret = vb2_streamon(&vfh->queue, type);
	if (ret < 0)
		goto err_iss_video_check_format;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input == NULL) {
		/* NOTE(review): this inner 'flags' shadows the function-scope
		 * one; harmless, but -Wshadow would flag it.
		 */
		unsigned long flags;

		ret = omap4iss_pipeline_set_stream(pipe,
					ISS_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto err_omap4iss_set_stream;
		spin_lock_irqsave(&video->qlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->qlock, flags);
	}

	mutex_unlock(&video->stream_lock);
	return 0;

err_omap4iss_set_stream:
	vb2_streamoff(&vfh->queue, type);
err_iss_video_check_format:
	media_entity_pipeline_stop(&video->video.entity);
err_media_entity_pipeline_start:
	if (video->iss->pdata->set_constraints)
		video->iss->pdata->set_constraints(video->iss, false);
	video->queue = NULL;

	mutex_unlock(&video->stream_lock);
	return ret;
}