/*
 * fimc_pipeline_validate - check the pipeline for source/sink format mismatches
 * @fimc: FIMC-Lite device terminating the pipeline
 *
 * Walk upstream from the FIMC-Lite subdev, one link at a time, comparing the
 * format at each link's sink pad against the format at the connected source
 * pad. The walk stops at the first entity without a sink pad or whose remote
 * is not a V4L2 subdev.
 *
 * Return 0 when all link formats match, -EPIPE otherwise.
 */
static int fimc_pipeline_validate(struct fimc_lite *fimc)
{
	struct v4l2_subdev *sd = &fimc->subdev;
	struct v4l2_subdev_format sink_fmt, src_fmt;
	struct media_pad *pad;
	int ret;

	while (1) {
		/* Retrieve format at the sink pad */
		pad = &sd->entity.pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;
		/* Don't call FIMC subdev operation to avoid nested locking */
		if (sd == &fimc->subdev) {
			/* Use the locally cached output frame geometry. */
			struct flite_frame *ff = &fimc->out_frame;
			sink_fmt.format.width = ff->f_width;
			sink_fmt.format.height = ff->f_height;
			sink_fmt.format.code = fimc->fmt->mbus_code;
		} else {
			sink_fmt.pad = pad->index;
			sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
			ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
					       &sink_fmt);
			/* -ENOIOCTLCMD (op not implemented) is tolerated */
			if (ret < 0 && ret != -ENOIOCTLCMD)
				return -EPIPE;
		}
		/* Retrieve format at the source pad */
		pad = media_entity_remote_source(pad);
		if (pad == NULL ||
		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		sd = media_entity_to_v4l2_subdev(pad->entity);
		src_fmt.pad = pad->index;
		src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Both ends of the link must carry the same format. */
		if (src_fmt.format.width != sink_fmt.format.width ||
		    src_fmt.format.height != sink_fmt.format.height ||
		    src_fmt.format.code != sink_fmt.format.code)
			return -EPIPE;
	}
	return 0;
}
/*
 * iss_pipeline_enable - Enable streaming on a pipeline
 * @pipe: ISS pipeline
 * @mode: Stream mode (single shot or continuous)
 *
 * Walk the entities chain starting at the pipeline output video node and start
 * all modules in the chain in the given mode.
 *
 * Return 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 *
 * NOTE(review): if a subdev's s_stream fails partway through the walk, the
 * entities already started are left streaming — confirm whether the caller
 * performs the corresponding disable.
 */
static int iss_pipeline_enable(struct iss_pipeline *pipe,
			       enum iss_pipeline_stream_state mode)
{
	struct iss_device *iss = pipe->output->iss;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	unsigned long flags;
	int ret;

	/* If one of the entities in the pipeline has crashed it will not work
	 * properly. Refuse to start streaming in that case. This check must be
	 * performed before the loop below to avoid starting entities if the
	 * pipeline won't start anyway (those entities would then likely fail to
	 * stop, making the problem worse).
	 */
	if (pipe->entities & iss->crashed)
		return -EIO;

	/* Clear the idle flags under the pipeline lock. */
	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~(ISS_PIPELINE_IDLE_INPUT | ISS_PIPELINE_IDLE_OUTPUT);
	spin_unlock_irqrestore(&pipe->lock, flags);

	pipe->do_propagation = false;

	/* Walk upstream from the output video node, starting each subdev. */
	entity = &pipe->output->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_entity_remote_pad(pad);
		if (pad == NULL ||
		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		ret = v4l2_subdev_call(subdev, video, s_stream, mode);
		/* Subdevs without an s_stream op are skipped silently. */
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return ret;
	}
	iss_print_status(pipe->output->iss);
	return 0;
}
/*
 * csi2_link_setup - Setup CSI2 connections.
 * @entity : Pointer to media entity structure
 * @local : Pointer to local pad array
 * @remote : Pointer to remote pad array
 * @flags : Link flags
 * return -EINVAL or zero on success
 */
static int csi2_link_setup(struct media_entity *entity,
			   const struct media_pad *local,
			   const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct isp_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
	unsigned int sel = local->index | media_entity_type(remote->entity);

	/*
	 * The ISP core doesn't support pipelines with multiple video outputs.
	 * Revisit this when it will be implemented, and return -EBUSY for now.
	 */
	if (sel == (CSI2_PAD_SOURCE | MEDIA_ENT_T_DEVNODE)) {
		/* Source pad connected to a video device node (memory). */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (csi2->output & ~CSI2_OUTPUT_MEMORY)
				return -EBUSY;
			csi2->output |= CSI2_OUTPUT_MEMORY;
		} else {
			csi2->output &= ~CSI2_OUTPUT_MEMORY;
		}
	} else if (sel == (CSI2_PAD_SOURCE | MEDIA_ENT_T_V4L2_SUBDEV)) {
		/* Source pad connected to the CCDC subdev. */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (csi2->output & ~CSI2_OUTPUT_CCDC)
				return -EBUSY;
			csi2->output |= CSI2_OUTPUT_CCDC;
		} else {
			csi2->output &= ~CSI2_OUTPUT_CCDC;
		}
	} else {
		/* Link from camera to CSI2 is fixed... */
		return -EINVAL;
	}

	/* Video-port-only mode is used whenever memory output is disabled. */
	ctrl->vp_only_enable = !(csi2->output & CSI2_OUTPUT_MEMORY);
	ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_CCDC);

	return 0;
}
/** * fimc_pipeline_prepare - update pipeline information with subdevice pointers * @fimc: fimc device terminating the pipeline * * Caller holds the graph mutex. */ void fimc_pipeline_prepare(struct fimc_dev *fimc, struct media_entity *me) { struct media_entity_graph graph; struct v4l2_subdev *sd; media_entity_graph_walk_start(&graph, me); while ((me = media_entity_graph_walk_next(&graph))) { if (media_entity_type(me) != MEDIA_ENT_T_V4L2_SUBDEV) continue; sd = media_entity_to_v4l2_subdev(me); if (sd->grp_id == SENSOR_GROUP_ID) fimc->pipeline.sensor = sd; else if (sd->grp_id == CSIS_GROUP_ID) fimc->pipeline.csis = sd; } }
/*
 * ipipe_link_setup - Setup IPIPE connections
 * @entity: IPIPE media entity
 * @local: Pad at the local end of the link
 * @remote: Pad at the remote end of the link
 * @flags: Link flags
 *
 * return -EINVAL or zero on success
 */
static int ipipe_link_setup(struct media_entity *entity,
			    const struct media_pad *local,
			    const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
	struct iss_device *iss = to_iss_device(ipipe);
	bool enabled = flags & MEDIA_LNK_FL_ENABLED;

	/* Only subdev-to-subdev links are supported by IPIPE. */
	if (!is_media_entity_v4l2_subdev(remote->entity))
		return -EINVAL;

	switch (local->index) {
	case IPIPE_PAD_SINK:
		/* Read from IPIPEIF. */
		if (!enabled) {
			ipipe->input = IPIPE_INPUT_NONE;
			break;
		}

		/* An input is already connected — refuse a second one. */
		if (ipipe->input != IPIPE_INPUT_NONE)
			return -EBUSY;

		if (remote->entity == &iss->ipipeif.subdev.entity)
			ipipe->input = IPIPE_INPUT_IPIPEIF;
		break;

	case IPIPE_PAD_SOURCE_VP:
		/* Send to RESIZER */
		if (enabled) {
			if (ipipe->output & ~IPIPE_OUTPUT_VP)
				return -EBUSY;
			ipipe->output |= IPIPE_OUTPUT_VP;
		} else {
			ipipe->output &= ~IPIPE_OUTPUT_VP;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int video_start_streaming(struct vb2_queue *q, unsigned int count) { struct camss_video *video = vb2_get_drv_priv(q); struct video_device *vdev = &video->vdev; struct media_entity *entity; struct media_pad *pad; struct v4l2_subdev *subdev; int ret; ret = media_pipeline_start(&vdev->entity, &video->pipe); if (ret < 0) return ret; ret = video_check_format(video); if (ret < 0) goto error; entity = &vdev->entity; while (1) { pad = &entity->pads[0]; if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; pad = media_entity_remote_pad(pad); if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) break; entity = pad->entity; subdev = media_entity_to_v4l2_subdev(entity); ret = v4l2_subdev_call(subdev, video, s_stream, 1); if (ret < 0 && ret != -ENOIOCTLCMD) goto error; } return 0; error: media_pipeline_stop(&vdev->entity); video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED); return ret; }
static int v4l2_subdev_link_validate_get_format(struct media_pad *pad, struct v4l2_subdev_format *fmt) { if (media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(pad->entity); fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE; fmt->pad = pad->index; return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt); } WARN(pad->entity->type != MEDIA_ENT_T_DEVNODE_V4L, "Driver bug! Wrong media entity type 0x%08x, entity %s\n", pad->entity->type, pad->entity->name); return -EINVAL; }
/** * fimc_pipeline_prepare - update pipeline information with subdevice pointers * @fimc: fimc device terminating the pipeline * * Caller holds the graph mutex. */ static void fimc_pipeline_prepare(struct fimc_pipeline *p, struct media_entity *me) { struct media_pad *pad = &me->pads[0]; struct v4l2_subdev *sd; int i; for (i = 0; i < IDX_MAX; i++) p->subdevs[i] = NULL; while (1) { if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; /* source pad */ pad = media_entity_remote_source(pad); if (pad == NULL || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; sd = media_entity_to_v4l2_subdev(pad->entity); switch (sd->grp_id) { case SENSOR_GROUP_ID: p->subdevs[IDX_SENSOR] = sd; break; case CSIS_GROUP_ID: p->subdevs[IDX_CSIS] = sd; break; case FLITE_GROUP_ID: p->subdevs[IDX_FLITE] = sd; break; case FIMC_GROUP_ID: /* No need to control FIMC subdev through subdev ops */ break; default: pr_warn("%s: Unknown subdev grp_id: %#x\n", __func__, sd->grp_id); } /* sink pad */ pad = &sd->entity.pads[0]; } }
/*
 * _get_remote_subdev - find the subdev linked to this video node
 * @me: video node
 * @type: V4L2 buffer type selecting the pad (output -> source pad 1,
 *        anything else -> sink pad 0)
 * @pad: optional out parameter receiving the remote pad index
 *
 * Return the remote v4l2_subdev, or NULL when the pad is unconnected or the
 * remote entity is not a V4L2 subdev.
 */
static struct v4l2_subdev *
_get_remote_subdev(struct nxp_video *me, u32 type, u32 *pad)
{
	int index = (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? 1 : 0;
	struct media_pad *remote;

	remote = media_entity_remote_source(&me->pads[index]);
	if (!remote ||
	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}
static int v4l2_subdev_link_validate_get_format(struct media_pad *pad, struct v4l2_subdev_format *fmt) { switch (media_entity_type(pad->entity)) { case MEDIA_ENT_T_V4L2_SUBDEV: fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE; fmt->pad = pad->index; return v4l2_subdev_call(media_entity_to_v4l2_subdev( pad->entity), pad, get_fmt, NULL, fmt); default: WARN(1, "Driver bug! Wrong media entity type %d, entity %s\n", media_entity_type(pad->entity), pad->entity->name); /* Fall through */ case MEDIA_ENT_T_DEVNODE_V4L: return -EINVAL; } }
void gsc_cap_pipeline_prepare(struct gsc_dev *gsc, struct media_entity *me) { struct media_entity_graph graph; struct v4l2_subdev *sd; media_entity_graph_walk_start(&graph, me); while ((me = media_entity_graph_walk_next(&graph))) { gsc_dbg("me->name : %s", me->name); if (media_entity_type(me) != MEDIA_ENT_T_V4L2_SUBDEV) continue; sd = media_entity_to_v4l2_subdev(me); switch (sd->grp_id) { case GSC_CAP_GRP_ID: gsc->pipeline.sd_gsc = sd; break; case FLITE_GRP_ID: gsc->pipeline.flite = sd; break; case SENSOR_GRP_ID: gsc->pipeline.sensor = sd; break; case CSIS_GRP_ID: gsc->pipeline.csis = sd; break; case FIMD_GRP_ID: gsc->pipeline.disp = sd; break; default: gsc_err("Unsupported group id"); break; } } gsc_dbg("gsc->pipeline.sd_gsc : 0x%p", gsc->pipeline.sd_gsc); gsc_dbg("gsc->pipeline.flite : 0x%p", gsc->pipeline.flite); gsc_dbg("gsc->pipeline.sensor : 0x%p", gsc->pipeline.sensor); gsc_dbg("gsc->pipeline.csis : 0x%p", gsc->pipeline.csis); gsc_dbg("gsc->pipeline.disp : 0x%p", gsc->pipeline.disp); }
/*
 * iss_pipeline_disable - Disable streaming on a pipeline
 * @pipe: ISS pipeline
 * @until: entity at which to stop pipeline walk
 *
 * Walk the entities chain starting at the pipeline output video node and stop
 * all modules in the chain. Wait synchronously for the modules to be stopped if
 * necessary.
 *
 * If the until argument isn't NULL, stop the pipeline walk when reaching the
 * until entity. This is used to disable a partially started pipeline due to a
 * subdev start error.
 *
 * Return 0 when every module stopped, or -ETIMEDOUT if at least one failed.
 */
static int iss_pipeline_disable(struct iss_pipeline *pipe,
				struct media_entity *until)
{
	struct iss_device *iss = pipe->output->iss;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int failure = 0;
	int ret;

	entity = &pipe->output->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_entity_remote_pad(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		/* Stop before the entity that failed to start. */
		if (entity == until)
			break;

		subdev = media_entity_to_v4l2_subdev(entity);
		ret = v4l2_subdev_call(subdev, video, s_stream, 0);
		if (ret < 0) {
			dev_warn(iss->dev, "%s: module stop timeout.\n",
				 subdev->name);
			/* If the entity failed to stopped, assume it has
			 * crashed. Mark it as such, the ISS will be reset when
			 * applications will release it.
			 */
			media_entity_enum_set(&iss->crashed, &subdev->entity);
			/* Keep walking so the rest of the chain still stops. */
			failure = -ETIMEDOUT;
		}
	}

	return failure;
}
/*
 * flite_link_setup - configure FIMC-Lite link state
 * @entity: FIMC-Lite media entity
 * @local: pad at the local end of the link
 * @remote: pad at the remote end of the link
 * @flags: link flags
 *
 * Track the selected input (CSIS or direct sensor) and output (G-Scaler) as
 * links are enabled or disabled. Return -EINVAL or zero on success.
 */
static int flite_link_setup(struct media_entity *entity,
			    const struct media_pad *local,
			    const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct flite_dev *flite = to_flite_dev(sd);

	flite_info("");

	/* Only subdev remotes are supported on any FIMC-Lite pad. */
	if (media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV) {
		flite_err("ERR link");
		return -EINVAL;
	}

	switch (local->index) {
	case FLITE_PAD_SINK:
		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
			flite->input = FLITE_INPUT_NONE;
			break;
		}

		if (flite->input != FLITE_INPUT_NONE) {
			flite_err("link is busy");
			return -EBUSY;
		}

		/* Pick the input according to the remote pad. */
		if (remote->index == CSIS_PAD_SOURCE)
			flite->input |= FLITE_INPUT_CSIS;
		else
			flite->input |= FLITE_INPUT_SENSOR;
		break;

	case FLITE_PAD_SOURCE_PREVIEW:
	case FLITE_PAD_SOURCE_CAMCORDING:
		flite->output = (flags & MEDIA_LNK_FL_ENABLED) ?
				FLITE_OUTPUT_GSC : FLITE_OUTPUT_NONE;
		break;

	default:
		flite_err("ERR link");
		return -EINVAL;
	}

	return 0;
}
/* * isp_pipeline_pm_power_one - Apply power change to an entity * @entity: The entity * @change: Use count change * * Change the entity use count by @change. If the entity is a subdev update its * power state by calling the core::s_power operation when the use count goes * from 0 to != 0 or from != 0 to 0. * * Return 0 on success or a negative error code on failure. */ static int isp_pipeline_pm_power_one(struct media_entity *entity, int change) { struct v4l2_subdev *subdev; int ret; subdev = media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV ? media_entity_to_v4l2_subdev(entity) : NULL; if (entity->use_count == 0 && change > 0 && subdev != NULL) { ret = v4l2_subdev_call(subdev, core, s_power, 1); if (ret < 0 && ret != -ENOIOCTLCMD) return ret; } entity->use_count += change; WARN_ON(entity->use_count < 0); if (entity->use_count == 0 && change < 0 && subdev != NULL) v4l2_subdev_call(subdev, core, s_power, 0); return 0; }
/* Called with the media graph mutex held or entity->stream_count > 0. */ struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity) { struct media_pad *pad = &entity->pads[0]; struct v4l2_subdev *sd; while (pad->flags & MEDIA_PAD_FL_SINK) { /* source pad */ pad = media_entity_remote_pad(pad); if (pad == NULL || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; sd = media_entity_to_v4l2_subdev(pad->entity); if (sd->grp_id == GRP_ID_FIMC_IS_SENSOR || sd->grp_id == GRP_ID_SENSOR) return sd; /* sink pad */ pad = &sd->entity.pads[0]; } return NULL; }
/* * Check for source/sink format differences at each link. * Return 0 if the formats match or -EPIPE otherwise. */ static int isp_video_pipeline_validate(struct fimc_isp *isp) { struct v4l2_subdev *sd = &isp->subdev; struct v4l2_subdev_format sink_fmt, src_fmt; struct media_pad *pad; int ret; while (1) { /* Retrieve format at the sink pad */ pad = &sd->entity.pads[0]; if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; sink_fmt.pad = pad->index; sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; /* Retrieve format at the source pad */ pad = media_entity_remote_pad(pad); if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) break; sd = media_entity_to_v4l2_subdev(pad->entity); src_fmt.pad = pad->index; src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; if (src_fmt.format.width != sink_fmt.format.width || src_fmt.format.height != sink_fmt.format.height || src_fmt.format.code != sink_fmt.format.code) return -EPIPE; } return 0; }
/*
 * omap4iss_get_external_info - cache format and rate of the external entity
 * @pipe: ISS pipeline
 * @link: link connecting the external entity to the pipeline
 *
 * Read the active format on the external link and record the bits-per-pixel
 * value, then read the external entity's V4L2_CID_PIXEL_RATE control and cache
 * it in pipe->external_rate. Both values are computed only once: the function
 * returns early when no external entity is set or when the rate is already
 * known.
 *
 * Return 0 on success, -EPIPE when the format or pixel rate control cannot be
 * retrieved.
 */
int omap4iss_get_external_info(struct iss_pipeline *pipe,
			       struct media_link *link)
{
	struct iss_device *iss =
		container_of(pipe, struct iss_video, pipe)->iss;
	struct v4l2_subdev_format fmt;
	struct v4l2_ctrl *ctrl;
	int ret;

	if (!pipe->external)
		return 0;

	if (pipe->external_rate)
		return 0;

	memset(&fmt, 0, sizeof(fmt));

	/* NOTE(review): the pad index comes from the link's source pad while
	 * the queried subdev is the link's sink entity — confirm this pairing
	 * is intentional (it only works when both pad indices coincide).
	 */
	fmt.pad = link->source->index;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(link->sink->entity),
			       pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return -EPIPE;

	pipe->external_bpp = omap4iss_video_format_info(fmt.format.code)->bpp;

	ctrl = v4l2_ctrl_find(pipe->external->ctrl_handler,
			      V4L2_CID_PIXEL_RATE);
	if (!ctrl) {
		dev_warn(iss->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return -EPIPE;
	}

	pipe->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);

	return 0;
}
/*
 * fimc_md_link_notify - media link state change notification handler
 * @source: source pad of the modified link
 * @sink: sink pad of the modified link
 * @flags: new link flags
 *
 * On link disable, shut the pipeline down and drop the cached sensor/CSIS
 * pointers and capture controls. On link enable, re-initialize the pipeline
 * and recreate the controls, but only when the video node is already open
 * (refcnt > 0). Return 0, a shutdown error, or -EPIPE on activation failure.
 */
static int fimc_md_link_notify(struct media_pad *source,
			       struct media_pad *sink, u32 flags)
{
	struct v4l2_subdev *sd;
	struct fimc_dev *fimc;
	int ret = 0;

	/* Only links whose sink is a FIMC subdev are of interest here. */
	if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return 0;

	sd = media_entity_to_v4l2_subdev(sink->entity);
	fimc = v4l2_get_subdevdata(sd);

	if (!(flags & MEDIA_LNK_FL_ENABLED)) {
		/* Link deactivation: power down and forget the pipeline. */
		ret = __fimc_pipeline_shutdown(fimc);
		fimc->pipeline.sensor = NULL;
		fimc->pipeline.csis = NULL;

		mutex_lock(&fimc->lock);
		fimc_ctrls_delete(fimc->vid_cap.ctx);
		mutex_unlock(&fimc->lock);
		return ret;
	}
	/*
	 * Link activation. Enable power of pipeline elements only if the
	 * pipeline is already in use, i.e. its video node is opened.
	 * Recreate the controls destroyed during the link deactivation.
	 */
	mutex_lock(&fimc->lock);
	if (fimc->vid_cap.refcnt > 0) {
		ret = __fimc_pipeline_initialize(fimc, source->entity, true);
		if (!ret)
			ret = fimc_capture_ctrls_create(fimc);
	}
	mutex_unlock(&fimc->lock);

	return ret ? -EPIPE : ret;
}
/*
 * iss_pipeline_disable - Disable streaming on a pipeline
 * @pipe: ISS pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and stop
 * all modules in the chain. Wait synchronously for the modules to be stopped if
 * necessary.
 *
 * Return 0 when every module stopped, or -ETIMEDOUT if at least one failed.
 */
static int iss_pipeline_disable(struct iss_pipeline *pipe)
{
	struct iss_device *iss = pipe->output->iss;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int failure = 0;
	int ret;

	entity = &pipe->output->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_entity_remote_pad(pad);
		if (pad == NULL ||
		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		ret = v4l2_subdev_call(subdev, video, s_stream, 0);
		if (ret < 0) {
			dev_dbg(iss->dev, "%s: module stop timeout.\n",
				subdev->name);
			/* If the entity failed to stopped, assume it has
			 * crashed. Mark it as such, the ISS will be reset when
			 * applications will release it.
			 */
			iss->crashed |= 1U << subdev->entity.id;
			/* Record the failure but keep stopping the chain. */
			failure = -ETIMEDOUT;
		}
	}

	return failure;
}
/* * iss_pipeline_pm_power_one - Apply power change to an entity * @entity: The entity * @change: Use count change * * Change the entity use count by @change. If the entity is a subdev update its * power state by calling the core::s_power operation when the use count goes * from 0 to != 0 or from != 0 to 0. * * Return 0 on success or a negative error code on failure. */ static int iss_pipeline_pm_power_one(struct media_entity *entity, int change) { struct v4l2_subdev *subdev; subdev = is_media_entity_v4l2_subdev(entity) ? media_entity_to_v4l2_subdev(entity) : NULL; if (entity->use_count == 0 && change > 0 && subdev) { int ret; ret = v4l2_subdev_call(subdev, core, s_power, 1); if (ret < 0 && ret != -ENOIOCTLCMD) return ret; } entity->use_count += change; WARN_ON(entity->use_count < 0); if (entity->use_count == 0 && change < 0 && subdev) v4l2_subdev_call(subdev, core, s_power, 0); return 0; }
/* * Validate a pipeline by checking both ends of all links for format * discrepancies. * * Compute the minimum time per frame value as the maximum of time per frame * limits reported by every block in the pipeline. * * Return 0 if all formats match, or -EPIPE if at least one link is found with * different formats on its two ends or if the pipeline doesn't start with a * video source (either a subdev with no input pad, or a non-subdev entity). */ static int isp_video_validate_pipeline(struct isp_pipeline *pipe) { struct isp_device *isp = pipe->output->isp; struct media_pad *pad; struct v4l2_subdev *subdev; subdev = isp_video_remote_subdev(pipe->output, NULL); if (subdev == NULL) return -EPIPE; while (1) { /* Retrieve the sink format */ pad = &subdev->entity.pads[0]; if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; /* Update the maximum frame rate */ if (subdev == &isp->isp_res.subdev) omap3isp_resizer_max_rate(&isp->isp_res, &pipe->max_rate); /* Retrieve the source format. Return an error if no source * entity can be found, and stop checking the pipeline if the * source entity isn't a subdev. */ pad = media_entity_remote_source(pad); if (pad == NULL) return -EPIPE; if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; subdev = media_entity_to_v4l2_subdev(pad->entity); } return 0; }
/* updates external subdev(sensor/decoder) which is active */ static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video) { struct vpfe_device *vpfe_dev = video->vpfe_dev; struct vpfe_config *vpfe_cfg; struct v4l2_subdev *subdev; struct media_pad *remote; int i; remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]); if (remote == NULL) { pr_err("Invalid media connection to isif/ccdc\n"); return -EINVAL; } subdev = media_entity_to_v4l2_subdev(remote->entity); vpfe_cfg = vpfe_dev->pdev->platform_data; for (i = 0; i < vpfe_cfg->num_subdevs; i++) { if (!strcmp(vpfe_cfg->sub_devs[i].module_name, subdev->name)) { video->current_ext_subdev = &vpfe_cfg->sub_devs[i]; break; } } /* if user not linked decoder/sensor to isif/ccdc */ if (i == vpfe_cfg->num_subdevs) { pr_err("Invalid media chain connection to isif/ccdc\n"); return -EINVAL; } /* find the v4l2 subdev pointer */ for (i = 0; i < vpfe_dev->num_ext_subdevs; i++) { if (!strcmp(video->current_ext_subdev->module_name, vpfe_dev->sd[i]->name)) video->current_ext_subdev->subdev = vpfe_dev->sd[i]; } return 0; }
/*
 * Validate a pipeline by checking both ends of all links for format
 * discrepancies.
 *
 * Compute the minimum time per frame value as the maximum of time per frame
 * limits reported by every block in the pipeline.
 *
 * Return 0 if all formats match, or -EPIPE if at least one link is found with
 * different formats on its two ends or if the pipeline doesn't start with a
 * video source (either a subdev with no input pad, or a non-subdev entity).
 */
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
	struct isp_device *isp = pipe->output->isp;
	struct v4l2_subdev_format fmt_source;
	struct v4l2_subdev_format fmt_sink;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	/* Start from the L3 interconnect clock rate limit. */
	pipe->max_rate = pipe->l3_ick;

	subdev = isp_video_remote_subdev(pipe->output, NULL);
	if (subdev == NULL)
		return -EPIPE;

	while (1) {
		unsigned int shifter_link;
		/* Retrieve the sink format */
		pad = &subdev->entity.pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		fmt_sink.pad = pad->index;
		fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Update the maximum frame rate */
		if (subdev == &isp->isp_res.subdev)
			omap3isp_resizer_max_rate(&isp->isp_res,
						  &pipe->max_rate);

		/* Check ccdc maximum data rate when data comes from sensor
		 * TODO: Include ccdc rate in pipe->max_rate and compare the
		 * total pipe rate with the input data rate from sensor.
		 */
		if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
			unsigned int rate = UINT_MAX;

			omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
			if (isp->isp_ccdc.vpcfg.pixelclk > rate)
				return -ENOSPC;
		}

		/* If sink pad is on CCDC, the link has the lane shifter
		 * in the middle of it. */
		shifter_link = subdev == &isp->isp_ccdc.subdev;

		/* Retrieve the source format. Return an error if no source
		 * entity can be found, and stop checking the pipeline if the
		 * source entity isn't a subdev.
		 */
		pad = media_entity_remote_source(pad);
		if (pad == NULL)
			return -EPIPE;

		if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		subdev = media_entity_to_v4l2_subdev(pad->entity);

		fmt_source.pad = pad->index;
		fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Check if the two ends match */
		if (fmt_source.format.width != fmt_sink.format.width ||
		    fmt_source.format.height != fmt_sink.format.height)
			return -EPIPE;

		if (shifter_link) {
			/* The lane shifter can convert between a limited set
			 * of media bus codes; defer the check to
			 * isp_video_is_shiftable().
			 */
			unsigned int parallel_shift = 0;
			if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
				struct isp_parallel_platform_data *pdata =
					&((struct isp_v4l2_subdevs_group *)
					      subdev->host_priv)->bus.parallel;
				parallel_shift = pdata->data_lane_shift * 2;
			}
			if (!isp_video_is_shiftable(fmt_source.format.code,
						fmt_sink.format.code,
						parallel_shift))
				return -EPIPE;
		} else if (fmt_source.format.code != fmt_sink.format.code)
			return -EPIPE;
	}

	return 0;
}
/*
 * mxr_streamer_get - take a streaming reference on the mixer
 * @mdev: mixer device
 * @sd: mixer subdev whose source pads are searched for the output link
 *
 * Increment the streamer count and, when the expected number of streamers is
 * reached, configure the mixer output path and start the connected output
 * subdev (HDMI/SDO). When the pipeline is driven from the G-Scaler subdev,
 * the mixer clock, output device and local path registers are set up first.
 *
 * Serialized by mdev->s_mutex. Returns 0 on success or a negative error code.
 */
static int mxr_streamer_get(struct mxr_device *mdev, struct v4l2_subdev *sd)
{
	int i;
	int ret = 0;
	int local = 1;
	struct sub_mxr_device *sub_mxr;
	struct mxr_layer *layer;
	struct media_pad *pad;
	struct v4l2_mbus_framefmt mbus_fmt;
#if defined(CONFIG_CPU_EXYNOS4210)
	struct mxr_resources *res = &mdev->res;
#endif
	struct v4l2_control ctrl;

	mutex_lock(&mdev->s_mutex);
	++mdev->n_streamer;
	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
	/* If pipeline is started from Gscaler input video device,
	 * TV basic configuration must be set before running mixer */
	if (mdev->mxr_data_from == FROM_GSC_SD) {
		mxr_dbg(mdev, "%s: from gscaler\n", __func__);
		local = 0;
		/* enable mixer clock */
		ret = mxr_power_get(mdev);
		if (ret) {
			mxr_err(mdev, "power on failed\n");
			ret = -ENODEV;
			goto out;
		}
		/* turn on connected output device through link
		 * with mixer */
		mxr_output_get(mdev);

		/* Start every locally-connected video layer. */
		for (i = 0; i < MXR_MAX_SUB_MIXERS; ++i) {
			sub_mxr = &mdev->sub_mxr[i];
			if (sub_mxr->local) {
				layer = sub_mxr->layer[MXR_LAYER_VIDEO];
				layer->pipe.state = MXR_PIPELINE_STREAMING;
				mxr_layer_geo_fix(layer);
				layer->ops.format_set(layer, layer->fmt,
							  &layer->geo);
				layer->ops.stream_set(layer, 1);
				local += sub_mxr->local;
			}
		}
		/* local == 2 means both sub-mixers use the local path. */
		if (local == 2)
			mxr_layer_sync(mdev, MXR_ENABLE);

		/* Set the TVOUT register about gsc-mixer local path */
		mxr_reg_local_path_set(mdev, mdev->mxr0_gsc, mdev->mxr1_gsc,
				mdev->flags);
	}

	/* Alpha blending configuration always can be changed
	 * whenever streaming */
	mxr_set_alpha_blend(mdev);
	mxr_reg_set_color_range(mdev);
	mxr_reg_set_layer_prio(mdev);

	/* Only the streamer that completes the expected count programs the
	 * output; earlier callers just account for themselves.
	 */
	if ((mdev->n_streamer == 1 && local == 1) ||
	    (mdev->n_streamer == 2 && local == 2)) {
		for (i = MXR_PAD_SOURCE_GSCALER; i < MXR_PADS_NUM; ++i) {
			pad = &sd->entity.pads[i];

			/* find sink pad of output via enabled link*/
			pad = media_entity_remote_source(pad);
			if (pad)
				if (media_entity_type(pad->entity)
						== MEDIA_ENT_T_V4L2_SUBDEV)
					break;

			/* No output subdev found on any source pad. */
			if (i == MXR_PAD_SOURCE_GRP1) {
				ret = -ENODEV;
				goto out;
			}
		}

		/* From here on sd refers to the output (HDMI/SDO) subdev. */
		sd = media_entity_to_v4l2_subdev(pad->entity);

		mxr_dbg(mdev, "cookie of current output = (%d)\n",
			to_output(mdev)->cookie);

#if defined(CONFIG_CPU_EXYNOS4210)
		/* Select the mixer clock parent matching the output. */
		if (to_output(mdev)->cookie == 0)
			clk_set_parent(res->sclk_mixer, res->sclk_dac);
		else
			clk_set_parent(res->sclk_mixer, res->sclk_hdmi);
#endif
		mxr_reg_s_output(mdev, to_output(mdev)->cookie);

		/* Mirror the output's bus format into the mixer registers. */
		ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mbus_fmt);
		if (ret) {
			mxr_err(mdev, "failed to get mbus_fmt for output %s\n",
					sd->name);
			goto out;
		}
		ctrl.id = V4L2_CID_TV_GET_DVI_MODE;
		ret = v4l2_subdev_call(sd, core, g_ctrl, &ctrl);
		if (ret) {
			mxr_err(mdev, "failed to get DVI or HDMI mode %s\n",
					sd->name);
			goto out;
		}
		mxr_reg_set_mbus_fmt(mdev, &mbus_fmt, ctrl.value);
		ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mbus_fmt);
		if (ret) {
			mxr_err(mdev, "failed to set mbus_fmt for output %s\n",
					sd->name);
			goto out;
		}
		mxr_reg_streamon(mdev);

		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		if (ret) {
			mxr_err(mdev, "starting stream failed for output %s\n",
					sd->name);
			goto out;
		}

		/* Wait for vsync so the mixer setup takes effect. */
		ret = mxr_reg_wait4update(mdev);
		if (ret) {
			mxr_err(mdev, "failed to get vsync (%d) from output\n",
					ret);
			goto out;
		}
	}

out:
	mutex_unlock(&mdev->s_mutex);
	mxr_reg_dump(mdev);
	return ret;
}
/*
 * mxr_streamer_put - drop a streaming reference on the mixer
 * @mdev: mixer device
 * @sd: mixer subdev whose source pads are searched for the output link
 *
 * Decrement the streamer count and, when the last expected streamer leaves,
 * stop the mixer and the connected output subdev. When the pipeline is driven
 * from the G-Scaler subdev, power off the G-Scaler, clear the local path and
 * release the mixer clock and output device.
 *
 * Serialized by mdev->s_mutex. Returns 0 on success or a negative error code.
 */
static int mxr_streamer_put(struct mxr_device *mdev, struct v4l2_subdev *sd)
{
	int i;
	int ret = 0;
	int local = 1;
	struct media_pad *pad;
	struct sub_mxr_device *sub_mxr;
	struct mxr_layer *layer;
	struct v4l2_subdev *hdmi_sd;
	struct v4l2_subdev *gsc_sd;
	struct exynos_entity_data *md_data;

	mutex_lock(&mdev->s_mutex);
	--mdev->n_streamer;
	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);

	/* distinction number of local path */
	if (mdev->mxr_data_from == FROM_GSC_SD) {
		local = 0;
		for (i = 0; i < MXR_MAX_SUB_MIXERS; ++i) {
			sub_mxr = &mdev->sub_mxr[i];
			if (sub_mxr->local)
				local += sub_mxr->local;
		}
		if (local == 2)
			mxr_layer_sync(mdev, MXR_DISABLE);
	}

	/* Only the last expected streamer actually stops the hardware. */
	if ((mdev->n_streamer == 0 && local == 1) ||
	    (mdev->n_streamer == 1 && local == 2)) {
		for (i = MXR_PAD_SOURCE_GSCALER; i < MXR_PADS_NUM; ++i) {
			pad = &sd->entity.pads[i];

			/* find sink pad of output via enabled link*/
			pad = media_entity_remote_source(pad);
			if (pad)
				if (media_entity_type(pad->entity)
						== MEDIA_ENT_T_V4L2_SUBDEV)
					break;

			/* No output subdev found on any source pad. */
			if (i == MXR_PAD_SOURCE_GRP1) {
				ret = -ENODEV;
				goto out;
			}
		}

		hdmi_sd = media_entity_to_v4l2_subdev(pad->entity);

		mxr_reg_streamoff(mdev);
		/* vsync applies Mixer setup */
		ret = mxr_reg_wait4update(mdev);
		if (ret) {
			mxr_err(mdev, "failed to get vsync (%d) from output\n",
					ret);
			goto out;
		}
	}
	/* When using local path between gscaler and mixer, below stop sequence
	 * must be processed */
	if (mdev->mxr_data_from == FROM_GSC_SD) {
		pad = &sd->entity.pads[MXR_PAD_SINK_GSCALER];
		pad = media_entity_remote_source(pad);
		if (pad) {
			gsc_sd = media_entity_to_v4l2_subdev(
					pad->entity);
			mxr_dbg(mdev, "stop from %s\n", gsc_sd->name);
			md_data = (struct exynos_entity_data *)
				gsc_sd->dev_priv;
			md_data->media_ops->power_off(gsc_sd);
		}
	}

	/* hdmi_sd was resolved above under the same streamer-count guard. */
	if ((mdev->n_streamer == 0 && local == 1) ||
	    (mdev->n_streamer == 1 && local == 2)) {
		ret = v4l2_subdev_call(hdmi_sd, video, s_stream, 0);
		if (ret) {
			mxr_err(mdev, "stopping stream failed for output %s\n",
					hdmi_sd->name);
			goto out;
		}
	}
	/* turn off connected output device through link
	 * with mixer */
	if (mdev->mxr_data_from == FROM_GSC_SD) {
		for (i = 0; i < MXR_MAX_SUB_MIXERS; ++i) {
			sub_mxr = &mdev->sub_mxr[i];
			if (sub_mxr->local) {
				layer = sub_mxr->layer[MXR_LAYER_VIDEO];
				layer->ops.stream_set(layer, 0);
				layer->pipe.state = MXR_PIPELINE_IDLE;
			}
		}
		mxr_reg_local_path_clear(mdev);
		mxr_output_put(mdev);

		/* disable mixer clock */
		mxr_power_put(mdev);
	}

	WARN(mdev->n_streamer < 0, "negative number of streamers (%d)\n",
		mdev->n_streamer);

out:
	mutex_unlock(&mdev->s_mutex);
	mxr_reg_dump(mdev);
	return ret;
}
/*
 * isp_subdev_link_setup - Setup isp subdev connections
 * @entity: ispsubdev media entity
 * @local: Pad at the local end of the link
 * @remote: Pad at the remote end of the link
 * @flags: Link flags
 *
 * return -EINVAL or zero on success
 */
static int isp_subdev_link_setup(struct media_entity *entity,
				 const struct media_pad *local,
				 const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
	struct atomisp_device *isp = isp_sd->isp;
	unsigned int port;

	switch (local->index | media_entity_type(remote->entity)) {
	case ATOMISP_SUBDEV_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
		/* Read from the sensor CSI2-ports. */
		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
			isp_sd->input = ATOMISP_SUBDEV_INPUT_NONE;
			break;
		}

		/* Refuse a second input while one is connected. */
		if (isp_sd->input != ATOMISP_SUBDEV_INPUT_NONE)
			return -EBUSY;

		/* Map the remote entity to its CSI2 port number. */
		for (port = 0; port < ATOMISP_CAMERA_NR_PORTS; port++) {
			if (remote->entity ==
			    &isp->csi2_port[port].subdev.entity) {
				isp_sd->input =
					ATOMISP_SUBDEV_INPUT_CSI2_PORT1 + port;
				return 0;
			}
		}
		return -EINVAL;

	case ATOMISP_SUBDEV_PAD_SINK | MEDIA_ENT_T_DEVNODE:
		/* read from memory */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (isp_sd->input >= ATOMISP_SUBDEV_INPUT_CSI2_PORT1 &&
			    isp_sd->input < (ATOMISP_SUBDEV_INPUT_CSI2_PORT1 +
					     ATOMISP_CAMERA_NR_PORTS))
				return -EBUSY;
			isp_sd->input = ATOMISP_SUBDEV_INPUT_MEMORY;
		} else if (isp_sd->input == ATOMISP_SUBDEV_INPUT_MEMORY) {
			isp_sd->input = ATOMISP_SUBDEV_INPUT_NONE;
		}
		break;

	case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW | MEDIA_ENT_T_DEVNODE:
	case ATOMISP_SUBDEV_PAD_SOURCE_VF | MEDIA_ENT_T_DEVNODE:
	case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE | MEDIA_ENT_T_DEVNODE:
		/* always write to memory */
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * mxr_streamer_put - drop one streaming user of the Mixer and tear the
 * output pipeline down when the last user for the current path setup leaves.
 * @mdev: mixer device
 * @sd:   mixer subdev whose pads are walked to find the connected output
 *
 * Counterpart of mxr_streamer_get(). Serializes on mdev->s_mutex.
 * Returns 0 on success or a negative errno from the pad walk or the
 * output (HDMI) subdev calls.
 */
static int mxr_streamer_put(struct mxr_device *mdev, struct v4l2_subdev *sd)
{
	int i;
	int ret = 0;
	int local = 1;
	struct media_pad *pad;
	struct sub_mxr_device *sub_mxr;
	struct mxr_layer *layer;
	struct v4l2_subdev *hdmi_sd;
	struct v4l2_subdev *gsc_sd;
	struct exynos_entity_data *md_data;
	/* NOTE(review): 'pdata' looks unused, but the is_ip_ver_* macros
	 * below presumably expand to references to it — confirm before
	 * removing. */
	struct s5p_mxr_platdata *pdata = mdev->pdata;
	struct v4l2_control ctrl;

	mutex_lock(&mdev->s_mutex);
	--mdev->n_streamer;
	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);

	/* Count the active local (GScaler -> Mixer) paths. */
	if (mdev->mxr_data_from == FROM_GSC_SD) {
		local = 0;
		for (i = 0; i < MXR_MAX_SUB_MIXERS; ++i) {
			sub_mxr = &mdev->sub_mxr[i];
			if (sub_mxr->local)
				local += sub_mxr->local;
		}
		/* Both sub-mixers carry local paths: stop layer sync first. */
		if (local == 2)
			mxr_layer_sync(mdev, MXR_DISABLE);

		/* stop gscaler --> waiting for frame done */
		pad = &sd->entity.pads[MXR_PAD_SINK_GSCALER];
		pad = media_entity_remote_source(pad);
		if (pad) {
			gsc_sd = media_entity_to_v4l2_subdev(
					pad->entity);
			mxr_dbg(mdev, "stop from %s\n", gsc_sd->name);
			md_data = (struct exynos_entity_data *)
					gsc_sd->dev_priv;
			/* Only these IP revisions power the GScaler off here. */
			if (is_ip_ver_5g_1 || is_ip_ver_5a_0)
				md_data->media_ops->power_off(gsc_sd);
		}

		/* disable video layer */
		for (i = 0; i < MXR_MAX_SUB_MIXERS; ++i) {
			sub_mxr = &mdev->sub_mxr[i];
			if (sub_mxr->local) {
				layer = sub_mxr->layer[MXR_LAYER_VIDEO];
				layer->ops.stream_set(layer, 0);
				layer->pipe.state = MXR_PIPELINE_IDLE;
			}
		}
	}

	/* Last streamer for this path configuration: stop the output chain. */
	if ((mdev->n_streamer == 0 && local == 1) ||
	    (mdev->n_streamer == 1 && local == 2)) {
		for (i = MXR_PAD_SOURCE_GSCALER; i < MXR_PADS_NUM; ++i) {
			pad = &sd->entity.pads[i];
			/* find sink pad of output via enabled link */
			pad = media_entity_remote_source(pad);
			if (pad)
				if (media_entity_type(pad->entity) ==
						MEDIA_ENT_T_V4L2_SUBDEV)
					break;
			if (i == MXR_PAD_SOURCE_GRP1) {
				ret = -ENODEV;
				goto out;
			}
		}
		/* NOTE(review): if the loop can finish without the break and
		 * without the GRP1 bail-out, 'pad' may be NULL here — verify
		 * MXR_PADS_NUM against MXR_PAD_SOURCE_GRP1. */
		hdmi_sd = media_entity_to_v4l2_subdev(pad->entity);

		mxr_reg_streamoff(mdev);
		/* vsync applies Mixer setup */
		ret = mxr_reg_wait4update(mdev);
		if (ret) {
			mxr_err(mdev, "failed to get vsync (%d) from output\n",
					ret);
			goto out;
		}
		/* stop hdmi */
		ctrl.id = V4L2_CID_TV_HDMI_STATUS;
		ret = v4l2_subdev_call(hdmi_sd, core, g_ctrl, &ctrl);
		if (ret) {
			mxr_err(mdev, "failed to get output %s status for stop\n",
					hdmi_sd->name);
			goto out;
		}
		/*
		 * HDMI should be turned off only when not in use:
		 * 1. cable out
		 * 2. suspend (blank is called at suspend)
		 */
		if (ctrl.value == (HDMI_STREAMING | HPD_LOW) || mdev->blank) {
			ret = v4l2_subdev_call(hdmi_sd, video, s_stream, 0);
			if (ret) {
				mxr_err(mdev, "stopping stream failed for output %s\n",
						hdmi_sd->name);
				goto out;
			}
			ret = v4l2_subdev_call(hdmi_sd, core, s_power, 0);
			if (ret) {
				mxr_err(mdev, "failed to put power for output %s\n",
						hdmi_sd->name);
				goto out;
			}
			mdev->blank = 0;
		}
	}
	/* disable mixer clock */
	if (mdev->mxr_data_from == FROM_GSC_SD)
		mxr_power_put(mdev);

	WARN(mdev->n_streamer < 0, "negative number of streamers (%d)\n",
		mdev->n_streamer);

out:
#if defined(CONFIG_TV_USE_BUS_DEVFREQ)
	/* Drop the bus/device QoS requests taken in mxr_streamer_get(). */
	if ((mdev->n_streamer == 0 && local == 1) ||
	    (mdev->n_streamer == 1 && local == 2)) {
		if (is_ip_ver_5a)
			pm_qos_remove_request(&exynos5_tv_mif_qos);
		pm_qos_remove_request(&exynos5_tv_int_qos);
	}
#endif
	mutex_unlock(&mdev->s_mutex);
	mxr_reg_dump(mdev);
	return ret;
}
/*
 * vdic_link_setup - validate and record VDIC link changes.
 * @entity: the VDIC media entity
 * @local:  pad at the VDIC end of the link
 * @remote: pad at the far end of the link
 * @flags:  link flags (MEDIA_LNK_FL_ENABLED when enabling)
 *
 * Source links track the single downstream sink subdev; sink links select
 * the input source: either the IDMAC video node (memory input) or an IPU
 * CSI direct source pad. Returns 0 on success or a negative errno.
 */
static int vdic_link_setup(struct media_entity *entity,
			   const struct media_pad *local,
			   const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_subdev *linked_sd;
	int enable = !!(flags & MEDIA_LNK_FL_ENABLED);
	int ret = 0;

	dev_dbg(priv->dev, "link setup %s -> %s", remote->entity->name,
		local->entity->name);

	mutex_lock(&priv->lock);

	if (local->flags & MEDIA_PAD_FL_SOURCE) {
		/* Source pad: record (or clear) the downstream sink subdev. */
		if (!is_media_entity_v4l2_subdev(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}
		linked_sd = media_entity_to_v4l2_subdev(remote->entity);
		if (!enable)
			priv->sink_sd = NULL;
		else if (priv->sink_sd)
			ret = -EBUSY;	/* only one sink allowed */
		else
			priv->sink_sd = linked_sd;
		goto out;
	}

	/* Sink pad: disabling simply forgets the source. */
	if (!enable) {
		priv->src = NULL;
		goto out;
	}
	if (priv->src) {
		ret = -EBUSY;	/* an input is already connected */
		goto out;
	}

	if (local->index == VDIC_SINK_PAD_IDMAC) {
		/* Input arrives from memory through the IDMAC video node. */
		if (!is_media_entity_v4l2_video_device(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}
		if (!priv->vdev) {
			ret = -ENODEV;
			goto out;
		}
		priv->csi_direct = false;
	} else {
		/* Direct pad: only an IPU CSI direct source pad may link. */
		if (!is_media_entity_v4l2_subdev(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}
		linked_sd = media_entity_to_v4l2_subdev(remote->entity);
		if (!(linked_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) ||
		    remote->index != CSI_SRC_PAD_DIRECT) {
			ret = -EINVAL;
			goto out;
		}
		priv->csi_direct = true;
	}

	priv->src = remote->entity;
	/* record which input pad is now active */
	priv->active_input_pad = local->index;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
static int csi2_configure(struct isp_csi2_device *csi2) { const struct isp_v4l2_subdevs_group *pdata; struct isp_device *isp = csi2->isp; struct isp_csi2_timing_cfg *timing = &csi2->timing[0]; struct v4l2_subdev *sensor; struct media_pad *pad; /* * CSI2 fields that can be updated while the context has * been enabled or the interface has been enabled are not * updated dynamically currently. So we do not allow to * reconfigure if either has been enabled */ if (csi2->contexts[0].enabled || csi2->ctrl.if_enable) return -EBUSY; pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]); sensor = media_entity_to_v4l2_subdev(pad->entity); pdata = sensor->host_priv; csi2->frame_skip = 0; v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip); csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div; csi2->ctrl.frame_mode = ISP_CSI2_FRAME_IMMEDIATE; csi2->ctrl.ecc_enable = pdata->bus.csi2.crc; timing->ionum = 1; timing->force_rx_mode = 1; timing->stop_state_16x = 1; timing->stop_state_4x = 1; timing->stop_state_counter = 0x1FF; /* * The CSI2 receiver can't do any format conversion except DPCM * decompression, so every set_format call configures both pads * and enables DPCM decompression as a special case: */ if (csi2->formats[CSI2_PAD_SINK].code != csi2->formats[CSI2_PAD_SOURCE].code) csi2->dpcm_decompress = true; else csi2->dpcm_decompress = false; csi2->contexts[0].format_id = csi2_ctx_map_format(csi2); if (csi2->video_out.bpl_padding == 0) csi2->contexts[0].data_offset = 0; else csi2->contexts[0].data_offset = csi2->video_out.bpl_value; /* * Enable end of frame and end of line signals generation for * context 0. These signals are generated from CSI2 receiver to * qualify the last pixel of a frame and the last pixel of a line. * Without enabling the signals CSI2 receiver writes data to memory * beyond buffer size and/or data line offset is not handled correctly. 
*/ csi2->contexts[0].eof_enabled = 1; csi2->contexts[0].eol_enabled = 1; csi2_irq_complexio1_set(isp, csi2, 1); csi2_irq_ctx_set(isp, csi2, 1); csi2_irq_status_set(isp, csi2, 1); /* Set configuration (timings, format and links) */ csi2_timing_config(isp, csi2, timing); csi2_recv_config(isp, csi2, &csi2->ctrl); csi2_ctx_config(isp, csi2, &csi2->contexts[0]); return 0; }
/*
 * mxr_streamer_get - register one streaming user of the Mixer and bring
 * the output pipeline up when the first user for the current path appears.
 * @mdev: mixer device
 * @sd:   mixer subdev whose pads are walked to find the connected output
 *
 * Counterpart of mxr_streamer_put(). Serializes on mdev->s_mutex.
 * Returns 0 on success or a negative errno from clock, pad-walk or
 * output (HDMI) subdev calls.
 */
static int mxr_streamer_get(struct mxr_device *mdev, struct v4l2_subdev *sd)
{
	int i;
	int ret = 0;
	int local = 1;
	struct sub_mxr_device *sub_mxr;
	struct mxr_layer *layer;
	struct media_pad *pad;
	/* NOTE(review): 'pdata' looks unused, but the is_ip_ver_* macros
	 * below presumably expand to references to it — confirm before
	 * removing. */
	struct s5p_mxr_platdata *pdata = mdev->pdata;
	struct v4l2_mbus_framefmt mbus_fmt;
	struct v4l2_control ctrl;

	mutex_lock(&mdev->s_mutex);
	++mdev->n_streamer;
	mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);

	/* If pipeline is started from Gscaler input video device,
	 * TV basic configuration must be set before running mixer */
	if (mdev->mxr_data_from == FROM_GSC_SD) {
		mxr_dbg(mdev, "%s: from gscaler\n", __func__);
		local = 0;
		/* enable mixer clock */
		ret = mxr_power_get(mdev);
		if (ret < 0) {
			mxr_err(mdev, "power on failed for video layer\n");
			ret = -ENODEV;
			goto out;
		}
		/* Start every local video layer and count the local paths. */
		for (i = 0; i < MXR_MAX_SUB_MIXERS; ++i) {
			sub_mxr = &mdev->sub_mxr[i];
			if (sub_mxr->local) {
				layer = sub_mxr->layer[MXR_LAYER_VIDEO];
				layer->pipe.state = MXR_PIPELINE_STREAMING;
				mxr_layer_geo_fix(layer);
				layer->ops.format_set(layer, layer->fmt,
							&layer->geo);
				layer->ops.stream_set(layer, 1);
				local += sub_mxr->local;
			}
		}
		/* Both sub-mixers carry local paths: enable layer sync. */
		if (local == 2)
			mxr_layer_sync(mdev, MXR_ENABLE);

		/* Set the TVOUT register about gsc-mixer local path */
		mxr_reg_local_path_set(mdev);
	}

	/* Alpha blending configuration always can be changed
	 * whenever streaming */
	mxr_set_alpha_blend(mdev);
	mxr_reg_set_color_range(mdev);
	mxr_reg_set_layer_prio(mdev);

	if (is_ip_ver_5s || is_ip_ver_5s2)
		mxr_reg_set_resolution(mdev);

	/* First streamer for this path configuration: start the output. */
	if ((mdev->n_streamer == 1 && local == 1) ||
	    (mdev->n_streamer == 2 && local == 2)) {
#if defined(CONFIG_TV_USE_BUS_DEVFREQ)
		/* Hold bus/device QoS while streaming; released in _put(). */
		if (is_ip_ver_5a)
			pm_qos_add_request(&exynos5_tv_mif_qos,
					PM_QOS_BUS_THROUGHPUT, 800000);
		pm_qos_add_request(&exynos5_tv_int_qos,
				PM_QOS_DEVICE_THROUGHPUT, 400000);
#endif
		for (i = MXR_PAD_SOURCE_GSCALER; i < MXR_PADS_NUM; ++i) {
			pad = &sd->entity.pads[i];
			/* find sink pad of output via enabled link */
			pad = media_entity_remote_source(pad);
			if (pad)
				if (media_entity_type(pad->entity) ==
						MEDIA_ENT_T_V4L2_SUBDEV)
					break;
			if (i == MXR_PAD_SOURCE_GRP1) {
				ret = -ENODEV;
				goto out;
			}
		}
		/* NOTE(review): if the loop can finish without the break and
		 * without the GRP1 bail-out, 'pad' may be NULL here — verify
		 * MXR_PADS_NUM against MXR_PAD_SOURCE_GRP1. */
		sd = media_entity_to_v4l2_subdev(pad->entity);

		mxr_dbg(mdev, "cookie of current output = (%d)\n",
				to_output(mdev)->cookie);
		mxr_reg_s_output(mdev, to_output(mdev)->cookie);

		/* Negotiate the bus format with the output subdev. */
		ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mbus_fmt);
		if (ret) {
			mxr_err(mdev, "failed to get mbus_fmt for output %s\n",
					sd->name);
			goto out;
		}
		ctrl.id = V4L2_CID_TV_GET_DVI_MODE;
		ret = v4l2_subdev_call(sd, core, g_ctrl, &ctrl);
		if (ret) {
			mxr_err(mdev, "failed to get DVI or HDMI mode %s\n",
					sd->name);
			goto out;
		}
		mxr_reg_set_mbus_fmt(mdev, &mbus_fmt, ctrl.value);
		ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mbus_fmt);
		if (ret) {
			mxr_err(mdev, "failed to set mbus_fmt for output %s\n",
					sd->name);
			goto out;
		}
		mxr_reg_streamon(mdev);

		/* start hdmi */
		ctrl.id = V4L2_CID_TV_HDMI_STATUS;
		ret = v4l2_subdev_call(sd, core, g_ctrl, &ctrl);
		if (ret) {
			mxr_err(mdev, "failed to get output %s status for start\n",
					sd->name);
			goto out;
		}
		/* Power up and start the output only when it reports
		 * stopped with HPD asserted (presumably cable plugged). */
		if (ctrl.value == (HDMI_STOP | HPD_HIGH)) {
			ret = v4l2_subdev_call(sd, core, s_power, 1);
			if (ret) {
				mxr_err(mdev, "failed to get power for output %s\n",
						sd->name);
				goto out;
			}
			ret = v4l2_subdev_call(sd, video, s_stream, 1);
			if (ret) {
				mxr_err(mdev, "starting stream failed for output %s\n",
						sd->name);
				goto out;
			}
		}
		/* Wait for vsync so the hardware latches the new setup. */
		ret = mxr_reg_wait4update(mdev);
		if (ret) {
			mxr_err(mdev, "failed to get vsync (%d) from output\n",
					ret);
			goto out;
		}
	}
out:
	mutex_unlock(&mdev->s_mutex);
	mxr_reg_dump(mdev);
	return ret;
}