static int gsc_capture_close(struct file *file) { struct gsc_dev *gsc = video_drvdata(file); struct vb2_queue *q = &gsc->cap.vbq; int ret = 0; gsc_dbg("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state); if (q->streaming) gsc_capture_stop_streaming(q); if (--gsc->cap.refcnt == 0) { clear_bit(ST_CAPT_OPEN, &gsc->state); gsc_dbg("G-Scaler h/w disable control"); clear_bit(ST_CAPT_RUN, &gsc->state); vb2_queue_release(&gsc->cap.vbq); gsc_ctrls_delete(gsc->cap.ctx); ret = gsc_clk_disable_for_wb(gsc); if (ret) return ret; } pm_runtime_put_sync(&gsc->pdev->dev); return v4l2_fh_release(file); }
static int gsc_m2m_open(struct file *file) { struct gsc_dev *gsc = video_drvdata(file); struct gsc_ctx *ctx = NULL; int ret; gsc_dbg("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state); if (gsc_out_opened(gsc) || gsc_cap_opened(gsc)) return -EBUSY; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; v4l2_fh_init(&ctx->fh, gsc->m2m.vfd); ret = gsc_ctrls_create(ctx); if (ret) goto error_fh; /* Use separate control handler per file handle */ ctx->fh.ctrl_handler = &ctx->ctrl_handler; file->private_data = &ctx->fh; v4l2_fh_add(&ctx->fh); ctx->gsc_dev = gsc; /* Default color format */ ctx->s_frame.fmt = get_format(0); ctx->d_frame.fmt = get_format(0); /* Setup the device context for mem2mem mode. */ ctx->state |= GSC_CTX_M2M; ctx->flags = 0; ctx->in_path = GSC_DMA; ctx->out_path = GSC_DMA; spin_lock_init(&ctx->slock); INIT_LIST_HEAD(&ctx->fence_wait_list); INIT_WORK(&ctx->fence_work, gsc_m2m_fence_work); ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init); if (IS_ERR(ctx->m2m_ctx)) { gsc_err("Failed to initialize m2m context"); ret = PTR_ERR(ctx->m2m_ctx); goto error_fh; } if (gsc->m2m.refcnt++ == 0) set_bit(ST_M2M_OPEN, &gsc->state); gsc_dbg("gsc m2m driver is opened, ctx(0x%p)", ctx); return 0; error_fh: v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); return ret; }
/*
 * gsc_capture_link_setup - media-controller link setup for the capture subdev.
 *
 * Handles (local pad | remote entity type) combinations:
 *  - sink pad linked to another subdev: record the input source.  FIMD
 *    (matched by entity name) selects the writeback path; otherwise the
 *    FIMC-LITE preview or camcording path is chosen by the remote pad index.
 *  - source pad linked to the video devnode: only traced.
 *
 * Returns 0 on success, -EBUSY if an input is already connected.
 */
static int gsc_capture_link_setup(struct media_entity *entity,
				  const struct media_pad *local,
				  const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct gsc_dev *gsc = v4l2_get_subdevdata(sd);
	struct gsc_capture_device *cap = &gsc->cap;

	/* Pad index and entity type are OR-ed into one switch selector. */
	switch (local->index | media_entity_type(remote->entity)) {
	case GSC_PAD_SINK | MEDIA_ENT_T_V4L2_SUBDEV:
		if (flags & MEDIA_LNK_FL_ENABLED) {
			gsc_dbg("local to gsc-subdev link enable");
			/* Only one input link may be enabled at a time. */
			if (cap->input != 0)
				return -EBUSY;
			/* Write-Back link enabled */
			if (!strcmp(remote->entity->name, FIMD_MODULE_NAME)) {
				gsc->cap.sd_disp =
					media_entity_to_v4l2_subdev(remote->entity);
				gsc->cap.sd_disp->grp_id = FIMD_GRP_ID;
				cap->ctx->in_path = GSC_WRITEBACK;
				cap->input = GSC_IN_FIMD_WRITEBACK;
			} else if (remote->index == FLITE_PAD_SOURCE_PREV) {
				cap->ctx->in_path = GSC_CAMERA;
				cap->input = GSC_IN_FLITE_PREVIEW;
			} else {
				cap->ctx->in_path = GSC_CAMERA;
				cap->input = GSC_IN_FLITE_CAMCORDING;
			}
		} else {
			/* Link disabled: drop the matching pipeline subdev. */
			if (cap->input == GSC_IN_FIMD_WRITEBACK)
				gsc->pipeline.disp = NULL;
			else if ((cap->input == GSC_IN_FLITE_PREVIEW) ||
				 (cap->input == GSC_IN_FLITE_CAMCORDING))
				gsc->pipeline.flite = NULL;
			gsc_dbg("local to gsc-subdev link disable");
			cap->input = GSC_IN_NONE;
		}
		break;
	case GSC_PAD_SOURCE | MEDIA_ENT_T_DEVNODE:
		/* Subdev-to-devnode link needs no state change, only trace. */
		if (flags & MEDIA_LNK_FL_ENABLED)
			gsc_dbg("gsc-subdev to gsc-video link enable");
		else
			gsc_dbg("gsc-subdev to gsc-video link disable");
		break;
	}

	return 0;
}
static int gsc_m2m_streamon(struct file *file, void *fh, enum v4l2_buf_type type) { struct gsc_ctx *ctx = fh_to_ctx(fh); struct gsc_dev *gsc = ctx->gsc_dev; struct exynos_platform_gscaler *pdata = gsc->pdata; /* The source and target color format need to be set */ if (V4L2_TYPE_IS_OUTPUT(type)) { if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx)) return -EINVAL; } else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) { return -EINVAL; } gsc_pm_qos_ctrl(gsc, GSC_QOS_ON, pdata->mif_min, pdata->int_min); if (gsc->protected_content) { int id = gsc->id + 3; exynos_smc(SMC_PROTECTION_SET, 0, id, 1); gsc_dbg("DRM enable"); } return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); }
/*
 * gsc_capture_subdev_close - subdev .close() handler.
 *
 * No per-handle subdev state is kept, so there is nothing to release;
 * the debug trace is the only effect.
 */
static int gsc_capture_subdev_close(struct v4l2_subdev *sd,
				    struct v4l2_subdev_fh *fh)
{
	gsc_dbg("");
	return 0;
}
/*
 * gsc_m2m_release - release an m2m file handle and destroy its context.
 *
 * Releases the m2m context, the per-handle controls and the V4L2 file
 * handle, then frees the context.  The last close clears ST_M2M_OPEN.
 */
static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	gsc_dbg("pid: %d, state: 0x%lx, refcnt= %d",
		task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	/* if we didn't properly sequence with the secure side to turn off
	 * content protection, we may be left in a very bad state and the
	 * only way to recover this reliably is to reboot.
	 */
	BUG_ON(gsc->protected_content);

	/* Free the queue names before the m2m context (and the queues it
	 * embeds) goes away.
	 * NOTE(review): this assumes q.name was kmalloc'ed at queue-init
	 * time — confirm against the queue setup code.
	 */
	kfree(ctx->m2m_ctx->cap_q_ctx.q.name);
	kfree(ctx->m2m_ctx->out_q_ctx.q.name);
	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	/* Last close clears the device-wide m2m-open state. */
	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);
	kfree(ctx);

	return 0;
}
/*
 * gsc_capture_s_fmt_mplane - VIDIOC_S_FMT handler for the capture node
 * (multi-planar).
 *
 * Validates/adjusts the requested format via try_fmt, rejects changes
 * while the queue is streaming, then stores the format and per-plane
 * payload sizes into the destination frame.
 *
 * Returns 0 on success, -EBUSY while streaming, -EINVAL on an
 * unsupported pixel format.
 */
static int gsc_capture_s_fmt_mplane(struct file *file, void *fh,
				    struct v4l2_format *f)
{
	struct gsc_dev *gsc = video_drvdata(file);
	struct gsc_ctx *ctx = gsc->cap.ctx;
	struct gsc_frame *frame;
	struct v4l2_pix_format_mplane *pix;
	int i, ret = 0;

	ret = gsc_capture_try_fmt_mplane(file, fh, f);
	if (ret)
		return ret;

	/* The format may not change while buffers are streaming. */
	if (vb2_is_streaming(&gsc->cap.vbq)) {
		gsc_err("queue (%d) busy", f->type);
		return -EBUSY;
	}

	frame = &ctx->d_frame;
	pix = &f->fmt.pix_mp;
	frame->fmt = find_format(&pix->pixelformat, NULL, 0);
	if (!frame->fmt)
		return -EINVAL;

	/* NOTE(review): payload is bytesperline * full image height for
	 * every plane, while the m2m path uses plane_fmt[i].sizeimage.
	 * For vertically subsampled chroma planes these differ —
	 * confirm which is intended here.
	 */
	for (i = 0; i < frame->fmt->nr_comp; i++)
		frame->payload[i] =
			pix->plane_fmt[i].bytesperline * pix->height;

	gsc_set_frame_size(frame, pix->width, pix->height);

	gsc_dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);

	return 0;
}
/*
 * gsc_m2m_release - release an m2m file handle (DRM-aware variant).
 *
 * Tears down the m2m context, controls and file handle, clears
 * ST_M2M_OPEN on last close, and — as a recovery measure — disables
 * secure-content protection if it was left enabled.
 */
static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	gsc_dbg("pid: %d, state: 0x%lx, refcnt= %d",
		task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);

	/* Abnormal case: DRM protection should already have been dropped
	 * at streamoff; undo it here as a last resort.
	 */
	if (gsc->protected_content) {
		int id = gsc->id + 3;
		gsc_err("DRM should be disabled before device close");
		exynos_smc(SMC_PROTECTION_SET, 0, id, 0);
		gsc_set_protected_content(gsc, false);
	}
	kfree(ctx);

	return 0;
}
/*
 * gsc_cap_pipeline_s_stream - start or stop the whole capture pipeline.
 * @gsc: the G-Scaler device
 * @on:  non-zero to start streaming, zero to stop
 *
 * Start order: GSC subdev first, then either the display (writeback)
 * subdev or the FLITE -> CSIS -> sensor chain.  Stop order is the
 * reverse of the camera chain, with the GSC subdev stopped last —
 * upstream sources are quiesced before the consumer is torn down.
 *
 * -ENOIOCTLCMD from a subdev (op not implemented) is treated as
 * success.  Returns 0 or a negative errno.
 */
int gsc_cap_pipeline_s_stream(struct gsc_dev *gsc, int on)
{
	struct gsc_pipeline *p = &gsc->pipeline;
	int ret = 0;

	/* Need either a camera chain (sensor + flite) or a display. */
	if ((!p->sensor || !p->flite) && (!p->disp))
		return -ENODEV;

	if (on) {
		gsc_dbg("start stream");
		ret = v4l2_subdev_call(p->sd_gsc, video, s_stream, 1);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return ret;
		if (p->disp) {
			ret = v4l2_subdev_call(p->disp, video, s_stream, 1);
			if (ret < 0 && ret != -ENOIOCTLCMD)
				return ret;
		} else {
			ret = v4l2_subdev_call(p->flite, video, s_stream, 1);
			if (ret < 0 && ret != -ENOIOCTLCMD)
				return ret;
			ret = v4l2_subdev_call(p->csis, video, s_stream, 1);
			if (ret < 0 && ret != -ENOIOCTLCMD)
				return ret;
			ret = v4l2_subdev_call(p->sensor, video, s_stream, 1);
		}
	} else {
		gsc_dbg("stop stream");
		if (p->disp) {
			ret = v4l2_subdev_call(p->disp, video, s_stream, 0);
			if (ret < 0 && ret != -ENOIOCTLCMD)
				return ret;
		} else {
			ret = v4l2_subdev_call(p->sensor, video, s_stream, 0);
			if (ret < 0 && ret != -ENOIOCTLCMD)
				return ret;
			ret = v4l2_subdev_call(p->csis, video, s_stream, 0);
			if (ret < 0 && ret != -ENOIOCTLCMD)
				return ret;
			ret = v4l2_subdev_call(p->flite, video, s_stream, 0);
		}
		ret = v4l2_subdev_call(p->sd_gsc, video, s_stream, 0);
	}

	return ret == -ENOIOCTLCMD ? 0 : ret;
}
void gsc_hw_set_output_addr(struct gsc_dev *dev, struct gsc_addr *addr, int index) { gsc_dbg("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X", index, addr->y, addr->cb, addr->cr); writel(addr->y, dev->regs + GSC_OUT_BASE_ADDR_Y(index)); writel(addr->cb, dev->regs + GSC_OUT_BASE_ADDR_CB(index)); writel(addr->cr, dev->regs + GSC_OUT_BASE_ADDR_CR(index)); }
/*
 * gsc_m2m_buf_queue - vb2 .buf_queue callback for the m2m node.
 *
 * Hands the buffer to the V4L2 m2m core; a missing m2m context is
 * silently ignored.
 */
static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	gsc_dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);

	if (!ctx->m2m_ctx)
		return;

	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
/*
 * gsc_capture_subdev_s_stream - capture subdev .s_stream handler.
 *
 * On enable, programs the full capture datapath: IRQ masks/enable,
 * writeback vs. camera-interface selection, input/output geometry and
 * formats, alpha, and the pre/main scalers with their coefficients,
 * then kicks the hardware and marks the stream state bits.
 *
 * On disable only a trace is emitted; hardware shutdown is handled
 * elsewhere in the stop path.
 */
static int gsc_capture_subdev_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct gsc_dev *gsc = v4l2_get_subdevdata(sd);
	struct gsc_capture_device *cap = &gsc->cap;
	struct gsc_ctx *ctx = cap->ctx;

	if (enable) {
		gsc_dbg("start");
		/* Unmask completion/overflow IRQs, continuous-frame mode. */
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_overflow_irq_mask(gsc, false);
		gsc_hw_set_one_frm_mode(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);
		/* A display in the pipeline means FIMD writeback input. */
		if (gsc->pipeline.disp) {
			gsc_hw_set_sysreg_writeback(gsc);
			gsc_hw_set_pixelasync_reset_wb(gsc);
		} else {
			gsc_hw_set_pxlasync_camif_lo_mask(gsc, true);
		}
		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);
		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);
		gsc_hw_set_global_alpha(ctx);
		gsc_capture_scaler_info(ctx);
		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_h_coef(ctx);
		gsc_hw_set_v_coef(ctx);
		set_bit(ST_CAPT_PEND, &gsc->state);
		gsc_hw_enable_control(gsc, true);
		set_bit(ST_CAPT_STREAM, &gsc->state);
	} else {
		gsc_dbg("stop");
	}

	return 0;
}
/*
 * gsc_register_m2m_device - allocate and register the m2m video node.
 * @gsc: the G-Scaler device
 *
 * Allocates a video_device, initializes the V4L2 m2m framework for it,
 * registers the node at EXYNOS_VIDEONODE_GSC_M2M(id) and arms the
 * watchdog timer used by the device-run path.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is unwound via the goto cleanup chain.
 */
int gsc_register_m2m_device(struct gsc_dev *gsc)
{
	struct video_device *vfd;
	struct platform_device *pdev;
	int ret = 0;

	if (!gsc)
		return -ENODEV;

	pdev = gsc->pdev;

	vfd = video_device_alloc();
	if (!vfd) {
		dev_err(&pdev->dev, "Failed to allocate video device\n");
		return -ENOMEM;
	}

	vfd->fops = &gsc_m2m_fops;
	vfd->ioctl_ops = &gsc_m2m_ioctl_ops;
	vfd->release = video_device_release;
	vfd->lock = &gsc->lock;
	vfd->vfl_dir = VFL_DIR_M2M;
	snprintf(vfd->name, sizeof(vfd->name), "%s:m2m", dev_name(&pdev->dev));

	video_set_drvdata(vfd, gsc);

	gsc->m2m.vfd = vfd;
	gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
	if (IS_ERR(gsc->m2m.m2m_dev)) {
		dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
		ret = PTR_ERR(gsc->m2m.m2m_dev);
		goto err_m2m_r1;
	}

	ret = video_register_device(vfd, VFL_TYPE_GRABBER,
				    EXYNOS_VIDEONODE_GSC_M2M(gsc->id));
	if (ret) {
		dev_err(&pdev->dev, "%s(): failed to register video device\n",
			__func__);
		goto err_m2m_r2;
	}

	/* Watchdog timer used to detect a stuck hardware operation. */
	setup_timer(&gsc->op_timer, gsc_op_timer_handler, (unsigned long)gsc);

	gsc_dbg("gsc m2m driver registered as /dev/video%d", vfd->num);
	return 0;

err_m2m_r2:
	v4l2_m2m_release(gsc->m2m.m2m_dev);
err_m2m_r1:
	video_device_release(gsc->m2m.vfd);
	return ret;
}
/*
 * gsc_cap_stop_capture - wait for the capture hardware to go idle.
 *
 * Sleeps until the ISR clears ST_CAPT_RUN or GSC_SHUTDOWN_TIMEOUT
 * elapses.  Returns 0 once idle, -EBUSY on timeout.
 */
static int gsc_cap_stop_capture(struct gsc_dev *gsc)
{
	long remaining;

	gsc_dbg("G-Scaler h/w disable control");

	remaining = wait_event_timeout(gsc->irq_queue,
				       !test_bit(ST_CAPT_RUN, &gsc->state),
				       GSC_SHUTDOWN_TIMEOUT);
	if (!remaining) {
		gsc_err("wait timeout");
		return -EBUSY;
	}

	return 0;
}
/*
 * gsc_capture_close - release a capture file handle (pipeline variant).
 *
 * On the last close, disables the hardware, shuts down the media
 * pipeline, releases the vb2 queue and deletes the capture controls,
 * then drops the runtime-PM reference and the V4L2 file handle.
 */
static int gsc_capture_close(struct file *file)
{
	struct gsc_dev *gsc = video_drvdata(file);

	gsc_dbg("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);

	if (--gsc->cap.refcnt == 0) {
		clear_bit(ST_CAPT_OPEN, &gsc->state);
		gsc_dbg("G-Scaler h/w disable control");
		gsc_hw_enable_control(gsc, false);
		clear_bit(ST_CAPT_STREAM, &gsc->state);
		gsc_cap_pipeline_shutdown(gsc);
	}

	/* NOTE(review): this re-tests the same condition as the block
	 * above (refcnt was already decremented); the two blocks could
	 * only diverge if refcnt were modified concurrently — confirm
	 * whether the split is intentional.
	 */
	if (gsc->cap.refcnt == 0) {
		vb2_queue_release(&gsc->cap.vbq);
		gsc_ctrls_delete(gsc->cap.ctx);
	}

	pm_runtime_put_sync(&gsc->pdev->dev);

	return v4l2_fh_release(file);
}
/*
 * gsc_wait_reset - poll until the software reset completes.
 * @dev: the G-Scaler device
 *
 * The GSC_SW_RESET register reads back zero once the reset has
 * finished.  Polls with short sleeps for up to 10 jiffies.
 *
 * Returns 0 on completion, -EBUSY on timeout.
 */
int gsc_wait_reset(struct gsc_dev *dev)
{
	unsigned long timeo = jiffies + 10; /* timeout of 50ms */
	u32 status;

	for (; time_before(jiffies, timeo); usleep_range(10, 20)) {
		status = readl(dev->regs + GSC_SW_RESET);
		if (status == 0)
			return 0;
	}

	gsc_dbg("wait time : %d ms", jiffies_to_msecs(jiffies - timeo + 20));

	return -EBUSY;
}
/*
 * gsc_wait_stop - poll until the hardware operation status clears.
 * @dev: the G-Scaler device
 *
 * Waits for GSC_ENABLE_OP_STATUS to drop in the GSC_ENABLE register,
 * polling with short sleeps for up to 10 jiffies.
 *
 * Returns 0 once stopped, -EBUSY on timeout.
 */
int gsc_wait_stop(struct gsc_dev *dev)
{
	unsigned long timeo = jiffies + 10; /* timeout of 50ms */
	u32 status;

	for (; time_before(jiffies, timeo); usleep_range(10, 20)) {
		status = readl(dev->regs + GSC_ENABLE);
		if (!(status & GSC_ENABLE_OP_STATUS))
			return 0;
	}

	gsc_dbg("wait time : %d ms", jiffies_to_msecs(jiffies - timeo + 20));

	return -EBUSY;
}
/*
 * gsc_m2m_streamoff - VIDIOC_STREAMOFF handler for the m2m node.
 *
 * Drops the PM QoS request, disables secure-content protection via
 * SMC when DRM content was active, and delegates queue teardown to
 * the V4L2 m2m core.
 */
static int gsc_m2m_streamoff(struct file *file, void *fh,
			     enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;

	gsc_pm_qos_ctrl(gsc, GSC_QOS_OFF, 0, 0);

	if (gsc->protected_content) {
		int id = gsc->id + 3;

		exynos_smc(SMC_PROTECTION_SET, 0, id, 0);
		gsc_dbg("DRM disable");
	}

	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
/*
 * gsc_hw_set_global_alpha - program the output global alpha value.
 * @ctx: the device context whose destination frame is being set up
 *
 * Global alpha only applies to RGB output formats; for any other
 * format the function is a no-op.
 *
 * Improvement: the format check is now performed before touching the
 * hardware — previously GSC_OUT_CON was read (and the mask cleared in
 * the local copy) even when the function was about to bail out for a
 * non-RGB format, making a pointless register access.
 */
void gsc_hw_set_global_alpha(struct gsc_ctx *ctx)
{
	struct gsc_dev *dev = ctx->gsc_dev;
	struct gsc_frame *frame = &ctx->d_frame;
	u32 cfg;

	if (!is_rgb(frame->fmt->pixelformat)) {
		gsc_dbg("Not a RGB format");
		return;
	}

	cfg = readl(dev->regs + GSC_OUT_CON);
	cfg &= ~GSC_OUT_GLOBAL_ALPHA_MASK;
	cfg |= GSC_OUT_GLOBAL_ALPHA(ctx->gsc_ctrls.global_alpha->val);
	writel(cfg, dev->regs + GSC_OUT_CON);
}
/*
 * gsc_m2m_s_fmt_mplane - VIDIOC_S_FMT handler for the m2m node
 * (multi-planar).
 *
 * Adjusts the format via try_fmt, rejects changes while the matching
 * queue is streaming, stores the format and per-plane sizeimage into
 * the source (OUTPUT queue) or destination (CAPTURE queue) frame, and
 * flags the context so the hardware is reprogrammed on the next run.
 *
 * Returns 0 on success, -EBUSY while streaming, -EINVAL on an
 * unsupported pixel format.
 */
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct vb2_queue *vq;
	struct gsc_frame *frame;
	struct v4l2_pix_format_mplane *pix;
	int i, ret = 0;

	ret = gsc_m2m_try_fmt_mplane(file, fh, f);
	if (ret)
		return ret;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (vb2_is_streaming(vq)) {
		gsc_err("queue (%d) busy", f->type);
		return -EBUSY;
	}

	/* OUTPUT queue configures the source, CAPTURE the destination. */
	if (V4L2_TYPE_IS_OUTPUT(f->type)) {
		frame = &ctx->s_frame;
	} else {
		frame = &ctx->d_frame;
	}

	pix = &f->fmt.pix_mp;
	frame->fmt = find_format(&pix->pixelformat, NULL, 0);
	if (!frame->fmt)
		return -EINVAL;

	for (i = 0; i < frame->fmt->num_planes; i++)
		frame->payload[i] = pix->plane_fmt[i].sizeimage;

	gsc_set_frame_size(frame, pix->width, pix->height);

	/* Mark which side is configured and force hw reprogramming. */
	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
	else
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);

	gsc_dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);

	return 0;
}
/*
 * gsc_cap_stop_capture - stop the capture hardware and clean up state.
 *
 * A no-op if capture is not active.  Otherwise disables the hardware,
 * clears the stream bit, waits for the operation-status bit to drop
 * and finally cleans up the capture state.
 *
 * Returns 0 on success, or the error from gsc_wait_stop() /
 * gsc_capture_state_cleanup().
 */
static int gsc_cap_stop_capture(struct gsc_dev *gsc)
{
	int ret;

	if (!gsc_cap_active(gsc)) {
		gsc_warn("already stopped\n");
		return 0;
	}

	gsc_dbg("G-Scaler h/w disable control");
	gsc_hw_enable_control(gsc, false);
	clear_bit(ST_CAPT_STREAM, &gsc->state);

	/* The hardware must actually stop before state is cleaned up. */
	ret = gsc_wait_stop(gsc);
	if (ret) {
		gsc_err("GSCALER_OP_STATUS is operating\n");
		return ret;
	}

	return gsc_capture_state_cleanup(gsc);
}
/*
 * gsc_m2m_release - release an m2m file handle (basic variant).
 *
 * Releases the m2m context, the per-handle controls and the V4L2 file
 * handle, clears ST_M2M_OPEN on the last close, and frees the context.
 */
static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	gsc_dbg("pid: %d, state: 0x%lx, refcnt= %d",
		task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);
	kfree(ctx);

	return 0;
}
/*
 * gsc_capture_buf_queue - vb2 .buf_queue callback for the capture node.
 *
 * Programs the buffer's DMA addresses, unmasks its hardware slot and,
 * once enough buffers are available while streaming, kicks off the
 * pipeline (first time) or re-enables the hardware.
 *
 * Locking: runs under gsc->slock; the lock is deliberately dropped
 * before gsc_cap_pipeline_s_stream() / the subdev call, since starting
 * the pipeline may sleep.
 */
static void gsc_capture_buf_queue(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_capture_device *cap = &gsc->cap;
	struct exynos_md *mdev = gsc->mdev[MDEV_CAPTURE];
	int min_bufs, ret;
	unsigned long flags;

	spin_lock_irqsave(&gsc->slock, flags);

	ret = gsc_capture_set_addr(vb);
	if (ret)
		gsc_err("Failed to prepare output addr");

	/* Unmask this buffer's slot so the hardware may write into it. */
	gsc_hw_set_output_buf_masking(gsc, vb->v4l2_buf.index, 0);

	/* Need two buffers before starting unless only one was requested. */
	min_bufs = cap->reqbufs_cnt > 1 ? 2 : 1;

	if (vb2_is_streaming(&cap->vbq) &&
	    (gsc_hw_get_nr_unmask_bits(gsc) >= min_bufs) &&
	    !test_bit(ST_CAPT_STREAM, &gsc->state)) {
		if (!test_and_set_bit(ST_CAPT_PIPE_STREAM, &gsc->state)) {
			/* First start: release the lock before the
			 * possibly-sleeping pipeline start. */
			spin_unlock_irqrestore(&gsc->slock, flags);
			if (!mdev->is_flite_on)
				gsc_cap_pipeline_s_stream(gsc, 1);
			else
				v4l2_subdev_call(gsc->cap.sd_cap, video,
						s_stream, 1);
			return;
		}
		if (!test_bit(ST_CAPT_STREAM, &gsc->state)) {
			gsc_dbg("G-Scaler h/w enable control");
			gsc_hw_enable_control(gsc, true);
			set_bit(ST_CAPT_STREAM, &gsc->state);
		}
	}

	spin_unlock_irqrestore(&gsc->slock, flags);

	return;
}
static int gsc_capture_open(struct file *file) { struct gsc_dev *gsc = video_drvdata(file); int ret = v4l2_fh_open(file); if (ret) return ret; if (gsc_m2m_opened(gsc) || gsc_out_opened(gsc) || gsc_cap_opened(gsc)) { v4l2_fh_release(file); return -EBUSY; } set_bit(ST_CAPT_OPEN, &gsc->state); pm_runtime_get_sync(&gsc->pdev->dev); if (++gsc->cap.refcnt == 1) { ret = gsc_cap_pipeline_initialize(gsc, &gsc->cap.vfd->entity, true); if (ret < 0) { gsc_err("gsc pipeline initialization failed\n"); goto err; } ret = gsc_capture_ctrls_create(gsc); if (ret) { gsc_err("failed to create controls\n"); goto err; } } gsc_dbg("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state); return 0; err: pm_runtime_put_sync(&gsc->pdev->dev); v4l2_fh_release(file); clear_bit(ST_CAPT_OPEN, &gsc->state); return ret; }
/*
 * gsc_m2m_buf_queue - vb2 .buf_queue callback (sync-fence aware).
 *
 * If the buffer carries an acquire fence, it is parked on the
 * context's fence wait list and the fence worker is scheduled to
 * queue it once the fence signals; otherwise the buffer goes straight
 * to the V4L2 m2m core.
 */
static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	unsigned long flags;
	struct sync_fence *fence;

	gsc_dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);

	fence = vb->acquire_fence;
	if (fence) {
		/* Defer queueing until the acquire fence signals. */
		spin_lock_irqsave(&ctx->slock, flags);
		list_add_tail(&b->wait, &ctx->fence_wait_list);
		spin_unlock_irqrestore(&ctx->slock, flags);

		queue_work(ctx->gsc_dev->irq_workqueue, &ctx->fence_work);
	} else {
		if (ctx->m2m_ctx)
			v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
	}
}
/*
 * gsc_capture_start_streaming - vb2 .start_streaming callback.
 *
 * Starts the capture pipeline once enough buffer slots are unmasked
 * and the stream is not already running.  ST_CAPT_PIPE_STREAM guards
 * against starting the pipeline twice.  Depending on whether FLITE is
 * already on, either the full pipeline or just the capture subdev is
 * started.
 *
 * Always returns 0; an insufficient buffer count simply defers the
 * start to the buf_queue path.
 */
static int gsc_capture_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct gsc_ctx *ctx = q->drv_priv;
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_capture_device *cap = &gsc->cap;
	struct exynos_md *mdev = gsc->mdev[MDEV_CAPTURE];
	int min_bufs;

	/* Need two buffers before starting unless only one was requested. */
	min_bufs = cap->reqbufs_cnt > 1 ? 2 : 1;

	if ((gsc_hw_get_nr_unmask_bits(gsc) >= min_bufs) &&
	    !test_bit(ST_CAPT_STREAM, &gsc->state)) {
		if (!test_and_set_bit(ST_CAPT_PIPE_STREAM, &gsc->state)) {
			gsc_dbg("");
			if (!mdev->is_flite_on)
				gsc_cap_pipeline_s_stream(gsc, 1);
			else
				v4l2_subdev_call(gsc->cap.sd_cap, video,
						s_stream, 1);
		}
	}

	return 0;
}
/*
 * gsc_hw_get_done_output_buf_index - find the most recently completed
 * output buffer.
 * @dev: the G-Scaler device
 *
 * Reads the current buffer index and the per-buffer mask bits from
 * GSC_OUT_BASE_ADDR_Y_MASK, then walks backwards (ring order, wrapping
 * at 0) from the slot just before the current one, skipping masked
 * slots.
 *
 * Returns the completed buffer index, or -EBUSY if every slot other
 * than the current one is masked.
 */
int gsc_hw_get_done_output_buf_index(struct gsc_dev *dev)
{
	u32 cfg, curr_index, done_buf_index;
	unsigned long state_mask;
	u32 reqbufs_cnt = dev->cap.reqbufs_cnt;

	cfg = readl(dev->regs + GSC_OUT_BASE_ADDR_Y_MASK);
	curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
	gsc_dbg("curr_index : %d", curr_index);

	state_mask = cfg & GSC_OUT_BASE_ADDR_MASK;
	/* The hardware is writing curr_index; the slot before it is the
	 * candidate for the most recently finished frame. */
	done_buf_index = (curr_index == 0) ? reqbufs_cnt - 1 : curr_index - 1;

	do {
		/* Test done_buf_index whether masking or not */
		if (test_bit(done_buf_index, &state_mask))
			done_buf_index = (done_buf_index == 0) ?
				reqbufs_cnt - 1 : done_buf_index - 1;
		else
			return done_buf_index;
	} while (done_buf_index != curr_index);

	return -EBUSY;
}
void gsc_cap_pipeline_prepare(struct gsc_dev *gsc, struct media_entity *me) { struct media_entity_graph graph; struct v4l2_subdev *sd; media_entity_graph_walk_start(&graph, me); while ((me = media_entity_graph_walk_next(&graph))) { gsc_dbg("me->name : %s", me->name); if (media_entity_type(me) != MEDIA_ENT_T_V4L2_SUBDEV) continue; sd = media_entity_to_v4l2_subdev(me); switch (sd->grp_id) { case GSC_CAP_GRP_ID: gsc->pipeline.sd_gsc = sd; break; case FLITE_GRP_ID: gsc->pipeline.flite = sd; break; case SENSOR_GRP_ID: gsc->pipeline.sensor = sd; break; case CSIS_GRP_ID: gsc->pipeline.csis = sd; break; case FIMD_GRP_ID: gsc->pipeline.disp = sd; break; default: gsc_err("Unsupported group id"); break; } } gsc_dbg("gsc->pipeline.sd_gsc : 0x%p", gsc->pipeline.sd_gsc); gsc_dbg("gsc->pipeline.flite : 0x%p", gsc->pipeline.flite); gsc_dbg("gsc->pipeline.sensor : 0x%p", gsc->pipeline.sensor); gsc_dbg("gsc->pipeline.csis : 0x%p", gsc->pipeline.csis); gsc_dbg("gsc->pipeline.disp : 0x%p", gsc->pipeline.disp); }
/*
 * gsc_m2m_device_run - V4L2 m2m .device_run handler (sysmmu variant).
 *
 * Takes a runtime-PM reference (async when called from IRQ context),
 * optionally repairs the clock parent on EXYNOS5420, and then — under
 * ctx->slock — honours a pending stop request, programs DMA addresses,
 * reprograms the full hardware datapath when the context changed
 * (GSC_PARAMS), and finally kicks a one-frame operation guarded by the
 * op_timer watchdog.  On any error the runtime-PM reference is dropped
 * via the put_device path.
 */
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;

	/* pm_runtime_get_sync() may sleep, so only the async variant is
	 * usable from IRQ context.
	 * NOTE(review): !in_irq() does not rule out other atomic contexts
	 * (softirq, spinlock held) — confirm callers. */
	if (!in_irq()) {
		pm_runtime_get_sync(&gsc->pdev->dev);
		gsc->runtime_get_cnt++;
	} else {
		pm_runtime_get(&gsc->pdev->dev);
		gsc->runtime_get_cnt++;
		gsc_info("irq context");
	}
#ifdef CONFIG_SOC_EXYNOS5420
	/* Repair the GSC clock mux if bit 28 of CLKSRC_TOP5 was lost. */
	if (!(readl(EXYNOS5_CLKSRC_TOP5) & (1 << 28))) {
		if (clk_set_parent(gsc->clock[CLK_CHILD],
				gsc->clock[CLK_PARENT])) {
			u32 reg = readl(EXYNOS5_CLKSRC_TOP5);
			reg |= (1 << 28);
			writel(reg, EXYNOS5_CLKSRC_TOP5);
			gsc_err("Unable to set parent of gsc");
		}
		gsc_err("get_cnt : %d, put_cnt : %d",
			gsc->runtime_get_cnt, gsc->runtime_put_cnt);
		gsc_err("state : 0x%lx", gsc->state);
	}
#endif
	spin_lock_irqsave(&ctx->slock, flags);
	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		gsc_dbg("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
			gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	/* A pending stop request wins over starting a new frame. */
	is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0;
	ctx->state &= ~GSC_CTX_STOP_REQ;
	if (is_set) {
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_fill_addr(ctx);
	if (ret) {
		gsc_err("Wrong address");
		goto put_device;
	}

	/* Prefetch-buffer setup is skipped for protected content. */
	if (!gsc->protected_content) {
		struct gsc_frame *frame = &ctx->s_frame;
		exynos_sysmmu_set_pbuf(&gsc->pdev->dev,
				frame->fmt->nr_comp, ctx->prebuf);
	}

	if (ctx->state & GSC_PARAMS) {
		/* Full reprogram: reset first, then rebuild the datapath. */
		gsc_hw_set_sw_reset(gsc);
		ret = gsc_wait_reset(gsc);
		if (ret < 0) {
			gsc_err("gscaler s/w reset timeout");
			goto put_device;
		}
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);
		gsc_hw_set_one_frm_mode(gsc, true);
		gsc_hw_set_freerun_clock_mode(gsc, false);
		if (gsc_set_scaler_info(ctx)) {
			gsc_err("Scaler setup error");
			goto put_device;
		}
		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);
		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);
		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_h_coef(ctx);
		gsc_hw_set_v_coef(ctx);
		gsc_hw_set_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
	}

	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	ctx->state &= ~GSC_PARAMS;

	if (!test_and_set_bit(ST_M2M_RUN, &gsc->state)) {
		/* One frame mode sequence
		 GSCALER_ON on -> GSCALER_OP_STATUS is operating ->
		 GSCALER_ON off */
		gsc_hw_enable_control(gsc, true);
#ifdef GSC_PERF
		gsc->start_time = sched_clock();
#endif
		ret = gsc_wait_operating(gsc);
		if (ret < 0) {
			gsc_err("gscaler wait operating timeout");
			goto put_device;
		}
		/* Watchdog: fires if the frame-done IRQ never arrives. */
		gsc->op_timer.expires = (jiffies + 2 * HZ);
		add_timer(&gsc->op_timer);
	}

	spin_unlock_irqrestore(&ctx->slock, flags);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&ctx->slock, flags);
	pm_runtime_put_sync(&gsc->pdev->dev);
}
/*
 * gsc_m2m_device_run - V4L2 m2m .device_run handler (iovmm variant).
 *
 * Takes a runtime-PM reference (async when in IRQ context), then —
 * under ctx->slock — honours a pending stop request, programs the DMA
 * addresses, fully reprograms the hardware datapath when the context
 * changed (GSC_PARAMS), kicks a one-frame operation guarded by the
 * op_timer watchdog, and installs the IOVMM fault handler.  Errors
 * drop the runtime-PM reference via the put_device path.
 */
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;

	/* The sync variant may sleep; use the async one from IRQ context. */
	if (in_irq())
		ret = pm_runtime_get(&gsc->pdev->dev);
	else
		ret = pm_runtime_get_sync(&gsc->pdev->dev);
	if (ret < 0) {
		gsc_err("fail to pm_runtime_get");
		return;
	}

	spin_lock_irqsave(&ctx->slock, flags);
	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		gsc_dbg("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
			gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	/* A pending stop request wins over starting a new frame. */
	is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0;
	ctx->state &= ~GSC_CTX_STOP_REQ;
	if (is_set) {
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_fill_addr(ctx);
	if (ret) {
		gsc_err("Wrong address");
		goto put_device;
	}

	if (ctx->state & GSC_PARAMS) {
		/* Full reprogram: reset first, then rebuild the datapath. */
		gsc_hw_set_sw_reset(gsc);
		ret = gsc_wait_reset(gsc);
		if (ret < 0) {
			gsc_err("gscaler s/w reset timeout");
			goto put_device;
		}
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_deadlock_irq_mask(gsc, false);
		gsc_hw_set_read_slave_error_mask(gsc, false);
		gsc_hw_set_write_slave_error_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);
		gsc_hw_set_one_frm_mode(gsc, true);
		gsc_hw_set_freerun_clock_mode(gsc, false);
		if (gsc_set_scaler_info(ctx)) {
			gsc_err("Scaler setup error");
			goto put_device;
		}
		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);
		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);
		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_h_coef(ctx);
		gsc_hw_set_v_coef(ctx);
		/* Rotation is applied on whichever side is being scaled. */
		if (ctx->scaler.is_scaled_down)
			gsc_hw_set_output_rotation(ctx);
		else
			gsc_hw_set_input_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
		/* NOTE(review): is_rotation is not declared in this
		 * function's visible scope — presumably a macro defined
		 * elsewhere over ctx state; confirm it exists. */
		if (is_rotation) {
			ret = gsc_check_rotation_size(ctx);
			if (ret < 0)
				goto put_device;
		}
	}

	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	ctx->state &= ~GSC_PARAMS;

	if (!test_and_set_bit(ST_M2M_RUN, &gsc->state)) {
		/* One frame mode sequence
		 GSCALER_ON on -> GSCALER_OP_STATUS is operating ->
		 GSCALER_ON off */
		gsc_hw_enable_control(gsc, true);
#ifdef GSC_PERF
		gsc->start_time = sched_clock();
#endif
		ret = gsc_wait_operating(gsc);
		if (ret < 0) {
			gsc_err("gscaler wait operating timeout");
			goto put_device;
		}
		/* Watchdog: fires if the frame-done IRQ never arrives. */
		gsc->op_timer.expires = (jiffies + 2 * HZ);
		mod_timer(&gsc->op_timer, gsc->op_timer.expires);
	}

	spin_unlock_irqrestore(&ctx->slock, flags);

	iovmm_set_fault_handler(&gsc->pdev->dev,
			gsc_sysmmu_m2m_fault_handler, ctx);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&ctx->slock, flags);
	pm_runtime_put_sync(&gsc->pdev->dev);
}