Example #1
0
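/*
 * Poll GSC_ENABLE until the operation-status bit (GSC_ENABLE_OP_STATUS)
 * clears; if the engine is still busy when the timeout expires, fall back
 * to a software reset before returning.
 */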
int gsc_wait_stop(struct gsc_dev *dev)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(50); /* 50 ms timeout */
	u32 cfg;
	int ret;

	while (time_before(jiffies, timeo)) {
		cfg = readl(dev->regs + GSC_ENABLE);
		if (!(cfg & GSC_ENABLE_OP_STATUS))
			return 0;
		usleep_range(10, 20);
	}
	/*
	 * Workaround until the next chip revision: if FIMD is stopped
	 * before GSC, GSC does not complete its operation.
	 */
	gsc_hw_set_sw_reset(dev);
	ret = gsc_wait_reset(dev);
	if (ret < 0) {
		gsc_err("gscaler s/w reset timeout");
		return ret;
	}
	gsc_hw_set_pixelasync_reset_output(dev);
	gsc_info("wait time : %d ms", jiffies_to_msecs(jiffies - timeo + 10));

	return 0;
}
Example #2
0
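/*
 * VIDIOC_STREAMON handler for the capture video node: starts the media
 * pipeline on the display or sensor entity, validates the capture links,
 * software-resets the gscaler and then hands control to videobuf2.
 */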
static int gsc_capture_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct gsc_dev *gsc = video_drvdata(file);
	struct gsc_pipeline *p = &gsc->pipeline;
	int ret;

	if (gsc_cap_active(gsc))
		return -EBUSY;

	if (p->disp) {
		gsc_pm_qos_ctrl(gsc, GSC_QOS_ON, 267000, 200000);
		media_entity_pipeline_start(&p->disp->entity, p->pipe);
	} else if (p->sensor) {
		media_entity_pipeline_start(&p->sensor->entity, p->pipe);
	} else {
		gsc_err("Error pipeline");
		return -EPIPE;
	}

	ret = gsc_cap_link_validate(gsc);
	if (ret)
		return ret;

	gsc_hw_set_sw_reset(gsc);
	ret = gsc_wait_reset(gsc);
	if (ret < 0) {
		gsc_err("gscaler s/w reset timeout");
		return ret;
	}
	gsc_hw_set_output_buf_mask_all(gsc);
	return vb2_streamon(&gsc->cap.vbq, type);
}
Example #3
0
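/*
 * videobuf2 start_streaming callback: resets the gscaler, unmasks the
 * output buffers and kicks off the capture pipeline once enough buffers
 * have been queued.
 */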
static int gsc_capture_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct gsc_ctx *ctx = q->drv_priv;
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_capture_device *cap = &gsc->cap;
	int min_bufs, ret;

	gsc_hw_set_sw_reset(gsc);
	ret = gsc_wait_reset(gsc);
	if (ret < 0) {
		gsc_err("gscaler s/w reset timeout");
		return ret;
	}
	gsc_hw_set_output_buf_mask_all(gsc);

	min_bufs = cap->reqbufs_cnt > 1 ? 2 : 1;
	if ((gsc_hw_get_nr_unmask_bits(gsc) >= min_bufs) &&
		!test_bit(ST_CAPT_STREAM, &gsc->state)) {
		if (!test_and_set_bit(ST_CAPT_PIPE_STREAM, &gsc->state)) {
			gsc_info("");
			gsc_cap_pipeline_s_stream(gsc, 1);
		}
	}

	return 0;
}
Example #4
0
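/*
 * v4l2-mem2mem device_run callback: takes a runtime-PM reference,
 * reprograms the hardware when the active context has changed, sets the
 * source/destination buffer addresses and starts a one-frame operation.
 */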
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;

	if (in_irq())
		ret = pm_runtime_get(&gsc->pdev->dev);
	else
		ret = pm_runtime_get_sync(&gsc->pdev->dev);

	if (ret < 0) {
		gsc_err("fail to pm_runtime_get");
		return;
	}

	spin_lock_irqsave(&ctx->slock, flags);
	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		gsc_dbg("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
			  gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0;
	ctx->state &= ~GSC_CTX_STOP_REQ;
	if (is_set) {
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_fill_addr(ctx);
	if (ret) {
		gsc_err("Wrong address");
		goto put_device;
	}

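	/*
	 * Full reprogramming path for a new context: software reset,
	 * buffer/IRQ masking, then scaler, format and rotation setup.
	 */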
	if (ctx->state & GSC_PARAMS) {
		gsc_hw_set_sw_reset(gsc);
		ret = gsc_wait_reset(gsc);
		if (ret < 0) {
			gsc_err("gscaler s/w reset timeout");
			goto put_device;
		}
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_deadlock_irq_mask(gsc, false);
		gsc_hw_set_read_slave_error_mask(gsc, false);
		gsc_hw_set_write_slave_error_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);
		gsc_hw_set_one_frm_mode(gsc, true);
		gsc_hw_set_freerun_clock_mode(gsc, false);

		if (gsc_set_scaler_info(ctx)) {
			gsc_err("Scaler setup error");
			goto put_device;
		}

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);

		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);

		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_h_coef(ctx);
		gsc_hw_set_v_coef(ctx);
		if (ctx->scaler.is_scaled_down)
			gsc_hw_set_output_rotation(ctx);
		else
			gsc_hw_set_input_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
		if (is_rotation) {
			ret = gsc_check_rotation_size(ctx);
			if (ret < 0)
				goto put_device;
		}
	}

	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	ctx->state &= ~GSC_PARAMS;

	if (!test_and_set_bit(ST_M2M_RUN, &gsc->state)) {
		/*
		 * One-frame mode sequence:
		 * GSCALER_ON on -> GSCALER_OP_STATUS operating -> GSCALER_ON off
		 */
		gsc_hw_enable_control(gsc, true);
#ifdef GSC_PERF
		gsc->start_time = sched_clock();
#endif
		ret = gsc_wait_operating(gsc);
		if (ret < 0) {
			gsc_err("gscaler wait operating timeout");
			goto put_device;
		}
		gsc->op_timer.expires = (jiffies + 2 * HZ);
		mod_timer(&gsc->op_timer, gsc->op_timer.expires);
	}

	spin_unlock_irqrestore(&ctx->slock, flags);

	iovmm_set_fault_handler(&gsc->pdev->dev,
			gsc_sysmmu_m2m_fault_handler, ctx);

	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&ctx->slock, flags);
	pm_runtime_put_sync(&gsc->pdev->dev);
}
Example #5
0
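/*
 * Variant of gsc_m2m_device_run() with an Exynos5420-specific clock-mux
 * workaround and per-context system MMU buffer (pbuf) setup for
 * non-protected content.
 */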
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;

	if (!in_irq()) {
		pm_runtime_get_sync(&gsc->pdev->dev);
		gsc->runtime_get_cnt++;
	} else {
		pm_runtime_get(&gsc->pdev->dev);
		gsc->runtime_get_cnt++;
		gsc_info("irq context");
	}

#ifdef CONFIG_SOC_EXYNOS5420
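	/*
	 * Exynos5420 workaround: if the GSC source-clock mux bit in
	 * CLKSRC_TOP5 has been cleared, re-select the expected parent clock,
	 * or force the mux bit directly when clk_set_parent() fails.
	 */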
	if (!(readl(EXYNOS5_CLKSRC_TOP5) & (1 << 28))) {
		if (clk_set_parent(gsc->clock[CLK_CHILD],
			gsc->clock[CLK_PARENT])) {
			u32 reg = readl(EXYNOS5_CLKSRC_TOP5);
			reg |= (1 << 28);
			writel(reg, EXYNOS5_CLKSRC_TOP5);
			gsc_err("Unable to set parent of gsc");
		}
		gsc_err("get_cnt : %d, put_cnt : %d",
			gsc->runtime_get_cnt, gsc->runtime_put_cnt);
		gsc_err("state : 0x%lx", gsc->state);
	}
#endif
	spin_lock_irqsave(&ctx->slock, flags);
	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		gsc_dbg("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
			  gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0;
	ctx->state &= ~GSC_CTX_STOP_REQ;
	if (is_set) {
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_fill_addr(ctx);
	if (ret) {
		gsc_err("Wrong address");
		goto put_device;
	}

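	/*
	 * Program the system MMU pbuf registers for the source planes unless
	 * the buffers hold protected (secure) content.
	 */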
	if (!gsc->protected_content) {
		struct gsc_frame *frame = &ctx->s_frame;
		exynos_sysmmu_set_pbuf(&gsc->pdev->dev, frame->fmt->nr_comp,
				ctx->prebuf);
	}

	if (ctx->state & GSC_PARAMS) {
		gsc_hw_set_sw_reset(gsc);
		ret = gsc_wait_reset(gsc);
		if (ret < 0) {
			gsc_err("gscaler s/w reset timeout");
			goto put_device;
		}
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);
		gsc_hw_set_one_frm_mode(gsc, true);
		gsc_hw_set_freerun_clock_mode(gsc, false);

		if (gsc_set_scaler_info(ctx)) {
			gsc_err("Scaler setup error");
			goto put_device;
		}

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);

		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);

		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_h_coef(ctx);
		gsc_hw_set_v_coef(ctx);
		gsc_hw_set_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
	}

	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	ctx->state &= ~GSC_PARAMS;

	if (!test_and_set_bit(ST_M2M_RUN, &gsc->state)) {
		/*
		 * One-frame mode sequence:
		 * GSCALER_ON on -> GSCALER_OP_STATUS operating -> GSCALER_ON off
		 */
		gsc_hw_enable_control(gsc, true);
#ifdef GSC_PERF
		gsc->start_time = sched_clock();
#endif
		ret = gsc_wait_operating(gsc);
		if (ret < 0) {
			gsc_err("gscaler wait operating timeout");
			goto put_device;
		}
		gsc->op_timer.expires = (jiffies + 2 * HZ);
		add_timer(&gsc->op_timer);
	}

	spin_unlock_irqrestore(&ctx->slock, flags);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&ctx->slock, flags);
	pm_runtime_put_sync(&gsc->pdev->dev);
}
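/*
 * Subdev s_stream handler for the capture path: on enable, switches the
 * gscaler to local (writeback) input, software-resets it and programs the
 * full output pipeline before enabling the smart interface and QoS.
 */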
static int gsc_capture_subdev_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct gsc_dev *gsc = v4l2_get_subdevdata(sd);
	struct gsc_capture_device *cap = &gsc->cap;
	struct gsc_ctx *ctx = cap->ctx;
	int ret;

	if (enable) {
		gsc_hw_set_local_src(gsc, true);
		gsc_hw_set_sysreg_writeback(gsc, true);
		gsc_hw_set_sw_reset(gsc);
		ret = gsc_wait_reset(gsc);
		if (ret < 0) {
			gsc_err("gscaler s/w reset timeout");
			return ret;
		}
		ret = gsc_set_scaler_info(ctx);
		if (ret) {
			gsc_err("Scaler setup error");
			return ret;
		}
		gsc_hw_set_output_buf_fixed(gsc);
		gsc_hw_set_output_buf_masking(gsc, 0, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_deadlock_irq_mask(gsc, false);
		gsc_hw_set_read_slave_error_mask(gsc, false);
		gsc_hw_set_write_slave_error_mask(gsc, false);
		gsc_hw_set_overflow_irq_mask(gsc, true);
		gsc_hw_set_gsc_irq_enable(gsc, true);
		gsc_hw_set_one_frm_mode(gsc, true);
		gsc_hw_set_freerun_clock_mode(gsc, false);

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);

		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);

		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_h_coef(ctx);
		gsc_hw_set_v_coef(ctx);

		gsc_hw_set_output_rotation(ctx);

		gsc_hw_set_global_alpha(ctx);
		if (is_rotation) {
			ret = gsc_check_rotation_size(ctx);
			if (ret < 0) {
				gsc_err("Scaler setup error");
				return ret;
			}
		}

		gsc_hw_set_for_wb(gsc);
		gsc_hw_set_lookup_table(gsc);
		gsc_hw_set_smart_if_con(gsc, true);
		gsc_hw_set_buscon_realtime(gsc);
		gsc_hw_set_qos_enable(gsc);
	}

	return 0;
}