Example #1
0
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
	u32 ctl_blend_op_flags)
{
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	int i, start_stage;

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; i < start_stage + stage_cnt; i++) {
		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_ext_cfg |= cursor_blend_value(ctl->cursor_id, STAGE6);

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
		blend_cfg, blend_ext_cfg);

	return 0;
}
Example #2
0
static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
	unsigned long flags;
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	int i;

	spin_lock_irqsave(&ctl->hw_lock, flags);

	for (i = 0; i < ctl_mgr->nlm; i++) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
	}

	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
Example #3
0
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	unsigned long flags;
	struct mdp5_interface *intf = pipeline->intf;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	if (pipeline->r_mixer)
		ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
			  MDP5_CTL_OP_PACK_3D(1);

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
Example #4
0
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate that several registers have been
 * programmed and are safe to update to the back copy of the double-buffered
 * registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; when that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return: the H/W flush bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct op_mode *pipeline = &ctl->pipeline;
	unsigned long flags;

	pipeline->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
			pipeline->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return flush_mask;
}
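
To make the flush flow above concrete, here is a minimal caller sketch (not taken from the driver; crtc_flush() and its lm/cursor_id parameters are assumptions for illustration only): the per-block flush mask helpers referenced elsewhere in this file are ORed together and handed to mdp5_ctl_commit(), which adds MDP5_CTL_FLUSH_CTL itself when the pending CTL trigger overlaps the mask.

/* Hypothetical caller, for illustration only. */
static u32 crtc_flush(struct mdp5_ctl *ctl, int lm, int cursor_id)
{
	u32 flush_mask = 0;

	flush_mask |= mdp_ctl_flush_mask_lm(lm);		/* layer mixer */
	flush_mask |= mdp_ctl_flush_mask_cursor(cursor_id);	/* cursor block */

	/* mdp5_ctl_commit() ORs in MDP5_CTL_FLUSH_CTL when required */
	return mdp5_ctl_commit(ctl, flush_mask);
}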
Example #5
0
/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

	if (enable)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
	ctl->cursor_on = enable;

	return 0;
}
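
Per the note above, the CTL register itself must also be flushed after this call. A minimal sketch of that pattern, assuming a hypothetical enable_hw_cursor() wrapper (not part of the driver): since mdp5_ctl_set_cursor() records the cursor flush bits in ctl->pending_ctl_trigger, mdp5_ctl_commit() will OR in MDP5_CTL_FLUSH_CTL on its own when those bits appear in the flush mask.

/* Hypothetical wrapper, for illustration only. */
static int enable_hw_cursor(struct mdp5_ctl *ctl, int cursor_id)
{
	int ret;

	ret = mdp5_ctl_set_cursor(ctl, cursor_id, true);
	if (ret)
		return ret;

	/*
	 * pending_ctl_trigger now holds the cursor flush bits, so
	 * mdp5_ctl_commit() also sets MDP5_CTL_FLUSH_CTL for us.
	 */
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_cursor(cursor_id));

	return 0;
}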
Example #6
0
/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to
 * be sent in order to kick off operation and activate all layers,
 * e.g. for DSI command mode and writeback.
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
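
The comment above names the two cases that need a START signal. As a rough sketch of the condition a start_signal_needed() helper (referenced from mdp5_ctl_commit()) could check, reusing the interface types seen in set_ctl_op(); this is an illustration, not the driver's actual implementation:

/* Sketch only; not the driver's start_signal_needed(). */
static bool needs_start_signal(const struct mdp5_interface *intf)
{
	switch (intf->type) {
	case INTF_WB:
		/* writeback is always kicked off via START */
		return true;
	case INTF_DSI:
		/* DSI only needs START in command mode */
		return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}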
Example #7
0
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}
Example #8
0
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
	unsigned long flags;

	if (ctl->cursor_on)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);

	return 0;
}
Example #9
0
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate that several registers have been
 * programmed and are safe to update to the back copy of the double-buffered
 * registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; when that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return: the H/W flush bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
		    struct mdp5_pipeline *pipeline,
		    u32 flush_mask, bool start)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (!start) {
		ctl->flush_mask |= flush_mask;
		return curr_ctl_flush_mask;
	} else {
		flush_mask |= ctl->flush_mask;
		ctl->flush_mask = 0;
	}

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return curr_ctl_flush_mask;
}
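
This version adds a start flag: with start == false the masked bits are only accumulated in ctl->flush_mask, and only a later call with start == true writes CTL_FLUSH and, if needed, sends the START signal. A hypothetical two-step caller illustrating that behavior (commit_in_two_steps() is not part of the driver):

/* Hypothetical caller, for illustration only. */
static void commit_in_two_steps(struct mdp5_ctl *ctl,
				struct mdp5_pipeline *pipeline, int lm)
{
	/* start == false: bits are only accumulated in ctl->flush_mask */
	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_lm(lm), false);

	/*
	 * start == true: the accumulated bits are written to CTL_FLUSH and
	 * a START signal is sent if the pipeline needs one.
	 */
	mdp5_ctl_commit(ctl, pipeline, 0, true);
}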
Example #10
0
/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_ext_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_ext_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, lm));

	/*
	 * For now, just use the topmost stage (FG) for the cursor layer. This
	 * needs to be reported back to the pipe list so that the blend
	 * strategy is aware of it and does not reuse this stage for another
	 * pipe's blending.
	 */
	if (enable)
		blend_ext_cfg |= cursor_blend_value(cursor_id, STAGE6);
	else
		blend_ext_cfg &= ~(cursor_blend_mask(cursor_id));

	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, lm), blend_ext_cfg);
	ctl->cursor_on = enable;
	ctl->cursor_id = cursor_id;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = (mdp_ctl_flush_mask_cursor(cursor_id) |
				    mdp_ctl_flush_mask_lm(ctl->lm));

	return 0;
}
Example #11
0
/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;

	if (unlikely(WARN_ON(!mixer))) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
			ctl->id);
		return -EINVAL;
	}

	if (pipeline->r_mixer) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));

	if (enable)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}
Example #12
0
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
			     mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
			     mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
		blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
		    r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}