Example #1
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
	u32 ctl_blend_op_flags)
{
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	int i, start_stage;

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; i < start_stage + stage_cnt; i++) {
		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	/* the cursor layer, when enabled, sits at the topmost stage (STAGE6) */
	if (ctl->cursor_on)
		blend_ext_cfg |= cursor_blend_value(ctl->cursor_id, STAGE6);

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
		blend_cfg, blend_ext_cfg);

	return 0;
}
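
For context, here is a hypothetical caller sketch, not taken from the driver source: it stages two pipes over a border-color base layer and then flushes the layer mixer. The SSPP_* pipe IDs and the mdp5_ctl_commit(ctl, flush_mask) signature are assumptions based on this era of the driver; mdp_ctl_flush_mask_lm() is the helper used by the function itself.

/*
 * Hypothetical usage sketch (not from the driver source): SSPP_* pipe
 * IDs and the mdp5_ctl_commit() signature are assumptions.
 */
static void example_blend_setup(struct mdp5_ctl *ctl, int lm)
{
	u8 stage[STAGE6 + 1] = { 0 };

	/*
	 * Two pipes above a border-color base layer; entries outside
	 * [STAGE0, STAGE0 + 2) are never read by mdp5_ctl_blend().
	 */
	stage[STAGE0] = SSPP_VIG0;
	stage[STAGE1] = SSPP_RGB0;

	mdp5_ctl_blend(ctl, stage, 2, MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT);

	/* the new blend config only takes effect once the LM is flushed */
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_lm(lm));
}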
Example #2
/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_ext_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_ext_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, lm));

	/*
	 * For now, just use the topmost stage FG for the cursor layer; we
	 * need to report this information back to the pipe list and make
	 * sure the blend strategy is aware of it and doesn't reuse this
	 * stage for another pipe's blending.
	 */
	if (enable)
		blend_ext_cfg |= cursor_blend_value(cursor_id, STAGE6);
	else
		blend_ext_cfg &= ~(cursor_blend_mask(cursor_id));

	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, lm), blend_ext_cfg);
	ctl->cursor_on = enable;
	ctl->cursor_id = cursor_id;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id) |
				   mdp_ctl_flush_mask_lm(ctl->lm);

	return 0;
}
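
Per the note above the function, the caller must flush the CTL registers afterwards. A minimal sketch of that sequence, assuming the mdp5_ctl_commit(ctl, flush_mask) signature from this era of the driver; mdp_ctl_flush_mask_cursor() is taken from the function's own trigger mask.

/*
 * Hypothetical caller sketch (not from the driver source): enable the
 * cursor, then flush so the new LAYER_EXT config takes effect. The
 * mdp5_ctl_commit() signature is an assumption.
 */
static int example_cursor_enable(struct mdp5_ctl *ctl, int cursor_id)
{
	int ret;

	ret = mdp5_ctl_set_cursor(ctl, cursor_id, true);
	if (ret)
		return ret;

	/* nothing changes on screen until the cursor bits are flushed */
	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_cursor(cursor_id));

	return 0;
}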
Example #3
/*
 * Clear the LAYER and LAYER_EXT registers on every layer mixer, so that
 * stale blend state cannot leak into the configuration programmed next
 * (mdp5_ctl_blend() below calls this before reprogramming).
 */
static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
	unsigned long flags;
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	int i;

	spin_lock_irqsave(&ctl->hw_lock, flags);

	for (i = 0; i < ctl_mgr->nlm; i++) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
	}

	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
Example #4
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
			     mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
			     mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
		blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
		    r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}
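
To illustrate the dual layer mixer path, a hypothetical caller might stage one pipe per mixer as below. PIPE_LEFT/PIPE_RIGHT and MAX_PIPE_STAGE come from the function's own parameters; the SSPP_* pipe IDs and the zero-valued SSPP_NONE placeholder are assumptions.

/*
 * Hypothetical caller sketch (not from the driver source): one pipe
 * per mixer over a border-color base. SSPP_* IDs are assumptions.
 */
static void example_dual_lm_blend(struct mdp5_ctl *ctl,
				  struct mdp5_pipeline *pipeline)
{
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };

	stage[STAGE0][PIPE_LEFT] = SSPP_VIG0;		/* left half */
	if (pipeline->r_mixer)
		r_stage[STAGE0][PIPE_LEFT] = SSPP_VIG1;	/* right half */

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, 1,
		       MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT);
}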