Example #1
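/*
 * Program blending for this CTL's layer mixer: each pipe in @stage is
 * assigned to its hardware blend stage. With
 * MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT, the base stage is reserved for the
 * border color and pipe stages start at STAGE0. The LAYER/LAYER_EXT
 * registers are written under hw_lock (keeping an enabled cursor at
 * STAGE6), and the LM flush bit is queued in pending_ctl_trigger.
 */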
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
	u32 ctl_blend_op_flags)
{
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	int i, start_stage;

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; i < start_stage + stage_cnt; i++) {
		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_ext_cfg |= cursor_blend_value(ctl->cursor_id, STAGE6);

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
		blend_cfg, blend_ext_cfg);

	return 0;
}
Example #2
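/*
 * Bind this CTL to layer mixer @lm and interface @intf. The CTL must
 * have been allocated against the same INTF number, otherwise -EINVAL
 * is returned. The start mask is rebuilt from the LM and encoder flush
 * bits, and physical (non-virtual) interfaces are also routed to a
 * display intf before the CTL op mode is set.
 */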
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
		struct mdp5_interface *intf, int lm)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
		dev_err(mdp5_kms->dev->dev,
			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
			ctl->id, ctl->pipeline.intf.num, intf->num);
		return -EINVAL;
	}

	ctl->lm = lm;

	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));

	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
				   mdp_ctl_flush_mask_encoder(intf);

	/* Virtual interfaces need not set a display intf (e.g. Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, intf);

	return 0;
}
Example #3
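/*
 * Some flush bits requested by software have no dedicated bit in this
 * target's hardware flush mask. Map each such bit onto the bit that
 * does trigger the update (the cursor shares the LM bit here) and
 * return the extra bits the caller should OR into its flush mask.
 */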
static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);

	return sw_mask;
}
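
A hedged sketch of how the returned bits might be folded into a commit
path; the surrounding caller is hypothetical, and only fix_sw_flush()
and the flush_hw_mask field come from this example:

	/* Hypothetical commit-path fragment: OR in the software-fixed
	 * bits, then clamp the mask to what the hardware implements. */
	flush_mask |= fix_sw_flush(ctl, flush_mask);
	flush_mask &= ctl_mgr->flush_hw_mask;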
Example #4
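/*
 * Rebuild pipeline->start_mask: the LM flush bit is always required,
 * and writeback interfaces additionally need the encoder flush bit,
 * since their address registers are reprogrammed on every page flip.
 */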
static void refill_start_mask(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;
	struct mdp5_interface *intf = &ctl->pipeline.intf;

	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);

	/*
	 * The writeback encoder needs to program & flush
	 * its address registers on each page flip.
	 */
	if (intf->type == INTF_WB)
		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}
Example #5
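/*
 * Simpler variant of the blend setup (compare Example #1): the caller
 * passes a fully formed blend_cfg and this function only patches the
 * cursor output bit in or out before writing the LAYER register under
 * hw_lock and queueing the LM flush bit.
 */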
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
	unsigned long flags;

	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);

	return 0;
}
Example #6
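/*
 * Attach @intf to this CTL: same flow as mdp5_ctl_set_pipeline() in
 * Example #2, minus the INTF ownership check and the explicit lm
 * argument.
 */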
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));

	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
				   mdp_ctl_flush_mask_encoder(intf);

	/* Virtual interfaces need not set a display intf (e.g. Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, intf);

	return 0;
}
Example #7
/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_ext_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_ext_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, lm));

	/*
	 * For now, just use the topmost stage FG for the cursor layer. This
	 * needs to be reported back to the pipe list, to make sure the blend
	 * strategy is aware of it and doesn't reuse this stage for another
	 * pipe's blending.
	 */
	if (enable)
		blend_ext_cfg |= cursor_blend_value(cursor_id, STAGE6);
	else
		blend_ext_cfg &= ~(cursor_blend_mask(cursor_id));

	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, lm), blend_ext_cfg);
	ctl->cursor_on = enable;
	ctl->cursor_id = cursor_id;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id) |
				   mdp_ctl_flush_mask_lm(ctl->lm);

	return 0;
}
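
Per the note above, the caller must follow up with a CTL flush. A
minimal sketch, assuming a mdp5_ctl_commit(ctl, mask) signature and a
mdp_ctl_flush_mask_ctl() helper taking the CTL id; both are assumptions
inferred from the note, not taken from this listing:

	/* Hypothetical caller fragment; the signatures of
	 * mdp5_ctl_commit() and mdp_ctl_flush_mask_ctl() are assumed,
	 * per the note above. */
	if (!mdp5_ctl_set_cursor(ctl, cursor_id, true))
		mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_ctl(ctl->id));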
Example #8
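/*
 * Dual-mixer variant of the blend setup: each stage holds up to two
 * pipes (PIPE_LEFT/PIPE_RIGHT), and when the pipeline spans a second
 * mixer (r_mixer), the r_stage array is programmed into that mixer's
 * LAYER/LAYER_EXT registers as well. Flush bits for every touched LM
 * are queued in pending_ctl_trigger.
 */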
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
			     mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
			     mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
		blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
		    r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}