Example #1
void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
		uint32_t old_irqmask)
{
	/* ack status for interrupts that are newly enabled (bits set in
	 * irqmask but not in old_irqmask) before writing the new mask:
	 */
	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
		irqmask ^ (irqmask & old_irqmask));
	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
}
Example #2
void mdp4_irq_uninstall(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
	mdp4_disable(mdp4_kms);
}
Example #3
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));

	mdp4_enable(mdp4_kms);
	mdp_update_vblank_mask(to_mdp_kms(kms),
			mdp4_crtc_vblank(crtc), false);
	mdp4_disable(mdp4_kms);
}
Example #4
static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	if (mdp4_kms->blank_cursor_iova)
		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
	if (mdp4_kms->blank_cursor_bo)
		drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
	kfree(mdp4_kms);
}
Example #5
static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
	unsigned i;

	for (i = 0; i < priv->num_crtcs; i++)
		mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
}
Example #6
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	mdp4_enable(mdp4_kms);

	/* see 119ecb7fd */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_crtc_vblank_get(crtc);
}
Example #7
int mdp4_irq_postinstall(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
	struct mdp_irq *error_handler = &mdp4_kms->error_handler;

	error_handler->irq = mdp4_irq_error_handler;
	error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
			MDP4_IRQ_EXTERNAL_INTF_UDERRUN;

	mdp_irq_register(mdp_kms, error_handler);

	return 0;
}
Example #8
static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);

	/* see 119ecb7fd */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_crtc_vblank_put(crtc);

	mdp4_disable(mdp4_kms);
}
Example #9
static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	int i, ncrtcs = state->dev->mode_config.num_crtc;

	/* see 119ecb7fd */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		drm_crtc_vblank_put(crtc);
	}

	mdp4_disable(mdp4_kms);
}
Example #10
static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct msm_mmu *mmu = mdp4_kms->mmu;

	if (mmu) {
		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
		mmu->funcs->destroy(mmu);
	}

	if (mdp4_kms->blank_cursor_iova)
		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
	if (mdp4_kms->blank_cursor_bo)
		drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
	kfree(mdp4_kms);
}
Example #11
static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct device *dev = mdp4_kms->dev->dev;
	struct msm_gem_address_space *aspace = kms->aspace;

	if (mdp4_kms->blank_cursor_iova)
		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
	drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu,
				iommu_ports, ARRAY_SIZE(iommu_ports));
		msm_gem_address_space_put(aspace);
	}

	if (mdp4_kms->rpm_enabled)
		pm_runtime_disable(dev);

	kfree(mdp4_kms);
}
Example #12
irqreturn_t mdp4_irq(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int id;
	uint32_t status;

	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);

	VERB("status=%08x", status);

	mdp_dispatch_irqs(mdp_kms, status);

	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);

	return IRQ_HANDLED;
}
Example #13
static int mdp4_hw_init(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp4_kms->dev;
	uint32_t version, major, minor, dmap_cfg, vg_cfg;
	unsigned long clk;
	int ret = 0;

	pm_runtime_get_sync(dev->dev);

	mdp4_enable(mdp4_kms);
	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	mdp4_disable(mdp4_kms);

	major = FIELD(version, MDP4_VERSION_MAJOR);
	minor = FIELD(version, MDP4_VERSION_MINOR);

	DBG("found MDP4 version v%d.%d", major, minor);

	if (major != 4) {
		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
				major, minor);
		ret = -ENXIO;
		goto out;
	}

	mdp4_kms->rev = minor;

	if (mdp4_kms->rev > 1) {
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
	}

	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

	/* max read pending cmd config, 3 pending requests: */
	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

	clk = clk_get_rate(mdp4_kms->clk);

	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
		vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
	} else {
		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
	}

	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

	if (mdp4_kms->rev >= 2)
		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

	/* disable CSC matrix / YUV by default: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

	if (mdp4_kms->rev > 1)
		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

	dev->mode_config.allow_fb_modifiers = true;

out:
	pm_runtime_put_sync(dev->dev);

	return ret;
}
Example #14
static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
{
	struct msm_drm_private *priv = encoder->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}
Example #15
static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}
Example #16
static struct mdp4_kms *get_kms(struct drm_plane *plane)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	return to_mdp4_kms(priv->kms);
}
Example #17
void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
}
Example #18
void mdp4_irq_preinstall(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	/* clear any stale interrupt status before interrupts get enabled */
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
}
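
Taken together, the examples above are the MDP4 backend's KMS callbacks. As a rough illustration of how they fit together, the sketch below shows how such callbacks are typically registered with the msm KMS core (modeled on drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c); the exact field names and the set of hooks differ between kernel versions, so treat it as an approximation rather than a drop-in snippet.

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		/* msm_kms_funcs hooks implemented by the examples above */
		.hw_init         = mdp4_hw_init,
		.irq_preinstall  = mdp4_irq_preinstall,
		.irq_postinstall = mdp4_irq_postinstall,
		.irq_uninstall   = mdp4_irq_uninstall,
		.irq             = mdp4_irq,
		.enable_vblank   = mdp4_enable_vblank,  /* counterpart to mdp4_disable_vblank */
		.disable_vblank  = mdp4_disable_vblank,
		.prepare_commit  = mdp4_prepare_commit,
		.complete_commit = mdp4_complete_commit,
		.destroy         = mdp4_destroy,
	},
	/* mdp_kms-level hook used by the shared MDP IRQ helpers */
	.set_irqmask = mdp4_set_irqmask,
};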