Example #1
0
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
Example #2
0
/**
 * intel_csr_load_program() - write the firmware from memory to register.
 * @dev_priv: i915 drm device.
 *
 * CSR firmware is read from a .bin file and kept in internal memory one time.
 * Every time the display comes back from a low-power state, this function is called to
 * copy the firmware from internal memory to registers.
 */
void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
	u32 *payload = dev_priv->csr.dmc_payload;
	uint32_t i, fw_size;

	if (!IS_GEN9(dev_priv)) {
		DRM_ERROR("No CSR support available for this platform\n");
		return;
	}

	if (!dev_priv->csr.dmc_payload) {
		DRM_ERROR("Tried to program CSR with empty payload\n");
		return;
	}

	fw_size = dev_priv->csr.dmc_fw_size;
	for (i = 0; i < fw_size; i++)
		I915_WRITE(CSR_PROGRAM(i), payload[i]);

	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
		I915_WRITE(dev_priv->csr.mmioaddr[i],
			   dev_priv->csr.mmiodata[i]);
	}

	dev_priv->csr.dc_state = 0;

	gen9_set_dc_state_debugmask(dev_priv);
}
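The kernel-doc above notes that the cached DMC payload has to be re-written to the CSR program registers every time the display exits a low-power state. A minimal caller sketch of that resume path, assuming a hypothetical helper name (the real driver wires this into its display power / runtime-resume code):

/* Hypothetical resume-path sketch: after display power is restored,
 * re-program the firmware cached in dev_priv->csr.dmc_payload.
 */
static void example_display_resume(struct drm_i915_private *dev_priv)
{
	/* ... bring the display power wells back up ... */

	intel_csr_load_program(dev_priv);
}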
Example #3
0
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->vma = cache->vma;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
		params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
						32 * fbc->threshold) * 8;
}
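The memset() comment above only makes sense together with the comparison it enables: because every byte of the struct, padding included, is zero-initialized, two parameter blocks can be compared with a single memcmp(). A minimal sketch of such a helper, with an illustrative name (the driver's actual comparison code may differ):

/* Bytewise comparison is safe only because intel_fbc_get_reg_params()
 * zeroes the whole struct before filling it in.
 */
static bool example_fbc_reg_params_equal(const struct intel_fbc_reg_params *a,
					 const struct intel_fbc_reg_params *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}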
Example #4
0
static void guc_prepare_xfer(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/* Must program this register before loading the ucode with DMA */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
				     GUC_ENABLE_READ_CACHE_LOGIC |
				     GUC_ENABLE_MIA_CACHING |
				     GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
				     GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
				     GUC_ENABLE_MIA_CLOCK_GATING);

	if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev_priv)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us (in 10ns units) before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}
}
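For reference, the 0x1FF written to GUC_ARAT_C6DIS is 511 decimal; at 10 ns per unit that is roughly 5.1 µs, which is the "5us" the inline comment refers to.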
Example #5
0
/**
 * intel_csr_load_program() - write the firmware from memory to register.
 * @dev: drm device.
 *
 * CSR firmware is read from a .bin file and kept in internal memory one time.
 * Every time the display comes back from a low-power state, this function is called to
 * copy the firmware from internal memory to registers.
 */
void intel_csr_load_program(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	__be32 *payload = dev_priv->csr.dmc_payload;
	uint32_t i, fw_size;

	if (!IS_GEN9(dev)) {
		DRM_ERROR("No CSR support available for this platform\n");
		return;
	}

	mutex_lock(&dev_priv->csr_lock);
	fw_size = dev_priv->csr.dmc_fw_size;
	for (i = 0; i < fw_size; i++)
		I915_WRITE(CSR_PROGRAM_BASE + i * 4,
			(u32 __force)payload[i]);

	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
		I915_WRITE(dev_priv->csr.mmioaddr[i],
			dev_priv->csr.mmiodata[i]);
	}

	dev_priv->csr.state = FW_LOADED;
	mutex_unlock(&dev_priv->csr_lock);
}
Example #6
0
/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize vGPU type list based on available resource.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* A vGPU type name is defined as GVTg_Vx_y, where 'x' encodes the
	 * physical GPU generation and 'y' is the maximum number of vGPU
	 * instances a user can create of this type on one physical GPU.
	 *
	 * Depending on the physical SKU resources, we might see vGPU types
	 * like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
	 * different types of vGPU on the same physical GPU depending on the
	 * available resources. Each vGPU type has an "avail_instance" field
	 * to indicate how many vGPU instances can be created for this type.
	 *
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = 4;

	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / min_low == 0)
			break;
		gvt->types[i].low_gm_size = min_low;
		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
		gvt->types[i].fence = 4;
		gvt->types[i].max_instance = min(low_avail / min_low,
						 high_avail / gvt->types[i].high_gm_size);
		gvt->types[i].avail_instance = gvt->types[i].max_instance;

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%u",
						gvt->types[i].max_instance);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%u",
						gvt->types[i].max_instance);

		min_low <<= 1;
		gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name, gvt->types[i].max_instance,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}

	gvt->num_types = i;
	return 0;
}
Example #7
0
/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize vGPU type list based on available resource.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* A vGPU type name is defined as GVTg_Vx_y, where 'x' encodes the
	 * physical GPU generation (e.g. V4 for a BDW server, V5 for an
	 * SKL server).
	 *
	 * Depending on the physical SKU resources, we might see vGPU types
	 * like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
	 * different types of vGPU on the same physical GPU depending on the
	 * available resources. Each vGPU type has an "avail_instance" field
	 * to indicate how many vGPU instances can be created for this type.
	 *
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);

	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
						vgpu_types[i].name);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
						vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}
Example #8
0
media_fillfunc_t get_media_fillfunc(int devid)
{
	media_fillfunc_t fill = NULL;

	if (IS_GEN8(devid))
		fill = gen8_media_fillfunc;
	else if (IS_GEN7(devid))
		fill = gen7_media_fillfunc;
	else if (IS_GEN9(devid))
		fill = gen9_media_fillfunc;

	return fill;
}
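get_media_fillfunc() returns NULL for device generations it does not recognize, so callers are expected to check the result before using it. A minimal usage sketch (the surrounding variables are illustrative):

media_fillfunc_t fill = get_media_fillfunc(devid);

/* Unsupported generation: bail out instead of calling a NULL pointer. */
if (!fill)
	return;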
Example #9
0
/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaDisableMinuteIaClockGating:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
					      ~GUC_ENABLE_MIA_CLOCK_GATING));
	}

	/* WaC6DisallowByGfxPause:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

	if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev_priv)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us (in 10ns units) before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}

	ret = guc_ucode_xfer_dma(dev_priv, vma);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
Example #10
0
bool lspcon_init(struct intel_digital_port *intel_dig_port)
{
	struct intel_dp *dp = &intel_dig_port->dp;
	struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!IS_GEN9(dev_priv)) {
		DRM_ERROR("LSPCON is supported on GEN9 only\n");
		return false;
	}

	lspcon->active = false;
	lspcon->mode = DRM_LSPCON_MODE_INVALID;

	if (!lspcon_probe(lspcon)) {
		DRM_ERROR("Failed to probe lspcon\n");
		return false;
	}

	/*
	* In the SW state machine, let's put LSPCON in PCON mode only.
	* In this way, it will work with both HDMI 1.4 sinks as well as HDMI
	* 2.0 sinks.
	*/
	if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
		if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
			true) < 0) {
			DRM_ERROR("LSPCON mode change to PCON failed\n");
			return false;
		}
	}

	if (!intel_dp_read_dpcd(dp)) {
		DRM_ERROR("LSPCON DPCD read failed\n");
		return false;
	}

	lspcon->desc_valid = intel_dp_read_desc(dp);

	DRM_DEBUG_KMS("Success: LSPCON init\n");
	return true;
}
Example #11
0
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev))
		return gen9_force_wake_get(dev_priv, fw_engine);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
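The comment above describes the intended pairing: any register sequence that must not let the GT power down is bracketed by a get at the start and a put at the end. A minimal sketch of that pattern (the sequence in the middle is illustrative):

/* Keep the GT awake across the whole multi-register sequence. */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

/* ... register accesses that must all see the GT powered up ... */

gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);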
Example #12
0
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev)) {
		gen9_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}


	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}
Example #13
0
int intel_uc_init_hw(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	int ret, attempts;

	if (!i915.enable_guc_loading)
		return 0;

	guc_disable_communication(guc);
	gen9_reset_guc_interrupts(dev_priv);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(dev_priv);

	if (i915.enable_guc_submission) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = i915_guc_submission_init(dev_priv);
		if (ret)
			goto err_guc;
	}

	/* init WOPCM */
	I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
	I915_WRITE(DMA_GUC_WOPCM_OFFSET,
		   GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (IS_GEN9(dev_priv))
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __intel_uc_reset_hw(dev_priv);
		if (ret)
			goto err_submission;

		intel_huc_init_hw(&dev_priv->huc);
		ret = intel_guc_init_hw(&dev_priv->guc);
		if (ret == 0 || ret != -EAGAIN)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_guc_auth_huc(dev_priv);
	if (i915.enable_guc_submission) {
		if (i915.guc_log_level >= 0)
			gen9_enable_guc_interrupts(dev_priv);

		ret = i915_guc_submission_enable(dev_priv);
		if (ret)
			goto err_interrupts;
	}

	return 0;

	/*
	 * We've failed to load the firmware :(
	 *
	 * Decide whether to disable GuC submission and fall back to
	 * execlist mode, and whether to hide the error by returning
	 * zero or to return -EIO, which the caller will treat as a
	 * nonfatal error (i.e. it doesn't prevent driver load, but
	 * marks the GPU as wedged until reset).
	 */
err_interrupts:
	guc_disable_communication(guc);
	gen9_disable_guc_interrupts(dev_priv);
err_log_capture:
	guc_capture_load_err_log(guc);
err_submission:
	if (i915.enable_guc_submission)
		i915_guc_submission_fini(dev_priv);
err_guc:
	i915_ggtt_disable_guc(dev_priv);

	DRM_ERROR("GuC init failed\n");
	if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
		ret = -EIO;
	else
		ret = 0;

	if (i915.enable_guc_submission) {
		i915.enable_guc_submission = 0;
		DRM_NOTE("Falling back from GuC submission to execlist mode\n");
	}

	i915.enable_guc_loading = 0;
	DRM_NOTE("GuC firmware loading disabled\n");

	return ret;
}
Example #14
0
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* Display WA #0529: skl, kbl, bxt. */
	if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
		u32 val = I915_READ(CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		if (i915_gem_object_get_tiling(params->vma->obj) !=
		    I915_TILING_X)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		I915_WRITE(CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->vma->fence) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE |
			   params->vma->fence->id);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(SNB_DPFC_CTL_SA, 0);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
	}

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}
Example #15
0
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	__intel_uncore_early_sanitize(dev, false);

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		WARN_ON(1);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen4);
		ASSIGN_READ_MMIO_VFUNCS(gen4);
		break;
	}

	i915_check_and_clear_faults(dev);
}
Example #16
0
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
			      const struct firmware *fw)
{
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header *dmc_header;
	struct intel_csr *csr = &dev_priv->csr;
	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
	uint32_t max_fw_size = 0;
	uint32_t i;
	uint32_t *dmc_payload;
	uint32_t required_version;

	if (!fw)
		return NULL;

	/* Extract CSS Header information */
	css_header = (struct intel_css_header *)fw->data;
	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		DRM_ERROR("DMC firmware has wrong CSS header length "
			  "(%u bytes)\n",
			  (css_header->header_len * 4));
		return NULL;
	}

	csr->version = css_header->version;

	if (csr->fw_path == i915_modparams.dmc_firmware_path) {
		/* Bypass version check for firmware override. */
		required_version = csr->version;
	} else if (IS_ICELAKE(dev_priv)) {
		required_version = ICL_CSR_VERSION_REQUIRED;
	} else if (IS_CANNONLAKE(dev_priv)) {
		required_version = CNL_CSR_VERSION_REQUIRED;
	} else if (IS_GEMINILAKE(dev_priv)) {
		required_version = GLK_CSR_VERSION_REQUIRED;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		required_version = KBL_CSR_VERSION_REQUIRED;
	} else if (IS_SKYLAKE(dev_priv)) {
		required_version = SKL_CSR_VERSION_REQUIRED;
	} else if (IS_BROXTON(dev_priv)) {
		required_version = BXT_CSR_VERSION_REQUIRED;
	} else {
		MISSING_CASE(INTEL_REVID(dev_priv));
		required_version = 0;
	}

	if (csr->version != required_version) {
		DRM_INFO("Refusing to load DMC firmware v%u.%u,"
			 " please use v%u.%u\n",
			 CSR_VERSION_MAJOR(csr->version),
			 CSR_VERSION_MINOR(csr->version),
			 CSR_VERSION_MAJOR(required_version),
			 CSR_VERSION_MINOR(required_version));
		return NULL;
	}

	readcount += sizeof(struct intel_css_header);

	/* Extract Package Header information */
	package_header = (struct intel_package_header *)
		&fw->data[readcount];
	if (sizeof(struct intel_package_header) !=
	    (package_header->header_len * 4)) {
		DRM_ERROR("DMC firmware has wrong package header length "
			  "(%u bytes)\n",
			  (package_header->header_len * 4));
		return NULL;
	}
	readcount += sizeof(struct intel_package_header);

	/* Search for dmc_offset to find the firmware binary. */
	for (i = 0; i < package_header->num_entries; i++) {
		if (package_header->fw_info[i].substepping == '*' &&
		    si->stepping == package_header->fw_info[i].stepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (si->stepping == package_header->fw_info[i].stepping &&
			   si->substepping == package_header->fw_info[i].substepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (package_header->fw_info[i].stepping == '*' &&
			   package_header->fw_info[i].substepping == '*')
			dmc_offset = package_header->fw_info[i].offset;
	}
	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
		DRM_ERROR("DMC firmware not supported for %c stepping\n",
			  si->stepping);
		return NULL;
	}
	/* Convert dmc_offset into number of bytes. By default it is in dwords. */
	dmc_offset *= 4;
	readcount += dmc_offset;

	/* Extract dmc_header information. */
	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
	if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
		DRM_ERROR("DMC firmware has wrong dmc header length "
			  "(%u bytes)\n",
			  (dmc_header->header_len));
		return NULL;
	}
	readcount += sizeof(struct intel_dmc_header);

	/* Cache the dmc header info. */
	if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
		DRM_ERROR("DMC firmware has wrong mmio count %u\n",
			  dmc_header->mmio_count);
		return NULL;
	}
	csr->mmio_count = dmc_header->mmio_count;
	for (i = 0; i < dmc_header->mmio_count; i++) {
		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
			DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
				  dmc_header->mmioaddr[i]);
			return NULL;
		}
		csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
		csr->mmiodata[i] = dmc_header->mmiodata[i];
	}

	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
	nbytes = dmc_header->fw_size * 4;
	if (INTEL_GEN(dev_priv) >= 11)
		max_fw_size = ICL_CSR_MAX_FW_SIZE;
	else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		max_fw_size = GLK_CSR_MAX_FW_SIZE;
	else if (IS_GEN9(dev_priv))
		max_fw_size = BXT_CSR_MAX_FW_SIZE;
	else
		MISSING_CASE(INTEL_REVID(dev_priv));
	if (nbytes > max_fw_size) {
		DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
		return NULL;
	}
	csr->dmc_fw_size = dmc_header->fw_size;

	dmc_payload = kmalloc(nbytes, GFP_KERNEL);
	if (!dmc_payload) {
		DRM_ERROR("Memory allocation failed for dmc payload\n");
		return NULL;
	}

	return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}