Example #1
0
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

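	/* If the deferred forcewake release timer was still pending, run its
	 * callback by hand so the outstanding reference is dropped before the
	 * forcewake state is reset below.
	 */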
	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

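		/* The software copy of the GT FIFO free-entry count is only kept
		 * on gen6/gen7; re-read it from GTFIFOCTL since the reset may
		 * have changed it.
		 */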
		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
Example #2
0
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	device_t pch;

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 */
	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
	if (pch) {
		if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
			unsigned short id;
			id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				dev_priv->num_pch_pll = 2;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				dev_priv->num_pch_pll = 2;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				dev_priv->num_pch_pll = 2;
				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->num_pch_pll = 0;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->num_pch_pll = 0;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
			}
			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
		}
#if 0
		pci_dev_put(pch);
#endif
	}
}
Example #3
0
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->vma->fence) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		if (IS_GEN5(dev_priv))
			dpfc_ctl |= params->vma->fence->id;
		if (IS_GEN6(dev_priv)) {
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->vma->fence->id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN6(dev_priv)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE,
		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}
Example #4
0
static size_t get_context_alignment(struct drm_device *dev)
{
    if (IS_GEN6(dev))
        return GEN6_CONTEXT_ALIGN;

    return GEN7_CONTEXT_ALIGN;
}
Example #5
0
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}
void
intel_register_write(uint32_t reg, uint32_t val)
{
	struct intel_register_range *range;

	assert(mmio_data.inited);

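	/* gen6+ parts can power down the GT behind our back; the check below
	 * assumes the forcewake key set up at register-access init must be
	 * valid before registers are touched.
	 */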
	if (IS_GEN6(mmio_data.i915_devid))
		assert(mmio_data.key != -1);

	if (!mmio_data.safe)
		goto write_out;

	range = intel_get_register_range(mmio_data.map,
					 reg,
					 INTEL_RANGE_WRITE);

	if (!range) {
		fprintf(stderr, "Register write blocked for safety "
			"(*0x%08x = 0x%x)\n", reg, val);
	}

write_out:
	*(volatile uint32_t *)((volatile char *)mmio + reg) = val;
}
Example #7
0
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
Example #8
0
struct intel_batchbuffer * 
intel_batchbuffer_new(struct intel_driver_data *intel, int flag, int buffer_size)
{
    struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));
    assert(flag == I915_EXEC_RENDER ||
           flag == I915_EXEC_BSD ||
           flag == I915_EXEC_BLT ||
           flag == I915_EXEC_VEBOX);

    if (!buffer_size || buffer_size < BATCH_SIZE) {
        buffer_size = BATCH_SIZE;
    }

    /* the buffer size can't exceed 4M */
    if (buffer_size > MAX_BATCH_SIZE) {
        buffer_size = MAX_BATCH_SIZE;
    }

    batch->intel = intel;
    batch->flag = flag;
    batch->run = drm_intel_bo_mrb_exec;

    if (IS_GEN6(intel->device_info) &&
        flag == I915_EXEC_RENDER)
        batch->wa_render_bo = dri_bo_alloc(intel->bufmgr,
                                           "wa scratch",
                                           4096,
                                           4096);
    else
        batch->wa_render_bo = NULL;

    intel_batchbuffer_reset(batch, buffer_size);

    return batch;
}
void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if (IS_HASWELL(dev) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev);
}
Example #10
0
uint32_t
intel_register_read(uint32_t reg)
{
	struct intel_register_range *range;
	uint32_t ret;

	assert(mmio_data.inited);

	if (IS_GEN6(mmio_data.i915_devid))
		assert(mmio_data.key != -1);

	if (!mmio_data.safe)
		goto read_out;

	range = intel_get_register_range(mmio_data.map,
					 reg,
					 INTEL_RANGE_READ);

	if (!range) {
		fprintf(stderr, "Register read blocked for safety "
			"(*0x%08x)\n", reg);
		ret = 0xffffffff;
		goto out;
	}

read_out:
	ret = *(volatile uint32_t *)((volatile char *)mmio + reg);
out:
	return ret;
}
Example #11
0
int
intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
	struct intel_plane *intel_plane;
	unsigned long possible_crtcs;
	const uint32_t *plane_formats;
	int num_plane_formats;
	int ret;

	if (INTEL_INFO(dev)->gen < 5)
		return -ENODEV;

	intel_plane = malloc(sizeof(struct intel_plane), DRM_MEM_KMS,
	    M_WAITOK | M_ZERO);

	switch (INTEL_INFO(dev)->gen) {
	case 5:
	case 6:
		intel_plane->max_downscale = 16;
		intel_plane->update_plane = ilk_update_plane;
		intel_plane->disable_plane = ilk_disable_plane;
		intel_plane->update_colorkey = ilk_update_colorkey;
		intel_plane->get_colorkey = ilk_get_colorkey;

		if (IS_GEN6(dev)) {
			plane_formats = snb_plane_formats;
			num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats);
		} else {
			plane_formats = ilk_plane_formats;
			num_plane_formats = DRM_ARRAY_SIZE(ilk_plane_formats);
		}
		break;

	case 7:
		intel_plane->max_downscale = 2;
		intel_plane->update_plane = ivb_update_plane;
		intel_plane->disable_plane = ivb_disable_plane;
		intel_plane->update_colorkey = ivb_update_colorkey;
		intel_plane->get_colorkey = ivb_get_colorkey;

		plane_formats = snb_plane_formats;
		num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats);
		break;

	default:
		/* don't leak the allocation on unsupported gens */
		free(intel_plane, DRM_MEM_KMS);
		return -ENODEV;
	}

	intel_plane->pipe = pipe;
	possible_crtcs = (1 << pipe);
	ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
			     &intel_plane_funcs,
			     plane_formats, num_plane_formats,
			     false);
	if (ret)
		free(intel_plane, DRM_MEM_KMS);

	return ret;
}
Example #12
0
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);

	mutex_lock(&dev->struct_mutex);

	/* Hardware status page */
	dev_priv->saveHWS = I915_READ(HWS_PGA);

	i915_save_display(dev);

	/* Interrupt state */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->saveDEIER = I915_READ(DEIER);
		dev_priv->saveDEIMR = I915_READ(DEIMR);
		dev_priv->saveGTIER = I915_READ(GTIER);
		dev_priv->saveGTIMR = I915_READ(GTIMR);
		dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
		dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
		dev_priv->saveMCHBAR_RENDER_STANDBY =
			I915_READ(RSTDBYCTL);
		dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
	} else {
		dev_priv->saveIER = I915_READ(IER);
		dev_priv->saveIMR = I915_READ(IMR);
	}

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (IS_GEN6(dev))
		gen6_disable_rps(dev);

	/* Cache mode state */
	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
Example #13
0
int i915_restore_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);

	/* Hardware status page */
	I915_WRITE(HWS_PGA, dev_priv->saveHWS);

	i915_restore_display(dev);

	/* Interrupt state */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(DEIER, dev_priv->saveDEIER);
		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
		I915_WRITE(GTIER, dev_priv->saveGTIER);
		I915_WRITE(GTIMR, dev_priv->saveGTIMR);
		I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
		I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
	} else {
		I915_WRITE(IER, dev_priv->saveIER);
		I915_WRITE(IMR, dev_priv->saveIMR);
	}

	intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (IS_GEN6(dev))
		gen6_enable_rps(dev_priv);

	/* Cache mode state */
	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);

	/* Memory arbitration state */
	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);

	for (i = 0; i < 16; i++) {
		I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
	}
	for (i = 0; i < 3; i++)
		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);

	intel_i2c_reset(dev);

	return 0;
}
int
intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
	struct intel_plane *intel_plane;
	unsigned long possible_crtcs;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
	if (!intel_plane)
		return -ENOMEM;

	if (IS_GEN6(dev)) {
		intel_plane->max_downscale = 16;
		intel_plane->update_plane = snb_update_plane;
		intel_plane->disable_plane = snb_disable_plane;
		intel_plane->update_colorkey = snb_update_colorkey;
		intel_plane->get_colorkey = snb_get_colorkey;
	} else if (IS_GEN7(dev)) {
		intel_plane->max_downscale = 2;
		intel_plane->update_plane = ivb_update_plane;
		intel_plane->disable_plane = ivb_disable_plane;
		intel_plane->update_colorkey = ivb_update_colorkey;
		intel_plane->get_colorkey = ivb_get_colorkey;
	}

	intel_plane->pipe = pipe;
	possible_crtcs = (1 << pipe);
	ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
			     &intel_plane_funcs, snb_plane_formats,
			     ARRAY_SIZE(snb_plane_formats), false);
	if (ret)
		kfree(intel_plane);

	return ret;
}
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	intel_uncore_forcewake_reset(dev);
}
Example #16
0
int
intel_plane_init(struct drm_device *dev, enum i915_pipe pipe)
{
	struct intel_plane *intel_plane;
	unsigned long possible_crtcs;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	intel_plane = kmalloc(sizeof(struct intel_plane), DRM_MEM_KMS,
	    M_WAITOK | M_ZERO);

	if (IS_GEN6(dev)) {
		intel_plane->max_downscale = 16;
		intel_plane->update_plane = snb_update_plane;
		intel_plane->disable_plane = snb_disable_plane;
		intel_plane->update_colorkey = snb_update_colorkey;
		intel_plane->get_colorkey = snb_get_colorkey;
	} else if (IS_GEN7(dev)) {
		intel_plane->max_downscale = 2;
		intel_plane->update_plane = ivb_update_plane;
		intel_plane->disable_plane = ivb_disable_plane;
		intel_plane->update_colorkey = ivb_update_colorkey;
		intel_plane->get_colorkey = ivb_get_colorkey;
	}

	intel_plane->pipe = pipe;
	possible_crtcs = (1 << pipe);
	ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
			     &intel_plane_funcs, snb_plane_formats,
			     DRM_ARRAY_SIZE(snb_plane_formats), false);
	if (ret)
		drm_free(intel_plane, DRM_MEM_KMS);

	return ret;
}
Example #17
0
static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
			       enum pipe pipe,
			       enum intel_pipe_crc_source *source, u32 *val)
{
	if (IS_GEN2(dev_priv))
		return i8xx_pipe_crc_ctl_reg(source, val);
	else if (INTEL_GEN(dev_priv) < 5)
		return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
		return ilk_pipe_crc_ctl_reg(source, val);
	else
		return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
}
Example #18
0
static inline int
mi_set_context(struct intel_ring_buffer *ring,
	       struct i915_hw_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, new_context->obj->gtt_offset |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
	intel_ring_emit(ring, MI_NOOP);

	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}
Example #19
0
static void ilk_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;
	unsigned int y_offset;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= obj->fence_reg;

	y_offset = get_crtc_fence_y_offset(crtc);
	I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
	}

	intel_fbc_nuke(dev_priv);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
Example #20
0
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	if (IS_GEN6(dev)) {
		ret = snb_runtime_resume(dev_priv);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_resume(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_resume(dev_priv);
	} else {
		WARN_ON(1);
		ret = -ENODEV;
	}

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
Example #21
0
static void emit_blt(drm_intel_bo *src_bo, uint32_t src_tiling, unsigned src_pitch,
		     unsigned src_x, unsigned src_y, unsigned w, unsigned h,
		     drm_intel_bo *dst_bo, uint32_t dst_tiling, unsigned dst_pitch,
		     unsigned dst_x, unsigned dst_y)
{
	uint32_t cmd_bits = 0;

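	/* From gen4 (965) onwards the blitter expects the pitch of tiled
	 * surfaces in dwords rather than bytes, hence the divide by 4.
	 */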
	if (IS_965(devid) && src_tiling) {
		src_pitch /= 4;
		cmd_bits |= XY_SRC_COPY_BLT_SRC_TILED;
	}

	if (IS_965(devid) && dst_tiling) {
		dst_pitch /= 4;
		cmd_bits |= XY_SRC_COPY_BLT_DST_TILED;
	}

	/* copy lower half to upper half */
	BEGIN_BATCH(8);
	OUT_BATCH(XY_SRC_COPY_BLT_CMD |
		  XY_SRC_COPY_BLT_WRITE_ALPHA |
		  XY_SRC_COPY_BLT_WRITE_RGB |
		  cmd_bits);
	OUT_BATCH((3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  dst_pitch);
	OUT_BATCH(dst_y << 16 | dst_x);
	OUT_BATCH((dst_y+h) << 16 | (dst_x+w));
	OUT_RELOC_FENCED(dst_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
	OUT_BATCH(src_y << 16 | src_x);
	OUT_BATCH(src_pitch);
	OUT_RELOC_FENCED(src_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
	ADVANCE_BATCH();

	if (IS_GEN6(devid) || IS_GEN7(devid)) {
		BEGIN_BATCH(3);
		OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
		OUT_BATCH(0);
		OUT_BATCH(0);
		ADVANCE_BATCH();
	}
}
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
    struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
    u32 dpfc_ctl;
    int threshold = dev_priv->fbc.threshold;

    dev_priv->fbc.active = true;

    dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
    if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
        threshold++;

    switch (threshold) {
    case 4:
    case 3:
        dpfc_ctl |= DPFC_CTL_LIMIT_4X;
        break;
    case 2:
        dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        break;
    case 1:
        dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        break;
    }
    dpfc_ctl |= DPFC_CTL_FENCE_EN;
    if (IS_GEN5(dev_priv))
        dpfc_ctl |= params->fb.fence_reg;

    I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
    I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
    /* enable it... */
    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

    if (IS_GEN6(dev_priv)) {
        I915_WRITE(SNB_DPFC_CTL_SA,
                   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
    }

    intel_fbc_recompress(dev_priv);
}
Example #23
0
static void
parse_general_features(struct drm_i915_private *dev_priv,
		       struct bdb_header *bdb)
{
	struct drm_device *dev = dev_priv->dev;
	struct bdb_general_features *general;

	general = find_section(bdb, BDB_GENERAL_FEATURES);
	if (general) {
		dev_priv->int_tv_support = general->int_tv_support;
		dev_priv->int_crt_support = general->int_crt_support;
		dev_priv->lvds_use_ssc = general->enable_ssc;

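		/* general->ssc_freq is a single BIOS bit; translate it into the
		 * platform's SSC reference frequency in MHz. */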
		if (IS_I85X(dev))
			dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
		else if (IS_GEN5(dev) || IS_GEN6(dev))
			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
		else
			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
	}
}
Example #24
0
static void render_copyfunc(struct scratch_buf *src, unsigned src_x, unsigned src_y,
			    struct scratch_buf *dst, unsigned dst_x, unsigned dst_y,
			    unsigned logical_tile_no)
{
	if (IS_GEN2(devid))
		gen2_render_copyfunc(src, src_x, src_y,
				     dst, dst_x, dst_y,
				     logical_tile_no);
	else if (IS_GEN3(devid))
		gen3_render_copyfunc(src, src_x, src_y,
				     dst, dst_x, dst_y,
				     logical_tile_no);
	else if (IS_GEN6(devid))
		gen6_render_copyfunc(src, src_x, src_y,
				     dst, dst_x, dst_y,
				     logical_tile_no);
	else
		blitter_copyfunc(src, src_x, src_y,
				 dst, dst_x, dst_y,
				 logical_tile_no);
}
Example #25
0
static void 
intel_driver_init(intel_driver_t *driver, int dev_fd)
{
  driver->fd = dev_fd;
  driver->locked = 0;
  pthread_mutex_init(&driver->ctxmutex, NULL);
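  /* 'res' is only consumed by assert(), which compiles away when NDEBUG is
   * defined, so its declaration is guarded to avoid an unused-variable
   * warning. */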
#ifndef NDEBUG
  int res =
#endif /* NDEBUG */
  intel_driver_get_param(driver, I915_PARAM_CHIPSET_ID, &driver->device_id);
  assert(res);
  intel_driver_memman_init(driver);

#if EMULATE_GEN
  driver->gen_ver = EMULATE_GEN;
  if (EMULATE_GEN == 75)
    driver->device_id = PCI_CHIP_HASWELL_L;       /* we pick L for HSW */
  else if (EMULATE_GEN == 7)
    driver->device_id = PCI_CHIP_IVYBRIDGE_GT2; /* we pick GT2 for IVB */
  else if (EMULATE_GEN == 6)
    driver->device_id = PCI_CHIP_SANDYBRIDGE_GT2; /* we pick GT2 for SNB */
  else
    FATAL ("Unsupported Gen for emulation");
#else
  if (IS_GEN75(driver->device_id))
    driver->gen_ver = 75;
  else if (IS_GEN7(driver->device_id))
    driver->gen_ver = 7;
  else if (IS_GEN6(driver->device_id))
    driver->gen_ver = 6;
  else if(IS_IGDNG(driver->device_id))
    driver->gen_ver = 5;
  else
    driver->gen_ver = 4;
#endif /* EMULATE_GEN */
}
Example #26
0
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontext * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *intelScreen = sPriv->private;
   int bo_reuse_mode;

   /* we can't do anything without a connection to the device */
   if (intelScreen->bufmgr == NULL)
      return GL_FALSE;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->driContext = driContextPriv;
   intel->driFd = sPriv->fd;

   intel->has_xrgb_textures = GL_TRUE;
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      intel->gen = 6;
      intel->needs_ff_sync = GL_TRUE;
      intel->has_luminance_srgb = GL_TRUE;
   } else if (IS_GEN5(intel->intelScreen->deviceID)) {
      intel->gen = 5;
      intel->needs_ff_sync = GL_TRUE;
      intel->has_luminance_srgb = GL_TRUE;
   } else if (IS_965(intel->intelScreen->deviceID)) {
      intel->gen = 4;
      if (IS_G4X(intel->intelScreen->deviceID)) {
	  intel->has_luminance_srgb = GL_TRUE;
	  intel->is_g4x = GL_TRUE;
      }
   } else if (IS_9XX(intel->intelScreen->deviceID)) {
      intel->gen = 3;
      if (IS_945(intel->intelScreen->deviceID)) {
	 intel->is_945 = GL_TRUE;
      }
   } else {
      intel->gen = 2;
      if (intel->intelScreen->deviceID == PCI_CHIP_I830_M ||
	  intel->intelScreen->deviceID == PCI_CHIP_845_G) {
	 intel->has_xrgb_textures = GL_FALSE;
      }
   }

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
		       (intel->gen >= 4) ? "i965" : "i915");
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   intel->bufmgr = intelScreen->bufmgr;

   bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(intel->bufmgr);
      break;
   }

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      unsigned int value = atoi(getenv("INTEL_STRICT_CONFORMANCE"));
      if (value > 0) {
         intel->conformance_mode = value;
      }
      else {
         intel->conformance_mode = 1;
      }
   }

   if (intel->conformance_mode > 0) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depend on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   meta_init_metaops(ctx, &intel->meta);
   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */
   if (intel->gen >= 4) {
      if (MAX_WIDTH > 8192)
	 ctx->Const.MaxRenderbufferSize = 8192;
   } else {
      if (MAX_WIDTH > 2048)
	 ctx->Const.MaxRenderbufferSize = 2048;
   }

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);
 
   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   _mesa_meta_init(ctx);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (intel->gen >= 4)
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   intelInitExtensions(ctx);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_fbo_init(intel);

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }
   intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
					       "texture_tiling");
   intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}
Example #27
0
static void
ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
		 unsigned int crtc_w, unsigned int crtc_h,
		 uint32_t x, uint32_t y,
		 uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	int pipe = intel_plane->pipe, pixel_size;
	u32 dvscntr, dvsscale;

	dvscntr = I915_READ(DVSCNTR(pipe));

	/* Mask out pixel format bits in case we change it */
	dvscntr &= ~DVS_PIXFORMAT_MASK;
	dvscntr &= ~DVS_RGB_ORDER_XBGR;
	dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XBGR8888:
		dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
		pixel_size = 4;
		break;
	case DRM_FORMAT_XRGB8888:
		dvscntr |= DVS_FORMAT_RGBX888;
		pixel_size = 4;
		break;
	case DRM_FORMAT_YUYV:
		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
		pixel_size = 2;
		break;
	case DRM_FORMAT_YVYU:
		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
		pixel_size = 2;
		break;
	case DRM_FORMAT_UYVY:
		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
		pixel_size = 2;
		break;
	case DRM_FORMAT_VYUY:
		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
		pixel_size = 2;
		break;
	default:
		DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
		dvscntr |= DVS_FORMAT_RGBX888;
		pixel_size = 4;
		break;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dvscntr |= DVS_TILED;

	if (IS_GEN6(dev))
		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
	dvscntr |= DVS_ENABLE;

	/* Sizes are 0 based */
	src_w--;
	src_h--;
	crtc_w--;
	crtc_h--;

	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);

	dvsscale = 0;
	if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;

	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
	if (obj->tiling_mode != I915_TILING_NONE) {
		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
	} else {
		unsigned long offset;

		offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
		I915_WRITE(DVSLINOFF(pipe), offset);
	}
	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
	I915_WRITE(DVSSCALE(pipe), dvsscale);
	I915_WRITE(DVSCNTR(pipe), dvscntr);
	I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
	POSTING_READ(DVSSURF(pipe));
}
Example #28
0
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor == 0x1af4 &&
				    pch->subsystem_device == 0x1100)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}
Example #29
0
/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
	bool need_disable;

	if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
		/* On Ironlake whatever DRAM config, GPU always do
		 * same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (!IS_I9XX(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev)) {
		uint32_t dcc;

		/* Try to make sure MCHBAR is enabled before poking at it */
		need_disable = intel_setup_mchbar(dev);

		/* On mobile 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC.  For single-channel, neither the CPU
		 * nor the GPU do swizzling.  For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled.  The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}
		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}

		intel_teardown_mchbar(dev, need_disable);
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    0    0     512    0     O
		 * 512  0    512  0     16     1008  X
		 * 512  0    0    512   16     1008  X
		 * 0    512  0    512   16     1008  X
		 * 1024 1024 1024 0     2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
			  gen6_force_wake_work);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
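		/* the default label covers gen8 and newer */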
		dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
		break;
	}
}