Example #1
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	device_t pch;

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easily for the VMM, which
	 * only needs to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 */
	pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA);
	if (pch) {
		if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
			unsigned short id;
			id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				dev_priv->num_pch_pll = 2;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				dev_priv->num_pch_pll = 2;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				dev_priv->num_pch_pll = 2;
				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->num_pch_pll = 0;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->num_pch_pll = 0;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
			}
			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
		}
#if 0
		pci_dev_put(pch);
#endif
	}
}
Example #2
void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if (IS_HASWELL(dev) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev);
}
Example #3
int Calc_CSC_Param(struct CSCCoeff_Matrix *CSC_Matrix,
				struct csc_coeff *CSC_Coeff_t, int devid)
{
    struct drm_intel_csc_params input_csc_params;
    unsigned short Hsw_Vlv_CSC_Coeff[CSC_MAX_COEFF_COUNT];

    memcpy(input_csc_params.m_CSCCoeff, CSC_Matrix->CoeffMatrix,
					sizeof(float) * CSC_MAX_COEFF_COUNT);
    Convert_Coeff_ToBinary(&input_csc_params, Hsw_Vlv_CSC_Coeff, devid);
    Convert_Coeff_ToBSpecFormat(CSC_Coeff_t->csc_coeff,
						Hsw_Vlv_CSC_Coeff, devid);

    if (IS_HASWELL(devid) || IS_BROADWELL(devid)) {
        if (CSC_Matrix->param_valid & CSC_OFFSET_VALID_MASK)
            Convert_CSC_Offset_ToBSpecFormat(CSC_Matrix, CSC_Coeff_t);

        if (CSC_Matrix->param_valid & CSC_MODE_VALID_MASK) {
            if (CSC_Matrix->CSCMode == 0x1)
                CSC_Coeff_t->csc_mode = 0x2; /* CSC is before Gamma */
            else
                CSC_Coeff_t->csc_mode = 0;
        }
        CSC_Coeff_t->param_valid = CSC_Matrix->param_valid;
    }

    return 0;
}
Example #4
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
#ifdef FREEBSD_WIP
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
		else
#endif
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	default:
		panic("i915_gem_context: Unsupported Intel GPU generation %d",
		    INTEL_INFO(dev)->gen);
	}

	return ret;
}
Example #5
static int get_context_size(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;
    u32 reg;

    switch (INTEL_INFO(dev)->gen) {
    case 6:
        reg = I915_READ(CXT_SIZE);
        ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
        break;
    case 7:
        reg = I915_READ(GEN7_CXT_SIZE);
        if (IS_HASWELL(dev))
            ret = HSW_CXT_TOTAL_SIZE;
        else
            ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
        break;
    case 8:
        ret = GEN8_CXT_TOTAL_SIZE;
        break;
    default:
        BUG();
    }

    return ret;
}
Example #6
static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = drm_dev->dev_private;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
		hsw_enable_pc8(dev_priv);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
Example #7
bool
gputop_perf_initialize(void)
{
    if (intel_dev.device)
	return true;

    drm_fd = open_render_node(&intel_dev);
    if (drm_fd < 0) {
	gputop_log(GPUTOP_LOG_LEVEL_HIGH, "Failed to open render node", -1);
	return false;
    }

    /* NB: eu_count needs to be initialized before declaring counters */
    init_dev_info(drm_fd, intel_dev.device);
    page_size = sysconf(_SC_PAGE_SIZE);

    if (IS_HASWELL(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_compute_basic_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_compute_extended_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_memory_reads_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_memory_writes_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_sampler_balance_counter_query_hsw(&gputop_devinfo);
    } else if (IS_BROADWELL(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_bdw(&gputop_devinfo);
    } else if (IS_CHERRYVIEW(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_chv(&gputop_devinfo);
    } else if (IS_SKYLAKE(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_skl(&gputop_devinfo);
    } else
	assert(0);

    return true;
}
Example #8
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
Example #9
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}
Example #10
/**
 * i915_check_vgpu - detect virtual GPU
 * @dev_priv: i915 device private
 *
 * This function is called at the initialization stage, to detect whether
 * running on a vGPU.
 */
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
	uint64_t magic;
	uint32_t version;

	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);

	if (!IS_HASWELL(dev_priv))
		return;

	magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
	if (magic != VGT_MAGIC)
		return;

	version = INTEL_VGT_IF_VERSION_ENCODE(
		__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
		__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
	if (version != INTEL_VGT_IF_VERSION) {
		DRM_INFO("VGT interface version mismatch!\n");
		return;
	}

	dev_priv->vgpu.active = true;
	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}
Example #11
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
Example #12
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}
Example #13
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
			      size_t *values_cnt)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain power_domain;
	enum intel_pipe_crc_source source;
	u32 val = 0; /* shut up gcc */
	int ret = 0;

	if (display_crc_ctl_parse_source(source_name, &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	power_domain = POWER_DOMAIN_PIPE(crtc->index);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
	if (ret != 0)
		goto out;

	if (source) {
		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(intel_crtc);
	}

	I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
	POSTING_READ(PIPE_CRC_CTL(crtc->index));

	if (!source) {
		if (IS_G4X(dev_priv))
			g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);

		hsw_enable_ips(intel_crtc);
	}

	pipe_crc->skipped = 0;
	*values_cnt = 5;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
Example #14
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
Example #15
/*
 * Starting with Haswell, DDI port buffers must be programmed with correct
 * values in advance. The buffer values are different for FDI and DP modes,
 * but the HDMI/DVI fields are shared among those. So we program the DDI
 * in either FDI or DP modes only, as HDMI connections will work with both
 * of those
 */
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;
	int i;
	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
	const u32 *ddi_translations_fdi;
	const u32 *ddi_translations_dp;
	const u32 *ddi_translations_edp;
	const u32 *ddi_translations;

	if (IS_BROADWELL(dev)) {
		ddi_translations_fdi = bdw_ddi_translations_fdi;
		ddi_translations_dp = bdw_ddi_translations_dp;
		ddi_translations_edp = bdw_ddi_translations_edp;
	} else if (IS_HASWELL(dev)) {
		ddi_translations_fdi = hsw_ddi_translations_fdi;
		ddi_translations_dp = hsw_ddi_translations_dp;
		ddi_translations_edp = hsw_ddi_translations_dp;
	} else {
		WARN(1, "ddi translation table missing\n");
		ddi_translations_edp = bdw_ddi_translations_dp;
		ddi_translations_fdi = bdw_ddi_translations_fdi;
		ddi_translations_dp = bdw_ddi_translations_dp;
	}

	switch (port) {
	case PORT_A:
		ddi_translations = ddi_translations_edp;
		break;
	case PORT_B:
	case PORT_C:
		ddi_translations = ddi_translations_dp;
		break;
	case PORT_D:
		if (intel_dp_is_edp(dev, PORT_D))
			ddi_translations = ddi_translations_edp;
		else
			ddi_translations = ddi_translations_dp;
		break;
	case PORT_E:
		ddi_translations = ddi_translations_fdi;
		break;
	default:
		BUG();
	}

	for (i = 0, reg = DDI_BUF_TRANS(port);
	     i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
		I915_WRITE(reg, ddi_translations[i]);
		reg += 4;
	}
	/* Entry 9 is for HDMI: */
	for (i = 0; i < 2; i++) {
		I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
		reg += 4;
	}
}
Example #16
static void gen7_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);

	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));

	intel_fbc_nuke(dev_priv);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
Example #17
static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
{
	struct drm_device *dev = intel_hdmi_to_dev(hdmi);

	if (IS_G4X(dev))
		return 165000;
	else if (IS_HASWELL(dev))
		return 300000;
	else
		return 225000;
}
Example #18
static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
{
	struct drm_device *dev = intel_hdmi_to_dev(hdmi);

	if (!hdmi->has_hdmi_sink || IS_G4X(dev))
		return 165000;
	else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
		return 300000;
	else
		return 225000;
}
Example #19
static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}
}
Example #20
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
    struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
    u32 dpfc_ctl;
    int threshold = dev_priv->fbc.threshold;

    dev_priv->fbc.active = true;

    dpfc_ctl = 0;
    if (IS_IVYBRIDGE(dev_priv))
        dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);

    if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
        threshold++;

    switch (threshold) {
    case 4:
    case 3:
        dpfc_ctl |= DPFC_CTL_LIMIT_4X;
        break;
    case 2:
        dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        break;
    case 1:
        dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        break;
    }

    dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

    if (dev_priv->fbc.false_color)
        dpfc_ctl |= FBC_CTL_FALSE_COLOR;

    if (IS_IVYBRIDGE(dev_priv)) {
        /* WaFbcAsynchFlipDisableFbcQueue:ivb */
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
                   I915_READ(ILK_DISPLAY_CHICKEN1) |
                   ILK_FBCQ_DIS);
    } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
        /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
        I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
                   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
                   HSW_FBCQ_DIS);
    }

    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

    I915_WRITE(SNB_DPFC_CTL_SA,
               SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
    I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);

    intel_fbc_recompress(dev_priv);
}
Example #21
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
	struct drm_device *dev = intel_hdmi->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t enabled_bits;

	enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;

	WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
	     "HDMI port enabled, expecting disabled\n");
}
Example #22
static int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_disable_pc8(dev_priv);

	intel_uncore_early_sanitize(dev, true);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return 0;
}
Example #23
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	val = I915_READ(HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW, HW loads the HDCP1.4 key when the display comes
	 * out of reset. So if the key is not already loaded, it's an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only SKL and KBL
	 * differ in the key load trigger process from other platforms.
	 */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		mutex_unlock(&dev_priv->pcu_lock);
		if (ret) {
			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
			          ret);
			return ret;
		}
	} else {
		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
Example #24
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	intel_uncore_forcewake_reset(dev);
}
Example #25
/* Program DDI buffer translations for DP. By default, program ports A-D in DP
 * mode and port E for FDI.
 */
void intel_prepare_ddi(struct drm_device *dev)
{
	int port;

	if (IS_HASWELL(dev)) {
		for (port = PORT_A; port < PORT_E; port++)
			intel_prepare_ddi_buffers(dev, port, false);

		/* DDI E is the suggested one to work in FDI mode, so program it as such by
		 * default. It will have to be re-programmed in case a digital DP output
		 * will be detected on it
		 */
		intel_prepare_ddi_buffers(dev, PORT_E, true);
	}
}
Example #26
/*
 * This function implements common functionality of runtime and system
 * suspend sequence.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}
Example #27
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}
Example #28
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);

	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

out:
	dev_priv->suspended_to_idle = false;

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
Example #29
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
			      size_t *values_cnt)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
	enum intel_display_power_domain power_domain;
	enum intel_pipe_crc_source source;
	u32 val = 0; /* shut up gcc */
	int ret = 0;

	if (display_crc_ctl_parse_source(source_name, &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	power_domain = POWER_DOMAIN_PIPE(crtc->index);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
	if (ret != 0)
		goto out;

	I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
	POSTING_READ(PIPE_CRC_CTL(crtc->index));

	if (!source) {
		if (IS_G4X(dev_priv))
			g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if ((IS_HASWELL(dev_priv) ||
			  IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
			hsw_pipe_A_crc_wa(dev_priv, false);
	}

	pipe_crc->skipped = 0;
	*values_cnt = 5;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
Example #30
static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	enum pipe pipe;
	bool pipe_a_only = false;

	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
		pipe_a_only = true;

	for_each_pipe(dev_priv, pipe) {
		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_plane_state(tmp_crtc->primary->state)->visible)
			crtc = tmp_crtc;

		if (pipe_a_only)
			break;
	}