Example #1
static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	bool enable_by_default = IS_BROADWELL(dev_priv);

	if (intel_vgpu_active(dev_priv->dev)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (i915.enable_fbc < 0 && !enable_by_default) {
		fbc->no_fbc_reason = "disabled per chip default";
		return false;
	}

	if (!i915.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param";
		return false;
	}

	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
		fbc->no_fbc_reason = "no enabled pipes can have FBC";
		return false;
	}

	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
		fbc->no_fbc_reason = "no enabled planes can have FBC";
		return false;
	}

	return true;
}
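All of these examples gate on intel_vgpu_active(); Example #1 still passes the drm_device pointer that the older signature took, while the later examples pass dev_priv directly. A minimal sketch of the later form, assuming the detection result is cached in dev_priv->vgpu.active during early probe when the PVINFO MMIO window exposed by the GVT-g hypervisor carries the expected magic value (illustrative, not a verbatim excerpt):

bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
	/* Flag set once at probe time if a GVT-g virtual GPU was detected. */
	return dev_priv->vgpu.active;
}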
Example #2
int i915_gem_context_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->context_hw_ida);

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev_priv)) {
		dev_priv->hw_context_size =
			round_up(get_context_size(dev_priv),
				 I915_GTT_PAGE_SIZE);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev_priv, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	/* For easy recognisability, we want the kernel context to have hw_id 0
	 * and all user contexts to have a non-zero hw_id.
	 */
	GEM_BUG_ON(ctx->hw_id);

	i915_gem_context_clear_bannable(ctx);
	ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
	dev_priv->kernel_context = ctx;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}
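The ida initialized above is what hands out per-context hardware IDs later on. A simplified sketch of such an allocation helper (the name and the retry-free shape are illustrative, not verbatim driver code):

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned int *out)
{
	int ret;

	/* IDs come from the ida set up in i915_gem_context_init(); the
	 * BUILD_BUG_ON there guarantees the upper bound fits in an int. */
	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0)
		return ret;

	*out = ret;
	return 0;
}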
Example #3
/**
 * intel_vgt_deballoon - deballoon reserved graphics address trunks
 * @dev_priv: i915 device private data
 *
 * This function is called to deallocate the ballooned-out graphic memory when
 * the driver is unloaded or when ballooning fails.
 */
void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
{
	int i;

	if (!intel_vgpu_active(dev_priv))
		return;

	DRM_DEBUG("VGT deballoon.\n");

	for (i = 0; i < 4; i++) {
		if (bl_info.space[i].allocated)
			drm_mm_remove_node(&bl_info.space[i]);
	}

	memset(&bl_info, 0, sizeof(bl_info));
}
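The bl_info referenced above is not part of the excerpt. A sketch of the backing storage it is assumed to be, matching the four-slot loop (two holes in the mappable range, two in the unmappable range; see intel_vgt_balloon() in Example #6):

static struct _balloon_info {
	/* One drm_mm node per ballooned-out hole: [0]/[1] cover the mappable
	 * aperture, [2]/[3] the unmappable range. */
	struct drm_mm_node space[4];
} bl_info;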
Example #4
/**
 * intel_gvt_sanitize_options - sanitize GVT related options
 * @dev_priv: drm i915 private data
 *
 * This function is called at the i915 options sanitize stage.
 */
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
{
	if (!i915_modparams.enable_gvt)
		return;

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("GVT-g is disabled for guest\n");
		goto bail;
	}

	if (!is_supported_device(dev_priv)) {
		DRM_INFO("Unsupported device. GVT-g is disabled\n");
		goto bail;
	}

	return;
bail:
	i915_modparams.enable_gvt = 0;
}
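is_supported_device() is not shown in the excerpt. A hypothetical sketch of such a platform allowlist (the exact set of platforms with GVT-g host support varies by kernel version):

static bool is_supported_device(struct drm_i915_private *dev_priv)
{
	/* GVT-g host mode exists only on a short allowlist of platforms;
	 * everything else hits the "Unsupported device" bail-out above. */
	return IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv);
}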
Example #5
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915_modparams.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

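	/* underrun_detected is set from the FIFO underrun handling path; an
	 * underrun observed at runtime keeps FBC disabled. */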
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}
Example #6
/**
 * intel_vgt_balloon - balloon out reserved graphics address trunks
 * @dev_priv: i915 device private data
 *
 * This function is called at the initialization stage, to balloon out the
 * graphic address space allocated to other vGPUs, by marking these spaces as
 * reserved. The ballooning-related knowledge (starting address and size of
 * the mappable/unmappable graphic memory) is described in the vgt_if structure
 * in a reserved MMIO range.
 *
 * To give an example, the drawing below depicts one typical scenario after
 * ballooning. Here the vGPU1 has 2 pieces of graphic address spaces ballooned
 * out each for the mappable and the non-mappable part. From the vGPU1 point of
 * view, the total size is the same as the physical one, with the start address
 * of its graphic space being zero. Yet some portions are ballooned out (the
 * shaded parts, which are marked as reserved by the drm allocator). From the
 * host point of view, the graphic address space is partitioned by multiple
 * vGPUs in different VMs. ::
 *
 *                         vGPU1 view         Host view
 *              0 ------> +-----------+     +-----------+
 *                ^       |###########|     |   vGPU3   |
 *                |       |###########|     +-----------+
 *                |       |###########|     |   vGPU2   |
 *                |       +-----------+     +-----------+
 *         mappable GM    | available | ==> |   vGPU1   |
 *                |       +-----------+     +-----------+
 *                |       |###########|     |           |
 *                v       |###########|     |   Host    |
 *                +=======+===========+     +===========+
 *                ^       |###########|     |   vGPU3   |
 *                |       |###########|     +-----------+
 *                |       |###########|     |   vGPU2   |
 *                |       +-----------+     +-----------+
 *       unmappable GM    | available | ==> |   vGPU1   |
 *                |       +-----------+     +-----------+
 *                |       |###########|     |           |
 *                |       |###########|     |   Host    |
 *                v       |###########|     |           |
 *  total GM size ------> +-----------+     +-----------+
 *
 * Returns:
 * zero on success, non-zero if the configuration is invalid or ballooning failed
 */
int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;

	unsigned long mappable_base, mappable_size, mappable_end;
	unsigned long unmappable_base, unmappable_size, unmappable_end;
	int ret;

	if (!intel_vgpu_active(dev_priv))
		return 0;

	mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
	mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
	unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
	unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));

	mappable_end = mappable_base + mappable_size;
	unmappable_end = unmappable_base + unmappable_size;

	DRM_INFO("VGT ballooning configuration:\n");
	DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
		 mappable_base, mappable_size / 1024);
	DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
		 unmappable_base, unmappable_size / 1024);

	if (mappable_base < ggtt->base.start ||
	    mappable_end > ggtt->mappable_end ||
	    unmappable_base < ggtt->mappable_end ||
	    unmappable_end > ggtt_end) {
		DRM_ERROR("Invalid ballooning configuration!\n");
		return -EINVAL;
	}

	/* Unmappable graphic memory ballooning */
	if (unmappable_base > ggtt->mappable_end) {
		ret = vgt_balloon_space(&ggtt->base.mm,
					&bl_info.space[2],
					ggtt->mappable_end,
					unmappable_base);

		if (ret)
			goto err;
	}

	/*
	 * No need to partition out the last physical page,
	 * because it is reserved for the guard page.
	 */
	if (unmappable_end < ggtt_end - PAGE_SIZE) {
		ret = vgt_balloon_space(&ggtt->base.mm,
					&bl_info.space[3],
					unmappable_end,
					ggtt_end - PAGE_SIZE);
		if (ret)
			goto err;
	}

	/* Mappable graphic memory ballooning */
	if (mappable_base > ggtt->base.start) {
		ret = vgt_balloon_space(&ggtt->base.mm,
					&bl_info.space[0],
					ggtt->base.start, mappable_base);

		if (ret)
			goto err;
	}

	if (mappable_end < ggtt->mappable_end) {
		ret = vgt_balloon_space(&ggtt->base.mm,
					&bl_info.space[1],
					mappable_end,
					ggtt->mappable_end);

		if (ret)
			goto err;
	}

	DRM_INFO("VGT balloon successfully\n");
	return 0;

err:
	DRM_ERROR("VGT balloon fail\n");
	intel_vgt_deballoon(dev_priv);
	return ret;
}
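vgt_balloon_space() is referenced but not shown. A sketch of what it is assumed to do: reserve the [start, end) hole as a pre-positioned drm_mm node so the guest never allocates addresses owned by other vGPUs (illustrative, not verbatim):

static int vgt_balloon_space(struct drm_mm *mm,
			     struct drm_mm_node *node,
			     unsigned long start, unsigned long end)
{
	unsigned long size = end - start;

	if (start >= end)
		return -EINVAL;

	DRM_INFO("balloon space: range [0x%lx - 0x%lx] %lu KiB\n",
		 start, end, size / 1024);

	/* Pre-position the node and mark the hole as occupied so the drm
	 * allocator never hands it out to the guest. */
	node->start = start;
	node->size = size;
	return drm_mm_reserve_node(mm, node);
}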
Example #7
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(dev_priv))
			vlv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(dev_priv,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 11:
	default:
		icl_get_stolen_reserved(dev_priv, &reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&dev_priv->dsm) >> 10,
			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);

	return 0;
}
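For context, a sketch of how stolen-memory users are assumed to carve ranges out of the allocator initialized above; the helper name is hypothetical, and the real driver also offers range-restricted variants:

static int stolen_insert_node(struct drm_i915_private *dev_priv,
			      struct drm_mm_node *node, u64 size)
{
	int ret;

	/* i915_gem_init_stolen() may have bailed out early (vGPU active,
	 * DMAR, no stolen region), leaving the allocator uninitialized. */
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}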