Example No. 1
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
Example No. 2
/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: true for a vGPU Device Model Level Reset, false for a GT reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the gvt lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates a PCI-level reset, bringing
 * the whole vGPU back to the default state it had when it was created. This
 * vGPU function is required for both functionality and security reasons. The
 * ultimate goal of a vGPU FLR is to allow a vGPU instance to be reused by
 * virtual machines: before assigning a vGPU to a virtual machine, we must
 * issue such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft-reset flows for the GPU
 * engines (Render, Blitter, Video, Video Enhancement), as defined by the GPU
 * specification. Unlike an FLR, a GT reset only resets the particular
 * resources of a vGPU named by the reset request. The guest driver can issue
 * a GT reset by programming the virtual GDRST register to reset a specific
 * virtual GPU engine or all engines.
 *
 * The parameter dmlr identifies whether we will do a DMLR or a GT reset. The
 * parameter engine_mask specifies the engines that need to be reset. If
 * ALL_ENGINES is given for engine_mask, the caller requests a full GT reset
 * in which all virtual GPU engines are reset. For an FLR, engine_mask is
 * ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * current_vgpu will be set to NULL after the scheduler has been
	 * stopped, when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);
	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		/* fence will not be reset during virtual reset */
		if (dmlr) {
			intel_vgpu_reset_gtt(vgpu);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);
		intel_vgpu_reset_display(vgpu);

		if (dmlr) {
			intel_vgpu_reset_cfg_space(vgpu);
			/* only reset the failsafe mode when dmlr reset */
			vgpu->failsafe = false;
			vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
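
To make the two reset flavors described in the kernel-doc concrete, here is a minimal caller sketch. The helper name is hypothetical; the locking rule and the arguments follow the comment above.

/* Hypothetical illustration only: exercise both reset flavors while
 * holding gvt->lock, as the kernel-doc above requires.
 */
static void example_reset_paths(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	/* Device Model Level Reset: engine_mask is ignored. */
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);

	/* GT reset of all engines: dmlr is false, mask is ALL_ENGINES. */
	intel_gvt_reset_vgpu_locked(vgpu, false, ALL_ENGINES);

	mutex_unlock(&gvt->lock);
}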
Example No. 3
static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min, total_min;

	/* This should depend on the maximum hw resource size, but keep
	 * the static config for now.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		total_min = min(min(low_gm_min, high_gm_min), fence_min);
		gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
						   total_min);

		gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
		       i, gvt->types[i].name, gvt->types[i].max_instance,
		       gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
		       gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
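
As a worked example with hypothetical numbers: if 384 MB of low GM, 1536 MB of high GM and 12 fence registers remain available, then a type with low_gm_size = 128 MB, high_gm_size = 512 MB and fence = 4 gives low_gm_min = 3, high_gm_min = 3 and fence_min = 3, so avail_instance becomes min(max_instance, 3).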
Example No. 4
/**
 * intel_gvt_init_host - Load MPT modules and detect if we're running in host
 * @gvt: intel gvt device
 *
 * This function is called at the driver loading stage. If it fails to find a
 * loadable MPT module, or detects that we are currently running in a VM,
 * GVT-g will be disabled.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_init_host(void)
{
	if (intel_gvt_host.initialized)
		return 0;

	/* Xen DOM U */
	if (xen_domain() && !xen_initial_domain())
		return -ENODEV;

	/* Try to load MPT modules for hypervisors */
	if (xen_initial_domain()) {
		/* In Xen dom0 */
		intel_gvt_host.mpt = try_then_request_module(
				symbol_get(xengt_mpt), "xengt");
		intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
	} else {
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
		/* not in Xen. Try KVMGT */
		intel_gvt_host.mpt = try_then_request_module(
				symbol_get(kvmgt_mpt), "kvmgt");
		intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
#endif
	}

	/* Failed to load an MPT module - bail out */
	if (!intel_gvt_host.mpt)
		return -EINVAL;

	gvt_dbg_core("Running with hypervisor %s in host mode\n",
			supported_hypervisors[intel_gvt_host.hypervisor_type]);

	intel_gvt_host.initialized = true;
	return 0;
}
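
For reference, try_then_request_module(x, "mod") first evaluates x (here a symbol_get() lookup); if that yields NULL it calls request_module("mod") to load the module and then evaluates x once more, so intel_gvt_host.mpt stays NULL only when no usable MPT module can be loaded.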
Example No. 5
static int gvt_service_thread(void *data)
{
	struct intel_gvt *gvt = (struct intel_gvt *)data;
	int ret;

	gvt_dbg_core("service thread start\n");

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(gvt->service_thread_wq,
				kthread_should_stop() || gvt->service_request);

		if (kthread_should_stop())
			break;

		if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
			continue;

		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
					(void *)&gvt->service_request)) {
			mutex_lock(&gvt->lock);
			intel_gvt_emulate_vblank(gvt);
			mutex_unlock(&gvt->lock);
		}

		if (test_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request) ||
			test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
					(void *)&gvt->service_request)) {
			intel_gvt_schedule(gvt);
		}
	}

	return 0;
}
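
A minimal sketch of how a service request might be posted to this thread. The helper name is hypothetical; it only uses the service_request bitmap and wait queue referenced above. The bit is set first, then the queue is woken so the wait_event_interruptible() condition is re-evaluated.

static void example_post_service_request(struct intel_gvt *gvt, int request)
{
	set_bit(request, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}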
Example No. 6
/**
 * intel_vgpu_decode_cursor_plane - decode the cursor plane
 * @vgpu: input vgpu
 * @plane: cursor plane to save the decoded info into
 *
 * This function is called to decode the cursor plane.
 *
 * Returns:
 * 0 on success, non-zero if failed.
 */
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
	struct intel_vgpu_cursor_plane_format *plane)
{
	u32 val, mode, index;
	u32 alpha_plane, alpha_force;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int pipe;

	pipe = get_active_pipe(vgpu);
	if (pipe >= I915_MAX_PIPES)
		return -ENODEV;

	val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
	mode = val & MCURSOR_MODE;
	plane->enabled = (mode != MCURSOR_MODE_DISABLE);
	if (!plane->enabled)
		return -ENODEV;

	index = cursor_mode_to_drm(mode);

	if (!cursor_pixel_formats[index].bpp) {
		gvt_vgpu_err("Non-supported cursor mode (0x%x)\n", mode);
		return -EINVAL;
	}
	plane->mode = mode;
	plane->bpp = cursor_pixel_formats[index].bpp;
	plane->drm_format = cursor_pixel_formats[index].drm_format;
	plane->width = cursor_pixel_formats[index].width;
	plane->height = cursor_pixel_formats[index].height;

	alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >>
				_CURSOR_ALPHA_PLANE_SHIFT;
	alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >>
				_CURSOR_ALPHA_FORCE_SHIFT;
	if (alpha_plane || alpha_force)
		gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
			alpha_plane, alpha_force);

	plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
		return  -EINVAL;

	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n",
				plane->base);
		return  -EINVAL;
	}

	val = vgpu_vreg_t(vgpu, CURPOS(pipe));
	plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
	plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
	plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
	plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;

	plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
	plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
	return 0;
}
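
Note that x_sign and y_sign are taken from the CURPOS sign bits; when set, they indicate that the corresponding x_pos/y_pos value should be treated as negative, i.e. the cursor extends past the left or top edge of the pipe.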
Example No. 7
/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* A vGPU type name has the form GVTg_Vx_y, where 'x' encodes the
	 * physical GPU generation and 'y' is the maximum number of vGPU
	 * instances a user can create on one physical GPU for this type.
	 *
	 * Depending on the physical SKU resources, you might see vGPU
	 * types like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different
	 * types of vGPU can be created on the same physical GPU depending
	 * on the available resources. Each vGPU type has an
	 * "avail_instance" field indicating how many vGPU instances can
	 * be created for this type.
	 *
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = 4;

	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / min_low == 0)
			break;
		gvt->types[i].low_gm_size = min_low;
		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
		gvt->types[i].fence = 4;
		gvt->types[i].max_instance = min(low_avail / min_low,
						 high_avail / gvt->types[i].high_gm_size);
		gvt->types[i].avail_instance = gvt->types[i].max_instance;

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%u",
						gvt->types[i].max_instance);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%u",
						gvt->types[i].max_instance);

		min_low <<= 1;
		gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name, gvt->types[i].max_instance,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}

	gvt->num_types = i;
	return 0;
}
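
Tracing the loop with its starting value: on the first pass min_low is 32 MB, so high_gm_size = max(32 MB << 3, 384 MB) = 384 MB; min_low then doubles every iteration (64 MB, 128 MB, 256 MB), so from the 64 MB pass onward min_low << 3 exceeds 384 MB and both sizes grow while max_instance shrinks.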
Example No. 8
/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* A vGPU type name has the form GVTg_Vx_y, where 'x' encodes the
	 * physical GPU generation (e.g. V4 for a BDW server, V5 for an
	 * SKL server).
	 *
	 * Depending on the physical SKU resources, you might see vGPU
	 * types like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different
	 * types of vGPU can be created on the same physical GPU depending
	 * on the available resources. Each vGPU type has an
	 * "avail_instance" field indicating how many vGPU instances can
	 * be created for this type.
	 *
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);

	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
						vgpu_types[i].name);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
						vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}
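
The vgpu_types[] array referenced by the loop is not part of this excerpt. A plausible shape, with purely illustrative entries based on the fields the loop reads (low_mm, high_mm, fence, edid, name), might look like this; the GVT_EDID_* values in particular are assumptions:

static const struct {
	unsigned int low_mm;		/* low GM size in bytes */
	unsigned int high_mm;		/* high GM size in bytes */
	unsigned int fence;		/* number of fence registers */
	enum intel_vgpu_edid edid;	/* virtual display resolution */
	const char *name;		/* suffix appended to GVTg_Vx_ */
} vgpu_types[] = {
	{ MB_TO_BYTES(64),  MB_TO_BYTES(384),  4, GVT_EDID_1024_768,  "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512),  4, GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
};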
Example No. 9
/**
 * intel_vgpu_init_opregion - initialize the resources used to emulate the guest opregion
 * @vgpu: a vGPU
 * @gpa: guest physical address of opregion
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
	int ret;

	gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);

	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
		gvt_dbg_core("emulate opregion from kernel\n");

		ret = init_vgpu_opregion(vgpu, gpa);
		if (ret)
			return ret;

		ret = map_vgpu_opregion(vgpu, true);
		if (ret)
			return ret;
	}

	return 0;
}
Example No. 10
/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: true for a vGPU Device Model Level Reset, false for a GT reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the gvt lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates a PCI-level reset, bringing
 * the whole vGPU back to the default state it had when it was created. This
 * vGPU function is required for both functionality and security reasons. The
 * ultimate goal of a vGPU FLR is to allow a vGPU instance to be reused by
 * virtual machines: before assigning a vGPU to a virtual machine, we must
 * issue such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft-reset flows for the GPU
 * engines (Render, Blitter, Video, Video Enhancement), as defined by the GPU
 * specification. Unlike an FLR, a GT reset only resets the particular
 * resources of a vGPU named by the reset request. The guest driver can issue
 * a GT reset by programming the virtual GDRST register to reset a specific
 * virtual GPU engine or all engines.
 *
 * The parameter dmlr identifies whether we will do a DMLR or a GT reset. The
 * parameter engine_mask specifies the engines that need to be reset. If
 * ALL_ENGINES is given for engine_mask, the caller requests a full GT reset
 * in which all virtual GPU engines are reset. For an FLR, engine_mask is
 * ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);
	vgpu->resetting = true;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * current_vgpu will be set to NULL after the scheduler has been
	 * stopped, when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);

	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_reset_gtt(vgpu, dmlr);
		intel_vgpu_reset_resource(vgpu);
		intel_vgpu_reset_mmio(vgpu);
		populate_pvinfo_page(vgpu);
		intel_vgpu_reset_display(vgpu);

		if (dmlr)
			intel_vgpu_reset_cfg_space(vgpu);
	}

	vgpu->resetting = false;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
Example No. 11
static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
	u32 tiled, int stride_mask, int bpp)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
	u32 stride = stride_reg;

	if (IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv)
		|| IS_BROXTON(dev_priv)) {
		switch (tiled) {
		case PLANE_CTL_TILED_LINEAR:
			stride = stride_reg * 64;
			break;
		case PLANE_CTL_TILED_X:
			stride = stride_reg * 512;
			break;
		case PLANE_CTL_TILED_Y:
			stride = stride_reg * 128;
			break;
		case PLANE_CTL_TILED_YF:
			if (bpp == 8)
				stride = stride_reg * 64;
			else if (bpp == 16 || bpp == 32 || bpp == 64)
				stride = stride_reg * 128;
			else
				gvt_dbg_core("skl: unsupported bpp:%d\n", bpp);
			break;
		default:
			gvt_dbg_core("skl: unsupported tile format:%x\n",
				tiled);
		}
	}

	return stride;
}
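
For example, with the multipliers above an X-tiled plane whose stride field reads 8 decodes to 8 * 512 = 4096 bytes per row, while the same register value on a linear plane decodes to 8 * 64 = 512 bytes.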
Example No. 12
/**
 * intel_vgpu_clean_opregion - clean up the resources used to emulate the guest opregion
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);

	if (!vgpu_opregion(vgpu)->va)
		return;

	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
		map_vgpu_opregion(vgpu, false);
		free_pages((unsigned long)vgpu_opregion(vgpu)->va,
				get_order(INTEL_GVT_OPREGION_SIZE));

		vgpu_opregion(vgpu)->va = NULL;
	}
}
Example No. 13
/**
 * intel_gvt_init_opregion - initialize host opregion related resources
 * @gvt: a GVT device
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_init_opregion(struct intel_gvt *gvt)
{
	gvt_dbg_core("init host opregion\n");

	pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
			&gvt->opregion.opregion_pa);

	gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
					     INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
	if (!gvt->opregion.opregion_va) {
		gvt_err("fail to map host opregion\n");
		return -EFAULT;
	}
	return 0;
}
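
The value read from the INTEL_GVT_PCI_OPREGION config register is the physical address of the OpRegion, which lives in ordinary system RAM; that is why it is mapped with memremap(..., MEMREMAP_WB) rather than an MMIO ioremap().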
Example No. 14
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_display(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_execlist(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_gvt_context(vgpu);
	if (ret)
		goto out_clean_execlist;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_shadow_ctx;

	vgpu->active = true;
	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_shadow_ctx:
	intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
	intel_vgpu_clean_execlist(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}
Example No. 15
/**
 * intel_gvt_init_device - initialize a GVT device
 * @dev_priv: drm i915 private data
 *
 * This function is called at the initialization stage, to initialize
 * necessary GVT components.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
	struct intel_gvt *gvt;
	struct intel_vgpu *vgpu;
	int ret;

	/*
	 * Cannot initialize the GVT device before intel_gvt_host has been
	 * initialized.
	 */
	if (WARN_ON(!intel_gvt_host.initialized))
		return -EINVAL;

	if (WARN_ON(dev_priv->gvt))
		return -EEXIST;

	gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
	if (!gvt)
		return -ENOMEM;

	gvt_dbg_core("init gvt device\n");

	idr_init(&gvt->vgpu_idr);
	spin_lock_init(&gvt->scheduler.mmio_context_lock);
	mutex_init(&gvt->lock);
	gvt->dev_priv = dev_priv;

	init_device_info(gvt);

	ret = intel_gvt_setup_mmio_info(gvt);
	if (ret)
		goto out_clean_idr;

	ret = intel_gvt_load_firmware(gvt);
	if (ret)
		goto out_clean_mmio_info;

	ret = intel_gvt_init_irq(gvt);
	if (ret)
		goto out_free_firmware;

	ret = intel_gvt_init_gtt(gvt);
	if (ret)
		goto out_clean_irq;

	ret = intel_gvt_init_opregion(gvt);
	if (ret)
		goto out_clean_gtt;

	ret = intel_gvt_init_workload_scheduler(gvt);
	if (ret)
		goto out_clean_opregion;

	ret = intel_gvt_init_sched_policy(gvt);
	if (ret)
		goto out_clean_workload_scheduler;

	ret = intel_gvt_init_cmd_parser(gvt);
	if (ret)
		goto out_clean_sched_policy;

	ret = init_service_thread(gvt);
	if (ret)
		goto out_clean_cmd_parser;

	ret = intel_gvt_init_vgpu_types(gvt);
	if (ret)
		goto out_clean_thread;

	ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
				&intel_gvt_ops);
	if (ret) {
		gvt_err("failed to register gvt-g host device: %d\n", ret);
		goto out_clean_types;
	}

	vgpu = intel_gvt_create_idle_vgpu(gvt);
	if (IS_ERR(vgpu)) {
		ret = PTR_ERR(vgpu);
		gvt_err("failed to create idle vgpu\n");
		goto out_clean_types;
	}
	gvt->idle_vgpu = vgpu;

	gvt_dbg_core("gvt device initialization is done\n");
	dev_priv->gvt = gvt;
	return 0;

out_clean_types:
	intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
	clean_service_thread(gvt);
out_clean_cmd_parser:
	intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
	intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
	intel_gvt_clean_workload_scheduler(gvt);
out_clean_opregion:
	intel_gvt_clean_opregion(gvt);
out_clean_gtt:
	intel_gvt_clean_gtt(gvt);
out_clean_irq:
	intel_gvt_clean_irq(gvt);
out_free_firmware:
	intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
	intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
	idr_destroy(&gvt->vgpu_idr);
	kfree(gvt);
	return ret;
}
Example No. 16
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
		GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	idr_init(&vgpu->object_idr);
	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	ret = intel_gvt_debugfs_add_vgpu(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}
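
Finally, a hypothetical caller sketch: build the creation parameters from a vGPU type initialized by intel_gvt_init_vgpu_types() and hand them to __intel_gvt_create_vgpu(). The helper name and the weight value are illustrative; the parameter fields are the ones the function above actually reads.

static struct intel_vgpu *example_create_vgpu(struct intel_gvt *gvt,
					      struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param = {
		.handle = 0,				/* hypervisor-specific handle */
		.primary = 1,
		.low_gm_sz = type->low_gm_size >> 20,	/* bytes -> MB */
		.high_gm_sz = type->high_gm_size >> 20,	/* bytes -> MB */
		.fence_sz = type->fence,
		.resolution = type->resolution,
		.weight = 16,				/* illustrative scheduling weight */
	};

	return __intel_gvt_create_vgpu(gvt, &param);
}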