/** * i915_check_vgpu - detect virtual GPU * @dev_priv: i915 device private * * This function is called at the initialization stage, to detect whether * running on a vGPU. */ void i915_check_vgpu(struct drm_i915_private *dev_priv) { uint64_t magic; uint32_t version; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); if (!IS_HASWELL(dev_priv)) return; magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); if (magic != VGT_MAGIC) return; version = INTEL_VGT_IF_VERSION_ENCODE( __raw_i915_read16(dev_priv, vgtif_reg(version_major)), __raw_i915_read16(dev_priv, vgtif_reg(version_minor))); if (version != INTEL_VGT_IF_VERSION) { DRM_INFO("VGT interface version mismatch!\n"); return; } dev_priv->vgpu.active = true; DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); }
void populate_pvinfo_page(struct intel_vgpu *vgpu) { /* setup the ballooning information */ vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC; vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1; vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0; vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0; vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id; vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) = vgpu_aperture_gmadr_base(vgpu); vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) = vgpu_aperture_sz(vgpu); vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) = vgpu_hidden_gmadr_base(vgpu); vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) = vgpu_hidden_sz(vgpu); vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu); gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id); gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n", vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu)); gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n", vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu)); gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu)); WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); }
/**
 * intel_vgpu_decode_cursor_plane - Decode cursor plane
 * @vgpu: input vgpu
 * @plane: cursor plane to save decoded info
 * This function is called for decoding plane
 *
 * Returns:
 * 0 on success, non-zero if failed.
 */
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
	struct intel_vgpu_cursor_plane_format *plane)
{
	u32 val, mode, index;
	u32 alpha_plane, alpha_force;
	/* NOTE(review): dev_priv is assigned but never used in this body — confirm whether it can be dropped. */
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int pipe;

	pipe = get_active_pipe(vgpu);
	if (pipe >= I915_MAX_PIPES)
		return -ENODEV;

	/* Cursor control register: the mode field encodes both enable state and pixel format. */
	val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
	mode = val & MCURSOR_MODE;
	plane->enabled = (mode != MCURSOR_MODE_DISABLE);
	if (!plane->enabled)
		return -ENODEV;

	/* Map hardware cursor mode to an entry in the cursor_pixel_formats table; bpp == 0 marks an unsupported mode. */
	index = cursor_mode_to_drm(mode);
	if (!cursor_pixel_formats[index].bpp) {
		gvt_vgpu_err("Non-supported cursor mode (0x%x)\n", mode);
		return -EINVAL;
	}
	plane->mode = mode;
	plane->bpp = cursor_pixel_formats[index].bpp;
	plane->drm_format = cursor_pixel_formats[index].drm_format;
	plane->width = cursor_pixel_formats[index].width;
	plane->height = cursor_pixel_formats[index].height;

	/* Alpha settings are only logged, not decoded into @plane. */
	alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >>
				_CURSOR_ALPHA_PLANE_SHIFT;
	alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >>
				_CURSOR_ALPHA_FORCE_SHIFT;
	if (alpha_plane || alpha_force)
		gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
			alpha_plane, alpha_force);

	/* Base is a guest graphics memory address; validate it, then translate to a guest physical address. */
	plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
		return -EINVAL;

	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n",
				plane->base);
		return -EINVAL;
	}

	/* Position register: magnitude and sign bits are stored separately. */
	val = vgpu_vreg_t(vgpu, CURPOS(pipe));
	plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
	plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
	plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
	plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;

	/* Hotspot comes from the PV info page, not from a hardware register. */
	plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
	plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));

	return 0;
}
/** * intel_vgt_balloon - balloon out reserved graphics address trunks * @dev_priv: i915 device private data * * This function is called at the initialization stage, to balloon out the * graphic address space allocated to other vGPUs, by marking these spaces as * reserved. The ballooning related knowledge(starting address and size of * the mappable/unmappable graphic memory) is described in the vgt_if structure * in a reserved mmio range. * * To give an example, the drawing below depicts one typical scenario after * ballooning. Here the vGPU1 has 2 pieces of graphic address spaces ballooned * out each for the mappable and the non-mappable part. From the vGPU1 point of * view, the total size is the same as the physical one, with the start address * of its graphic space being zero. Yet there are some portions ballooned out( * the shadow part, which are marked as reserved by drm allocator). From the * host point of view, the graphic address space is partitioned by multiple * vGPUs in different VMs. 
:: * * vGPU1 view Host view * 0 ------> +-----------+ +-----------+ * ^ |###########| | vGPU3 | * | |###########| +-----------+ * | |###########| | vGPU2 | * | +-----------+ +-----------+ * mappable GM | available | ==> | vGPU1 | * | +-----------+ +-----------+ * | |###########| | | * v |###########| | Host | * +=======+===========+ +===========+ * ^ |###########| | vGPU3 | * | |###########| +-----------+ * | |###########| | vGPU2 | * | +-----------+ +-----------+ * unmappable GM | available | ==> | vGPU1 | * | +-----------+ +-----------+ * | |###########| | | * | |###########| | Host | * v |###########| | | * total GM size ------> +-----------+ +-----------+ * * Returns: * zero on success, non-zero if configuration invalid or ballooning failed */ int intel_vgt_balloon(struct drm_i915_private *dev_priv) { struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long ggtt_end = ggtt->base.start + ggtt->base.total; unsigned long mappable_base, mappable_size, mappable_end; unsigned long unmappable_base, unmappable_size, unmappable_end; int ret; if (!intel_vgpu_active(dev_priv)) return 0; mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size)); mappable_end = mappable_base + mappable_size; unmappable_end = unmappable_base + unmappable_size; DRM_INFO("VGT ballooning configuration:\n"); DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n", mappable_base, mappable_size / 1024); DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n", unmappable_base, unmappable_size / 1024); if (mappable_base < ggtt->base.start || mappable_end > ggtt->mappable_end || unmappable_base < ggtt->mappable_end || unmappable_end > ggtt_end) { DRM_ERROR("Invalid ballooning configuration!\n"); return -EINVAL; } /* Unmappable graphic memory ballooning */ if 
(unmappable_base > ggtt->mappable_end) { ret = vgt_balloon_space(&ggtt->base.mm, &bl_info.space[2], ggtt->mappable_end, unmappable_base); if (ret) goto err; } /* * No need to partition out the last physical page, * because it is reserved to the guard page. */ if (unmappable_end < ggtt_end - PAGE_SIZE) { ret = vgt_balloon_space(&ggtt->base.mm, &bl_info.space[3], unmappable_end, ggtt_end - PAGE_SIZE); if (ret) goto err; } /* Mappable graphic memory ballooning */ if (mappable_base > ggtt->base.start) { ret = vgt_balloon_space(&ggtt->base.mm, &bl_info.space[0], ggtt->base.start, mappable_base); if (ret) goto err; } if (mappable_end < ggtt->mappable_end) { ret = vgt_balloon_space(&ggtt->base.mm, &bl_info.space[1], mappable_end, ggtt->mappable_end); if (ret) goto err; } DRM_INFO("VGT balloon successfully\n"); return 0; err: DRM_ERROR("VGT balloon fail\n"); intel_vgt_deballoon(dev_priv); return ret; }