/**
 * intel_vgpu_decode_cursor_plane - Decode cursor plane
 * @vgpu: input vgpu
 * @plane: cursor plane to save decoded info
 * This function decodes the cursor plane configuration from the vGPU's
 * virtual registers.
 *
 * Returns:
 * 0 on success, non-zero if failed.
 */
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
	struct intel_vgpu_cursor_plane_format *plane)
{
	u32 val, mode, index;
	u32 alpha_plane, alpha_force;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int pipe;

	pipe = get_active_pipe(vgpu);
	if (pipe >= I915_MAX_PIPES)
		return -ENODEV;

	val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
	mode = val & MCURSOR_MODE;
	plane->enabled = (mode != MCURSOR_MODE_DISABLE);
	if (!plane->enabled)
		return -ENODEV;

	index = cursor_mode_to_drm(mode);

	if (!cursor_pixel_formats[index].bpp) {
		gvt_vgpu_err("Non-supported cursor mode (0x%x)\n", mode);
		return -EINVAL;
	}
	plane->mode = mode;
	plane->bpp = cursor_pixel_formats[index].bpp;
	plane->drm_format = cursor_pixel_formats[index].drm_format;
	plane->width = cursor_pixel_formats[index].width;
	plane->height = cursor_pixel_formats[index].height;

	alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >>
				_CURSOR_ALPHA_PLANE_SHIFT;
	alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >>
				_CURSOR_ALPHA_FORCE_SHIFT;
	if (alpha_plane || alpha_force)
		gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
			alpha_plane, alpha_force);

	plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
		return -EINVAL;

	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n",
				plane->base);
		return -EINVAL;
	}

	val = vgpu_vreg_t(vgpu, CURPOS(pipe));
	plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
	plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
	plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
	plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;

	plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
	plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));

	return 0;
}
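/*
 * Illustrative sketch (not part of the driver): CURPOS stores the cursor
 * position as a magnitude plus a separate sign bit per axis, which is why
 * the decode above fills x_pos/x_sign and y_pos/y_sign independently. A
 * hypothetical helper to fold them into one signed coordinate:
 */
static inline int cursor_signed_pos(u32 pos, u32 sign)
{
	/* a set sign bit means the cursor origin is left of/above the pipe */
	return sign ? -(int)pos : (int)pos;
}
/* e.g.: int x = cursor_signed_pos(plane->x_pos, plane->x_sign); */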
static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
	u32 tiled, int stride_mask, int bpp)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
	u32 stride = stride_reg;

	if (IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv)
		|| IS_BROXTON(dev_priv)) {
		switch (tiled) {
		case PLANE_CTL_TILED_LINEAR:
			stride = stride_reg * 64;
			break;
		case PLANE_CTL_TILED_X:
			stride = stride_reg * 512;
			break;
		case PLANE_CTL_TILED_Y:
			stride = stride_reg * 128;
			break;
		case PLANE_CTL_TILED_YF:
			if (bpp == 8)
				stride = stride_reg * 64;
			else if (bpp == 16 || bpp == 32 || bpp == 64)
				stride = stride_reg * 128;
			else
				gvt_dbg_core("skl: unsupported bpp:%d\n", bpp);
			break;
		default:
			gvt_dbg_core("skl: unsupported tile format:%x\n",
				tiled);
		}
	}

	return stride;
}
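/*
 * Worked example (illustrative only): on SKL/KBL/BXT the masked stride
 * register counts tile rows, not bytes, so for a register value of 32:
 *
 *   linear:  32 * 64  = 2048  bytes per scanline
 *   X-tiled: 32 * 512 = 16384 bytes per scanline
 *   Y-tiled: 32 * 128 = 4096  bytes per scanline
 *
 * On earlier platforms the masked value is returned unchanged because it
 * is already a byte stride.
 */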
/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 * @dmlr: whether this is device model level reset
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mmio = gvt->firmware.mmio;

	if (dmlr) {
		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);

		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

		/* set bits 0:2 (Core C-State) to C0 */
		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;

		if (IS_BROXTON(vgpu->gvt->dev_priv)) {
			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
				    ~(BIT(0) | BIT(1));
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
				    ~PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
				    ~PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
				    ~BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
				    ~BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
		}
	} else {
#define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
		/* Only reset the engine-related ranges: copy just the first
		 * 0x44200 bytes, so interrupt, DE and other display-related
		 * MMIO state is left untouched.
		 */
		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
	}
}
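/*
 * For reference, the vgpu_vreg_t() accessor used throughout resolves to
 * the vGPU's software-backed register file, roughly (paraphrasing its
 * definition in gvt.h):
 *
 *   *(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg))
 *
 * so everything this reset touches is emulated MMIO state; no physical
 * register is written.
 */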
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
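/*
 * A minimal guest-side sketch for context (assumed environment, modeled
 * on the guest check in i915; accessor names vary by kernel version):
 * the guest discovers GVT-g by reading back the magic written above from
 * the PVINFO page in its MMIO BAR.
 */
static bool guest_detects_gvt(void __iomem *mmio)
{
	/* vgtif_reg(magic) is the first quadword of the PVINFO page */
	return readq(mmio + VGT_PVINFO_PAGE) == VGT_MAGIC;
}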
/**
 * intel_vgpu_decode_sprite_plane - Decode sprite plane
 * @vgpu: input vgpu
 * @plane: sprite plane to save decoded info
 * This function decodes the sprite plane configuration from the vGPU's
 * virtual registers.
 *
 * Returns:
 * 0 on success, non-zero if failed.
 */
int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
	struct intel_vgpu_sprite_plane_format *plane)
{
	u32 val, fmt;
	u32 color_order, yuv_order;
	int drm_format;
	int pipe;

	pipe = get_active_pipe(vgpu);
	if (pipe >= I915_MAX_PIPES)
		return -ENODEV;

	val = vgpu_vreg_t(vgpu, SPRCTL(pipe));
	plane->enabled = !!(val & SPRITE_ENABLE);
	if (!plane->enabled)
		return -ENODEV;

	plane->tiled = !!(val & SPRITE_TILED);
	color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
	yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >>
				_SPRITE_YUV_ORDER_SHIFT;

	fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT;
	if (!sprite_pixel_formats[fmt].bpp) {
		gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
		return -EINVAL;
	}
	plane->hw_format = fmt;

	plane->bpp = sprite_pixel_formats[fmt].bpp;
	drm_format = sprite_pixel_formats[fmt].drm_format;

	/* Order of RGB values in an RGBxxx buffer may be ordered RGB or
	 * BGR depending on the state of the color_order field
	 */
	if (!color_order) {
		if (drm_format == DRM_FORMAT_XRGB2101010)
			drm_format = DRM_FORMAT_XBGR2101010;
		else if (drm_format == DRM_FORMAT_XRGB8888)
			drm_format = DRM_FORMAT_XBGR8888;
	}

	if (drm_format == DRM_FORMAT_YUV422) {
		switch (yuv_order) {
		case 0:
			drm_format = DRM_FORMAT_YUYV;
			break;
		case 1:
			drm_format = DRM_FORMAT_UYVY;
			break;
		case 2:
			drm_format = DRM_FORMAT_YVYU;
			break;
		case 3:
			drm_format = DRM_FORMAT_VYUY;
			break;
		default:
			/* yuv_order has only 2 bits */
			break;
		}
	}

	plane->drm_format = drm_format;

	plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
		return -EINVAL;

	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n",
				plane->base);
		return -EINVAL;
	}

	plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) &
				_SPRITE_STRIDE_MASK;

	val = vgpu_vreg_t(vgpu, SPRSIZE(pipe));
	plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
		_SPRITE_SIZE_HEIGHT_SHIFT;
	plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
		_SPRITE_SIZE_WIDTH_SHIFT;
	plane->height += 1;	/* raw height is one minus the real value */
	plane->width += 1;	/* raw width is one minus the real value */

	val = vgpu_vreg_t(vgpu, SPRPOS(pipe));
	plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
	plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;

	val = vgpu_vreg_t(vgpu, SPROFFSET(pipe));
	plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
			   _SPRITE_OFFSET_START_X_SHIFT;
	plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
			   _SPRITE_OFFSET_START_Y_SHIFT;

	return 0;
}
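/*
 * Design note (illustrative alternative, not in the driver): the 2-bit
 * yuv_order field enumerates the four packed-YUV422 layouts, so the
 * switch above could equally be written as a lookup table:
 */
static const u32 sprite_yuv_formats[] = {
	DRM_FORMAT_YUYV,	/* yuv_order 0 */
	DRM_FORMAT_UYVY,	/* yuv_order 1 */
	DRM_FORMAT_YVYU,	/* yuv_order 2 */
	DRM_FORMAT_VYUY,	/* yuv_order 3 */
};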
/**
 * intel_vgpu_decode_primary_plane - Decode primary plane
 * @vgpu: input vgpu
 * @plane: primary plane to save decoded info
 * This function decodes the primary plane configuration from the vGPU's
 * virtual registers.
 *
 * Returns:
 * 0 on success, non-zero if failed.
 */
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
	struct intel_vgpu_primary_plane_format *plane)
{
	u32 val, fmt;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int pipe;

	pipe = get_active_pipe(vgpu);
	if (pipe >= I915_MAX_PIPES)
		return -ENODEV;

	val = vgpu_vreg_t(vgpu, DSPCNTR(pipe));
	plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
	if (!plane->enabled)
		return -ENODEV;

	if (IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv)
		|| IS_BROXTON(dev_priv)) {
		plane->tiled = val & PLANE_CTL_TILED_MASK;
		fmt = skl_format_to_drm(
			val & PLANE_CTL_FORMAT_MASK,
			val & PLANE_CTL_ORDER_RGBX,
			val & PLANE_CTL_ALPHA_MASK,
			val & PLANE_CTL_YUV422_ORDER_MASK);

		if (fmt >= ARRAY_SIZE(skl_pixel_formats)) {
			gvt_vgpu_err("Out-of-bounds pixel format index\n");
			return -EINVAL;
		}

		plane->bpp = skl_pixel_formats[fmt].bpp;
		plane->drm_format = skl_pixel_formats[fmt].drm_format;
	} else {
		plane->tiled = val & DISPPLANE_TILED;
		fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
		plane->bpp = bdw_pixel_formats[fmt].bpp;
		plane->drm_format = bdw_pixel_formats[fmt].drm_format;
	}

	if (!plane->bpp) {
		gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
		return -EINVAL;
	}

	plane->hw_format = fmt;

	plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
		return -EINVAL;

	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n",
				plane->base);
		return -EINVAL;
	}

	plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
		(IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv)
		|| IS_BROXTON(dev_priv)) ?
			(_PRI_PLANE_STRIDE_MASK >> 6) :
				_PRI_PLANE_STRIDE_MASK, plane->bpp);

	plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
		_PIPE_H_SRCSZ_SHIFT;
	plane->width += 1;	/* raw width is one minus the real value */
	plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) &
			_PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
	plane->height += 1;	/* raw height is one minus the real value */

	val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe));
	plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
		_PRI_PLANE_X_OFF_SHIFT;
	plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
		_PRI_PLANE_Y_OFF_SHIFT;

	return 0;
}
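/*
 * Usage sketch (hypothetical caller, not from the driver): decode the
 * active vGPU primary plane and log its geometry. Only the fields filled
 * in by the decoder above are relied upon.
 */
static void dump_primary_plane(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_primary_plane_format p;

	if (intel_vgpu_decode_primary_plane(vgpu, &p))
		return;	/* plane disabled or decode failed */

	gvt_dbg_core("primary: %ux%u bpp=%d stride=%u drm_format=0x%x\n",
		     p.width, p.height, p.bpp, p.stride, p.drm_format);
}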