Example #1
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

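	/* Advertise how many fence registers are assigned to this vGPU. */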
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
Example #2
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

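	/* Both the eDP pipe and its transcoder DDI function must be enabled. */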
	if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
		return 0;

	if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
		return 0;
	return 1;
}
Example #3
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
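	/* Clear the hot-plug status bits for port B/C/D, then set them
	 * again for each port that has a virtual monitor attached.
	 */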
	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);

	if (IS_SKYLAKE(dev_priv))
		vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
				SDE_PORTE_HOTPLUG_SPT);

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;

	if (IS_SKYLAKE(dev_priv) &&
			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
		if (IS_BROADWELL(dev_priv))
			vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
				GEN8_PORT_DP_A_HOTPLUG;
		else
			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
	}
}
Example #4
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
				    int type, unsigned int resolution)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);

	if (WARN_ON(resolution >= GVT_EDID_NUM))
		return -EINVAL;

	port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
	if (!port->edid)
		return -ENOMEM;

	port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
	if (!port->dpcd) {
		kfree(port->edid);
		return -ENOMEM;
	}

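	/* Fill the virtual EDID block for the requested resolution and a
	 * fixed DPCD block, then mark both as valid.
	 */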
	memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
			EDID_SIZE);
	port->edid->data_valid = true;

	memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
	port->dpcd->data_valid = true;
	port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
	port->type = type;

	emulate_monitor_status_change(vgpu);
	vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
	return 0;
}
Example #5
static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
		return -EINVAL;

	if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
		return 1;

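	/* A pipe routed through the eDP transcoder also counts as enabled. */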
	if (edp_pipe_is_enabled(vgpu) &&
			get_edp_pipe(vgpu) == pipe)
		return 1;
	return 0;
}
Example #6
static int get_edp_pipe(struct intel_vgpu *vgpu)
{
	u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
	int pipe = -1;

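	/* The eDP transcoder's DDI function control selects its source pipe. */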
	switch (data & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
	case TRANS_DDI_EDP_INPUT_A_ONOFF:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	}
	return pipe;
}
Example #7
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);

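	/* On SKL/KBL, also report the power well fuses and LCPLLs as up so
	 * the guest display code sees fully powered hardware.
	 */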
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
				SDE_PORTE_HOTPLUG_SPT);
		vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
				SKL_FUSE_DOWNLOAD_STATUS |
				SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
				SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
				SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
		vgpu_vreg(vgpu, LCPLL1_CTL) |=
				LCPLL_PLL_ENABLE |
				LCPLL_PLL_LOCK;
		vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;

	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			TRANS_DDI_PORT_MASK);
		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			(PORT_B << TRANS_DDI_PORT_SHIFT) |
			TRANS_DDI_FUNC_ENABLE);
		if (IS_BROADWELL(dev_priv)) {
			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
				~PORT_CLK_SEL_MASK;
			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
				PORT_CLK_SEL_LCPLL_810;
		}
		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			TRANS_DDI_PORT_MASK);
		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			(PORT_C << TRANS_DDI_PORT_SHIFT) |
			TRANS_DDI_FUNC_ENABLE);
		if (IS_BROADWELL(dev_priv)) {
			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
				~PORT_CLK_SEL_MASK;
			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
				PORT_CLK_SEL_LCPLL_810;
		}
		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
			TRANS_DDI_PORT_MASK);
		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			(PORT_D << TRANS_DDI_PORT_SHIFT) |
			TRANS_DDI_FUNC_ENABLE);
		if (IS_BROADWELL(dev_priv)) {
			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
				~PORT_CLK_SEL_MASK;
			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
				PORT_CLK_SEL_LCPLL_810;
		}
		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
	}

	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
		if (IS_BROADWELL(dev_priv))
			vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
				GEN8_PORT_DP_A_HOTPLUG;
		else
			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;

		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
	}

	/* Clear the host CRT status so the guest cannot detect the host CRT. */
	if (IS_BROADWELL(dev_priv))
		vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
}
Example #8
/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                                  void *p_data, unsigned int bytes)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_mmio_info *mmio;
    unsigned int offset = 0;
    u32 old_vreg = 0, old_sreg = 0;
    int ret = -EINVAL;

    mutex_lock(&gvt->lock);

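    /* Writes hitting a write-protected guest page (e.g. a shadowed PPGTT
     * page table) are dispatched to that page's handler first.
     */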
    if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
        struct intel_vgpu_guest_page *gp;

        gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
        if (gp) {
            ret = gp->handler(gp, pa, p_data, bytes);
            if (ret) {
                gvt_err("vgpu%d: guest page write error %d, "
                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                        vgpu->id, ret,
                        gp->gfn, pa, *(u32 *)p_data, bytes);
            }
            mutex_unlock(&gvt->lock);
            return ret;
        }
    }

    offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

    if (WARN_ON(bytes > 8))
        goto err;

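    /* GGTT range: only aligned 4- or 8-byte accesses are forwarded to the
     * GTT emulation path.
     */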
    if (reg_is_gtt(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
            goto err;
        if (WARN_ON(bytes != 4 && bytes != 8))
            goto err;
        if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
            goto err;

        ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
                                                p_data, bytes);
        if (ret)
            goto err;
        mutex_unlock(&gvt->lock);
        return ret;
    }

    if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
        ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
    }

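    /* Look up the handler info for this offset; warn about writes to
     * untracked MMIO registers.
     */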
    mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
    if (!mmio && !vgpu->mmio.disable_warn_untrack)
        gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
                vgpu->id, offset, bytes, *(u32 *)p_data);

    if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, bytes)))
            goto err;
    }

    if (mmio) {
        u64 ro_mask = mmio->ro_mask;

        if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
            if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                goto err;
            if (WARN_ON(mmio->offset != offset))
                goto err;
        }

        if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
            old_vreg = vgpu_vreg(vgpu, offset);
            old_sreg = vgpu_sreg(vgpu, offset);
        }

        if (!ro_mask) {
            ret = mmio->write(vgpu, offset, p_data, bytes);
        } else {
            /* Protect RO bits like HW */
            u64 data = 0;

            /* all register bits are RO. */
            if (ro_mask == ~(u64)0) {
                gvt_err("vgpu%d: try to write RO reg %x\n",
                        vgpu->id, offset);
                ret = 0;
                goto out;
            }
            /* keep the RO bits in the virtual register */
            memcpy(&data, p_data, bytes);
            data &= ~mmio->ro_mask;
            data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
            ret = mmio->write(vgpu, offset, &data, bytes);
        }

        /* higher 16bits of mode ctl regs are mask bits for change */
        if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
            u32 mask = vgpu_vreg(vgpu, offset) >> 16;

            vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
                                      | (vgpu_vreg(vgpu, offset) & mask);
            vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
                                      | (vgpu_sreg(vgpu, offset) & mask);
        }
    } else