Example #1
/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code on failure
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

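	/* A vGPU in failsafe mode (entered after an unrecoverable emulation
	 * error) handles the access through a minimal path and always
	 * reports success. */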
	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
		return 0;
	}

	mutex_lock(&vgpu->vgpu_lock);

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

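	/* MMIO accesses are at most 8 bytes (one qword) wide. */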
	if (WARN_ON(bytes > 8))
		goto err;

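	/* Writes into the GGTT range must be naturally aligned dword or
	 * qword accesses that stay entirely within the range. */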
	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		goto out;
	}

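	/* An address outside the MMIO range is unexpected here; forward it
	 * to guest physical memory via the hypervisor as a fallback. */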
	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		goto out;
	}

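	/* Dispatch to the per-register write handler, then mark the
	 * register as accessed for later bookkeeping. */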
	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	ret = 0;
	goto out;
err:
	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
		     bytes);
out:
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}
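Both listings implement the same trap-and-emulate entry point at different driver revisions: Example #1 is the newer variant with a per-vGPU lock and a failsafe path, while Example #2 below is an older variant that still takes the global gvt->lock and handles write-protected guest pages and tracked-register lookup inline. For orientation, a hypervisor trap handler would call the function roughly as follows. This is a minimal sketch: the handler name and the way the trapped address and value reach it are assumptions for illustration, not part of the driver.

/* Hypothetical caller, for illustration only: forward a trapped 4-byte
 * guest MMIO write to the emulation entry point. */
static int handle_trapped_mmio_write(struct intel_vgpu *vgpu, u64 gpa,
				     u32 val)
{
	int ret;

	/* Returns 0 on success or a negative errno; the caller decides
	 * how to surface a failure to the hypervisor. */
	ret = intel_vgpu_emulate_mmio_write(vgpu, gpa, &val, sizeof(val));
	if (ret)
		pr_err("MMIO write emulation failed at 0x%llx: %d\n",
		       gpa, ret);
	return ret;
}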
Example #2
/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code on failure
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                                  void *p_data, unsigned int bytes)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_mmio_info *mmio;
    unsigned int offset = 0;
    u32 old_vreg = 0, old_sreg = 0;
    int ret = -EINVAL;

    mutex_lock(&gvt->lock);

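    /* If any guest pages are write-protected (e.g. shadowed page table
     * pages), a write that hits one is routed to that page's handler. */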
    if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
        struct intel_vgpu_guest_page *gp;

        gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
        if (gp) {
            ret = gp->handler(gp, pa, p_data, bytes);
            if (ret) {
                gvt_err("vgpu%d: guest page write error %d, "
                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                        vgpu->id, ret,
                        gp->gfn, pa, *(u32 *)p_data, bytes);
            }
            mutex_unlock(&gvt->lock);
            return ret;
        }
    }

    offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

    if (WARN_ON(bytes > 8))
        goto err;

    if (reg_is_gtt(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
            goto err;
        if (WARN_ON(bytes != 4 && bytes != 8))
            goto err;
        if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
            goto err;

        ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
                                                p_data, bytes);
        if (ret)
            goto err;
        mutex_unlock(&gvt->lock);
        return ret;
    }

    if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
        ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
    }

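    /* Look up the tracking info for this register; writes to untracked
     * MMIO are reported unless such warnings are disabled. */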
    mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
    if (!mmio && !vgpu->mmio.disable_warn_untrack)
        gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
                vgpu->id, offset, bytes, *(u32 *)p_data);

    if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, bytes)))
            goto err;
    }

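    /* For a tracked register: validate the access bounds, then apply the
     * write while preserving any read-only bits, as hardware would. */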
    if (mmio) {
        u64 ro_mask = mmio->ro_mask;

        if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
            if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                goto err;
            if (WARN_ON(mmio->offset != offset))
                goto err;
        }

        if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
            old_vreg = vgpu_vreg(vgpu, offset);
            old_sreg = vgpu_sreg(vgpu, offset);
        }

        if (!ro_mask) {
            ret = mmio->write(vgpu, offset, p_data, bytes);
        } else {
            /* Protect RO bits like HW */
            u64 data = 0;

            /* all register bits are RO. */
            if (ro_mask == ~(u64)0) {
                gvt_err("vgpu%d: try to write RO reg %x\n",
                        vgpu->id, offset);
                ret = 0;
                goto out;
            }
            /* keep the RO bits in the virtual register */
            memcpy(&data, p_data, bytes);
            data &= ~mmio->ro_mask;
            data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
            ret = mmio->write(vgpu, offset, &data, bytes);
        }

        /* higher 16bits of mode ctl regs are mask bits for change */
        if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
            u32 mask = vgpu_vreg(vgpu, offset) >> 16;

            vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
                                      | (vgpu_vreg(vgpu, offset) & mask);
            vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
                                      | (vgpu_sreg(vgpu, offset) & mask);
        }
    } else