Example #1
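/*
 * failsafe_emulate_mmio_rw - degraded MMIO/GGTT emulation for a vGPU that
 * has entered failsafe mode: per-register handlers are skipped, MMIO
 * accesses fall back to the default read/write helpers, and GGTT accesses
 * become raw copies against the virtual GGTT.
 */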
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
		void *p_data, unsigned int bytes, bool read)
{
	struct intel_gvt *gvt = NULL;
	void *pt = NULL;
	unsigned int offset = 0;

	if (!vgpu || !p_data)
		return;

	gvt = vgpu->gvt;
	mutex_lock(&vgpu->vgpu_lock);
	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
	if (reg_is_mmio(gvt, offset)) {
		if (read)
			intel_vgpu_default_mmio_read(vgpu, offset, p_data,
					bytes);
		else
			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
					bytes);
	} else if (reg_is_gtt(gvt, offset)) {
		offset -= gvt->device_info.gtt_start_offset;
		pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
		if (read)
			memcpy(p_data, pt, bytes);
		else
			memcpy(pt, p_data, bytes);

	}
	mutex_unlock(&vgpu->vgpu_lock);
}
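Example #2 below shows the write path diverting to this helper when vgpu->failsafe is set. The matching read-path dispatch is not included in these examples; a sketch of what it would look like at the top of the read entry point, mirroring the write path, is:

	/* Sketch only: read-path counterpart of the failsafe check in Example #2. */
	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
		return 0;
	}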
Example #2
/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
		return 0;
	}

	mutex_lock(&vgpu->vgpu_lock);

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		goto out;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		goto out;
	}

	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	ret = 0;
	goto out;
err:
	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
		     bytes);
out:
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}
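For context, these entry points are driven by the hypervisor glue that traps guest accesses to the MMIO BAR and forwards them as guest physical addresses. The helper below is a sketch only; the name forward_bar0_write and the bar_start/off parameters are illustrative and do not come from the examples above.

/* Sketch: forward a trapped MMIO BAR write to the emulation entry point. */
static ssize_t forward_bar0_write(struct intel_vgpu *vgpu, u64 bar_start,
				  u64 off, void *buf, unsigned int count)
{
	/* the emulator expects pa to be a guest physical address */
	int ret = intel_vgpu_emulate_mmio_write(vgpu, bar_start + off,
						buf, count);

	return ret ? ret : count;
}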
Example #3
/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                                 void *p_data, unsigned int bytes)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_mmio_info *mmio;
    unsigned int offset = 0;
    int ret = -EINVAL;

    mutex_lock(&gvt->lock);

    if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
        struct intel_vgpu_guest_page *gp;

        gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
        if (gp) {
            ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
                                                p_data, bytes);
            if (ret) {
                gvt_err("vgpu%d: guest page read error %d, "
                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                        vgpu->id, ret,
                        gp->gfn, pa, *(u32 *)p_data, bytes);
            }
            mutex_unlock(&gvt->lock);
            return ret;
        }
    }

    offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

    if (WARN_ON(bytes > 8))
        goto err;

    if (reg_is_gtt(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
            goto err;
        if (WARN_ON(bytes != 4 && bytes != 8))
            goto err;
        if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
            goto err;

        ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
                                               p_data, bytes);
        if (ret)
            goto err;
        mutex_unlock(&gvt->lock);
        return ret;
    }

    if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
        ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
    }

    if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
        goto err;

    mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
    if (!mmio && !vgpu->mmio.disable_warn_untrack) {
        gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
                vgpu->id, offset, bytes, *(u32 *)p_data);

        if (offset == 0x206c) {
            gvt_err("------------------------------------------\n");
            gvt_err("vgpu%d: likely triggers a gfx reset\n",
                    vgpu->id);
            gvt_err("------------------------------------------\n");
            vgpu->mmio.disable_warn_untrack = true;
        }
    }

    if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, bytes)))
            goto err;
    }

    if (mmio) {
        if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
            if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                goto err;
            if (WARN_ON(mmio->offset != offset))
                goto err;
        }
        ret = mmio->read(vgpu, offset, p_data, bytes);
    } else
        ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

    if (ret)
        goto err;

    intel_gvt_mmio_set_accessed(gvt, offset);
    mutex_unlock(&gvt->lock);
    return 0;
err:
    gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
            vgpu->id, offset, bytes);
    mutex_unlock(&gvt->lock);
    return ret;
}
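Both this read path and the write path in Example #4 dispatch through intel_gvt_find_mmio_info(). The struct sketch below lists only the members actually referenced in these examples; field widths are approximate and the real definition carries additional bookkeeping.

/* Sketch of the per-register tracking entry as used in Examples #3 and #4. */
struct intel_gvt_mmio_info {
	u32 offset;	/* register offset, looked up 4-byte aligned */
	u32 size;	/* width of the register range covered */
	u64 ro_mask;	/* read-only bits, preserved on guest writes */
	int (*read)(struct intel_vgpu *vgpu, unsigned int offset,
		    void *p_data, unsigned int bytes);
	int (*write)(struct intel_vgpu *vgpu, unsigned int offset,
		     void *p_data, unsigned int bytes);
};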
Example #4
/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                                  void *p_data, unsigned int bytes)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_mmio_info *mmio;
    unsigned int offset = 0;
    u32 old_vreg = 0, old_sreg = 0;
    int ret = -EINVAL;

    mutex_lock(&gvt->lock);

    if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
        struct intel_vgpu_guest_page *gp;

        gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
        if (gp) {
            ret = gp->handler(gp, pa, p_data, bytes);
            if (ret) {
                gvt_err("vgpu%d: guest page write error %d, "
                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                        vgpu->id, ret,
                        gp->gfn, pa, *(u32 *)p_data, bytes);
            }
            mutex_unlock(&gvt->lock);
            return ret;
        }
    }

    offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

    if (WARN_ON(bytes > 8))
        goto err;

    if (reg_is_gtt(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
            goto err;
        if (WARN_ON(bytes != 4 && bytes != 8))
            goto err;
        if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
            goto err;

        ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
                                                p_data, bytes);
        if (ret)
            goto err;
        mutex_unlock(&gvt->lock);
        return ret;
    }

    if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
        ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
    }

    mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
    if (!mmio && !vgpu->mmio.disable_warn_untrack)
        gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
                vgpu->id, offset, bytes, *(u32 *)p_data);

    if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, bytes)))
            goto err;
    }

    if (mmio) {
        u64 ro_mask = mmio->ro_mask;

        if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
            if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                goto err;
            if (WARN_ON(mmio->offset != offset))
                goto err;
        }

        if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
            old_vreg = vgpu_vreg(vgpu, offset);
            old_sreg = vgpu_sreg(vgpu, offset);
        }

        if (!ro_mask) {
            ret = mmio->write(vgpu, offset, p_data, bytes);
        } else {
            /* Protect RO bits like HW */
            u64 data = 0;

            /* all register bits are RO. */
            if (ro_mask == ~(u64)0) {
                gvt_err("vgpu%d: try to write RO reg %x\n",
                        vgpu->id, offset);
                ret = 0;
                goto out;
            }
            /* keep the RO bits in the virtual register */
            memcpy(&data, p_data, bytes);
            data &= ~mmio->ro_mask;
            data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
            ret = mmio->write(vgpu, offset, &data, bytes);
        }

        /* higher 16bits of mode ctl regs are mask bits for change */
        if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
            u32 mask = vgpu_vreg(vgpu, offset) >> 16;

            vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
                                      | (vgpu_vreg(vgpu, offset) & mask);
            vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
                                      | (vgpu_sreg(vgpu, offset) & mask);
        }
    } else