Example #1
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int i;

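	/* tear down the IOMMU mapping of every memory slot in this VM */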
	for (i = 0; i < kvm->nmemslots; i++) {
		kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
				    kvm->memslots[i].npages);
	}

	return 0;
}
Example #2
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
    int i;
    struct kvm_memslots *slots;

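    /* the memslot array is RCU-protected; callers must be in a read-side critical section */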
    slots = rcu_dereference(kvm->memslots);

    for (i = 0; i < slots->nmemslots; i++) {
        kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
                            slots->memslots[i].npages);
    }

    return 0;
}
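The rcu_dereference() in Example #2 is only valid inside a read-side critical section; KVM protects the memslot array with SRCU. A minimal caller sketch, assuming KVM's kvm->srcu instance (the wrapper function example_unmap_all() is hypothetical, for illustration only):

static void example_unmap_all(struct kvm *kvm)	/* hypothetical wrapper, not from the examples above */
{
	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* enter the SRCU read-side critical section */
	kvm_iommu_unmap_memslots(kvm);		/* kvm->memslots cannot be freed while the lock is held */
	srcu_read_unlock(&kvm->srcu, idx);	/* allow a pending memslot update to complete */
}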
Example #3
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
    gfn_t gfn = slot->base_gfn;
    unsigned long npages = slot->npages;
    pfn_t pfn;
    int i, r = 0;
    struct iommu_domain *domain = kvm->arch.iommu_domain;
    int flags;

    /* check if iommu exists and in use */
    if (!domain)
        return 0;

    flags = IOMMU_READ | IOMMU_WRITE;
    if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
        flags |= IOMMU_CACHE;

    for (i = 0; i < npages; i++) {
        /* check if already mapped */
        if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
            continue;

        pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
        r = iommu_map_range(domain,
                            gfn_to_gpa(gfn),
                            pfn_to_hpa(pfn),
                            PAGE_SIZE, flags);
        if (r) {
            printk(KERN_ERR "kvm_iommu_map_address:"
                   "iommu failed to map pfn=%lx\n", pfn);
            goto unmap_pages;
        }
        gfn++;
    }
    return 0;

unmap_pages:
    kvm_iommu_put_pages(kvm, slot->base_gfn, i);
    return r;
}
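The unmap_pages error path relies on kvm_iommu_put_pages() to roll back the i mappings established so far. A minimal sketch of that rollback, assuming the generic iommu_unmap()/kvm_release_pfn_clean() helpers and one 4K mapping per gfn (the in-tree helper differs in detail, e.g. it also copes with large pages):

/* Simplified sketch of the rollback helper, not the in-tree implementation. */
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	gfn_t gfn = base_gfn;
	unsigned long i;

	/* nothing to do when no IOMMU domain is attached to this VM */
	if (!domain)
		return;

	for (i = 0; i < npages; i++, gfn++) {
		phys_addr_t phys;

		/* look up the guest-physical address in the IO page table */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
		if (!phys)
			continue;

		/* drop the reference taken when the page was mapped */
		kvm_release_pfn_clean(phys >> PAGE_SHIFT);

		/* remove the 4K mapping from the IO address space */
		iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
	}
}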
Example #4
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	kvm_pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ;
	if (!(slot->flags & KVM_MEM_READONLY))
		flags |= IOMMU_WRITE;
	if (!kvm->arch.iommu_noncoherent)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/* Make sure hva is aligned to the page size we want to map */
		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
		if (is_error_noslot_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address:"
			       "iommu failed to map pfn=%llx\n", pfn);
			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;

		cond_resched();
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}
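kvm_iommu_map_pages() handles a single memslot; on device assignment every slot needs to be mapped, mirroring the unmap loops of Examples #1 and #2. A minimal caller sketch, assuming the kvm_memslots()/kvm_for_each_memslot() accessors and SRCU protection as above (the in-tree kvm_iommu_map_memslots() carries extra bookkeeping such as noncoherent-DMA registration):

static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int idx, r = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	/* hold the SRCU read lock so the memslot array cannot go away under us */
	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	/* map every slot; stop at the first failure and report it */
	kvm_for_each_memslot(memslot, slots) {
		r = kvm_iommu_map_pages(kvm, memslot);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}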