static kvm_pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
			       unsigned long npages)
{
	gfn_t end_gfn;
	kvm_pfn_t pfn;

	/* Pin the first page and remember its pfn as the return value. */
	pfn     = gfn_to_pfn_memslot(slot, gfn);
	end_gfn = gfn + npages;
	gfn    += 1;

	/* Bail out early if the first page cannot be resolved. */
	if (is_error_noslot_pfn(pfn))
		return pfn;

	/*
	 * Pin the remaining pages of the range; gfn_to_pfn_memslot()
	 * takes a reference on each page it resolves, which is dropped
	 * again on the unpin side.
	 */
	while (gfn < end_gfn)
		gfn_to_pfn_memslot(slot, gfn++);

	return pfn;
}
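For context, each page pinned here must later be released exactly once. The helper below is a minimal sketch of that unpin path, assuming kvm_release_pfn_clean() is the primitive that drops the reference taken by gfn_to_pfn_memslot() above; the caller is expected to pass back the same pfn and npages it originally pinned.

static void kvm_unpin_pages(struct kvm *kvm, kvm_pfn_t pfn,
			    unsigned long npages)
{
	unsigned long i;

	/* Drop one reference per page in the pinned range. */
	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}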
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn = slot->base_gfn;
	unsigned long npages = slot->npages;
	pfn_t pfn;
	int i, r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	flags = IOMMU_READ | IOMMU_WRITE;
	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
		flags |= IOMMU_CACHE;

	for (i = 0; i < npages; i++) {
		/* check if already mapped; still advance gfn */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn++;
			continue;
		}

		pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
		r = iommu_map_range(domain,
				    gfn_to_gpa(gfn),
				    pfn_to_hpa(pfn),
				    PAGE_SIZE, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_pages: "
			       "iommu failed to map pfn=%lx\n", pfn);
			goto unmap_pages;
		}
		gfn++;
	}
	return 0;

unmap_pages:
	/* Unwind the i pages mapped so far. */
	kvm_iommu_put_pages(kvm, slot->base_gfn, i);
	return r;
}
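To see how this is driven, the sketch below shows a caller that maps every memory slot, in the shape of the kvm_iommu_map_memslots() helper that accompanies this function. The exact fields used to walk the slot array (nmemslots, memslots[]) changed across kernel versions, so treat those names as illustrative assumptions rather than a definitive layout.

static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int i, r = 0;

	/* Map each slot in turn; stop at the first failure. */
	for (i = 0; i < kvm->nmemslots; i++) {
		r = kvm_iommu_map_pages(kvm, &kvm->memslots[i]);
		if (r)
			break;
	}

	return r;
}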