/*
 * Walk the first-level (pgd) entries covering [start, start + size)
 * and tear down every stage-2 mapping found in that range.
 */
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	pgd = pgdp + kvm_pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		unmap_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
/*
 * Walk the stage-2 page tables covering a memslot and flush the caches
 * for every mapping found, one pgd entry at a time.
 */
static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		stage2_flush_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
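/*
 * Hedged usage sketch (not part of the original excerpt): flush the
 * stage-2 tables for every memslot by calling stage2_flush_memslot()
 * in a loop.  Loosely modelled on the kernel's stage2_flush_vm(); the
 * srcu/spinlock ordering and the kvm_for_each_memslot() iteration are
 * assumptions of this sketch.
 */
static void stage2_flush_vm_sketch(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}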
/*
 * Return the pud entry covering @addr, allocating a new pud table from
 * @cache if the pgd entry is empty.  With no cache available the
 * caller gets NULL back instead of an allocation.
 */
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
	if (WARN_ON(pgd_none(*pgd))) {
		if (!cache)
			return NULL;
		pud = mmu_memory_cache_alloc(cache);
		pgd_populate(NULL, pgd, pud);
		get_page(virt_to_page(pgd));
	}

	return pud_offset(pgd, addr);
}
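/*
 * Hedged sketch (not in the original excerpt): the next level of the
 * table walk, loosely modelled on the kernel's stage2_get_pmd().  It
 * reuses stage2_get_pud() above and allocates a pmd table from the
 * cache when the pud entry is empty; exact helper names are
 * assumptions of this sketch.
 */
static pmd_t *stage2_get_pmd_sketch(struct kvm *kvm,
				    struct kvm_mmu_memory_cache *cache,
				    phys_addr_t addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = stage2_get_pud(kvm, cache, addr);
	if (!pud)
		return NULL;

	if (pud_none(*pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return pmd_offset(pud, addr);
}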
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}
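/*
 * Hedged usage sketch (not in the original excerpt): map one 4K page
 * of device memory for the guest at IPA 'gpa', backed by physical
 * address 'pa'.  Loosely modelled on kvm_phys_addr_ioremap(); the
 * PAGE_S2_DEVICE attributes, kvm_set_s2pte_writable() and the
 * pre-filled 'cache' are assumptions of this sketch.
 */
static int map_one_device_page(struct kvm *kvm,
			       struct kvm_mmu_memory_cache *cache,
			       phys_addr_t gpa, phys_addr_t pa)
{
	int ret;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_S2_DEVICE);

	kvm_set_s2pte_writable(&pte);

	spin_lock(&kvm->mmu_lock);
	/* iomap == true: refuse to overwrite an existing mapping. */
	ret = stage2_set_pte(kvm, cache, gpa, &pte, true);
	spin_unlock(&kvm->mmu_lock);

	return ret;
}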
/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @kvm:	The KVM pointer
 * @addr:	Start address of range
 * @end:	End address of range
 */
static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
	pgd_t *pgd;
	phys_addr_t next;

	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
	do {
		/*
		 * Release kvm_mmu_lock periodically if the memory region is
		 * large. Otherwise, we may see kernel panics with
		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
		 * will also starve other vCPUs.
		 */
		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
			cond_resched_lock(&kvm->mmu_lock);

		next = kvm_pgd_addr_end(addr, end);
		if (pgd_present(*pgd))
			stage2_wp_puds(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
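/*
 * Hedged usage sketch (not in the original excerpt): write protect an
 * entire memory slot, e.g. when dirty logging is enabled.  Loosely
 * modelled on kvm_mmu_wp_memory_region(); taking kvm->mmu_lock around
 * the walk and flushing the TLBs afterwards are the key points, the
 * rest is an assumption of this sketch.
 */
static void wp_memslot_sketch(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);

	/* Make the now read-only entries visible to all vCPUs. */
	kvm_flush_remote_tlbs(kvm);
}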
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	pgd = pgdp + kvm_pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		if (!pgd_none(*pgd))
			unmap_puds(kvm, pgd, addr, next);
		/*
		 * If we are dealing with a large range in
		 * stage2 table, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector
		 * warnings.
		 */
		if (kvm && (next != end))
			cond_resched_lock(&kvm->mmu_lock);
	} while (pgd++, addr = next, addr != end);
}
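/*
 * Hedged usage sketch (not in the original excerpt): tear down the
 * stage-2 mappings for a guest physical range, as a memslot deletion
 * path might do.  Loosely modelled on unmap_stage2_range(); the caller
 * is assumed to hold kvm->mmu_lock, since unmap_range() above may drop
 * and re-take it for large ranges.
 */
static void unmap_stage2_range_sketch(struct kvm *kvm, phys_addr_t start,
				      u64 size)
{
	unmap_range(kvm, kvm->arch.pgd, start, size);
}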