static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 next;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = kvm_pud_addr_end(addr, end);
			continue;
		}

		if (pud_huge(*pud)) {
			/*
			 * If we are dealing with a huge pud, just clear it and
			 * move on.
			 */
			clear_pud_entry(kvm, pud, addr);
			addr = kvm_pud_addr_end(addr, end);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = kvm_pmd_addr_end(addr, end);
			continue;
		}

		if (!kvm_pmd_huge(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			clear_pte_entry(kvm, pte, addr);
			next = addr + PAGE_SIZE;
		}

		/*
		 * If the pmd entry is to be cleared, walk back up the ladder
		 */
		if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
			clear_pmd_entry(kvm, pmd, addr);
			next = kvm_pmd_addr_end(addr, end);
			if (page_empty(pmd) && !page_empty(pud)) {
				clear_pud_entry(kvm, pud, addr);
				next = kvm_pud_addr_end(addr, end);
			}
		}

		addr = next;
	}
}
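This walker relies on a handful of helpers that are not shown here: page_empty() drives the "walk back up the ladder" logic, and the clear_*_entry() functions zap an entry, flush the TLB, and release the table page reference. Below is a minimal sketch of the first two, assuming the convention that each live entry holds one extra reference on the page backing its table; the exact bodies in mmu.c may differ.

/*
 * Sketch only: emptiness is assumed to be tracked via the refcount of the
 * page backing the table -- one base reference plus one per live entry.
 */
static bool page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);

	return page_count(ptr_page) == 1;
}

/* Sketch: zap one stage-2 pte, drop the table reference, flush the TLB. */
static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	}
}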
static void unmap_pmds(struct kvm *kvm, pud_t *pud,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				/*
				 * A huge pmd maps the block directly: clear
				 * it, flush the stage-2 TLB entry and the data
				 * cache for the old mapping, and drop the
				 * reference this entry held on its table page.
				 */
				pmd_t old_pmd = *pmd;

				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				kvm_flush_dcache_pmd(old_pmd);
				put_page(virt_to_page(pmd));
			} else {
				unmap_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	/* If the whole pmd table is now empty, free it by clearing the pud. */
	if (kvm_pmd_table_empty(kvm, start_pmd))
		clear_pud_entry(kvm, pud, start_addr);
}
static void unmap_pmds(struct kvm *kvm, pud_t *pud,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd))
			unmap_ptes(kvm, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	/* If the whole pmd table is now empty, free it by clearing the pud. */
	if (kvm_pmd_table_empty(start_pmd))
		clear_pud_entry(kvm, pud, start_addr);
}
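Both unmap_pmds variants hand the leaf level to unmap_ptes(), which is not reproduced above. The following is a sketch of the shape that helper is assumed to take, mirroring the pattern of unmap_pmds: clear every live pte in the range, then collapse the pte table once it is empty. The helper name kvm_pte_table_empty follows the naming pattern of the code above and is an assumption here, not verbatim kernel code.

/*
 * Sketch, not the verbatim kernel code: zap each live pte in [addr, end),
 * drop its table-page reference, flush the TLB, and free the pte table if
 * nothing is left in it.
 */
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			kvm_set_pte(pte, __pte(0));
			put_page(virt_to_page(pte));
			kvm_tlb_flush_vmid_ipa(kvm, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (kvm_pte_table_empty(start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}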
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 next;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = pud_addr_end(addr, end);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = pmd_addr_end(addr, end);
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(kvm, pte, addr);
		next = addr + PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (page_empty(pte)) {
			clear_pmd_entry(kvm, pmd, addr);
			next = pmd_addr_end(addr, end);
			if (page_empty(pmd) && !page_empty(pud)) {
				clear_pud_entry(kvm, pud, addr);
				next = pud_addr_end(addr, end);
			}
		}

		addr = next;
	}
}
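For contrast with the single-loop walkers above, the split design that unmap_pmds belongs to reduces the top level to a short pgd loop that dispatches one level at a time, instead of one large loop that restarts from the pgd on every iteration. The sketch below illustrates that structure; the helper names unmap_puds() and kvm_pgd_addr_end() are assumptions chosen to match the naming pattern of the functions shown earlier, not confirmed kernel identifiers.

/*
 * Sketch of the split top level: each page-table level gets its own helper,
 * so the range walk becomes one do/while per level.
 */
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	pgd = pgdp + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		if (!pgd_none(*pgd))
			unmap_puds(kvm, pgd, addr, next); /* walks puds, then calls unmap_pmds */
	} while (pgd++, addr = next, addr != end);
}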