static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 next;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = kvm_pud_addr_end(addr, end);
			continue;
		}

		if (pud_huge(*pud)) {
			/*
			 * If we are dealing with a huge pud, just clear it and
			 * move on.
			 */
			clear_pud_entry(kvm, pud, addr);
			addr = kvm_pud_addr_end(addr, end);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = kvm_pmd_addr_end(addr, end);
			continue;
		}

		if (!kvm_pmd_huge(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			clear_pte_entry(kvm, pte, addr);
			next = addr + PAGE_SIZE;
		}

		/*
		 * If the pmd entry is to be cleared, walk back up the ladder.
		 */
		if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
			clear_pmd_entry(kvm, pmd, addr);
			next = kvm_pmd_addr_end(addr, end);
			if (page_empty(pmd) && !page_empty(pud)) {
				clear_pud_entry(kvm, pud, addr);
				next = kvm_pud_addr_end(addr, end);
			}
		}

		addr = next;
	}
}
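unmap_range decides when to collapse a table back into its parent by testing page_empty() on the table page. That helper is not part of this excerpt; a minimal sketch, assuming the same reference-counting convention suggested by the put_page() calls in the clear_* helpers below (each live entry pins its table page, so an empty table is back to a refcount of 1), might look like:

static bool page_empty(void *ptr)
{
	/*
	 * Sketch only: once a table page has no entries left, every
	 * per-entry reference has been dropped and only the initial
	 * allocation reference remains.
	 */
	struct page *ptr_page = virt_to_page(ptr);

	return page_count(ptr_page) == 1;
}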
/*
 * Walk the PMDs covering [addr, end) under @pud: tear down huge PMD mappings
 * directly, descend into PTE tables otherwise, and free the PMD table itself
 * once it is empty.
 */
static void unmap_pmds(struct kvm *kvm, pud_t *pud,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				pmd_t old_pmd = *pmd;

				/* Clear the huge mapping, then flush the TLB and dcache. */
				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				kvm_flush_dcache_pmd(old_pmd);

				put_page(virt_to_page(pmd));
			} else {
				unmap_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	if (kvm_pmd_table_empty(kvm, start_pmd))
		clear_pud_entry(kvm, pud, start_addr);
}
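unmap_pmds hands non-huge entries to unmap_ptes(), which is not included in this excerpt. A sketch following the same walker idiom, assuming PTE-level counterparts (kvm_set_pte, kvm_flush_dcache_pte, kvm_pte_table_empty) analogous to the PMD-level helpers used above, might look like:

static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			pte_t old_pte = *pte;

			/* Clear the entry, then flush the stage-2 TLB and dcache. */
			kvm_set_pte(pte, __pte(0));
			kvm_tlb_flush_vmid_ipa(kvm, addr);
			kvm_flush_dcache_pte(old_pte);

			put_page(virt_to_page(pte));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	/* If the PTE table ended up empty, unhook and free it. */
	if (kvm_pte_table_empty(kvm, start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}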
/*
 * Unhook a PTE table from its PMD entry: clear the PMD, flush the stage-2
 * TLB for this IPA, free the now-unreachable PTE table and drop the
 * reference this entry held on the PMD table page.
 */
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);

	VM_BUG_ON(kvm_pmd_huge(*pmd));
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}
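unmap_range and unmap_pmds above also call clear_pud_entry(), which is not shown in this excerpt. A sketch of a PUD-level counterpart to clear_pmd_entry, handling both a huge PUD and a PUD that points to a PMD table, could be:

static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	/* Sketch only; mirrors clear_pmd_entry() one level up. */
	if (pud_huge(*pud)) {
		pud_clear(pud);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		pmd_t *pmd_table = pmd_offset(pud, 0);

		pud_clear(pud);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		pmd_free(NULL, pmd_table);
	}
	put_page(virt_to_page(pud));
}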
/**
 * stage2_dissolve_pmd() - clear and flush huge PMD entry
 * @kvm:	pointer to kvm structure.
 * @addr:	IPA
 * @pmd:	pmd pointer for IPA
 *
 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
 * pages in the range dirty.
 */
static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
{
	if (!kvm_pmd_huge(*pmd))
		return;

	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	put_page(virt_to_page(pmd));
}
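The natural consumer of this is the stage-2 fault path while dirty-page logging is enabled, where a fault has to be mapped at page granularity even if a huge mapping already exists. A hedged caller sketch (the enclosing function and the logging_active flag are assumptions, not part of this excerpt):

	/*
	 * Hypothetical caller: with dirty-page logging active, break up any
	 * existing huge PMD so writes can be tracked per page before the
	 * new page-sized PTE is installed.
	 */
	if (logging_active)
		stage2_dissolve_pmd(kvm, addr, pmd);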
static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				/*
				 * Flush the huge mapping through its host
				 * virtual address rather than the guest IPA.
				 */
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);

				kvm_flush_dcache_to_poc((void *)hva, PMD_SIZE);
			} else {
				stage2_flush_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}
static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd))
				kvm_flush_dcache_pmd(*pmd);
			else
				stage2_flush_ptes(kvm, pmd, addr, next);
		}
	} while (pmd++, addr = next, addr != end);
}
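The non-huge branch relies on stage2_flush_ptes(), which is not shown in this excerpt. A sketch following the same do/while walker, assuming a kvm_flush_dcache_pte() counterpart to kvm_flush_dcache_pmd(), might look like:

static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	/* Sketch only: flush the data cache for every present PTE. */
	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte))
			kvm_flush_dcache_pte(*pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}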
/**
 * stage2_wp_pmds - write protect the PMD range covered by a PUD entry
 * @pud:	pointer to pud entry
 * @addr:	range start address
 * @end:	range end address
 */
static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				if (!kvm_s2pmd_readonly(pmd))
					kvm_set_s2pmd_readonly(pmd);
			} else {
				stage2_wp_ptes(pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}
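Similarly, stage2_wp_pmds defers to stage2_wp_ptes() for page-granularity mappings. A sketch consistent with the PMD-level logic above, assuming kvm_s2pte_readonly()/kvm_set_s2pte_readonly() accessors exist for PTEs as they do for PMDs:

static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	/* Sketch only: write protect every present PTE in [addr, end). */
	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			if (!kvm_s2pte_readonly(pte))
				kvm_set_s2pte_readonly(pte);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}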