static inline int filemap_sync_pmd_range(pud_t *pud, unsigned long address,
					 unsigned long end,
					 struct vm_area_struct *vma,
					 unsigned int flags)
{
	pmd_t *pmd;
	int error;

	if (pud_none(*pud))
		return 0;
	if (pud_bad(*pud)) {
		pud_ERROR(*pud);
		pud_clear(pud);
		return 0;
	}
	pmd = pmd_offset(pud, address);
	/* Clamp the walk to the end of this pud's coverage. */
	if ((address & PUD_MASK) != (end & PUD_MASK))
		end = (address & PUD_MASK) + PUD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return error;
}
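/*
 * For context: a sketch of the pte-level walker that
 * filemap_sync_pmd_range() calls, reconstructed from the same era of
 * mm/msync.c. It is not necessarily the exact body that shipped next to
 * the function above, and filemap_sync_pte() is assumed to exist with
 * this signature.
 */
static inline int filemap_sync_pte_range(pmd_t *pmd, unsigned long address,
					 unsigned long end,
					 struct vm_area_struct *vma,
					 unsigned int flags)
{
	pte_t *pte;
	int error;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset_map(pmd, address);
	/* Clamp the walk to the end of this pmd's coverage. */
	if ((address & PMD_MASK) != (end & PMD_MASK))
		end = (address & PMD_MASK) + PMD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte(pte, vma, address, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(pte - 1);
	return error;
}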
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);

	/* Unhook the pmd table, flush the stage-2 TLB, then free it. */
	pud_clear(pud);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}
int oleole_flush_guest_virt_memory(oleole_guest_system_t *gsys)
{
	unsigned long flags;
	unsigned long start, end;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgd;

	/* Snapshot the VMA under the lock; walk it without the lock held. */
	spin_lock_irqsave(&gsys->lock, flags);
	vma = gsys->vma;
	spin_unlock_irqrestore(&gsys->lock, flags);
	if (!vma)
		return -1;
	mm = vma->vm_mm;
	if (!mm)
		return -1;

	/* Tear down the guest's 4 GiB virtual window, one pud at a time. */
	start = vma->vm_start + OLEOLE_GUSET_VIRT_SPACE_OFFSET;
	end = start + 0x100000000UL;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, start);
	if (!pgd_present(*pgd))
		goto miss;
	for (; start < end; start += PUD_SIZE) {
		pud_t *pud;
		pmd_t *pmd;
		struct page *page;

		pud = pud_offset(pgd, start);
		if (!pud_present(*pud))
			goto miss;
		free_pmd_range(pud);
		pmd = pmd_offset(pud, 0);
		page = virt_to_page(pmd);
		__free_page(page);
		pud_clear(pud);
	}
miss:
	up_write(&mm->mmap_sem);
	__flush_tlb();
	return 0;
}
static void free_pud_range(pgd_t *pgd)
{
	int i;
	pud_t *pud;

	pud = pud_offset(pgd, 0);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		pmd_t *pmd;
		struct page *page;

		if (oleole_pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(pud);
		pmd = pmd_offset(pud, 0);
		page = virt_to_page(pmd);
		__free_page(page);
		pud_clear(pud);
	}
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	unsigned long flags;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	/* Free bottom-up; each label falls through to the level above. */
	pud_clear(pud);
	pmd_free(mm, pmd);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
	pgd_list_lock(flags);
	pgd_list_del(pgd);
	pgd_list_unlock(flags);
	free_pages((unsigned long)pgd_base, 2);
}
static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pud_t *pud, *start_pud;

	start_pud = pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud)) {
				/* Huge pud: clear it directly and flush. */
				pud_clear(pud);
				kvm_tlb_flush_vmid_ipa(kvm, addr);
				put_page(virt_to_page(pud));
			} else {
				unmap_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);

	if (kvm_pud_table_empty(kvm, start_pud))
		clear_pgd_entry(kvm, pgd, start_addr);
}
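/*
 * For context: a sketch of the companion clear_pgd_entry() called above,
 * mirroring clear_pud_entry() one level up (clear the entry, flush the
 * stage-2 TLB by IPA, free the now-unhooked table, drop the refcount).
 * Reconstructed from the same KVM/ARM code base; treat as illustrative,
 * not as the exact shipped body.
 */
static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);

	pgd_clear(pgd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pud_free(NULL, pud_table);
	put_page(virt_to_page(pgd));
}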
inline void nvmm_rm_pud(pud_t *pud)
{
	pud_clear(pud);
}
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
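/*
 * For context: pud_clear_bad() is normally reached through the generic
 * pud_none_or_clear_bad() helper (include/asm-generic/pgtable.h), the
 * same pattern several of the walkers above rely on. Shown here as a
 * usage sketch.
 */
static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}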