static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        addr = start;
        do {
                pmd = pmd_offset(pud, addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                        get_page(virt_to_page(pmd));
                        kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
                }

                next = pmd_addr_end(addr, end);

                create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);

        return 0;
}
static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int ret;

        addr = start;
        do {
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                return -ENOMEM;
                        }
                        pud_populate(NULL, pud, pmd);
                        get_page(virt_to_page(pud));
                        kvm_flush_dcache_to_poc(pud, sizeof(*pud));
                }

                next = pud_addr_end(addr, end);
                ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
                if (ret)
                        return ret;
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);

        return 0;
}
/*
 * Walk the ptes of one pmd and clean the data cache for every mapped
 * guest page. The flush goes through the page's userspace virtual
 * address (HVA), which gfn_to_hva() resolves from the guest frame number.
 */
static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
                              phys_addr_t addr, phys_addr_t end)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, addr);
        do {
                if (!pte_none(*pte)) {
                        hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
                        kvm_flush_dcache_to_poc((void *)hva, PAGE_SIZE);
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
}
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end, unsigned long pfn,
                                    pgprot_t prot)
{
        pte_t *pte;
        unsigned long addr;

        addr = start;
        do {
                pte = pte_offset_kernel(pmd, addr);
                kvm_set_pte(pte, pfn_pte(pfn, prot));
                get_page(virt_to_page(pte));
                kvm_flush_dcache_to_poc(pte, sizeof(*pte));
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
}
static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
                              phys_addr_t addr, phys_addr_t end)
{
        pmd_t *pmd;
        phys_addr_t next;

        pmd = pmd_offset(pud, addr);
        do {
                next = kvm_pmd_addr_end(addr, end);
                if (!pmd_none(*pmd)) {
                        if (kvm_pmd_huge(*pmd)) {
                                hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
                                kvm_flush_dcache_to_poc((void *)hva, PMD_SIZE);
                        } else {
                                stage2_flush_ptes(kvm, pmd, addr, next);
                        }
                }
        } while (pmd++, addr = next, addr != end);
}
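/*
 * Hedged sketch, not part of the excerpt above: the pud level of the
 * stage-2 flush would plausibly follow the same walk pattern, flushing
 * huge puds directly and descending into stage2_flush_pmds() otherwise.
 * The helpers kvm_pud_addr_end() and pud_huge() are assumptions here.
 */
static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
                              phys_addr_t addr, phys_addr_t end)
{
        pud_t *pud;
        phys_addr_t next;

        pud = pud_offset(pgd, addr);
        do {
                next = kvm_pud_addr_end(addr, end);
                if (!pud_none(*pud)) {
                        if (pud_huge(*pud)) {
                                /* flush the whole huge mapping via its HVA */
                                hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
                                kvm_flush_dcache_to_poc((void *)hva, PUD_SIZE);
                        } else {
                                stage2_flush_pmds(kvm, pud, addr, next);
                        }
                }
        } while (pud++, addr = next, addr != end);
}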
static int __create_hyp_mappings(pgd_t *pgdp,
                                 unsigned long start, unsigned long end,
                                 unsigned long pfn, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        mutex_lock(&kvm_hyp_pgd_mutex);
        addr = start & PAGE_MASK;
        end = PAGE_ALIGN(end);
        do {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                        get_page(virt_to_page(pud));
                        kvm_flush_dcache_to_poc(pud, sizeof(*pud));
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
                if (err)
                        goto out;
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}
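/*
 * Hedged usage sketch, an assumption rather than code from the excerpt:
 * a caller would typically map a kernel virtual address range into the
 * Hyp tables page by page, resolving the pfn of each backing physical
 * page before calling __create_hyp_mappings(). The hyp_pgd table and
 * the PAGE_HYP protection are assumed to be defined in surrounding code.
 */
static int map_range_to_hyp(void *from, void *to)
{
        unsigned long start = (unsigned long)from & PAGE_MASK;
        unsigned long end = PAGE_ALIGN((unsigned long)to);
        unsigned long addr;
        int err = 0;

        for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
                phys_addr_t phys = virt_to_phys((void *)addr);

                err = __create_hyp_mappings(hyp_pgd, addr, addr + PAGE_SIZE,
                                            __phys_to_pfn(phys), PAGE_HYP);
        }

        return err;
}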