/*
 * Look up the page-table entry that maps @addr in @mm as a huge page,
 * checking each level of the (two- or three-level) page table in turn.
 * Returns a pointer to the huge PTE at whichever level it is found
 * (cast to pte_t * even for PUD/PMD-level entries), or NULL if no
 * present huge mapping covers @addr.
 */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	pte_t *pte;
#endif

	/* Get the top-level page table entry. */
	pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

	/* We don't have four levels. */
	pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
	if (!pud_present(*pud))
		return NULL;

	/* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_huge(*pud))
		return (pte_t *)pud;

	/* Descend to the PMD level via get_pte() on the PUD's page. */
	pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
			       pmd_index(addr), 1);
	if (!pmd_present(*pmd))
		return NULL;
#else
	/* Two-level table: the PMD is folded into the PUD. */
	pmd = pmd_offset(pud, addr);
#endif

	/* Check for an L1 huge PTE. */
	if (pmd_huge(*pmd))
		return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
	/*
	 * Check for an L2 huge PTE: a leaf-level PTE marked as a
	 * "super" page spanning multiple base pages.
	 */
	pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
	if (!pte_present(*pte))
		return NULL;
	if (pte_super(*pte))
		return pte;
#endif
	/* No present huge mapping at any level. */
	return NULL;
}
/*
 * Walk every PMD entry under the PUD entry @addr, reporting each one
 * to note_page() (level 3) or, for present non-large entries,
 * recursing into walk_pte_level().  @P is the virtual address covered
 * by the first entry; each subsequent entry covers PMD_LEVEL_MULT
 * more bytes.
 */
static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, unsigned long P)
{
	pmd_t *pmd_base = (pmd_t *) pud_page_vaddr(addr);
	int idx;

	for (idx = 0; idx < PTRS_PER_PMD; idx++) {
		unsigned long entry_addr = P + idx * PMD_LEVEL_MULT;
		pmd_t entry = pmd_base[idx];

		st->current_address = normalize_addr(entry_addr);

		if (pmd_none(entry)) {
			/* Empty slot: record it with no protection bits. */
			note_page(m, st, __pgprot(0), 3);
			continue;
		}

		if (pmd_large(entry) || !pmd_present(entry)) {
			/* Leaf (or non-present) entry: report it directly. */
			pgprotval_t prot = pmd_val(entry) & PTE_FLAGS_MASK;
			note_page(m, st, __pgprot(prot), 3);
		} else {
			/* Present table entry: descend to the PTE level. */
			walk_pte_level(m, st, entry, entry_addr);
		}
	}
}
/*
 * Return the struct page backing the PUD entry @pud: for a huge-page
 * mapping, derive it from the PTE view of the entry; otherwise take
 * the page holding the next-level table it points at.
 */
struct page *pud_page(pud_t pud)
{
	return pud_huge(pud) ? pte_page(pud_pte(pud))
			     : virt_to_page(pud_page_vaddr(pud));
}