/*
 * alloc_init_pmd - populate the pmd level for [addr, end)
 *
 * Maps the virtual range [addr, end) to the physical range starting at
 * @phys with protection @prot.  If @pud currently holds no table, or holds
 * a 1G section mapping, a pmd table is first allocated via @pgtable_alloc
 * and installed; an existing section mapping is split into pmd entries so
 * the covered addresses stay mapped throughout.
 *
 * The pmd table is accessed through a fixmap slot rather than the linear
 * map — presumably so this can run before the linear map covers the page
 * tables (NOTE(review): confirm against callers).
 */
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys;

		/* A pmd table is required here; there is no fallback path. */
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		      block_mappings_allowed(pgtable_alloc)) {
			pmd_t old_pmd =*pmd;
			pmd_set_huge(pmd, phys, prot);
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					/*
					 * The replaced table was allocated from
					 * memblock during early boot; return it
					 * only while memblock is still the
					 * active allocator.
					 */
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			/* Range or allocator rules out a block mapping: map at pte level. */
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}
/*
 * alloc_init_pmd - populate the pmd level for [addr, end)
 *
 * Maps the virtual range [addr, end) in @mm's page tables to the physical
 * range starting at @phys with protection @prot.  If @pud currently holds
 * no table, or holds a 1G section mapping, a pmd table is allocated via
 * @alloc and installed; an existing section mapping is split into pmd
 * entries first so the covered addresses stay mapped throughout.
 *
 * This variant walks the tables directly through the linear map
 * (pmd_offset), so it assumes the page tables are reachable that way —
 * NOTE(review): confirm callers only use this after the linear map is up.
 */
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd =*pmd;
			set_pmd(pmd, __pmd(phys |
					pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					/*
					 * Early tables came from memblock; only
					 * hand the page back while memblock is
					 * still the active allocator.
					 */
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			/* Range is not section-aligned: fall back to pte mappings. */
			alloc_init_pte(pmd, addr, next,
				       __phys_to_pfn(phys), prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
/* * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function * is used to determine if a linear map page has been marked as not-valid by * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit. * This is based on kern_addr_valid(), which almost does what we need. * * Because this is only called on the kernel linear map, p?d_sect() implies * p?d_present(). When debug_pagealloc is enabled, sections mappings are * disabled. */ bool kernel_page_present(struct page *page) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned long addr = (unsigned long)page_address(page); pgd = pgd_offset_k(addr); if (pgd_none(*pgd)) return false; pud = pud_offset(pgd, addr); if (pud_none(*pud)) return false; if (pud_sect(*pud)) return true; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return false; if (pmd_sect(*pmd)) return true; pte = pte_offset_kernel(pmd, addr); return pte_valid(*pte); }
/* * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function * is used to determine if a linear map page has been marked as not-valid by * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit. * This is based on kern_addr_valid(), which almost does what we need. * * Because this is only called on the kernel linear map, p?d_sect() implies * p?d_present(). When debug_pagealloc is enabled, sections mappings are * disabled. */ bool kernel_page_present(struct page *page) { pgd_t *pgdp; pud_t *pudp, pud; pmd_t *pmdp, pmd; pte_t *ptep; unsigned long addr = (unsigned long)page_address(page); pgdp = pgd_offset_k(addr); if (pgd_none(READ_ONCE(*pgdp))) return false; pudp = pud_offset(pgdp, addr); pud = READ_ONCE(*pudp); if (pud_none(pud)) return false; if (pud_sect(pud)) return true; pmdp = pmd_offset(pudp, addr); pmd = READ_ONCE(*pmdp); if (pmd_none(pmd)) return false; if (pmd_sect(pmd)) return true; ptep = pte_offset_kernel(pmdp, addr); return pte_valid(READ_ONCE(*ptep)); }
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) { pmd_t *pmd = pmd_offset(pud, 0); unsigned long addr; unsigned i; for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { addr = start + i * PMD_SIZE; #ifdef CONFIG_ARM64 if (pmd_none(*pmd) || pmd_sect (*pmd)) { #else if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) { #endif note_page(st, addr, 3, pmd_val(*pmd)); } else { walk_pte(st, pmd, addr); } #ifdef CONFIG_ARM if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1])); #endif } } static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) { pud_t *pud = pud_offset(pgd, 0); unsigned long addr; unsigned i; for (i = 0; i < PTRS_PER_PUD; i++, pud++) { addr = start + i * PUD_SIZE; #if defined CONFIG_ARM64 && !defined (CONFIG_ANDROID) if (pud_none (*pud) || pud_sect (*pud)) { note_page (st, addr, 2, pud_val (*pud)); } else { walk_pmd (st, pud, addr); } #else if (!pud_none(*pud)) { walk_pmd (st, pud, addr); } else { note_page (st, addr, 2, pud_val (*pud)); } #endif } }