/*
 * Walk the p4d entries covering [addr, end) beneath the given pgd entry.
 *
 * Present entries are descended into via walk_pud_range(), but only when a
 * lower-level callback (pmd_entry or pte_entry) is installed; empty or bad
 * entries are reported through walk->pte_hole when that callback is set.
 * The first non-zero callback return value aborts the walk and is
 * propagated back to the caller; 0 is returned otherwise.
 */
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	int err = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		/* Clamp this iteration to the end of the current p4d span. */
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			/* No table here (bad entries are cleared): hole. */
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/* Descend only if something below this level wants calls. */
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}
/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree;
 *        walk->mm is the memory map being walked
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer and the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * No locks are taken, but the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	/* Empty or inverted range: nothing to do. */
	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *uninitialized_var(vma);

		next = pgd_addr_end(addr, end);

#ifdef CONFIG_HUGETLB_PAGE
		/*
		 * Handle hugetlb vmas individually because the page table
		 * walk for a hugetlb page is architecture-dependent and we
		 * can't handle it in the same manner as non-huge pages.
		 */
		vma = find_vma(walk->mm, addr);
		if (vma && is_vm_hugetlb_page(vma)) {
			if (vma->vm_end < next)
				next = vma->vm_end;
			/*
			 * Hugepage is very tightly coupled with vma, so
			 * walk through hugetlb entries within a given vma.
			 */
			err = walk_hugetlb_range(vma, addr, next, walk);
			if (err)
				break;
			/*
			 * next may have been clamped to vma->vm_end above,
			 * so recompute the pgd position from next rather
			 * than doing a plain pgd++.
			 */
			pgd = pgd_offset(walk->mm, next);
			continue;
		}
#endif
		if (pgd_none_or_clear_bad(pgd)) {
			/* Empty top-level entry: report the hole, if asked. */
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		/* Descend only when some lower-level callback exists. */
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}
/** * walk_page_range - walk a memory map's page tables with a callback * @mm: memory map to walk * @addr: starting address * @end: ending address * @walk: set of callbacks to invoke for each level of the tree * * Recursively walk the page table for the memory area in a VMA, * calling supplied callbacks. Callbacks are called in-order (first * PGD, first PUD, first PMD, first PTE, second PTE... second PMD, * etc.). If lower-level callbacks are omitted, walking depth is reduced. * * Each callback receives an entry pointer and the start and end of the * associated range, and a copy of the original mm_walk for access to * the ->private or ->mm fields. * * No locks are taken, but the bottom level iterator will map PTE * directories from highmem if necessary. * * If any callback returns a non-zero value, the walk is aborted and * the return value is propagated back to the caller. Otherwise 0 is returned. */ int walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk) { pgd_t *pgd; unsigned long next; int err = 0; if (addr >= end) return err; if (!walk->mm) return -EINVAL; pgd = pgd_offset(walk->mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) { if (walk->pte_hole) err = walk->pte_hole(addr, next, walk); if (err) break; continue; } if (walk->pgd_entry) err = walk->pgd_entry(pgd, addr, next, walk); if (!err && (walk->pud_entry || walk->pmd_entry || walk->pte_entry)) err = walk_pud_range(pgd, addr, next, walk); if (err) break; } while (pgd++, addr = next, addr != end); return err; }
/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree;
 *        walk->mm is the memory map being walked
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer and the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * Usually no locks are taken, but splitting transparent huge page may
 * take page table lock. And the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned.
 *
 * walk->mm->mmap_sem must be held for at least read if walk->hugetlb_entry
 * is !NULL.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	/* Empty or inverted range: nothing to do. */
	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	/* Callers must hold mmap_sem (at least for read). */
	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *vma = NULL;

		next = pgd_addr_end(addr, end);

		/*
		 * This function was not intended to be vma based.
		 * But there are vma special cases to be handled:
		 * - hugetlb vma's
		 * - VM_PFNMAP vma's
		 */
		vma = find_vma(walk->mm, addr);
		if (vma) {
			/*
			 * There are no page structures backing a VM_PFNMAP
			 * range, so do not allow split_huge_page_pmd().
			 *
			 * NOTE(review): next is not clamped to vma->vm_end
			 * here, so when the PFNMAP vma ends before the pgd
			 * boundary the remainder of [addr, next) appears to
			 * be skipped as a hole as well — confirm this is
			 * intended.
			 */
			if ((vma->vm_start <= addr) &&
			    (vma->vm_flags & VM_PFNMAP)) {
				if (walk->pte_hole)
					err = walk->pte_hole(addr, next, walk);
				if (err)
					break;
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
			/*
			 * Handle hugetlb vmas individually because the page
			 * table walk for a hugetlb page is architecture-
			 * dependent and we can't handle it in the same
			 * manner as non-huge pages.
			 */
			if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
			    is_vm_hugetlb_page(vma)) {
				if (vma->vm_end < next)
					next = vma->vm_end;
				/*
				 * Hugepage is very tightly coupled with vma,
				 * so walk through hugetlb entries within a
				 * given vma.
				 */
				err = walk_hugetlb_range(vma, addr, next, walk);
				if (err)
					break;
				/*
				 * next may have been clamped to vma->vm_end,
				 * so re-derive the pgd position from next
				 * instead of doing a plain pgd++.
				 */
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
		}

		if (pgd_none_or_clear_bad(pgd)) {
			/* Empty top-level entry: report the hole, if asked. */
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		/* Descend only when some lower-level callback exists. */
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}