/*
 * clear_pte_range - scan the PTEs covering [addr, end) under one PMD,
 * record every page whose accessed bit is set, then clear that bit so
 * the next access can be observed in the following sampling period.
 *
 * @vma:  the VMA the range belongs to (supplies the mm for set_pte_at)
 * @pmd:  the PMD whose PTE table is scanned
 * @addr: start of the range (page-aligned)
 * @end:  end of the range (exclusive); must satisfy addr != end on entry
 *
 * Returns the address at which the scan stopped (== end).
 */
static unsigned long clear_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				     unsigned long addr, unsigned long end)
{
	pte_t *pte;
	pte_t ptecont;

	/*
	 * Map the PTE table once for the whole range instead of once per
	 * iteration (the original remapped on every pass, which also made
	 * the pte++ in the loop condition dead code, and never released
	 * the mapping).
	 */
	pte = pte_offset_map(pmd, addr);
	do {
		ptecont = *pte;
		if (pte_none(ptecont))
			continue;
		/*
		 * pte_young() tests _PAGE_ACCESSED: the page this pte maps
		 * was read or written since the bit was last cleared.
		 */
		if (pte_present(ptecont) && pte_young(ptecont)) {
			DEBUG_INFO("[%#016lx - %#016lx], pfn = %#013lx",
				   addr, end, pte_pfn(ptecont));
			collect_statistics(pte_pfn(ptecont));
			/*
			 * BUG FIX: pte_clear_flags() returns the updated pte
			 * value; the original discarded it, so _PAGE_ACCESSED
			 * was never actually cleared in the page table.
			 * Clear it and write the entry back.
			 */
			ptecont = pte_clear_flags(ptecont, _PAGE_ACCESSED);
			set_pte_at(vma->vm_mm, addr, pte, ptecont);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	/* Release the kmap taken by pte_offset_map() (leaked before). */
	pte_unmap(pte - 1);
	return addr;
}
/*
 * callback_page_walk - mm_walk PTE callback that arms a page for fault-
 * based access tracking.
 *
 * For each present pte, remember the faulting address in the global
 * proc[] slot selected by walk->private, then clear _PAGE_PRESENT so the
 * next touch of the page raises a page fault that the tracker can
 * intercept.  Also counts the armed entries in the global spcd_pf_extra.
 *
 * @pte:       pte being visited
 * @addr:      virtual address the pte maps
 * @next_addr: end of this pte's range (unused here)
 * @walk:      walk state; ->private carries the proc[] index as a long
 *
 * Returns 1 when the pte was armed, 0 when it was skipped.
 */
static inline int callback_page_walk(pte_t *pte, unsigned long addr, unsigned long next_addr, struct mm_walk *walk) {
	/* Skip holes and entries that are not currently present. */
	if (pte_none(*pte) || !pte_present(*pte) /* || !pte_young(*pte) || pte_special(*pte) */ )
		return 0;
	/* walk->private is an index into the file-global proc[] table. */
	proc[(long)walk->private].next_addr = addr;
	/* TODO: try pte_mknuma on 3.8 */
	/*
	 * Drop the present bit in place so the next access faults.
	 * NOTE(review): there is no TLB flush here — a stale TLB entry
	 * could let the CPU keep accessing the page without faulting;
	 * confirm the caller flushes after the walk.
	 */
	*pte = pte_clear_flags(*pte, _PAGE_PRESENT);
	spcd_pf_extra++;
	return 1;
}
/*
 * clear_soft_dirty - drop the soft-dirty state of a single pte.
 *
 * Soft-dirty tracking relies on page faults to observe writes, so a
 * present pte must be write-protected in addition to having its
 * soft-dirty flag cleared; a swap pte only needs its swap-side
 * soft-dirty bit dropped.  See Documentation/vm/soft-dirty.txt for the
 * full description of the mechanism.
 */
static inline void clear_soft_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *pte)
{
	pte_t entry = *pte;

	if (pte_present(entry))
		entry = pte_clear_flags(pte_wrprotect(entry), _PAGE_SOFT_DIRTY);
	else if (is_swap_pte(entry))
		entry = pte_swp_clear_soft_dirty(entry);

	set_pte_at(vma->vm_mm, addr, pte, entry);
}
/*
 * protect_memory - re-assert write protection on the tracked kernel page.
 *
 * Clears _PAGE_RW on the pte atomically, making the mapping read-only
 * again (the counterpart that made it writable is elsewhere in the file).
 *
 * NOTE(review): `pte` is not a parameter or local — presumably a
 * file-scope variable initialized by a matching unprotect_memory();
 * confirm it is valid before this is called.
 */
static inline void protect_memory(void) { /* Restore kernel memory page protection */ set_pte_atomic(pte, pte_clear_flags(*pte, _PAGE_RW)); }