static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_cache is unlikely to unmap its
		 * ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && unuse_vma(vma, entry, page))
			break;
	}
	up_read(&mm->mmap_sem);
	/*
	 * Currently unuse_mm cannot fail, but leave error handling
	 * at call sites for now, since we change it from time to time.
	 */
	return 0;
}
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);

	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {
		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
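/*
 * A minimal user-space sketch (not kernel code) of the two-step
 * promotion that mark_page_accessed implements: the first touch sets a
 * referenced bit, the second touch promotes to active and clears it.
 * Everything here (struct sim_page, sim_mark_accessed) is invented for
 * illustration; only the transition table above comes from the source.
 */
#include <stdbool.h>
#include <stdio.h>

struct sim_page {
	bool active;		/* models PG_active */
	bool referenced;	/* models PG_referenced */
};

static void sim_mark_accessed(struct sim_page *p)
{
	if (!p->active && p->referenced) {
		/* second touch while inactive: promote and clear */
		p->active = true;
		p->referenced = false;
	} else if (!p->referenced) {
		/* first touch at this level: just record the reference */
		p->referenced = true;
	}
}

int main(void)
{
	struct sim_page p = { false, false };
	int i;

	/* prints the three transitions listed in the comment above */
	for (i = 1; i <= 3; i++) {
		sim_mark_accessed(&p);
		printf("touch %d: %s,%s\n", i,
		       p.active ? "active" : "inactive",
		       p.referenced ? "referenced" : "unreferenced");
	}
	return 0;
}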
/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
		ret = -ENOMEM;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
		if (ret > 0)
			mem_cgroup_uncharge_page(page);
		ret = 0;
		goto out;
	}

	inc_mm_counter(vma->vm_mm, anon_rss);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	page_add_anon_rmap(page, vma, addr);
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return ret;
}
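/*
 * Illustration only: the interesting shape in unuse_pte above is
 * "prepare before locking, revalidate under the lock, roll back on
 * mismatch" - the memcg charge happens before pte_offset_map_lock(),
 * and !pte_same() triggers the uncharge. A user-space analogue with
 * invented names (struct slot, slot_swap_in); a sketch of the pattern,
 * not of the kernel API:
 */
#include <pthread.h>
#include <stdlib.h>

struct slot {
	pthread_mutex_t lock;
	int value;
	void *backing;
};

/* Returns 1 on success, 0 if the slot changed underneath us, -1 on OOM. */
static int slot_swap_in(struct slot *s, int seen, int newval)
{
	void *resource = malloc(64);	/* "charge" done before locking */

	if (!resource)
		return -1;
	pthread_mutex_lock(&s->lock);
	if (s->value != seen) {		/* analogue of !pte_same() */
		pthread_mutex_unlock(&s->lock);
		free(resource);		/* analogue of the uncharge */
		return 0;
	}
	s->value = newval;		/* analogue of set_pte_at() */
	s->backing = resource;		/* resource now backs the new value */
	pthread_mutex_unlock(&s->lock);
	return 1;
}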
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void fastcall mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
/*
 * mark_page_accessed: state transitions on page access.
 *
 * First access to an inactive page:  inactive,unreferenced -> inactive,referenced
 * Second access to an inactive page: inactive,referenced   -> active,unreferenced
 * First access to an active page:    active,unreferenced   -> active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		/*
		 * Second access to an inactive page: move it from the
		 * inactive list to the active list and clear PG_referenced.
		 */
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		/*
		 * First access to an inactive page, or any access to an
		 * active page: only set PG_referenced.
		 */
		SetPageReferenced(page);
	}
}
/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	inc_mm_counter(vma->vm_mm, anon_rss);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	page_add_anon_rmap(page, vma, addr);
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
}
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {
		if (PageLRU(page))
			activate_page(page);
		else if (PageIONBacked(page))
			SetPageActive(page);
		else
			return;
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
		if (PageIONBacked(page) && PageActive(page))
			ion_activate_page(page);
	}
}
static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;
	int ret = 0;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_inactive_list is unlikely to unmap
		 * its ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
			break;
	}
	up_read(&mm->mmap_sem);
	return (ret < 0) ? ret : 0;
}
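/*
 * Illustration only: both unuse_mm variants enter holding the page
 * lock but needing mmap_sem, while the rest of the kernel takes
 * mmap_sem before the page lock - blocking on mmap_sem here could
 * deadlock. So they try it non-blockingly, and on failure drop the
 * page lock, block, and retake it. A user-space sketch of that
 * back-off shape, with invented names (outer/inner):
 */
#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* ~mmap_sem */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* ~page lock */

/* Caller already holds "inner" and wants "outer" as well. */
static void take_outer_while_holding_inner(void)
{
	if (pthread_mutex_trylock(&outer) != 0) {
		/* Blocking now would invert the lock order: back off. */
		pthread_mutex_unlock(&inner);
		pthread_mutex_lock(&outer);	/* may block; inner is free */
		pthread_mutex_lock(&inner);
		/* caller must revalidate anything inner was protecting */
	}
}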