static void pagevec_move_tail_fn(struct page *page, void *arg)
{
	int *pgmoved = arg;
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &zone->lru[lru].list);
		mem_cgroup_rotate_reclaimable_page(page);
		(*pgmoved)++;
	}
}
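/*
 * A minimal sketch of how a callback like pagevec_move_tail_fn() is
 * typically driven: pagevec_lru_move_fn() takes each zone's lru_lock
 * once and invokes the callback for every page in the pagevec, so the
 * per-page work above runs under the lock in a batch.  The wrapper
 * below illustrates that pattern; treat it as an assumption about the
 * surrounding code, not necessarily the exact upstream implementation.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	/* Rotate each page to the inactive tail, counting the moves */
	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}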
/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * can be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page		-> none
 * 2. active, dirty/writeback page	-> inactive, head, PG_reclaim
 * 3. inactive, mapped page		-> none
 * 4. inactive, dirty/writeback page	-> inactive, head, PG_reclaim
 * 5. inactive, clean page		-> inactive, tail
 * 6. others				-> none
 *
 * In case 4, the page is moved to the head of the inactive list
 * because the VM expects it to be written out by the flusher
 * threads, which is much more effective than the single-page
 * writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could race with end_page_writeback(),
		 * which can confuse readahead.  But the race window
		 * is _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback has already completed, so
		 * move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &zone->lru[lru].list);
		mem_cgroup_rotate_reclaimable_page(page);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}
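/*
 * A sketch of an entry point that could feed lru_deactivate_fn(),
 * assuming a per-CPU pagevec (named lru_deactivate_pvecs here; the
 * name is an assumption) in the usual pagevec-batching style: pages
 * are queued per CPU and the callback runs on the whole batch once
 * the pagevec fills up.  Illustrative only, not necessarily the
 * exact upstream code.
 */
void deactivate_page(struct page *page)
{
	/* Pin the page so it can't be freed while queued */
	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		/* Drain the batch through lru_deactivate_fn when full */
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}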