/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page) &&
		    !PageUnevictable(page)) {
			int lru = page_lru_base_type(page);
			list_move_tail(&page->lru, &zone->lru[lru].list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}
/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}
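The refactored version leaves the per-zone lock juggling to pagevec_lru_move_fn() and only supplies a per-page callback plus a counter passed through the opaque arg pointer. The callback itself is not shown above; a minimal sketch of what pagevec_move_tail_fn() could look like, reconstructed from the open-coded loop in the earlier version (the lruvec-based signature and lruvec->lists field are assumptions about the surrounding kernel version), is:

/*
 * Sketch of the per-page callback handed to pagevec_lru_move_fn().
 * Reconstructed from the open-coded loop above; the (page, lruvec, arg)
 * signature is assumed to match the other LRU move callbacks.
 */
static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);

		/* Rotate the page to the tail of its inactive LRU list. */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

Because pagevec_lru_move_fn() takes the LRU lock around each callback invocation, the callback itself can stay lock-free and only express the per-page policy.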
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * lazyfree pages are clean anonymous pages. They have
		 * SwapBacked flag cleared to distinguish normal anonymous
		 * pages
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
		count_memcg_page_event(page, PGLAZYFREE);
		update_page_reclaim_stat(lruvec, 1, 0);
	}
}
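lru_lazyfree_fn() is only the per-page callback; something on the MADV_FREE path still has to batch pages and push them through pagevec_lru_move_fn(). A rough sketch of such a driver, assuming a per-CPU pagevec (the name lru_lazyfree_pvecs and the exact batching details are assumptions, not code quoted from the source), might look like:

/*
 * Sketch of how the lazyfree callback could be driven. Pages are
 * collected into a per-CPU pagevec (lru_lazyfree_pvecs is a
 * hypothetical name here) and flushed through pagevec_lru_move_fn(),
 * which calls lru_lazyfree_fn() on each page under the LRU lock.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

		/* Hold a reference while the page sits in the pagevec. */
		get_page(page);
		/* Flush the whole batch once the pagevec is full. */
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		put_cpu_var(lru_lazyfree_pvecs);
	}
}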