/*
 * refill_inactive - move up to @nr_pages pages from the tail of the
 * active list to the inactive list.
 *
 * A page whose referenced bit is set gets a second chance: the bit is
 * cleared and the page is rotated back to the head of the active list
 * instead of being deactivated.  A page that is deactivated has its
 * referenced bit set afterwards — presumably so that a subsequent
 * access promotes it back out of the inactive list quickly (TODO
 * confirm against the reclaim path).
 *
 * The whole scan runs under pagemap_lru_lock.
 */
static void refill_inactive(int nr_pages)
{
	struct list_head * entry;

	spin_lock(&pagemap_lru_lock);
	/* Scan from the tail (oldest pages) towards the head. */
	entry = active_list.prev;
	while (nr_pages && entry != &active_list) {
		struct page * page;

		page = list_entry(entry, struct page, lru);
		/* Advance before touching the page, since the page may be
		 * unlinked from this position below. */
		entry = entry->prev;
		if (PageTestandClearReferenced(page)) {
			/* Recently referenced: rotate back to the head of
			 * the active list and keep it active. */
			list_del(&page->lru);
			list_add(&page->lru, &active_list);
			continue;
		}
		nr_pages--;
		del_page_from_active_list(page);
		add_page_to_inactive_list(page);
		SetPageReferenced(page);
	}
	spin_unlock(&pagemap_lru_lock);
}
/*
 * refill_inactive - move pages from the tail of the active list to the
 * inactive list, with the amount of work scaled by @classzone's LRU
 * balance.
 *
 * The scan budget is:
 *
 *   ratio = nr_pages * nr_active /
 *           (nr_inactive * vm_lru_balance_ratio + 1)
 *
 * so the more the zone's inactive list already outweighs its active
 * list, the fewer pages are deactivated (the "+ 1" guards against
 * division by zero when the inactive list is empty).
 *
 * A page whose referenced bit is set is rotated back to the head of
 * the active list instead of being deactivated; a deactivated page has
 * its referenced bit set afterwards.
 *
 * After the scan, the active_list head itself is re-linked at the scan
 * position, rotating the list so a later call presumably resumes where
 * this one stopped (TODO confirm intent against callers).
 *
 * NOTE(review): unlike the locked variant of this function, no
 * pagemap_lru_lock is taken here — presumably the caller holds the
 * appropriate lock; verify at call sites.
 */
static void refill_inactive(int nr_pages, zone_t * classzone)
{
	struct list_head * entry;
	unsigned long ratio;

	ratio = (unsigned long) nr_pages * classzone->nr_active_pages /
		(((unsigned long) classzone->nr_inactive_pages * vm_lru_balance_ratio) + 1);

	/* Scan from the tail (oldest pages) towards the head. */
	entry = active_list.prev;
	while (ratio && entry != &active_list) {
		struct page * page;

		page = list_entry(entry, struct page, lru);
		/* Advance before touching the page, since the page may be
		 * unlinked from this position below. */
		entry = entry->prev;
		if (PageTestandClearReferenced(page)) {
			/* Recently referenced: rotate back to the head of
			 * the active list and keep it active. */
			list_del(&page->lru);
			list_add(&page->lru, &active_list);
			continue;
		}
		ratio--;
		del_page_from_active_list(page);
		add_page_to_inactive_list(page);
		SetPageReferenced(page);
	}
	/* Rotate the list: re-link the head right where the scan stopped. */
	if (entry != &active_list) {
		list_del(&active_list);
		list_add(&active_list, entry);
	}
}
// dyc: add all pages into inactive list void __pagevec_lru_add(struct pagevec *pvec) { int i; struct zone *zone = NULL; // dyc: iterate all pages and add them to inactive list for (i = 0; i < pagevec_count(pvec); i++) { struct page *page = pvec->pages[i]; struct zone *pagezone = page_zone(page); // dyc: lock/unlock until next zone if (pagezone != zone) { if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; spin_lock_irq(&zone->lru_lock); } VM_BUG_ON(PageLRU(page)); // dyc: set pg LRU flag SetPageLRU(page); // dyc: add to inactive list and increase vm_stat add_page_to_inactive_list(zone, page); } if (zone) spin_unlock_irq(&zone->lru_lock); release_pages(pvec->pages, pvec->nr, pvec->cold); pagevec_reinit(pvec); }
/*
 * putback_page_to_lru - return @page to the LRU list of @zone it
 * belongs on, chosen by its active flag.
 *
 * BUGs if the page is already marked as being on an LRU list.
 * NOTE(review): no locking visible here — presumably the caller holds
 * zone->lru_lock; verify at call sites.
 */
static inline void putback_page_to_lru(struct zone *zone, struct page *page)
{
	/* Must not already be on an LRU list. */
	if (TestSetPageLRU(page))
		BUG();

	if (!PageActive(page))
		add_page_to_inactive_list(zone, page);
	else
		add_page_to_active_list(zone, page);
}
/* * Add the passed pages to the LRU, then drop the caller's refcount * on them. Reinitialises the caller's pagevec. */ void __pagevec_lru_add(struct pagevec *pvec) { int i; struct zone *zone = NULL; for (i = 0; i < pagevec_count(pvec); i++) { struct page *page = pvec->pages[i]; struct zone *pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; spin_lock_irq(&zone->lru_lock); } if (TestSetPageLRU(page)) BUG(); add_page_to_inactive_list(zone, page); } if (zone) spin_unlock_irq(&zone->lru_lock); release_pages(pvec->pages, pvec->nr, pvec->cold); pagevec_reinit(pvec); }
/*
 * lru_cache_add - add @page to the inactive LRU list.
 *
 * Thin wrapper around add_page_to_inactive_list().
 * NOTE(review): no locking visible here — presumably the caller holds
 * the LRU lock; confirm at call sites.
 */
void lru_cache_add(struct page * page)
{
	add_page_to_inactive_list(page);
}