/*
 * Attempt to shrink the dcache memory.  This is simply a functional
 * test to ensure we can correctly call the shrinker.  We don't check
 * that the cache actually decreased because we have no control over
 * what else may be running on the system.  This avoids false positives.
 */
static int
splat_linux_test1(struct file *file, void *arg)
{
	int remain_before;
	int remain_after;

	remain_before = shrink_dcache_memory(0, GFP_KERNEL);
	remain_after = shrink_dcache_memory(KMC_REAP_CHUNK, GFP_KERNEL);

	splat_vprint(file, SPLAT_LINUX_TEST1_NAME,
	    "Shrink dcache memory, remain %d -> %d\n",
	    remain_before, remain_after);

	return 0;
}
static int shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask, int nr_pages)
{
	int chunk_size = nr_pages;
	unsigned long ratio;

	nr_pages -= kmem_cache_reap(gfp_mask);
	if (nr_pages <= 0)
		return 0;

	nr_pages = chunk_size;
	/* try to keep the active list 2/3 of the size of the cache */
	ratio = (unsigned long) nr_pages * nr_active_pages / ((nr_inactive_pages + 1) * 2);
	refill_inactive(ratio);

	nr_pages = shrink_cache(nr_pages, classzone, gfp_mask, priority);
	if (nr_pages <= 0)
		return 0;

	shrink_dcache_memory(priority, gfp_mask);
	shrink_icache_memory(priority, gfp_mask);
#ifdef CONFIG_QUOTA
	shrink_dqcache_memory(DEF_PRIORITY, gfp_mask);
#endif

	return nr_pages;
}
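The ratio computed above tries to keep the active list at roughly twice the inactive list (about 2/3 of the cache), so the amount of deactivation work grows when the active list is oversized. A minimal, self-contained sketch of that arithmetic with made-up page counts (the variable names below are illustrative, not the kernel's globals):

#include <stdio.h>

int main(void)
{
	/* Hypothetical snapshot of LRU sizes, not the kernel counters. */
	unsigned long demo_active_pages = 9000;
	unsigned long demo_inactive_pages = 2999;
	int nr_pages = 32;	/* SWAP_CLUSTER_MAX-style request size */
	unsigned long ratio;

	/* Same formula as shrink_caches(): scale the deactivation work by
	 * how far the active list is from twice the inactive list. */
	ratio = (unsigned long) nr_pages * demo_active_pages /
		((demo_inactive_pages + 1) * 2);

	/* 32 * 9000 / (3000 * 2) = 48: the active list is larger than the
	 * 2:1 target, so more than nr_pages entries are deactivated.
	 * With active exactly twice inactive, ratio would equal nr_pages. */
	printf("refill_inactive(%lu)\n", ratio);
	return 0;
}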
static int do_try_to_free_pages(unsigned int gfp_mask, int user)
{
	int ret = 0;

	/*
	 * If we're low on free pages, move pages from the
	 * inactive_dirty list to the inactive_clean list.
	 *
	 * Usually bdflush will have pre-cleaned the pages
	 * before we get around to moving them to the other
	 * list, so this is a relatively cheap operation.
	 */
	if (free_shortage() || nr_inactive_dirty_pages >
			nr_free_pages() + nr_inactive_clean_pages())
		ret += page_launder(gfp_mask, user);

	/*
	 * If needed, we move pages from the active list
	 * to the inactive list. We also "eat" pages from
	 * the inode and dentry cache whenever we do this.
	 */
	if (free_shortage() || inactive_shortage()) {
		shrink_dcache_memory(6, gfp_mask);
		shrink_icache_memory(6, gfp_mask);
		ret += refill_inactive(gfp_mask, user);
	} else {
		/*
		 * Reclaim unused slab cache memory.
		 */
		kmem_cache_reap(gfp_mask);
		ret = 1;
	}

	return ret;
}
int try_to_free_pages_zone(zone_t *classzone, unsigned int gfp_mask)
{
	gfp_mask = pf_gfp_mask(gfp_mask);

	for (;;) {
		int tries = vm_passes;
		int failed_swapout = !(gfp_mask & __GFP_IO);
		int nr_pages = SWAP_CLUSTER_MAX;

		do {
			nr_pages = shrink_caches(classzone, gfp_mask, nr_pages, &failed_swapout);
			if (nr_pages <= 0)
				return 1;

			shrink_dcache_memory(vm_vfs_scan_ratio, gfp_mask);
			shrink_icache_memory(vm_vfs_scan_ratio, gfp_mask);
#ifdef CONFIG_QUOTA
			shrink_dqcache_memory(vm_vfs_scan_ratio, gfp_mask);
#endif

			if (!failed_swapout)
				failed_swapout = !swap_out(classzone);
		} while (--tries);

#ifdef CONFIG_OOM_KILLER
		out_of_memory();
#else
		if (likely(current->pid != 1))
			break;
		if (!check_classzone_need_balance(classzone))
			break;

		__set_current_state(TASK_RUNNING);
		yield();
#endif
	}

	return 0;
}
/*
 * We need to make the locks finer granularity, but right
 * now we need this so that we can do page allocations
 * without holding the kernel lock etc.
 *
 * We want to try to free "count" pages, and we want to
 * cluster them so that we get good swap-out behaviour.
 *
 * OTOH, if we're a user process (and not kswapd), we
 * really care about latency. In that case we don't try
 * to free too many pages.
 */
static int refill_inactive(unsigned int gfp_mask, int user)
{
	int priority, count, start_count, made_progress;
	unsigned long idle_time;

	count = inactive_shortage() + free_shortage();
	if (user)
		count = (1 << page_cluster);
	start_count = count;

	/* Always trim SLAB caches when memory gets low. */
	kmem_cache_reap(gfp_mask);

	/*
	 * Calculate the minimum time (in seconds) a process must
	 * have slept before we consider it for idle swapping.
	 * This must be the number of seconds it takes to go through
	 * all of the cache. Doing this idle swapping makes the VM
	 * smoother once we start hitting swap.
	 */
	idle_time = atomic_read(&page_cache_size);
	idle_time += atomic_read(&buffermem_pages);
	idle_time /= (inactive_target + 1);

	priority = 6;
	do {
		made_progress = 0;

		if (current->need_resched) {
			__set_current_state(TASK_RUNNING);
			schedule();
		}

		while (refill_inactive_scan(priority, 1) ||
				swap_out(priority, gfp_mask, idle_time)) {
			made_progress = 1;
			if (--count <= 0)
				goto done;
		}

		/*
		 * don't be too light against the d/i cache since
		 * refill_inactive() almost never fails when there's
		 * really plenty of memory free.
		 */
		shrink_dcache_memory(priority, gfp_mask);
		shrink_icache_memory(priority, gfp_mask);

		/*
		 * Then, try to page stuff out..
		 */
		while (swap_out(priority, gfp_mask, 0)) {
			made_progress = 1;
			if (--count <= 0)
				goto done;
		}

		/*
		 * If we either have enough free memory, or if
		 * page_launder() will be able to make enough
		 * free memory, then stop.
		 */
		if (!inactive_shortage() || !free_shortage())
			goto done;

		/*
		 * Only switch to a lower "priority" if we
		 * didn't make any useful progress in the
		 * last loop.
		 */
		if (!made_progress)
			priority--;
	} while (priority >= 0);

	/* Always end on a refill_inactive.., may sleep... */
	while (refill_inactive_scan(0, 1)) {
		if (--count <= 0)
			goto done;
	}

done:
	return (count < start_count);
}
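The idle_time heuristic above divides the total cache size (in pages) by the inactive target to get the number of seconds a task must have slept before its pages are considered for idle swapping. A small stand-alone sketch of that arithmetic with hypothetical values (the names below are illustrative, not the kernel's counters):

#include <stdio.h>

int main(void)
{
	/* Hypothetical counters: pages in the page cache and buffer cache,
	 * plus the target size of the inactive list. */
	unsigned long page_cache_pages = 40000;
	unsigned long buffer_pages = 8000;
	unsigned long inactive_target = 1199;
	unsigned long idle_time;

	/* Same arithmetic as refill_inactive(): roughly the time it takes
	 * to cycle through the whole cache at the inactive-target rate. */
	idle_time = page_cache_pages + buffer_pages;
	idle_time /= (inactive_target + 1);

	/* (40000 + 8000) / 1200 = 40 seconds */
	printf("idle swap threshold: %lu s\n", idle_time);
	return 0;
}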
static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int * failed_swapout)
{
	struct list_head * entry;
	int max_scan = (classzone->nr_inactive_pages + classzone->nr_active_pages) / vm_cache_scan_ratio;
	int max_mapped = vm_mapped_ratio * nr_pages;

	while (max_scan && classzone->nr_inactive_pages &&
			(entry = inactive_list.prev) != &inactive_list) {
		struct page * page;

		if (unlikely(current->need_resched)) {
			spin_unlock(&pagemap_lru_lock);
			__set_current_state(TASK_RUNNING);
			schedule();
			spin_lock(&pagemap_lru_lock);
			continue;
		}

		page = list_entry(entry, struct page, lru);

		BUG_ON(!PageLRU(page));
		BUG_ON(PageActive(page));

		list_del(entry);
		list_add(entry, &inactive_list);

		/*
		 * Zero page counts can happen because we unlink the pages
		 * _after_ decrementing the usage count..
		 */
		if (unlikely(!page_count(page)))
			continue;

		if (!memclass(page_zone(page), classzone))
			continue;

		max_scan--;

		/* Racy check to avoid trylocking when not worthwhile */
		if (!page->buffers && (page_count(page) != 1 || !page->mapping))
			goto page_mapped;

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid freeing a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);

					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * This is the non-racy check for busy page.
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable so not in use by anybody.
		 * At this point we're guaranteed that page->buffers is NULL,
		 * nobody can refill page->buffers under us because we still
		 * hold the page lock.
		 */
		if (!page->mapping || page_count(page) > 1) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped < 0) {
				spin_unlock(&pagemap_lru_lock);

				nr_pages -= kmem_cache_reap(gfp_mask);
				if (nr_pages <= 0)
					goto out;

				shrink_dcache_memory(vm_vfs_scan_ratio, gfp_mask);
				shrink_icache_memory(vm_vfs_scan_ratio, gfp_mask);
#ifdef CONFIG_QUOTA
				shrink_dqcache_memory(vm_vfs_scan_ratio, gfp_mask);
#endif

				if (!*failed_swapout)
					*failed_swapout = !swap_out(classzone);

				max_mapped = nr_pages * vm_mapped_ratio;

				spin_lock(&pagemap_lru_lock);
				refill_inactive(nr_pages, classzone);
			}
			continue;
		}

		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		__lru_cache_del(page);

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);

out:
	return nr_pages;
}