/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache. Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageUptodate(page));

#ifdef CONFIG_HSWAP
        if (!current_is_kswapd())
                entry = get_lowest_prio_swap_page();
        else
#endif
                entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page(page))) {
                        swapcache_free(entry, NULL);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache and mark it dirty
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {     /* Success */
                SetPageDirty(page);
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry, NULL);
                return 0;
        }
}
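/*
 * Editor's sketch (hypothetical helper, not part of the original source):
 * illustrates the calling contract of add_to_swap() - the caller must hold
 * the page lock, and on success the page is in the swap cache and marked
 * dirty so that a later pageout() can write it to swap. The function name
 * try_swap_backing() is made up for illustration only.
 */
static bool try_swap_backing(struct page *page)
{
        bool added;

        lock_page(page);                /* add_to_swap() requires the page lock */
        added = PageAnon(page) && !PageSwapCache(page) && add_to_swap(page);
        unlock_page(page);

        return added;                   /* true: page is now swap-backed */
}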
void tune_lmk_param(int *other_free, int *other_file,
                    struct shrink_control *sc)
{
        gfp_t gfp_mask;
        struct zone *preferred_zone;
        struct zonelist *zonelist;
        enum zone_type high_zoneidx, classzone_idx;
        unsigned long balance_gap;

        gfp_mask = sc->gfp_mask;
        zonelist = node_zonelist(0, gfp_mask);
        high_zoneidx = gfp_zone(gfp_mask);
        first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
        classzone_idx = zone_idx(preferred_zone);

        balance_gap = min(low_wmark_pages(preferred_zone),
                          (preferred_zone->present_pages +
                           KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
                          KSWAPD_ZONE_BALANCE_GAP_RATIO);

        if (likely(current_is_kswapd() &&
                   zone_watermark_ok(preferred_zone, 0,
                                     high_wmark_pages(preferred_zone) +
                                     SWAP_CLUSTER_MAX + balance_gap,
                                     0, 0))) {
                if (lmk_fast_run)
                        tune_lmk_zone_param(zonelist, classzone_idx,
                                            other_free, other_file);
                else
                        tune_lmk_zone_param(zonelist, classzone_idx,
                                            other_free, NULL);

                if (zone_watermark_ok(preferred_zone, 0, 0, ZONE_HIGHMEM, 0))
                        *other_free -=
                                preferred_zone->lowmem_reserve[ZONE_HIGHMEM];
                else
                        *other_free -= zone_page_state(preferred_zone,
                                                       NR_FREE_PAGES);

                lowmem_print(4, "lowmem_shrink of kswapd tuning for highmem "
                             "ofree %d, %d\n", *other_free, *other_file);
        } else {
                tune_lmk_zone_param(zonelist, classzone_idx,
                                    other_free, other_file);

                lowmem_print(4, "lowmem_shrink tuning for others ofree %d, "
                             "%d\n", *other_free, *other_file);
        }
}
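/*
 * Editor's note (sketch, not part of the original source): lowmem_print(),
 * used throughout the low-memory-killer code above and below, is a thin
 * verbosity filter over pr_info(). In drivers/staging/android/lowmemorykiller.c
 * it is defined roughly as follows, with lowmem_debug_level exposed as a
 * module parameter:
 */
#define lowmem_print(level, x...)                       \
        do {                                            \
                if (lowmem_debug_level >= (level))      \
                        pr_info(x);                     \
        } while (0)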
/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
        struct address_space *mapping = page->mapping;

        dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

        /* Only do I/O if gfp is a superset of GFP_KERNEL */
        if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
                int how = FLUSH_SYNC;

                /* Don't let kswapd deadlock waiting for OOM RPC calls */
                if (current_is_kswapd())
                        how = 0;
                nfs_commit_inode(mapping->host, how);
        }
        /* If PagePrivate() is set, then the page is not freeable */
        if (PagePrivate(page))
                return 0;
        return nfs_fscache_release_page(page, gfp);
}
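/*
 * Editor's note (sketch, not part of the original source): nfs_release_page()
 * is reached from reclaim via try_to_release_page(), because it is installed
 * as the ->releasepage handler in the NFS address_space_operations table in
 * fs/nfs/file.c. An abridged version of that table looks roughly like this:
 */
const struct address_space_operations nfs_file_aops = {
        .readpage       = nfs_readpage,
        .writepage      = nfs_writepage,
        .releasepage    = nfs_release_page,
        .invalidatepage = nfs_invalidate_page,
        /* ... remaining handlers omitted ... */
};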
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
        struct task_struct *tsk;
        struct task_struct *selected = NULL;
        int rem = 0;
        int tasksize;
        int i;
        int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
        int selected_tasksize = 0;
        int selected_oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
        int selected_hotness_adj = 0;
#endif
        int array_size = ARRAY_SIZE(lowmem_adj);
        int other_free;
        int other_file;
        unsigned long nr_to_scan = sc->nr_to_scan;
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
        static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 1);
#endif
        unsigned long nr_cma_free;
        struct reclaim_state *reclaim_state = current->reclaim_state;
#if defined(CONFIG_CMA_PAGE_COUNTING)
        unsigned long nr_cma_inactive_file;
        unsigned long nr_cma_active_file;
        unsigned long cma_page_ratio;
        bool is_active_high;
        bool flag = 0;
#endif

        if (nr_to_scan > 0) {
                if (mutex_lock_interruptible(&scan_mutex) < 0)
                        return 0;
        }

        other_free = global_page_state(NR_FREE_PAGES);
        nr_cma_free = global_page_state(NR_FREE_CMA_PAGES);
#ifdef CONFIG_ZSWAP
        if (!current_is_kswapd() || sc->priority <= 6)
#endif
                other_free -= nr_cma_free;

#if defined(CONFIG_CMA_PAGE_COUNTING)
        nr_cma_inactive_file = global_page_state(NR_CMA_INACTIVE_FILE);
        nr_cma_active_file = global_page_state(NR_CMA_ACTIVE_FILE);
        cma_page_ratio = 100 * global_page_state(NR_CMA_INACTIVE_FILE) /
                         global_page_state(NR_INACTIVE_FILE);
        is_active_high = (global_page_state(NR_ACTIVE_FILE) >
                          global_page_state(NR_INACTIVE_FILE)) ? 1 : 0;
#endif
        other_file = global_page_state(NR_FILE_PAGES);

#if defined(CONFIG_CMA_PAGE_COUNTING) && defined(CONFIG_EXCLUDE_LRU_LIVING_IN_CMA)
        if (get_nr_swap_pages() < SSWAP_LMK_THRESHOLD &&
            cma_page_ratio >= CMA_PAGE_RATIO && !is_active_high) {
                other_file = other_file -
                             (nr_cma_inactive_file + nr_cma_active_file);
                flag = 1;
        }
#endif
        if (global_page_state(NR_SHMEM) + total_swapcache_pages < other_file)
                other_file -= global_page_state(NR_SHMEM) +
                              total_swapcache_pages;
        else
                other_file = 0;

        if (lowmem_adj_size < array_size)
                array_size = lowmem_adj_size;
        if (lowmem_minfree_size < array_size)
                array_size = lowmem_minfree_size;
        for (i = 0; i < array_size; i++) {
                if (other_free < lowmem_minfree[i] &&
                    other_file < lowmem_minfree[i]) {
                        min_score_adj = lowmem_adj[i];
                        break;
                }
        }
        if (nr_to_scan > 0)
                lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
                             nr_to_scan, sc->gfp_mask, other_free,
                             other_file, min_score_adj);
        rem = global_page_state(NR_ACTIVE_ANON) +
              global_page_state(NR_ACTIVE_FILE) +
              global_page_state(NR_INACTIVE_ANON) +
              global_page_state(NR_INACTIVE_FILE);
        if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
                lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
                             nr_to_scan, sc->gfp_mask, rem);
                if (nr_to_scan > 0)
                        mutex_unlock(&scan_mutex);
                return rem;
        }
        selected_oom_score_adj = min_score_adj;

        rcu_read_lock();
        for_each_process(tsk) {
                struct task_struct *p;
                int oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
                int hotness_adj = 0;
#endif

                if (tsk->flags & PF_KTHREAD)
                        continue;

                /* if task no longer has any memory ignore it */
                if (test_task_flag(tsk, TIF_MM_RELEASED))
                        continue;

                if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
                        if (test_task_flag(tsk, TIF_MEMDIE)) {
                                rcu_read_unlock();
                                /* give the system time to free up the memory */
                                msleep_interruptible(20);
                                mutex_unlock(&scan_mutex);
                                return 0;
                        }
                }

                p = find_lock_task_mm(tsk);
                if (!p)
                        continue;

                oom_score_adj = p->signal->oom_score_adj;
                if (oom_score_adj < min_score_adj) {
                        task_unlock(p);
                        continue;
                }
                tasksize = get_mm_rss(p->mm);
#if defined(CONFIG_ZSWAP)
                if (atomic_read(&zswap_stored_pages)) {
                        lowmem_print(3, "shown tasksize : %d\n", tasksize);
                        tasksize += atomic_read(&zswap_pool_pages) *
                                    get_mm_counter(p->mm, MM_SWAPENTS) /
                                    atomic_read(&zswap_stored_pages);
                        lowmem_print(3, "real tasksize : %d\n", tasksize);
                }
#endif
#ifdef CONFIG_SAMP_HOTNESS
                hotness_adj = p->signal->hotness_adj;
#endif
                task_unlock(p);
                if (tasksize <= 0)
                        continue;
                if (selected) {
#ifdef CONFIG_SAMP_HOTNESS
                        if (min_score_adj <= lowmem_adj[4]) {
#endif
                                if (oom_score_adj < selected_oom_score_adj)
                                        continue;
                                if (oom_score_adj == selected_oom_score_adj &&
                                    tasksize <= selected_tasksize)
                                        continue;
#ifdef CONFIG_SAMP_HOTNESS
                        } else {
                                if (hotness_adj > selected_hotness_adj)
                                        continue;
                                if (hotness_adj == selected_hotness_adj &&
                                    tasksize <= selected_tasksize)
                                        continue;
                        }
#endif
                }
                selected = p;
                selected_tasksize = tasksize;
                selected_oom_score_adj = oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
                selected_hotness_adj = hotness_adj;
#endif
                lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
                             p->pid, p->comm, oom_score_adj, tasksize);
        }
        if (selected) {
#if defined(CONFIG_CMA_PAGE_COUNTING)
#ifdef CONFIG_SAMP_HOTNESS
                lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
                             "ofree %d, ofile %d(%c), is_kswapd %d - "
                             "cma_free %lu priority %d cma_i_file %lu cma_a_file %lu, hotness %d\n",
                             selected->pid, selected->comm,
                             selected_oom_score_adj, selected_tasksize,
                             other_free, other_file, flag ? '-' : '+',
                             !!current_is_kswapd(),
                             nr_cma_free, sc->priority,
                             nr_cma_inactive_file, nr_cma_active_file,
                             selected_hotness_adj);
#else
                lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
                             "ofree %d, ofile %d(%c), is_kswapd %d - "
                             "cma_free %lu priority %d cma_i_file %lu cma_a_file %lu\n",
                             selected->pid, selected->comm,
                             selected_oom_score_adj, selected_tasksize,
                             other_free, other_file, flag ? '-' : '+',
                             !!current_is_kswapd(),
                             nr_cma_free, sc->priority,
                             nr_cma_inactive_file, nr_cma_active_file);
#endif
#else
#ifdef CONFIG_SAMP_HOTNESS
                lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
                             "free memory = %d, reclaimable memory = %d "
                             "is_kswapd %d cma_free %lu priority %d, hotness %d\n",
                             selected->pid, selected->comm,
                             selected_oom_score_adj, selected_tasksize,
                             other_free, other_file,
                             !!current_is_kswapd(),
                             nr_cma_free, sc->priority,
                             selected_hotness_adj);
#else
                lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
                             "free memory = %d, reclaimable memory = %d "
                             "is_kswapd %d cma_free %lu priority %d\n",
                             selected->pid, selected->comm,
                             selected_oom_score_adj, selected_tasksize,
                             other_free, other_file,
                             !!current_is_kswapd(),
                             nr_cma_free, sc->priority);
#endif
#endif
                lowmem_deathpending_timeout = jiffies + HZ;
                send_sig(SIGKILL, selected, 0);
                set_tsk_thread_flag(selected, TIF_MEMDIE);
                rem -= selected_tasksize;
                rcu_read_unlock();
#ifdef LMK_COUNT_READ
                lmk_count++;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
                if ((selected_oom_score_adj < lowmem_adj[5]) &&
                    __ratelimit(&lmk_rs)) {
                        lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
                                     nr_to_scan, sc->gfp_mask, other_free,
                                     other_file, min_score_adj);
                        show_mem(SHOW_MEM_FILTER_NODES);
                        dump_tasks_info();
                }
#endif
                /* give the system time to free up the memory */
                msleep_interruptible(20);
                if (reclaim_state)
                        reclaim_state->reclaimed_slab = selected_tasksize;
        } else
                rcu_read_unlock();

        lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
                     nr_to_scan, sc->gfp_mask, rem);
        mutex_unlock(&scan_mutex);
        return rem;
}
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
        struct task_struct *tsk;
        struct task_struct *selected = NULL;
        int rem = 0;
        int tasksize;
        int i;
        int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
        int selected_tasksize = 0;
        int selected_oom_score_adj;
        int selected_oom_adj = 0;
        int array_size = ARRAY_SIZE(lowmem_adj);
        int other_free;
        int other_file;
        int reserved_free = 0;
        int cma_free = 0;
        unsigned long nr_to_scan = sc->nr_to_scan;
        struct zone *zone;
        int use_cma = can_use_cma_pages(sc->gfp_mask);

        if (nr_to_scan > 0) {
                if (!mutex_trylock(&scan_mutex)) {
                        if (!(lowmem_only_kswapd_sleep &&
                              !current_is_kswapd())) {
                                msleep_interruptible(lowmem_sleep_ms);
                        }
                        return 0;
                }
        }

        for_each_zone(zone) {
                if (is_normal(zone))
                        reserved_free = zone->watermark[WMARK_MIN] +
                                        zone->lowmem_reserve[_ZONE];

                cma_free += zone_page_state(zone, NR_FREE_CMA_PAGES);
        }

        other_free = global_page_state(NR_FREE_PAGES);

        if (global_page_state(NR_SHMEM) + global_page_state(NR_MLOCK) +
            total_swapcache_pages < global_page_state(NR_FILE_PAGES))
                other_file = global_page_state(NR_FILE_PAGES) -
                             global_page_state(NR_SHMEM) -
                             global_page_state(NR_MLOCK) -
                             total_swapcache_pages;
        else
                other_file = 0;

        if (lowmem_adj_size < array_size)
                array_size = lowmem_adj_size;
        if (lowmem_minfree_size < array_size)
                array_size = lowmem_minfree_size;
        for (i = 0; i < array_size; i++) {
                if ((other_free - reserved_free -
                     (use_cma ? 0 : cma_free)) < lowmem_minfree[i] &&
                    other_file < lowmem_minfree[i]) {
                        min_score_adj = lowmem_adj[i];
                        break;
                }
        }
        if (nr_to_scan > 0)
                lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d, rfree %d\n",
                             nr_to_scan, sc->gfp_mask, other_free,
                             other_file, min_score_adj, reserved_free);
        rem = global_page_state(NR_ACTIVE_ANON) +
              global_page_state(NR_ACTIVE_FILE) +
              global_page_state(NR_INACTIVE_ANON) +
              global_page_state(NR_INACTIVE_FILE);
        if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
                lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
                             nr_to_scan, sc->gfp_mask, rem);
                if (nr_to_scan > 0)
                        mutex_unlock(&scan_mutex);
                return rem;
        }
        selected_oom_score_adj = min_score_adj;

        rcu_read_lock();
        for_each_process(tsk) {
                struct task_struct *p;
                int oom_score_adj;

                if (tsk->flags & PF_KTHREAD)
                        continue;

                if (test_task_flag(tsk, TIF_MM_RELEASED))
                        continue;

                if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
                        if (test_task_flag(tsk, TIF_MEMDIE)) {
                                lowmem_print(2, "skipping, waiting for process %d (%s) dead\n",
                                             tsk->pid, tsk->comm);
                                rcu_read_unlock();
                                if (!(lowmem_only_kswapd_sleep &&
                                      !current_is_kswapd())) {
                                        msleep_interruptible(lowmem_sleep_ms);
                                }
                                mutex_unlock(&scan_mutex);
                                return 0;
                        }
                }

                p = find_lock_task_mm(tsk);
                if (!p)
                        continue;

                oom_score_adj = p->signal->oom_score_adj;
                if (oom_score_adj < min_score_adj) {
                        task_unlock(p);
                        continue;
                }
                tasksize = get_mm_rss(p->mm);
                task_unlock(p);
                if (tasksize <= 0)
                        continue;
                if (selected) {
                        if (oom_score_adj < selected_oom_score_adj)
                                continue;
                        if (oom_score_adj == selected_oom_score_adj &&
                            tasksize <= selected_tasksize)
                                continue;
                }
                selected = p;
                selected_tasksize = tasksize;
                selected_oom_score_adj = oom_score_adj;
                selected_oom_adj = p->signal->oom_adj;
                lowmem_print(2, "select %d (%s), oom_adj %d score_adj %d, size %d, to kill\n",
                             p->pid, p->comm, selected_oom_adj,
                             oom_score_adj, tasksize);
        }
        if (selected) {
                bool should_dump_meminfo = false;

                lowmem_print(1, "[%s] send sigkill to %d (%s), oom_adj %d, score_adj %d,"
                             " min_score_adj %d, size %dK, free %dK, file %dK, "
                             " reserved_free %dK, cma_free %dK, use_cma %d\n",
                             current->comm, selected->pid, selected->comm,
                             selected_oom_adj, selected_oom_score_adj,
                             min_score_adj, selected_tasksize << 2,
                             other_free << 2, other_file << 2,
                             reserved_free << 2, cma_free << 2, use_cma);

                if (!current_is_kswapd() && current->reclaim_state)
                        current->reclaim_state->trigger_lmk++;

                lowmem_deathpending_timeout = jiffies + HZ;

#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
#define DUMP_INFO_OOM_SCORE_ADJ_THRESHOLD ((7 * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE)
                if (selected_oom_score_adj < DUMP_INFO_OOM_SCORE_ADJ_THRESHOLD)
#else
                if (selected->signal->oom_adj < 7)
#endif
                        should_dump_meminfo = true;

                send_sig(SIGKILL, selected, 0);
                set_tsk_thread_flag(selected, TIF_MEMDIE);
                rem -= selected_tasksize;
                rcu_read_unlock();

                if (should_dump_meminfo) {
                        show_meminfo();
                        dump_tasks();
                }

                if (!(lowmem_only_kswapd_sleep && !current_is_kswapd()))
                        msleep_interruptible(lowmem_sleep_ms);
        } else
                rcu_read_unlock();

        lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
                     nr_to_scan, sc->gfp_mask, rem);
        mutex_unlock(&scan_mutex);
        return rem;
}
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
        struct task_struct *tsk;
        struct task_struct *selected = NULL;
        int rem = 0;
        int tasksize;
        int i;
        int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
        int selected_tasksize = 0;
        int selected_oom_score_adj;
        int selected_oom_adj = 0;
        int array_size = ARRAY_SIZE(lowmem_adj);
        int other_free;
        int other_file;
        int reserved_free = 0;
        unsigned long nr_to_scan = sc->nr_to_scan;
        int fork_boost = 0;
        size_t *min_array;
        struct zone *zone;

        if (nr_to_scan > 0) {
                if (!mutex_trylock(&scan_mutex)) {
                        if (!(lowmem_only_kswapd_sleep &&
                              !current_is_kswapd())) {
                                msleep_interruptible(lowmem_sleep_ms);
                        }
                        return 0;
                }
        }

        for_each_zone(zone) {
                if (is_normal(zone)) {
                        reserved_free = zone->watermark[WMARK_MIN] +
                                        zone->lowmem_reserve[_ZONE];
                        break;
                }
        }

        other_free = global_page_state(NR_FREE_PAGES);
        other_file = global_page_state(NR_FILE_PAGES) -
                     global_page_state(NR_SHMEM) -
                     global_page_state(NR_MLOCK);

        if (lowmem_fork_boost &&
            time_before_eq(jiffies, lowmem_fork_boost_timeout)) {
                for (i = 0; i < lowmem_minfree_size; i++)
                        minfree_tmp[i] = lowmem_minfree[i] +
                                         lowmem_fork_boost_minfree[i];
                min_array = minfree_tmp;
        } else
                min_array = lowmem_minfree;

        if (lowmem_adj_size < array_size)
                array_size = lowmem_adj_size;
        if (lowmem_minfree_size < array_size)
                array_size = lowmem_minfree_size;
        for (i = 0; i < array_size; i++) {
                if ((other_free - reserved_free) < min_array[i] &&
                    other_file < min_array[i]) {
                        min_score_adj = lowmem_adj[i];
                        fork_boost = lowmem_fork_boost_minfree[i];
                        break;
                }
        }
        if (nr_to_scan > 0)
                lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d, rfree %d\n",
                             nr_to_scan, sc->gfp_mask, other_free,
                             other_file, min_score_adj, reserved_free);
        rem = global_page_state(NR_ACTIVE_ANON) +
              global_page_state(NR_ACTIVE_FILE) +
              global_page_state(NR_INACTIVE_ANON) +
              global_page_state(NR_INACTIVE_FILE);
        if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
                lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
                             nr_to_scan, sc->gfp_mask, rem);
                if (nr_to_scan > 0)
                        mutex_unlock(&scan_mutex);
                return rem;
        }
        selected_oom_score_adj = min_score_adj;

        rcu_read_lock();
        for_each_process(tsk) {
                struct task_struct *p;
                int oom_score_adj;

                if (tsk->flags & PF_KTHREAD)
                        continue;

                if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
                        if (test_task_flag(tsk, TIF_MEMDIE)) {
                                lowmem_print(2, "skipping, waiting for process %d (%s) dead\n",
                                             tsk->pid, tsk->comm);
                                rcu_read_unlock();
                                if (!(lowmem_only_kswapd_sleep &&
                                      !current_is_kswapd())) {
                                        msleep_interruptible(lowmem_sleep_ms);
                                }
                                mutex_unlock(&scan_mutex);
                                return 0;
                        }
                }

                p = find_lock_task_mm(tsk);
                if (!p)
                        continue;

                oom_score_adj = p->signal->oom_score_adj;
                if (oom_score_adj < selected_oom_score_adj) {
                        task_unlock(p);
                        continue;
                }
                tasksize = get_mm_rss(p->mm);
                task_unlock(p);
                if (tasksize <= 0)
                        continue;
                if (selected &&
                    oom_score_adj == selected_oom_score_adj &&
                    tasksize <= selected_tasksize)
                        continue;
                selected = p;
                selected_tasksize = tasksize;
                selected_oom_score_adj = oom_score_adj;
                selected_oom_adj = p->signal->oom_adj;
                lowmem_print(2, "select %d (%s), oom_adj %d score_adj %d, size %d, to kill\n",
                             p->pid, p->comm, selected_oom_adj,
                             oom_score_adj, tasksize);
        }
        if (selected) {
                lowmem_print(1, "[%s] send sigkill to %d (%s), oom_adj %d, score_adj %d,"
                             " min_score_adj %d, size %dK, free %dK, file %dK, fork_boost %dK,"
                             " reserved_free %dK\n",
                             current->comm, selected->pid, selected->comm,
                             selected_oom_adj, selected_oom_score_adj,
                             min_score_adj, selected_tasksize << 2,
                             other_free << 2, other_file << 2,
                             fork_boost << 2, reserved_free << 2);
                lowmem_deathpending_timeout = jiffies + HZ;
                if (selected_oom_adj < 7) {
                        show_meminfo();
                        dump_tasks();
                }
                send_sig(SIGKILL, selected, 0);
                set_tsk_thread_flag(selected, TIF_MEMDIE);
                rem -= selected_tasksize;
                rcu_read_unlock();
                if (!(lowmem_only_kswapd_sleep && !current_is_kswapd())) {
                        msleep_interruptible(lowmem_sleep_ms);
                }
        } else
                rcu_read_unlock();

        lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
                     sc->nr_to_scan, sc->gfp_mask, rem);
        mutex_unlock(&scan_mutex);
        return rem;
}
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
        struct task_struct *tsk;
        struct task_struct *selected = NULL;
        int rem = 0;
        int tasksize;
        int i;
        short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
        int minfree = 0;
        int selected_tasksize = 0;
        short selected_oom_score_adj;
        int array_size = ARRAY_SIZE(lowmem_adj);
        int other_free;
        int other_file;
        unsigned long nr_to_scan = sc->nr_to_scan;
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
        static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 1);
#endif
        unsigned long nr_cma_free;

        if (nr_to_scan > 0) {
                if (mutex_lock_interruptible(&scan_mutex) < 0)
                        return 0;
        }

        other_free = global_page_state(NR_FREE_PAGES);

        nr_cma_free = global_page_state(NR_FREE_CMA_PAGES);
        if (!current_is_kswapd() || sc->priority <= 6)
                other_free -= nr_cma_free;

        if (global_page_state(NR_SHMEM) + total_swapcache_pages() <
            global_page_state(NR_FILE_PAGES))
                other_file = global_page_state(NR_FILE_PAGES) -
                             global_page_state(NR_SHMEM) -
                             total_swapcache_pages();
        else
                other_file = 0;

        if (lowmem_adj_size < array_size)
                array_size = lowmem_adj_size;
        if (lowmem_minfree_size < array_size)
                array_size = lowmem_minfree_size;
        for (i = 0; i < array_size; i++) {
                minfree = lowmem_minfree[i];
                if (other_free < minfree && other_file < minfree) {
                        min_score_adj = lowmem_adj[i];
                        break;
                }
        }
        if (nr_to_scan > 0)
                lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %hd\n",
                             nr_to_scan, sc->gfp_mask, other_free,
                             other_file, min_score_adj);
        rem = global_page_state(NR_ACTIVE_ANON) +
              global_page_state(NR_ACTIVE_FILE) +
              global_page_state(NR_INACTIVE_ANON) +
              global_page_state(NR_INACTIVE_FILE);
        if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
                lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
                             nr_to_scan, sc->gfp_mask, rem);
                if (nr_to_scan > 0)
                        mutex_unlock(&scan_mutex);
                return rem;
        }
        selected_oom_score_adj = min_score_adj;

        rcu_read_lock();
        for_each_process(tsk) {
                struct task_struct *p;
                short oom_score_adj;

                if (tsk->flags & PF_KTHREAD ||
                    tsk->state & TASK_UNINTERRUPTIBLE)
                        continue;

                /* if task no longer has any memory ignore it */
                if (test_task_flag(tsk, TIF_MM_RELEASED))
                        continue;

                if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
                        if (test_task_flag(tsk, TIF_MEMDIE)) {
                                rcu_read_unlock();
                                /* give the system time to free up the memory */
                                msleep_interruptible(20);
                                mutex_unlock(&scan_mutex);
                                return 0;
                        }
                }

                p = find_lock_task_mm(tsk);
                if (!p)
                        continue;

                oom_score_adj = p->signal->oom_score_adj;
                if (oom_score_adj < min_score_adj) {
                        task_unlock(p);
                        continue;
                }
                tasksize = get_mm_rss(p->mm);
                task_unlock(p);
                if (tasksize <= 0)
                        continue;
                if (selected) {
                        if (oom_score_adj < selected_oom_score_adj)
                                continue;
                        if (oom_score_adj == selected_oom_score_adj &&
                            tasksize <= selected_tasksize)
                                continue;
                }
                selected = p;
                selected_tasksize = tasksize;
                selected_oom_score_adj = oom_score_adj;
                lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n",
                             p->comm, p->pid, oom_score_adj, tasksize);
        }
        if (selected) {
                lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
                             "   to free %ldkB on behalf of '%s' (%d) because\n" \
                             "   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
                             "   Free memory is %ldkB above reserved.\n" \
                             "   is_kswapd %d priority %d\n" \
                             "   Free CMA is %ldkB\n" \
                             "   Total reserve is %ldkB\n" \
                             "   Total free pages is %ldkB\n" \
                             "   Total file cache is %ldkB\n" \
                             "   Slab Reclaimable is %ldkB\n" \
                             "   Slab UnReclaimable is %ldkB\n" \
                             "   Total Slab is %ldkB\n" \
                             "   GFP mask is 0x%x\n",
                             selected->comm, selected->pid,
                             selected_oom_score_adj,
                             selected_tasksize * (long)(PAGE_SIZE / 1024),
                             current->comm, current->pid,
                             other_file * (long)(PAGE_SIZE / 1024),
                             minfree * (long)(PAGE_SIZE / 1024),
                             min_score_adj,
                             other_free * (long)(PAGE_SIZE / 1024),
                             !!current_is_kswapd(),
                             sc->priority,
                             nr_cma_free * (long)(PAGE_SIZE / 1024),
                             totalreserve_pages * (long)(PAGE_SIZE / 1024),
                             global_page_state(NR_FREE_PAGES) *
                                     (long)(PAGE_SIZE / 1024),
                             global_page_state(NR_FILE_PAGES) *
                                     (long)(PAGE_SIZE / 1024),
                             global_page_state(NR_SLAB_RECLAIMABLE) *
                                     (long)(PAGE_SIZE / 1024),
                             global_page_state(NR_SLAB_UNRECLAIMABLE) *
                                     (long)(PAGE_SIZE / 1024),
                             global_page_state(NR_SLAB_RECLAIMABLE) *
                                     (long)(PAGE_SIZE / 1024) +
                             global_page_state(NR_SLAB_UNRECLAIMABLE) *
                                     (long)(PAGE_SIZE / 1024),
                             sc->gfp_mask);

                if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) {
                        show_mem(SHOW_MEM_FILTER_NODES);
                        dump_tasks(NULL, NULL);
                        show_mem_call_notifiers();
                }

                lowmem_deathpending_timeout = jiffies + HZ;
                send_sig(SIGKILL, selected, 0);
                set_tsk_thread_flag(selected, TIF_MEMDIE);
                rem -= selected_tasksize;
                rcu_read_unlock();
#ifdef LMK_COUNT_READ
                lmk_count++;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
                if (__ratelimit(&lmk_rs)) {
                        lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
                                     nr_to_scan, sc->gfp_mask, other_free,
                                     other_file, min_score_adj);
                        show_mem(SHOW_MEM_FILTER_NODES);
                        dump_tasks_info();
                }
#endif
                /* give the system time to free up the memory */
                msleep_interruptible(20);
        } else
                rcu_read_unlock();

        lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
                     nr_to_scan, sc->gfp_mask, rem);
        mutex_unlock(&scan_mutex);
        return rem;
}
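/*
 * Editor's note (sketch, not part of the original source): every
 * lowmem_shrink() variant above uses the pre-3.12 shrinker interface, where a
 * single ->shrink callback both reports the reclaimable count (nr_to_scan == 0)
 * and performs a kill (nr_to_scan > 0). The driver registers it roughly as
 * follows in drivers/staging/android/lowmemorykiller.c:
 */
static struct shrinker lowmem_shrinker = {
        .shrink = lowmem_shrink,
        .seeks = DEFAULT_SEEKS * 16
};

static int __init lowmem_init(void)
{
        register_shrinker(&lowmem_shrinker);
        return 0;
}

static void __exit lowmem_exit(void)
{
        unregister_shrinker(&lowmem_shrinker);
}

module_init(lowmem_init);
module_exit(lowmem_exit);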