/*
 * OOM-notifier callback: with MULTIPLE_OOM_KILLER it kills up to OOM_DEPTH
 * tasks per invocation, otherwise a single task.  Victims are the tasks
 * with the highest oom_score_adj, ties broken by larger RSS.  The number
 * of pages freed is reported back through *data.
 */
static int android_oom_handler(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct task_struct *tsk;
#ifdef MULTIPLE_OOM_KILLER
	struct task_struct *selected[OOM_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef MULTIPLE_OOM_KILLER
	int selected_tasksize[OOM_DEPTH] = {0,};
	int selected_oom_score_adj[OOM_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL/5, 1);
	unsigned long *freed = data;

	/* show status */
	pr_warning("%s invoked Android-oom-killer: "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, current->signal->oom_adj,
		current->signal->oom_score_adj);
	dump_stack();
	show_mem(SHOW_MEM_FILTER_NODES);
	if (__ratelimit(&oom_rs))
		dump_tasks_info();

	min_score_adj = 0;
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 1);
#endif
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef MULTIPLE_OOM_KILLER
		int is_exist_oom_task = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;

		lowmem_print(2, "oom: ------ %d (%s), adj %d, size %d\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#ifdef MULTIPLE_OOM_KILLER
		if (all_selected_oom < OOM_DEPTH) {
			/* still empty slots: take the first free one */
			for (i = 0; i < OOM_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			   (selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			    selected_tasksize[max_selected_oom_idx] < tasksize)) {
			/* stronger candidate than the weakest current victim */
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] =
				oom_score_adj;

			if (all_selected_oom < OOM_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == OOM_DEPTH) {
				/* recompute which slot is now the weakest */
				for (i = 0; i < OOM_DEPTH; i++) {
					if (selected_oom_score_adj[i] <
					    selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] ==
						 selected_oom_score_adj[max_selected_oom_idx] &&
						 selected_tasksize[i] <
						 selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "oom: max_selected_oom_idx(%d) "
				"select %d (%s), adj %d, size %d, to kill\n",
				max_selected_oom_idx, p->pid, p->comm,
				oom_score_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "oom: select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++) {
		if (selected[i]) {
			lowmem_print(1, "oom: send sigkill to %d (%s), "
				"adj %d, size %d\n",
				selected[i]->pid, selected[i]->comm,
				selected_oom_score_adj[i],
				selected_tasksize[i]);
			send_sig(SIGKILL, selected[i], 0);
			rem -= selected_tasksize[i];
			*freed += (unsigned long)selected_tasksize[i];
#ifdef OOM_COUNT_READ
			oom_count++;
#endif
		}
	}
#else
	if (selected) {
		lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		*freed += (unsigned long)selected_tasksize;
#ifdef OOM_COUNT_READ
		oom_count++;
#endif
	}
#endif
	read_unlock(&tasklist_lock);
#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 0);
#endif
	lowmem_print(2, "oom: get memory %lu\n", *freed);
	return rem;
}
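/*
 * Illustration only, not part of the driver: a self-contained userspace
 * sketch of the MULTIPLE_OOM_KILLER slot logic above.  It keeps OOM_DEPTH
 * candidate slots and tracks the index of the weakest occupant (lowest
 * adj, then smallest size), which is the slot the next stronger task
 * displaces.  The struct cand type, pick_victims() and the sample data
 * are made up for the demo.  Build with: gcc -std=c99 demo.c
 */
#include <stdio.h>

#define OOM_DEPTH 3

struct cand { const char *comm; int adj; int size; };

static void pick_victims(const struct cand *tasks, int ntasks,
			 const struct cand *out[OOM_DEPTH])
{
	int filled = 0, weakest = 0;

	for (int t = 0; t < ntasks; t++) {
		int take = 0;

		if (filled < OOM_DEPTH) {
			/* an empty slot always accepts the task */
			for (int i = 0; i < OOM_DEPTH; i++) {
				if (!out[i]) {
					weakest = i;
					take = 1;
					break;
				}
			}
		} else if (out[weakest]->adj < tasks[t].adj ||
			   (out[weakest]->adj == tasks[t].adj &&
			    out[weakest]->size < tasks[t].size)) {
			take = 1;	/* beats the weakest current victim */
		}
		if (!take)
			continue;
		out[weakest] = &tasks[t];
		if (filled < OOM_DEPTH)
			filled++;
		if (filled == OOM_DEPTH) {
			/* recompute the weakest slot, as the driver does */
			for (int i = 0; i < OOM_DEPTH; i++) {
				if (out[i]->adj < out[weakest]->adj ||
				    (out[i]->adj == out[weakest]->adj &&
				     out[i]->size < out[weakest]->size))
					weakest = i;
			}
		}
	}
}

int main(void)
{
	const struct cand tasks[] = {
		{ "launcher", 6, 9000 },     { "music", 9, 4000 },
		{ "cached-app", 15, 12000 }, { "service", 5, 20000 },
		{ "browser-tab", 15, 30000 },
	};
	const struct cand *victims[OOM_DEPTH] = { NULL, };

	pick_victims(tasks, 5, victims);
	/* prints browser-tab, music, cached-app: top 3 by adj, then size */
	for (int i = 0; i < OOM_DEPTH; i++)
		if (victims[i])
			printf("kill %s (adj %d, size %d)\n",
			       victims[i]->comm, victims[i]->adj,
			       victims[i]->size);
	return 0;
}

/*
 * The shrinker-based variant of the killer.  With ENHANCED_LMK_ROUTINE it
 * applies the same multi-victim slot logic (up to LOWMEM_DEATHPENDING_DEPTH
 * kills per pass); the task walk runs under read_lock(&tasklist_lock) and
 * returns early while a previous victim (TIF_MEMDIE) has not yet exited.
 */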
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
#ifdef ENHANCED_LMK_ROUTINE
	struct task_struct *selected[LOWMEM_DEATHPENDING_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
	static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 0);
#else
	static DEFINE_RATELIMIT_STATE(lmk_rs, 6*DEFAULT_RATELIMIT_INTERVAL, 0);
#endif
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef ENHANCED_LMK_ROUTINE
	int selected_tasksize[LOWMEM_DEATHPENDING_DEPTH] = {0,};
	int selected_oom_score_adj[LOWMEM_DEATHPENDING_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
	int array_size = ARRAY_SIZE(lowmem_adj);
#ifndef CONFIG_CMA
	int other_free = global_page_state(NR_FREE_PAGES);
#else
	int other_free = global_page_state(NR_FREE_PAGES) -
				global_page_state(NR_FREE_CMA_PAGES);
#endif
	int other_file = global_page_state(NR_FILE_PAGES) -
				global_page_state(NR_SHMEM);

#ifdef CONFIG_ZRAM_FOR_ANDROID
	other_file -= total_swapcache_pages;
#endif /* CONFIG_ZRAM_FOR_ANDROID */

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}

#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 1);
#endif /* CONFIG_ZRAM_FOR_ANDROID */

	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef ENHANCED_LMK_ROUTINE
		int is_exist_oom_task = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			task_unlock(p);
			read_unlock(&tasklist_lock);
#ifdef CONFIG_ZRAM_FOR_ANDROID
			atomic_set(&s_reclaim.lmk_running, 0);
#endif /* CONFIG_ZRAM_FOR_ANDROID */
			return 0;
		}
		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
#ifdef ENHANCED_LMK_ROUTINE
		if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH) {
			for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			   (selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			    selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] =
				oom_score_adj;

			if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == LOWMEM_DEATHPENDING_DEPTH) {
				for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
					if (selected_oom_score_adj[i] <
					    selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] ==
						 selected_oom_score_adj[max_selected_oom_idx] &&
						 selected_tasksize[i] <
						 selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "select %d (%s), adj %d, "
				"size %d, to kill\n",
				p->pid, p->comm, oom_score_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
		if (selected[i]) {
			lowmem_print(1, "send sigkill to %d (%s), adj %d, "
				"size %d, free memory = %d, "
				"reclaimable memory = %d\n",
				selected[i]->pid, selected[i]->comm,
				selected_oom_score_adj[i],
				selected_tasksize[i],
				other_free, other_file);
			lowmem_deathpending_timeout = jiffies + HZ;
			send_sig(SIGKILL, selected[i], 0);
			set_tsk_thread_flag(selected[i], TIF_MEMDIE);
			rem -= selected_tasksize[i];
#ifdef LMK_COUNT_READ
			lmk_count++;
#endif
		}
	}
#else
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
#ifdef LMK_COUNT_READ
		lmk_count++;
#endif
	}
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	if (__ratelimit(&lmk_rs)) {
		lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_score_adj);
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
		show_mem(SHOW_MEM_FILTER_NODES);
		dump_tasks_info();
#endif
	}
#endif
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	read_unlock(&tasklist_lock);
#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 0);
#endif /* CONFIG_ZRAM_FOR_ANDROID */
	return rem;
}
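/*
 * A second android_oom_handler variant.  The dump_stack()/show_mem()/
 * dump_tasks_info() debug dump is now ratelimited as a whole behind
 * CONFIG_SEC_DEBUG_LMK_MEMINFO, and CONFIG_CMA_PAGE_COUNTING builds
 * subtract CMA pages from the free and file-cache counts reported in the
 * kill message.  The victim-selection logic itself is unchanged.
 */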
static int android_oom_handler(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct task_struct *tsk;
#ifdef MULTIPLE_OOM_KILLER
	struct task_struct *selected[OOM_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef MULTIPLE_OOM_KILLER
	int selected_tasksize[OOM_DEPTH] = {0,};
	int selected_oom_score_adj[OOM_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL/5, 1);
#endif
	unsigned long *freed = data;
#if defined(CONFIG_CMA_PAGE_COUNTING)
	unsigned long nr_cma_free;
	unsigned long nr_cma_inactive_file;
	unsigned long nr_cma_active_file;
	int other_free;
	int other_file;

	nr_cma_free = global_page_state(NR_FREE_CMA_PAGES);
	other_free = global_page_state(NR_FREE_PAGES) - nr_cma_free;

	nr_cma_inactive_file = global_page_state(NR_CMA_INACTIVE_FILE);
	nr_cma_active_file = global_page_state(NR_CMA_ACTIVE_FILE);
	other_file = global_page_state(NR_FILE_PAGES) -
			global_page_state(NR_SHMEM) -
			total_swapcache_pages -
			nr_cma_inactive_file -
			nr_cma_active_file;
#endif

	/* show status */
	pr_warning("%s invoked Android-oom-killer: "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, current->signal->oom_adj,
		current->signal->oom_score_adj);
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	if (__ratelimit(&oom_rs)) {
		dump_stack();
		show_mem(SHOW_MEM_FILTER_NODES);
		dump_tasks_info();
	}
#endif

	min_score_adj = 0;
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef MULTIPLE_OOM_KILLER
		int is_exist_oom_task = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;

		lowmem_print(2, "oom: ------ %d (%s), adj %d, size %d\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#ifdef MULTIPLE_OOM_KILLER
		if (all_selected_oom < OOM_DEPTH) {
			for (i = 0; i < OOM_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			   (selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			    selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] =
				oom_score_adj;

			if (all_selected_oom < OOM_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == OOM_DEPTH) {
				for (i = 0; i < OOM_DEPTH; i++) {
					if (selected_oom_score_adj[i] <
					    selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] ==
						 selected_oom_score_adj[max_selected_oom_idx] &&
						 selected_tasksize[i] <
						 selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "oom: max_selected_oom_idx(%d) "
				"select %d (%s), adj %d, size %d, to kill\n",
				max_selected_oom_idx, p->pid, p->comm,
				oom_score_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "oom: select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++) {
		if (selected[i]) {
#if defined(CONFIG_CMA_PAGE_COUNTING)
			lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, "
				"size %d ofree %d ofile %d "
				"cma_free %lu cma_i_file %lu cma_a_file %lu\n",
				selected[i]->pid, selected[i]->comm,
				selected_oom_score_adj[i],
				selected_tasksize[i],
				other_free, other_file,
				nr_cma_free, nr_cma_inactive_file,
				nr_cma_active_file);
#else
			lowmem_print(1, "oom: send sigkill to %d (%s), "
				"adj %d, size %d\n",
				selected[i]->pid, selected[i]->comm,
				selected_oom_score_adj[i],
				selected_tasksize[i]);
#endif
			send_sig(SIGKILL, selected[i], 0);
			rem -= selected_tasksize[i];
			*freed += (unsigned long)selected_tasksize[i];
#ifdef OOM_COUNT_READ
			oom_count++;
#endif
		}
	}
#else
	if (selected) {
		lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		*freed += (unsigned long)selected_tasksize;
#ifdef OOM_COUNT_READ
		oom_count++;
#endif
	}
#endif
	read_unlock(&tasklist_lock);
	lowmem_print(2, "oom: get memory %lu\n", *freed);
	return rem;
}
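/*
 * A later lowmem_shrink variant: invocations are serialized by scan_mutex,
 * the task walk runs under rcu_read_lock() instead of tasklist_lock, the
 * task size is inflated by an estimate of its compressed zswap footprint,
 * and with CONFIG_SAMP_HOTNESS candidates are ranked by a per-task hotness
 * value instead of oom_score_adj once min_score_adj exceeds lowmem_adj[4].
 */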
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
	int selected_hotness_adj = 0;
#endif
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;
	unsigned long nr_to_scan = sc->nr_to_scan;
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 1);
#endif
	unsigned long nr_cma_free;
	struct reclaim_state *reclaim_state = current->reclaim_state;
#if defined(CONFIG_CMA_PAGE_COUNTING)
	unsigned long nr_cma_inactive_file;
	unsigned long nr_cma_active_file;
	unsigned long cma_page_ratio;
	bool is_active_high;
	bool flag = false;
#endif

	if (nr_to_scan > 0) {
		if (mutex_lock_interruptible(&scan_mutex) < 0)
			return 0;
	}

	other_free = global_page_state(NR_FREE_PAGES);

	nr_cma_free = global_page_state(NR_FREE_CMA_PAGES);
#ifdef CONFIG_ZSWAP
	/* with CONFIG_ZSWAP, CMA pages are only discounted for direct
	 * reclaim or high-priority kswapd passes; otherwise always */
	if (!current_is_kswapd() || sc->priority <= 6)
#endif
		other_free -= nr_cma_free;

#if defined(CONFIG_CMA_PAGE_COUNTING)
	nr_cma_inactive_file = global_page_state(NR_CMA_INACTIVE_FILE);
	nr_cma_active_file = global_page_state(NR_CMA_ACTIVE_FILE);
	cma_page_ratio = 100 * global_page_state(NR_CMA_INACTIVE_FILE) /
				global_page_state(NR_INACTIVE_FILE);
	is_active_high = (global_page_state(NR_ACTIVE_FILE) >
				global_page_state(NR_INACTIVE_FILE)) ? 1 : 0;
#endif
	other_file = global_page_state(NR_FILE_PAGES);

#if defined(CONFIG_CMA_PAGE_COUNTING) && defined(CONFIG_EXCLUDE_LRU_LIVING_IN_CMA)
	if (get_nr_swap_pages() < SSWAP_LMK_THRESHOLD &&
	    cma_page_ratio >= CMA_PAGE_RATIO && !is_active_high) {
		other_file = other_file - (nr_cma_inactive_file +
						nr_cma_active_file);
		flag = true;
	}
#endif
	if (global_page_state(NR_SHMEM) + total_swapcache_pages < other_file)
		other_file -= global_page_state(NR_SHMEM) +
				total_swapcache_pages;
	else
		other_file = 0;

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     nr_to_scan, sc->gfp_mask, rem);
		if (nr_to_scan > 0)
			mutex_unlock(&scan_mutex);
		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
		int hotness_adj = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		/* if task no longer has any memory ignore it */
		if (test_task_flag(tsk, TIF_MM_RELEASED))
			continue;

		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			if (test_task_flag(tsk, TIF_MEMDIE)) {
				rcu_read_unlock();
				/* give the system time to free up the memory */
				msleep_interruptible(20);
				mutex_unlock(&scan_mutex);
				return 0;
			}
		}

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
#if defined(CONFIG_ZSWAP)
		if (atomic_read(&zswap_stored_pages)) {
			lowmem_print(3, "shown tasksize : %d\n", tasksize);
			tasksize += atomic_read(&zswap_pool_pages) *
					get_mm_counter(p->mm, MM_SWAPENTS) /
					atomic_read(&zswap_stored_pages);
			lowmem_print(3, "real tasksize : %d\n", tasksize);
		}
#endif
#ifdef CONFIG_SAMP_HOTNESS
		hotness_adj = p->signal->hotness_adj;
#endif
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
#ifdef CONFIG_SAMP_HOTNESS
			if (min_score_adj <= lowmem_adj[4]) {
#endif
				if (oom_score_adj < selected_oom_score_adj)
					continue;
				if (oom_score_adj == selected_oom_score_adj &&
				    tasksize <= selected_tasksize)
					continue;
#ifdef CONFIG_SAMP_HOTNESS
			} else {
				if (hotness_adj > selected_hotness_adj)
					continue;
				if (hotness_adj == selected_hotness_adj &&
				    tasksize <= selected_tasksize)
					continue;
			}
#endif
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
		selected_hotness_adj = hotness_adj;
#endif
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
	}
	if (selected) {
#if defined(CONFIG_CMA_PAGE_COUNTING)
#ifdef CONFIG_SAMP_HOTNESS
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
			"ofree %d, ofile %d(%c), is_kswapd %d - "
			"cma_free %lu priority %d cma_i_file %lu "
			"cma_a_file %lu, hotness %d\n",
			selected->pid, selected->comm,
			selected_oom_score_adj, selected_tasksize,
			other_free, other_file, flag ? '-' : '+',
			!!current_is_kswapd(),
			nr_cma_free, sc->priority,
			nr_cma_inactive_file, nr_cma_active_file,
			selected_hotness_adj);
#else
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
			"ofree %d, ofile %d(%c), is_kswapd %d - "
			"cma_free %lu priority %d cma_i_file %lu "
			"cma_a_file %lu\n",
			selected->pid, selected->comm,
			selected_oom_score_adj, selected_tasksize,
			other_free, other_file, flag ? '-' : '+',
			!!current_is_kswapd(),
			nr_cma_free, sc->priority,
			nr_cma_inactive_file, nr_cma_active_file);
#endif
#else
#ifdef CONFIG_SAMP_HOTNESS
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
			"free memory = %d, reclaimable memory = %d "
			"is_kswapd %d cma_free %lu priority %d, hotness %d\n",
			selected->pid, selected->comm,
			selected_oom_score_adj, selected_tasksize,
			other_free, other_file,
			!!current_is_kswapd(),
			nr_cma_free, sc->priority,
			selected_hotness_adj);
#else
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
			"free memory = %d, reclaimable memory = %d "
			"is_kswapd %d cma_free %lu priority %d\n",
			selected->pid, selected->comm,
			selected_oom_score_adj, selected_tasksize,
			other_free, other_file,
			!!current_is_kswapd(),
			nr_cma_free, sc->priority);
#endif
#endif
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		rcu_read_unlock();
#ifdef LMK_COUNT_READ
		lmk_count++;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
		if ((selected_oom_score_adj < lowmem_adj[5]) &&
		    __ratelimit(&lmk_rs)) {
			lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				     nr_to_scan, sc->gfp_mask, other_free,
				     other_file, min_score_adj);
			show_mem(SHOW_MEM_FILTER_NODES);
			dump_tasks_info();
		}
#endif
		/* give the system time to free up the memory */
		msleep_interruptible(20);
		if (reclaim_state)
			reclaim_state->reclaimed_slab = selected_tasksize;
	} else
		rcu_read_unlock();

	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     nr_to_scan, sc->gfp_mask, rem);
	mutex_unlock(&scan_mutex);
	return rem;
}
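/*
 * A variant tracking later upstream kernels: adj values use short, matching
 * the narrower type of signal->oom_score_adj there, TASK_UNINTERRUPTIBLE
 * tasks are skipped, and the kill is logged with a detailed report of free,
 * file-cache, slab and reserve totals.
 */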
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int minfree = 0;
	int selected_tasksize = 0;
	short selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;
	unsigned long nr_to_scan = sc->nr_to_scan;
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 1);
#endif
	unsigned long nr_cma_free;

	if (nr_to_scan > 0) {
		if (mutex_lock_interruptible(&scan_mutex) < 0)
			return 0;
	}

	other_free = global_page_state(NR_FREE_PAGES);

	nr_cma_free = global_page_state(NR_FREE_CMA_PAGES);
	if (!current_is_kswapd() || sc->priority <= 6)
		other_free -= nr_cma_free;

	if (global_page_state(NR_SHMEM) + total_swapcache_pages() <
	    global_page_state(NR_FILE_PAGES))
		other_file = global_page_state(NR_FILE_PAGES) -
				global_page_state(NR_SHMEM) -
				total_swapcache_pages();
	else
		other_file = 0;

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		minfree = lowmem_minfree[i];
		if (other_free < minfree && other_file < minfree) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %hd\n",
			     nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     nr_to_scan, sc->gfp_mask, rem);
		if (nr_to_scan > 0)
			mutex_unlock(&scan_mutex);
		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		short oom_score_adj;

		if (tsk->flags & PF_KTHREAD ||
		    tsk->state & TASK_UNINTERRUPTIBLE)
			continue;

		/* if task no longer has any memory ignore it */
		if (test_task_flag(tsk, TIF_MM_RELEASED))
			continue;

		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			if (test_task_flag(tsk, TIF_MEMDIE)) {
				rcu_read_unlock();
				/* give the system time to free up the memory */
				msleep_interruptible(20);
				mutex_unlock(&scan_mutex);
				return 0;
			}
		}

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n",
			     p->comm, p->pid, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
			"   to free %ldkB on behalf of '%s' (%d) because\n"
			"   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n"
			"   Free memory is %ldkB above reserved.\n"
			"   is_kswapd %d priority %d\n"
			"   Free CMA is %ldkB\n"
			"   Total reserve is %ldkB\n"
			"   Total free pages is %ldkB\n"
			"   Total file cache is %ldkB\n"
			"   Slab Reclaimable is %ldkB\n"
			"   Slab UnReclaimable is %ldkB\n"
			"   Total Slab is %ldkB\n"
			"   GFP mask is 0x%x\n",
			selected->comm, selected->pid,
			selected_oom_score_adj,
			selected_tasksize * (long)(PAGE_SIZE / 1024),
			current->comm, current->pid,
			other_file * (long)(PAGE_SIZE / 1024),
			minfree * (long)(PAGE_SIZE / 1024),
			min_score_adj,
			other_free * (long)(PAGE_SIZE / 1024),
			!!current_is_kswapd(),
			sc->priority,
			nr_cma_free * (long)(PAGE_SIZE / 1024),
			totalreserve_pages * (long)(PAGE_SIZE / 1024),
			global_page_state(NR_FREE_PAGES) *
				(long)(PAGE_SIZE / 1024),
			global_page_state(NR_FILE_PAGES) *
				(long)(PAGE_SIZE / 1024),
			global_page_state(NR_SLAB_RECLAIMABLE) *
				(long)(PAGE_SIZE / 1024),
			global_page_state(NR_SLAB_UNRECLAIMABLE) *
				(long)(PAGE_SIZE / 1024),
			global_page_state(NR_SLAB_RECLAIMABLE) *
				(long)(PAGE_SIZE / 1024) +
			global_page_state(NR_SLAB_UNRECLAIMABLE) *
				(long)(PAGE_SIZE / 1024),
			sc->gfp_mask);

		if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) {
			show_mem(SHOW_MEM_FILTER_NODES);
			dump_tasks(NULL, NULL);
			show_mem_call_notifiers();
		}

		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		rcu_read_unlock();
#ifdef LMK_COUNT_READ
		lmk_count++;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
		if (__ratelimit(&lmk_rs)) {
			lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				     nr_to_scan, sc->gfp_mask, other_free,
				     other_file, min_score_adj);
			show_mem(SHOW_MEM_FILTER_NODES);
			dump_tasks_info();
		}
#endif
		/* give the system time to free up the memory */
		msleep_interruptible(20);
	} else
		rcu_read_unlock();

	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     nr_to_scan, sc->gfp_mask, rem);
	mutex_unlock(&scan_mutex);
	return rem;
}
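/*
 * The simplest of the later variants: zswap-aware task sizing as above, a
 * ratelimited dump_tasks_info() behind CONFIG_SEC_DEBUG_LMK_MEMINFO, and
 * the freed RSS is credited to reclaim_state->reclaimed_slab so the caller
 * sees reclaim progress.
 */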
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int minfree = 0;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;
	struct reclaim_state *reclaim_state;
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 1);
#endif

	if (sc->nr_to_scan > 0) {
		if (mutex_lock_interruptible(&scan_mutex) < 0)
			return 0;
	}

	other_free = global_page_state(NR_FREE_PAGES);
	other_file = global_page_state(NR_FILE_PAGES) -
			global_page_state(NR_SHMEM);
	reclaim_state = current->reclaim_state;
#if defined(CONFIG_ZSWAP)
	other_file -= total_swapcache_pages;
#endif

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		minfree = lowmem_minfree[i];
		if (other_free < minfree && other_file < minfree) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		if (sc->nr_to_scan > 0)
			mutex_unlock(&scan_mutex);
		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		/* if task no longer has any memory ignore it */
		if (test_task_flag(tsk, TIF_MM_RELEASED))
			continue;

		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			if (test_task_flag(tsk, TIF_MEMDIE)) {
				rcu_read_unlock();
				/* give the system time to free up the memory */
				msleep_interruptible(20);
				mutex_unlock(&scan_mutex);
				return 0;
			}
		}

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
#if defined(CONFIG_ZSWAP)
		if (atomic_read(&zswap_stored_pages)) {
			lowmem_print(3, "shown tasksize : %d\n", tasksize);
			tasksize += atomic_read(&zswap_pool_pages) *
					get_mm_counter(p->mm, MM_SWAPENTS) /
					atomic_read(&zswap_stored_pages);
			lowmem_print(3, "real tasksize : %d\n", tasksize);
		}
#endif
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select '%s' (%d), adj %d, size %d, to kill\n",
			     p->comm, p->pid, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "Killing '%s' (%d), adj %d,\n"
			"   to free %ldkB on behalf of '%s' (%d) because\n"
			"   cache %ldkB is below limit %ldkB for oom_score_adj %d\n"
			"   Free memory is %ldkB above reserved\n",
			selected->comm, selected->pid,
			selected_oom_score_adj,
			selected_tasksize * (long)(PAGE_SIZE / 1024),
			current->comm, current->pid,
			other_file * (long)(PAGE_SIZE / 1024),
			minfree * (long)(PAGE_SIZE / 1024),
			min_score_adj,
			other_free * (long)(PAGE_SIZE / 1024));
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		rcu_read_unlock();
#ifdef LMK_COUNT_READ
		lmk_count++;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
		if (__ratelimit(&lmk_rs))
			dump_tasks_info();
#endif
		/* give the system time to free up the memory */
		msleep_interruptible(20);
		if (reclaim_state)
			reclaim_state->reclaimed_slab += selected_tasksize;
	} else
		rcu_read_unlock();

	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	mutex_unlock(&scan_mutex);
	return rem;
}