static ssize_t show_valid_zones(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn, end_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct page *first_page;
	struct zone *zone;

	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	end_pfn = start_pfn + nr_pages;
	first_page = pfn_to_page(start_pfn);

	/* A block that spans more than one zone cannot be offlined. */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return sprintf(buf, "none\n");

	zone = page_zone(first_page);

	if (zone_idx(zone) == ZONE_MOVABLE - 1) {
		/* The mem block is the last memory block of this zone. */
		if (end_pfn == zone_end_pfn(zone))
			return sprintf(buf, "%s %s\n",
				       zone->name, (zone + 1)->name);
	}

	if (zone_idx(zone) == ZONE_MOVABLE) {
		/* The mem block is the first memory block of ZONE_MOVABLE. */
		if (start_pfn == zone->zone_start_pfn)
			return sprintf(buf, "%s %s\n",
				       zone->name, (zone - 1)->name);
	}

	return sprintf(buf, "%s\n", zone->name);
}
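/*
 * For context: a minimal sketch of how this show routine is wired up as a
 * read-only sysfs attribute in drivers/base/memory.c. The 0444 mode and the
 * absence of a store handler follow the mainline memory-block attributes.
 */
static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);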
static void __init set_non_bootmem_pages_init(void)
{
	struct zone *z;

	for_each_zone(z) {
		unsigned long start, end;
		int nid = z->zone_pgdat->node_id;
#ifdef CONFIG_HIGHMEM
		int idx = zone_idx(z);
#endif

		start = z->zone_start_pfn;
		end = start + z->spanned_pages;
		start = max(start, node_free_pfn[nid]);
		start = max(start, max_low_pfn);

#ifdef CONFIG_HIGHMEM
		if (idx == ZONE_HIGHMEM)
			totalhigh_pages += z->spanned_pages;
#endif
		if (kdata_huge) {
			unsigned long percpu_pfn = node_percpu_pfn[nid];
			if (start < percpu_pfn && end > percpu_pfn)
				end = percpu_pfn;
		}
#ifdef CONFIG_PCI
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			if (end > pci_reserve_end_pfn)
				init_free_pfn_range(pci_reserve_end_pfn, end);
			end = pci_reserve_start_pfn;
		}
#endif
		init_free_pfn_range(start, end);
	}
}
static void *pack_shadow(unsigned long eviction, struct zone *zone)
{
	eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
	eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
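/*
 * A sketch of the matching decode, peeling the fields off in reverse order;
 * this mirrors the unpack_shadow() helper that accompanies pack_shadow() in
 * mm/workingset.c of the same era, but is reconstructed here from the
 * packing above rather than quoted verbatim.
 */
static void unpack_shadow(void *shadow, struct zone **zone,
			  unsigned long *eviction)
{
	unsigned long entry = (unsigned long)shadow;
	int zid, nid;

	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	zid = entry & ((1UL << ZONES_SHIFT) - 1);
	entry >>= ZONES_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;

	*zone = NODE_DATA(nid)->node_zones + zid;
	*eviction = entry;
}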
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	int lru_file = global_page_state(NR_ACTIVE_FILE) +
					global_page_state(NR_INACTIVE_FILE);
	struct zone *zone;
	int fork_boost;
	int *adj_array;

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				other_free -= zone_page_state(zone,
							      NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					     "%lu pages in movable zone\n",
					     zone_page_state(zone,
							     NR_FREE_PAGES));
			}
		}
	}
	/*
	 * If we already have a death outstanding, then bail out right away;
	 * indicating to vmscan that we have nothing further to offer on
	 * this pass.
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
		dump_deathpending(lowmem_deathpending);
		return 0;
	}

	if (lowmem_fork_boost &&
	    time_before_eq(jiffies, lowmem_fork_boost_timeout)) {
		fork_boost = lowmem_minfree[lowmem_minfree_size - 1] >> discount;
		if (unlikely(other_file < fork_boost))
			other_file = 0;
		else
			other_file -= fork_boost;
		adj_array = fork_boost_adj;
		lowmem_print(3, "lowmem_shrink other_file: %d, fork_boost: %d\n",
			     other_file, fork_boost);
	}
static int check_classzone_need_balance(zone_t *classzone)
{
	zone_t *first_zone;
	int class_idx = zone_idx(classzone);

	first_zone = classzone->zone_pgdat->node_zones;
	while (classzone >= first_zone) {
		if (classzone->free_pages >
				classzone->watermarks[class_idx].high)
			return 0;
		classzone--;
	}
	return 1;
}
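/*
 * A hedged sketch of how a 2.4-era kswapd balance loop consumes this check:
 * walk each node's zones from highest to lowest and keep rebalancing while
 * any classzone still sits above its high watermark ladder. The helper
 * try_to_free_pages_zone() and GFP_KSWAPD follow the 2.4 VM; the exact loop
 * body in a given tree may differ (e.g. extra scheduling points).
 */
static int kswapd_balance_pgdat(pg_data_t *pgdat)
{
	int need_more_balance = 0, i;
	zone_t *zone;

	for (i = pgdat->nr_zones - 1; i >= 0; i--) {
		zone = pgdat->node_zones + i;
		if (!zone->need_balance)
			continue;
		if (!try_to_free_pages_zone(zone, GFP_KSWAPD)) {
			zone->need_balance = 0;
			continue;
		}
		if (check_classzone_need_balance(zone))
			need_more_balance = 1;
		else
			zone->need_balance = 0;
	}
	return need_more_balance;
}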
void tune_lmk_param(int *other_free, int *other_file,
		    struct shrink_control *sc)
{
	gfp_t gfp_mask;
	struct zone *preferred_zone;
	struct zonelist *zonelist;
	enum zone_type high_zoneidx, classzone_idx;
	unsigned long balance_gap;

	gfp_mask = sc->gfp_mask;
	zonelist = node_zonelist(0, gfp_mask);
	high_zoneidx = gfp_zone(gfp_mask);
	first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
	classzone_idx = zone_idx(preferred_zone);
	balance_gap = min(low_wmark_pages(preferred_zone),
			  (preferred_zone->present_pages +
			   KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
			  KSWAPD_ZONE_BALANCE_GAP_RATIO);

	if (likely(current_is_kswapd() &&
		   zone_watermark_ok(preferred_zone, 0,
				     high_wmark_pages(preferred_zone) +
				     SWAP_CLUSTER_MAX + balance_gap,
				     0, 0))) {
		if (lmk_fast_run)
			tune_lmk_zone_param(zonelist, classzone_idx,
					    other_free, other_file);
		else
			tune_lmk_zone_param(zonelist, classzone_idx,
					    other_free, NULL);

		if (zone_watermark_ok(preferred_zone, 0, 0, ZONE_HIGHMEM, 0))
			*other_free -=
				preferred_zone->lowmem_reserve[ZONE_HIGHMEM];
		else
			*other_free -= zone_page_state(preferred_zone,
						       NR_FREE_PAGES);
		lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
			     "ofree %d, %d\n", *other_free, *other_file);
	} else {
		tune_lmk_zone_param(zonelist, classzone_idx,
				    other_free, other_file);
		lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
			     "%d\n", *other_free, *other_file);
	}
}
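/*
 * tune_lmk_zone_param() is not shown in this collection. Below is one
 * plausible implementation, reconstructed from memory of Android trees that
 * carry this lmk_fast_run tuning: zones above the classzone are discounted
 * entirely (the allocation cannot use them), while lower zones contribute
 * either their lowmem_reserve or their free pages depending on watermarks.
 * Treat it as a sketch, not a verbatim quote.
 */
void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
			 int *other_free, int *other_file)
{
	struct zone *zone;
	struct zoneref *zoneref;
	int zone_idx;

	for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
		zone_idx = zonelist_zone_idx(zoneref);
		if (zone_idx == ZONE_MOVABLE)
			continue;

		if (zone_idx > classzone_idx) {
			/* Unusable for this allocation: discount fully */
			if (other_free != NULL)
				*other_free -= zone_page_state(zone,
							       NR_FREE_PAGES);
			if (other_file != NULL)
				*other_file -= zone_page_state(zone,
							       NR_FILE_PAGES);
		} else if (zone_idx < classzone_idx) {
			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0))
				*other_free -=
					zone->lowmem_reserve[classzone_idx];
			else
				*other_free -= zone_page_state(zone,
							       NR_FREE_PAGES);
		}
	}
}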
static inline void get_free_ram(int *other_free, int *other_file)
{
	struct zone *zone;

	*other_free = global_page_state(NR_FREE_PAGES);
	*other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				*other_free -= zone_page_state(zone,
							       NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					     "%lu pages in movable zone\n",
					     zone_page_state(zone,
							     NR_FREE_PAGES));
			}
		}
	}
}
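/*
 * The lowmem_print() helper used throughout these shrinkers is, in the
 * Android lowmemorykiller driver, a thin printk gate on a debug-level
 * module parameter. Reproduced here as a sketch; later versions route
 * through pr_info() instead of bare printk().
 */
static uint32_t lowmem_debug_level = 2;

#define lowmem_print(level, x...)			\
	do {						\
		if (lowmem_debug_level >= (level))	\
			printk(x);			\
	} while (0)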
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	struct zone *zone;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	for_each_zone(zone) {
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Add range within existing zone limits */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		rc = __add_pages(nid, zone, start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
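/*
 * For orientation, a trimmed sketch of how the generic hotplug path hands a
 * new range to this arch hook: add_memory() in mm/memory_hotplug.c resolves
 * the node, then calls arch_add_memory() with for_device == false under the
 * hotplug lock. add_range_sketch() is a hypothetical stand-in for that
 * caller, not a function from any real tree.
 */
static int __ref add_range_sketch(int nid, u64 start, u64 size)
{
	int ret;

	mem_hotplug_begin();	/* serialize against other hotplug ops */
	ret = arch_add_memory(nid, start, size, false);
	mem_hotplug_done();

	return ret;
}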
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
#ifdef ENHANCED_LMK_ROUTINE
	struct task_struct *selected[LOWMEM_DEATHPENDING_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef ENHANCED_LMK_ROUTINE
	int selected_tasksize[LOWMEM_DEATHPENDING_DEPTH] = {0,};
	int selected_oom_score_adj[LOWMEM_DEATHPENDING_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
#ifdef CONFIG_SAMP_HOTNESS
	int selected_hotness_adj = 0;
#endif
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	struct zone *zone;

#if defined(CONFIG_ZRAM_FOR_ANDROID) || defined(CONFIG_ZSWAP)
	other_file -= total_swapcache_pages;
#endif

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				other_free -= zone_page_state(zone,
							      NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					     "%lu pages in movable zone\n",
					     zone_page_state(zone,
							     NR_FREE_PAGES));
			}
		}
	}
	/*
	 * If we already have a death outstanding, then bail out right away;
	 * indicating to vmscan that we have nothing further to offer on
	 * this pass.
	 *
	 * Note: Currently you need CONFIG_PROFILING
	 * for this to work correctly.
	 */
#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
		if (lowmem_deathpending[i] &&
		    time_before_eq(jiffies, lowmem_deathpending_timeout))
			return 0;
	}
#else
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return 0;
#endif

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}

#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 1);
#endif
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef ENHANCED_LMK_ROUTINE
		int is_exist_oom_task = 0;
#endif
#ifdef CONFIG_SAMP_HOTNESS
		int hotness_adj = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
#ifdef CONFIG_SAMP_HOTNESS
		hotness_adj = p->signal->hotness_adj;
#endif
		task_unlock(p);
		if (tasksize <= 0)
			continue;

#ifdef ENHANCED_LMK_ROUTINE
		if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH) {
			for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			   (selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			    selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] =
								oom_score_adj;

			if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == LOWMEM_DEATHPENDING_DEPTH) {
				for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
					if (selected_oom_score_adj[i] <
					    selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] ==
						 selected_oom_score_adj[max_selected_oom_idx] &&
						 selected_tasksize[i] <
						 selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "select %d (%s), adj %d, "
				     "size %d, to kill\n",
				     p->pid, p->comm, oom_score_adj, tasksize);
		}
#else
		if (selected) {
#ifdef CONFIG_SAMP_HOTNESS
			if (min_score_adj <= lowmem_adj[4]) {
#endif
				if (oom_score_adj < selected_oom_score_adj)
					continue;
				if (oom_score_adj == selected_oom_score_adj &&
				    tasksize <= selected_tasksize)
					continue;
#ifdef CONFIG_SAMP_HOTNESS
			} else {
				if (hotness_adj > selected_hotness_adj)
					continue;
				if (hotness_adj == selected_hotness_adj &&
				    tasksize <= selected_tasksize)
					continue;
			}
#endif
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
		selected_hotness_adj = hotness_adj;
#endif
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
		if (selected[i]) {
#ifdef CONFIG_SAMP_HOTNESS
			lowmem_print(1, "send sigkill to %d (%s), adj %d, "
				     "size %d, hotness %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i],
				     selected_hotness_adj);
#else
			lowmem_print(1, "send sigkill to %d (%s), adj %d, "
				     "size %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i]);
#endif
			lowmem_deathpending[i] = selected[i];
			lowmem_deathpending_timeout = jiffies + HZ;
			send_sig(SIGKILL, selected[i], 0);
			rem -= selected_tasksize[i];
#ifdef LMK_COUNT_READ
			lmk_count++;
#endif
		}
	}
#else
	if (selected) {
#ifdef CONFIG_SAMP_HOTNESS
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
			     "hotness %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize,
			     selected_hotness_adj);
#else
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
#endif
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
#ifdef LMK_COUNT_READ
		lmk_count++;
#endif
	}
#endif
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	read_unlock(&tasklist_lock);
#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 0);
#endif
	return rem;
}
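/*
 * For context: how the lowmemorykiller driver registers a lowmem_shrink()
 * callback of this era (int-returning .shrink with a shrink_control). The
 * seeks multiplier and init boilerplate follow the mainline staging driver;
 * treat this as a sketch rather than a quote of the exact vendor tree the
 * variants above came from.
 */
static struct shrinker lowmem_shrinker = {
	.shrink = lowmem_shrink,
	.seeks = DEFAULT_SEEKS * 16
};

static int __init lowmem_init(void)
{
	register_shrinker(&lowmem_shrinker);
	return 0;
}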
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM) -
						global_page_state(NR_MLOCK);
	int fork_boost = 0;
	int *adj_array;
	size_t *min_array;
	struct zone *zone;

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				other_free -= zone_page_state(zone,
							      NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					     "%lu pages in movable zone\n",
					     zone_page_state(zone,
							     NR_FREE_PAGES));
			}
		}
	}
	/*
	 * If we already have a death outstanding, then bail out right away;
	 * indicating to vmscan that we have nothing further to offer on
	 * this pass.
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return 0;

	if (lowmem_fork_boost &&
	    time_before_eq(jiffies, lowmem_fork_boost_timeout)) {
		for (i = 0; i < lowmem_minfree_size; i++)
			minfree_tmp[i] = lowmem_minfree[i] +
					 lowmem_fork_boost_minfree[i];
		adj_array = fork_boost_adj;
		min_array = minfree_tmp;
	} else {
		adj_array = lowmem_adj;
		min_array = lowmem_minfree;
	}

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < min_array[i] &&
		    (other_file < min_array[i] ||
		     !shrink_cache_possible(sc->gfp_mask))) {
			min_adj = adj_array[i];
			fork_boost = lowmem_fork_boost_minfree[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}
	selected_oom_adj = min_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_adj = p->signal->oom_adj;
		if (oom_adj < min_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			if (oom_adj == selected_oom_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_adj = oom_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_adj, tasksize);
	}
	if (selected) {
		if (last_min_adj > selected_oom_adj &&
		    (selected_oom_adj == 12 || selected_oom_adj == 9 ||
		     selected_oom_adj == 7)) {
			last_min_adj = selected_oom_adj;
			lowmem_print(1, "lowmem_shrink: monitor memory status "
				     "at selected_oom_adj=%d\n",
				     selected_oom_adj);
			show_meminfo();
			dump_tasks();
		}
		lowmem_print(1, "[%s] send sigkill to %d (%s), adj %d, "
			     "size %dK, min_adj=%d, free=%dK, file=%dK, "
			     "fork_boost=%d\n",
			     current->comm, selected->pid, selected->comm,
			     selected_oom_adj, selected_tasksize << 2,
			     min_adj, other_free << 2, other_file << 2,
			     fork_boost << 2);
		lowmem_deathpending = selected;
		lowmem_deathpending_timeout = jiffies + HZ;
		if (selected_oom_adj < 7) {
			show_meminfo();
			dump_tasks();
		}
		send_sig(SIGKILL, selected, 0);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	rcu_read_unlock();
	return rem;
}
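/*
 * A sketch of how lowmem_deathpending is typically cleared once the victim
 * actually exits: the driver registers a task-handoff notifier (hence the
 * CONFIG_PROFILING note in some variants above) and nulls the pointer when
 * the dying task is handed off. Reconstructed from the mainline staging
 * driver; multi-slot (ENHANCED_LMK_ROUTINE) variants loop over the array
 * instead of comparing a single pointer.
 */
static int task_notify_func(struct notifier_block *self,
			    unsigned long val, void *data)
{
	struct task_struct *task = data;

	if (task == lowmem_deathpending)
		lowmem_deathpending = NULL;

	return NOTIFY_OK;
}

static struct notifier_block task_nb = {
	.notifier_call = task_notify_func,
};
/* registered at init time with task_handoff_register(&task_nb) */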
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	struct zone *zone;

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				other_free -= zone_page_state(zone,
							      NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					     "%lu pages in movable zone\n",
					     zone_page_state(zone,
							     NR_FREE_PAGES));
			}
		}
	}
	/*
	 * If we already have a death outstanding, then bail out right away;
	 * indicating to vmscan that we have nothing further to offer on
	 * this pass.
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return 0;

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}
	selected_oom_adj = min_adj;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			task_unlock(p);
			continue;
		}
		oom_adj = sig->oom_adj;
		if (oom_adj < min_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			if (oom_adj == selected_oom_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_adj = oom_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_adj, selected_tasksize);
		lowmem_deathpending = selected;
		lowmem_deathpending_timeout = jiffies + HZ;
		force_sig(SIGKILL, selected);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	read_unlock(&tasklist_lock);
	return rem;
}
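/*
 * The lowmem_adj/lowmem_minfree ladders consulted above are module
 * parameters in the lowmemorykiller driver, normally rewritten at runtime
 * by Android's ActivityManager. A sketch of the declarations, following the
 * mainline staging driver; the default values shown are the usual ones but
 * can differ per tree.
 */
static int lowmem_adj[6] = { 0, 1, 6, 12 };
static int lowmem_adj_size = 4;
static size_t lowmem_minfree[6] = {
	3 * 512,	/* 6MB */
	2 * 1024,	/* 8MB */
	4 * 1024,	/* 16MB */
	16 * 1024,	/* 64MB */
};
static int lowmem_minfree_size = 4;

module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
			 S_IRUGO | S_IWUSR);
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
			 S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);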
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *p;
#ifdef ENHANCED_LMK_ROUTINE
	struct task_struct *selected[LOWMEM_DEATHPENDING_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
#ifdef ENHANCED_LMK_ROUTINE
	int selected_tasksize[LOWMEM_DEATHPENDING_DEPTH] = {0,};
	int selected_oom_adj[LOWMEM_DEATHPENDING_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int bSelected = 0;
	int kicked_out_size = 0x7FFFFFFF;
	int kicked_out_idx = -1;
#else
	int selected_tasksize = 0;
	int selected_oom_adj;
#endif
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	struct zone *zone;

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				other_free -= zone_page_state(zone,
							      NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					     "%lu pages in movable zone\n",
					     zone_page_state(zone,
							     NR_FREE_PAGES));
			}
		}
	}
	/*
	 * If we already have a death outstanding, then bail out right away;
	 * indicating to vmscan that we have nothing further to offer on
	 * this pass.
	 */
#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
		if (lowmem_deathpending[i] &&
		    time_before_eq(jiffies, lowmem_deathpending_timeout))
			return 0;
	}
#else
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return 0;
#endif

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_adj = lowmem_adj[i];
			break;
		}
	}
	if (min_adj == OOM_ADJUST_MAX + 1)
		return 0;

	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}

#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++)
		selected_oom_adj[i] = min_adj;
#else
	selected_oom_adj = min_adj;
#endif

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			task_unlock(p);
			continue;
		}
		oom_adj = sig->oom_adj;
		if (oom_adj < min_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;

#ifdef ENHANCED_LMK_ROUTINE
		bSelected = 0;
		for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
			/* Check if slots are full */
			if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH) {
				if (selected[i])
					continue;
				all_selected_oom++;
			} else if (oom_adj <= selected_oom_adj[i])
				continue;

			selected[i] = p;
			selected_tasksize[i] = tasksize;
			selected_oom_adj[i] = oom_adj;
			lowmem_print(2, "select[%d] %d (%s), adj %d, "
				     "size %d, to kill\n",
				     i, p->pid, p->comm, oom_adj, tasksize);
			bSelected = 1;
			break;
		}

		/* Find the smallest size slot among the same oom_adj slots */
		kicked_out_idx = -1;
		kicked_out_size = 0x7FFFFFFF;
		for (i = 0; !bSelected && i < LOWMEM_DEATHPENDING_DEPTH; i++) {
			if (selected[i]) {
				if (oom_adj == selected_oom_adj[i] &&
				    selected_tasksize[i] < kicked_out_size) {
					kicked_out_idx = i;
					kicked_out_size = selected_tasksize[i];
				}
			}
		}
		/* Replace the smallest size slot with the bigger one. */
		if (kicked_out_idx != -1 && tasksize > kicked_out_size) {
			selected[kicked_out_idx] = p;
			selected_tasksize[kicked_out_idx] = tasksize;
			selected_oom_adj[kicked_out_idx] = oom_adj;
			lowmem_print(2, "reselect[%d] %d (%s), adj %d, "
				     "size %d, to kill\n",
				     kicked_out_idx, p->pid, p->comm,
				     oom_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			if (oom_adj == selected_oom_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_adj = oom_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_adj, tasksize);
#endif
	}
#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
		if (selected[i]) {
			lowmem_print(1, "send sigkill to %d (%s), adj %d, "
				     "size %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_adj[i],
				     selected_tasksize[i]);
			lowmem_deathpending[i] = selected[i];
			lowmem_deathpending_timeout = jiffies + HZ;
			force_sig(SIGKILL, selected[i]);
			rem -= selected_tasksize[i];
		}
	}
#else
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_adj, selected_tasksize);
		lowmem_deathpending = selected;
		lowmem_deathpending_timeout = jiffies + HZ;
		force_sig(SIGKILL, selected);
		rem -= selected_tasksize;
	}
#endif
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	read_unlock(&tasklist_lock);
	return rem;
}
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	int lru_file = global_page_state(NR_ACTIVE_FILE) +
					global_page_state(NR_INACTIVE_FILE);
	struct zone *zone;

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				other_free -= zone_page_state(zone,
							      NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					     "%lu pages in movable zone\n",
					     zone_page_state(zone,
							     NR_FREE_PAGES));
			}
		}
	}
#ifdef CONFIG_SWAP
	if (fudgeswap != 0) {
		struct sysinfo si;

		si_swapinfo(&si);
		if (si.freeswap > 0) {
			if (fudgeswap > si.freeswap)
				other_file += si.freeswap;
			else
				other_file += fudgeswap;
		}
	}
#endif
	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i]) {
			if (other_file < lowmem_minfree[i] ||
			    (lowmem_check_filepages &&
			     (lru_file < lowmem_minfile[i]))) {
				min_score_adj = lowmem_adj[i];
				break;
			}
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			task_unlock(p);
			rcu_read_unlock();
			return 0;
		}

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	rcu_read_unlock();
	return rem;
}
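/*
 * The fudgeswap knob consulted above lets free swap count toward the file
 * page total, making the killer less eager when swap is still available.
 * A sketch of its declaration as a runtime tunable; the default value here
 * is an assumption, not taken from the tree these variants came from.
 */
static int fudgeswap = 512;
module_param_named(fudgeswap, fudgeswap, int, S_IRUGO | S_IWUSR);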
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	int lru_file = global_page_state(NR_ACTIVE_FILE) +
					global_page_state(NR_INACTIVE_FILE);
	struct zone *zone;
	int fork_boost = 0;
	size_t minfree_boosted[6] = {0, 0, 0, 0, 0, 0};
	size_t *min_array;
	int *adj_array;

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				other_free -= zone_page_state(zone,
							      NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					     "%lu pages in movable zone\n",
					     zone_page_state(zone,
							     NR_FREE_PAGES));
			}
		}
	}
	/*
	 * If we already have a death outstanding, then bail out right away;
	 * indicating to vmscan that we have nothing further to offer on
	 * this pass.
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
		dump_deathpending(lowmem_deathpending);
		return 0;
	}

	if (lowmem_fork_boost &&
	    time_before_eq(jiffies, lowmem_fork_boost_timeout)) {
		for (i = 0; i < lowmem_minfree_size; i++)
			minfree_boosted[i] = lowmem_minfree[i] +
					     lowmem_fork_boost_minfree[i];
		/* Switch to fork_boost adj/minfree within boost_duration */
		adj_array = fork_boost_adj;
		min_array = minfree_boosted;
	} else {
		adj_array = lowmem_adj;
		min_array = lowmem_minfree;
	}

#ifdef CONFIG_SWAP
	if (fudgeswap != 0) {
		struct sysinfo si;

		si_swapinfo(&si);
		if (si.freeswap > 0) {
			if (fudgeswap > si.freeswap)
				other_file += si.freeswap;
			else
				other_file += fudgeswap;
		}
	}
#endif
	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < min_array[i]) {
			if (other_file < min_array[i] ||
			    (lowmem_check_filepages &&
			     (lru_file < min_array[i]))) {
				min_adj = adj_array[i];
				fork_boost = lowmem_fork_boost_minfree[i];
				break;
			}
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free,
			     other_file, min_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}
	selected_oom_adj = min_adj;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			task_unlock(p);
			continue;
		}
		oom_adj = sig->oom_adj;
		if (oom_adj < min_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			if (oom_adj == selected_oom_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_adj = oom_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_adj, selected_tasksize);
		if (lowmem_fork_boost)
			lowmem_print(1, "current=[%s], min_adj=%d, "
				     "reclaim=%dK, free=%dK, "
				     "file=%dK, fork_boost=%dK\n",
				     current->comm, min_adj,
				     selected_tasksize << 2,
				     other_free << 2, other_file << 2,
				     fork_boost << 2);
		/* Show memory info if we reach certain adjs */
		if (selected_oom_adj < last_min_selected_adj &&
		    (selected_oom_adj == 12 || selected_oom_adj == 9 ||
		     selected_oom_adj == 7)) {
			last_min_selected_adj = selected_oom_adj;
			show_mem(SHOW_MEM_FILTER_NODES);
			dump_tasks();
		}
		/* Dump tasks if we hit low-memory condition */
		if (selected_oom_adj < 7) {
			show_mem(SHOW_MEM_FILTER_NODES);
			dump_tasks();
		}
		lowmem_deathpending = selected;
		lowmem_deathpending_timeout = jiffies + HZ;
		force_sig(SIGKILL, selected);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	read_unlock(&tasklist_lock);
	return rem;
}
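/*
 * Closing boilerplate common to the lowmem_shrink() variants above:
 * unregister the shrinker on module exit, pairing with the lowmem_shrinker/
 * lowmem_init registration sketched earlier. Reproduced from the mainline
 * staging driver; vendor trees hang their extra notifiers and parameters
 * off the same skeleton.
 */
static void __exit lowmem_exit(void)
{
	unregister_shrinker(&lowmem_shrinker);
}

module_init(lowmem_init);
module_exit(lowmem_exit);

MODULE_LICENSE("GPL");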