static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!zonelist)
		return CONSTRAINT_NONE;

	/*
	 * Reach here only when __GFP_NOFAIL is used. So, we should avoid
	 * killing current; we have to fall back to a random task kill in
	 * this case. Hopefully CONSTRAINT_THISNODE would apply, but there
	 * is no way to handle it for now.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect. Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by the cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}

	return CONSTRAINT_NONE;
}
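For context on how the constraint classification is consumed: the caller in mm/oom_kill.c of this kernel generation uses the result to decide whether the OOM victim scan should be restricted to the allocation's nodemask. The fragment below is a minimal sketch of that caller, assuming the 3.x-era out_of_memory() locals (zonelist, gfp_mask, nodemask, order); it is illustrative, not code quoted from this tree.

	enum oom_constraint constraint;
	unsigned long totalpages;
	nodemask_t *mpol_mask;

	/* Classify the failed allocation before picking an OOM victim */
	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
				       &totalpages);
	/* Only mempolicy-constrained OOMs limit the scan to the nodemask */
	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);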
void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
{
	gfp_t gfp_mask;
	struct zone *preferred_zone;
	struct zonelist *zonelist;
	enum zone_type high_zoneidx, classzone_idx;
	unsigned long balance_gap;

	gfp_mask = sc->gfp_mask;
	zonelist = node_zonelist(0, gfp_mask);
	high_zoneidx = gfp_zone(gfp_mask);
	first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
	classzone_idx = zone_idx(preferred_zone);

	balance_gap = min(low_wmark_pages(preferred_zone),
			  (preferred_zone->present_pages +
			   KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
			  KSWAPD_ZONE_BALANCE_GAP_RATIO);

	if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
			high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
			balance_gap, 0, 0))) {
		if (lmk_fast_run)
			tune_lmk_zone_param(zonelist, classzone_idx,
					    other_free, other_file);
		else
			tune_lmk_zone_param(zonelist, classzone_idx,
					    other_free, NULL);

		if (zone_watermark_ok(preferred_zone, 0, 0, ZONE_HIGHMEM, 0))
			*other_free -=
				preferred_zone->lowmem_reserve[ZONE_HIGHMEM];
		else
			*other_free -= zone_page_state(preferred_zone,
						       NR_FREE_PAGES);

		lowmem_print(4, "lowmem_shrink of kswapd tuning for highmem "
			     "ofree %d, %d\n", *other_free, *other_file);
	} else {
		tune_lmk_zone_param(zonelist, classzone_idx,
				    other_free, other_file);

		lowmem_print(4, "lowmem_shrink tuning for others ofree %d, "
			     "%d\n", *other_free, *other_file);
	}
}
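In trees that carry lmk_fast_run, tune_lmk_param() is meant to be called from lowmem_shrink() right after the global other_free/other_file estimates are computed, so the minfree thresholds are compared against zone-restricted counts rather than raw global ones. A minimal sketch of that call pattern, reusing the variable names from the shrinker below (illustrative, not verbatim from this tree):

	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);

	/* Restrict the counters to zones this allocation can actually use */
	tune_lmk_param(&other_free, &other_file, sc);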
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int minfree = 0;
	int selected_tasksize = 0;
	short selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	int print_extra_info = 0;
	static unsigned long lowmem_print_extra_info_timeout = 0;

#ifdef CONFIG_MTK_GMO_RAM_OPTIMIZE
	int other_anon = global_page_state(NR_INACTIVE_ANON) -
						global_page_state(NR_ACTIVE_ANON);
#endif

#ifdef CONFIG_MT_ENG_BUILD
	/* dump memory info when the framework hits low memory */
	int pid_dump = -1;	// process which needs to be dumped
	// int pid_sec_mem = -1;
	int max_mem = 0;
	static int pid_flm_warn = -1;
	static unsigned long flm_warn_timeout = 0;
#endif // CONFIG_MT_ENG_BUILD

	/*
	 * If we already have a death outstanding, then bail out right away;
	 * this indicates to vmscan that we have nothing further to offer on
	 * this pass.
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return -1;

	/* We are in the MTKPASR stage! */
	if (unlikely(current->flags & PF_MTKPASR))
		return -1;

	if (!spin_trylock(&lowmem_shrink_lock)) {
		lowmem_print(4, "lowmem_shrink lock failed\n");
		return -1;
	}

#ifdef CONFIG_ZRAM
	other_file -= total_swapcache_pages();
#endif

#ifdef CONFIG_HIGHMEM
	/*
	 * Check whether this is caused by low memory in the normal zone!
	 * This helps avoid over-reclaiming when total free pages are enough
	 * but the normal zone is under low memory.
	 */
	if (gfp_zone(sc->gfp_mask) == ZONE_NORMAL) {
		int nid;
		struct zone *z;

		/* Restore other_free */
		other_free += totalreserve_pages;

		/* Go through all memory nodes & subtract (free, file) of ZONE_HIGHMEM */
		for_each_online_node(nid) {
			z = &NODE_DATA(nid)->node_zones[ZONE_HIGHMEM];
			other_free -= zone_page_state(z, NR_FREE_PAGES);
			other_file -= zone_page_state(z, NR_FILE_PAGES);
			/* Don't subtract NR_SHMEM twice! */
			other_file += zone_page_state(z, NR_SHMEM);
			/* Subtract the high watermark of the normal zone */
			z = &NODE_DATA(nid)->node_zones[ZONE_NORMAL];
			other_free -= high_wmark_pages(z);
		}

		/* Normalize */
		other_free *= total_low_ratio;
		other_file *= total_low_ratio;
	}
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int minfree = 0;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);

#ifdef CONFIG_MT_ENG_BUILD
	int print_extra_info = 0;
	static unsigned long lowmem_print_extra_info_timeout = 0;

	/* dump memory info when the framework hits low memory */
	int pid_dump = -1;	// process which needs to be dumped
	int pid_sec_mem = -1;
	int max_mem = 0;
#endif // CONFIG_MT_ENG_BUILD

	/*
	 * If we already have a death outstanding, then bail out right away;
	 * this indicates to vmscan that we have nothing further to offer on
	 * this pass.
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return 0;

#ifdef CONFIG_MT_ENG_BUILD
	add_kmem_status_lmk_counter();
#endif

#ifdef CONFIG_SWAP
	other_file -= total_swapcache_pages;
#endif

#ifdef CONFIG_HIGHMEM
	/*
	 * Check whether this is caused by low memory in the normal zone!
	 * This helps avoid over-reclaiming when total free pages are enough
	 * but the normal zone is under low memory.
	 */
	if (gfp_zone(sc->gfp_mask) == ZONE_NORMAL) {
		int nid;
		struct zone *z;

		/* Go through all memory nodes & subtract (free, file) of ZONE_HIGHMEM */
		for_each_online_node(nid) {
			z = &NODE_DATA(nid)->node_zones[ZONE_HIGHMEM];
			other_free -= zone_page_state(z, NR_FREE_PAGES);
			other_file -= zone_page_state(z, NR_FILE_PAGES);
			/* Don't subtract it twice! */
			other_file += zone_page_state(z, NR_SHMEM);
		}

		other_free *= total_low_ratio;
		other_file *= total_low_ratio;
	}
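Both excerpts stop before the threshold walk. For reference, the stock lowmemorykiller driver then derives min_score_adj from the tuned counters roughly as in the sketch below (stock upstream logic, not the MTK-specific continuation of the functions above):

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		minfree = lowmem_minfree[i];
		if (other_free < minfree && other_file < minfree) {
			/* Kill only tasks at or above this oom_score_adj */
			min_score_adj = lowmem_adj[i];
			break;
		}
	}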