Example No. 1
/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports that the low watermark is OK when
		 * in fact the min watermark could be breached by an
		 * allocation.
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
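The function above calls calculate_normal_threshold() without showing it. For context, here is a minimal sketch modeled on mainline mm/vmstat.c of that era; the exact constants and the managed_pages field are assumptions, not taken from this snippet.

/*
 * A sketch, modeled on mainline mm/vmstat.c: the per-cpu stat threshold
 * grows with the zone's size and the number of online CPUs, capped so
 * the global counters never drift too far. Constants are assumptions
 * from that era of the kernel, not verified against this driver.
 */
static int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* zone memory in units of 128 MB */

	mem = zone->managed_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/* Never exceed what a per-cpu s8 diff can safely accumulate. */
	return min(125, threshold);
}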
Example No. 2
void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
{
	gfp_t gfp_mask;
	struct zone *preferred_zone;
	struct zonelist *zonelist;
	enum zone_type high_zoneidx, classzone_idx;
	unsigned long balance_gap;

	gfp_mask = sc->gfp_mask;
	zonelist = node_zonelist(0, gfp_mask);
	high_zoneidx = gfp_zone(gfp_mask);
	first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
	classzone_idx = zone_idx(preferred_zone);

	balance_gap = min(low_wmark_pages(preferred_zone),
			  (preferred_zone->present_pages +
			   KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
			   KSWAPD_ZONE_BALANCE_GAP_RATIO);

	if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
			  high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
			  balance_gap, 0, 0))) {
		if (lmk_fast_run)
			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
				       other_file);
		else
			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
				       NULL);

		if (zone_watermark_ok(preferred_zone, 0, 0, ZONE_HIGHMEM, 0))
			*other_free -=
				  preferred_zone->lowmem_reserve[ZONE_HIGHMEM];
		else
			*other_free -= zone_page_state(preferred_zone,
						      NR_FREE_PAGES);

		lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
			     "ofree %d, %d\n", *other_free, *other_file);
	} else {
		tune_lmk_zone_param(zonelist, classzone_idx, other_free,
			       other_file);

		lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
			     "%d\n", *other_free, *other_file);
	}
}
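tune_lmk_zone_param() is called above but never defined here. A plausible sketch follows, modeled on same-era lowmemorykiller drivers; treat the exact accounting rules as assumptions rather than this driver's verified source.

/*
 * A sketch of the helper used above: zones above classzone_idx cannot
 * satisfy the allocation, so their free/file pages are discounted from
 * the totals; zones below it hold back their lowmem_reserve. Details
 * are assumptions based on same-era lowmemorykiller code.
 */
static void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
				int *other_free, int *other_file)
{
	struct zone *zone;
	struct zoneref *zoneref;
	int zone_idx;

	for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
		zone_idx = zonelist_zone_idx(zoneref);

		if (zone_idx > classzone_idx) {
			*other_free -= zone_page_state(zone, NR_FREE_PAGES);
			if (other_file != NULL)
				*other_file -= zone_page_state(zone,
							       NR_FILE_PAGES);
		} else if (zone_idx < classzone_idx) {
			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0))
				*other_free -=
					zone->lowmem_reserve[classzone_idx];
			else
				*other_free -= zone_page_state(zone,
							       NR_FREE_PAGES);
		}
	}
}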
Example No. 3
/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports that the low watermark is OK when
		 * in fact the min watermark could be breached by an
		 * allocation.
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
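To see what the per-node stat_threshold set above actually gates, here is a minimal sketch in the style of mainline __mod_node_page_state(); names follow mainline mm/vmstat.c, and the details should be treated as assumptions.

/*
 * A sketch of how the per-cpu node threshold is consumed, modeled on
 * mainline __mod_node_page_state(); details are assumptions. Small
 * deltas accumulate in the per-cpu diff, and only when the running
 * value crosses stat_threshold is it folded into the global counter,
 * keeping atomics off the hot path.
 */
void __mod_node_page_state(struct pglist_data *pgdat,
			   enum node_stat_item item, long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);
	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}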
Example No. 4
void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
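Before the lowmemorykiller code below, it helps to see what percpu_drift_mark buys the allocator. A sketch in the style of mainline zone_watermark_ok_safe() follows; the exact signature is an assumption.

/*
 * A sketch of how percpu_drift_mark is consumed, modeled on mainline
 * zone_watermark_ok_safe(); the signature is an assumption. The cheap
 * per-zone counter may be off by up to num_online_cpus() * threshold
 * pages, so once it drops below the drift mark we pay for an exact
 * snapshot that folds in the per-cpu diffs before trusting the result.
 */
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			    unsigned long mark, int classzone_idx)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
				   free_pages);
}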
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int minfree = 0;
	int selected_tasksize = 0;
	short selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);

	int print_extra_info = 0;
	static unsigned long lowmem_print_extra_info_timeout = 0;

#ifdef CONFIG_MTK_GMO_RAM_OPTIMIZE
	int other_anon = global_page_state(NR_INACTIVE_ANON) - global_page_state(NR_ACTIVE_ANON);
#endif
#ifdef CONFIG_MT_ENG_BUILD
	/* Dump memory info when the framework hits low memory */
	int pid_dump = -1; /* process whose memory needs to be dumped */
	//int pid_sec_mem = -1;
	int max_mem = 0;
	static int pid_flm_warn = -1;
	static unsigned long flm_warn_timeout = 0;
#endif // CONFIG_MT_ENG_BUILD
	/*
	 * If we already have a death outstanding, then bail out right
	 * away, indicating to vmscan that we have nothing further to
	 * offer on this pass.
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return -1;
	
	/* We are in the MTKPASR stage! */
	if (unlikely(current->flags & PF_MTKPASR))
		return -1;

	if (!spin_trylock(&lowmem_shrink_lock)){
	    lowmem_print(4, "lowmem_shrink lock faild\n");
	    return -1;
	}

#ifdef CONFIG_ZRAM
	other_file -= total_swapcache_pages();
#endif

#ifdef CONFIG_HIGHMEM
	/*
	 * Check whether it is caused by low memory in the normal zone!
	 * This helps avoid over-reclaiming when total free pages are
	 * sufficient but the normal zone is under memory pressure.
	 */
	if (gfp_zone(sc->gfp_mask) == ZONE_NORMAL) {
		int nid;
		struct zone *z;

		/* Restore other_free */
		other_free += totalreserve_pages;

		/* Go through all memory nodes & subtract ZONE_HIGHMEM's (free, file) pages */
		for_each_online_node(nid) {
			z = &NODE_DATA(nid)->node_zones[ZONE_HIGHMEM];
			other_free -= zone_page_state(z, NR_FREE_PAGES);
			other_file -= zone_page_state(z, NR_FILE_PAGES);
			/* Don't subtract NR_SHMEM twice! */
			other_file += zone_page_state(z, NR_SHMEM);
			/* Subtract high watermark of normal zone */
			z = &NODE_DATA(nid)->node_zones[ZONE_NORMAL];
			other_free -= high_wmark_pages(z);
		}

		/* Normalize */
		other_free *= total_low_ratio;
		other_file *= total_low_ratio;
	}