Example no. 1
static void __cpuinit tplug_work_fn(struct work_struct *work)
{
	int i;
	unsigned int load[8], avg_load[8];	/* sized for at most 8 cores */

	/* endurance level limits how many cores the driver may keep online */
	switch (endurance_level) {
	case 0:
		core_limit = NR_CPUS;
		break;
	case 1:
		core_limit = NR_CPUS / 2;
		break;
	case 2:
		core_limit = NR_CPUS / 4;
		break;
	default:
		core_limit = NR_CPUS;
		break;
	}

	/* sample each core's load and smooth it against the previous sample */
	for (i = 0; i < core_limit; i++) {
		if (cpu_online(i))
			load[i] = get_curr_load(i);
		else
			load[i] = 0;

		avg_load[i] = ((int)load[i] + (int)last_load[i]) / 2;
		last_load[i] = load[i];
	}

	for (i = 0; i < core_limit; i++) {
		if (cpu_online(i) && avg_load[i] > load_threshold &&
		    cpu_is_offline(i + 1)) {
			if (DEBUG)
				pr_info("%s : bringing back cpu%d\n",
					THUNDERPLUG, i);
			/* never touch a core beyond the 8 the arrays cover */
			if (!((i + 1) > 7)) {
				last_time[i + 1] = ktime_to_ms(ktime_get());
				cpu_up(i + 1);
			}
		} else if (cpu_online(i) && avg_load[i] < load_threshold &&
			   cpu_online(i + 1)) {
			if (DEBUG)
				pr_info("%s : offlining cpu%d\n",
					THUNDERPLUG, i);
			/* originally "!(i+1)==0", which parses as (!(i+1)) == 0 */
			if ((i + 1) != 0) {
				now[i + 1] = ktime_to_ms(ktime_get());
				/* honor the minimum up-time before offlining */
				if ((now[i + 1] - last_time[i + 1]) > MIN_CPU_UP_TIME)
					cpu_down(i + 1);
			}
		}
	}
#ifdef CONFIG_USES_MALI_MP2_GPU
	if (gpu_hotplug_enabled) {
		if (DEBUG)
			pr_info("%s: current gpu load %d\n", THUNDERPLUG,
				get_gpu_load());
		/* bring up the second GPU core under load, drop it when idle */
		if (get_gpu_load() > gpu_min_load_threshold) {
			if (get_gpu_cores_enabled() < 2) {
				enable_gpu_cores(2);
				if (DEBUG)
					pr_info("%s: gpu1 onlined\n", THUNDERPLUG);
			}
		} else {
			if (get_gpu_cores_enabled() > 1) {
				enable_gpu_cores(1);
				if (DEBUG)
					pr_info("%s: gpu1 offlined\n", THUNDERPLUG);
			}
		}
	}
#endif

#ifdef CONFIG_SCHED_HMP
	if (tplug_hp_style == 1 && !isSuspended)
#else
	if (tplug_hp_enabled != 0 && !isSuspended)
#endif
		/* reschedule ourselves for the next sampling interval */
		queue_delayed_work_on(0, tplug_wq, &tplug_work,
			msecs_to_jiffies(sampling_time));
	else {
		if (!isSuspended)
			cpus_online_all();
		else
			thunderplug_suspend();
	}

}
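
For context, the handler reschedules itself with queue_delayed_work_on(), so something must create the workqueue and queue the first run. A minimal sketch of how such a driver could kick off the sampling loop, assuming the tplug_wq/tplug_work globals used above; the init-function name and DEF_SAMPLING_MS are hypothetical:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEF_SAMPLING_MS 500	/* hypothetical default sampling period */

static struct workqueue_struct *tplug_wq;
static struct delayed_work tplug_work;

static int __init tplug_init(void)	/* hypothetical init function */
{
	/* freezable queue so sampling stops across suspend */
	tplug_wq = alloc_workqueue("tplug", WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!tplug_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&tplug_work, tplug_work_fn);
	/* pin the sampler to cpu0, which these drivers never offline */
	queue_delayed_work_on(0, tplug_wq, &tplug_work,
			      msecs_to_jiffies(DEF_SAMPLING_MS));
	return 0;
}
late_initcall(tplug_init);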
Example no. 2
static void __cpuinit tplug_work_fn(struct work_struct *work)
{
	int i;
	unsigned int load[8], avg_load[8];

	/* endurance level caps the online core count at 8, 4, or 2 */
	switch (endurance_level) {
	case 0:
		core_limit = 8;
		break;
	case 1:
		core_limit = 4;
		break;
	case 2:
		core_limit = 2;
		break;
	default:
		core_limit = 8;
		break;
	}

	/* sample each core's load and smooth it against the previous sample */
	for (i = 0; i < core_limit; i++) {
		if (cpu_online(i))
			load[i] = get_curr_load(i);
		else
			load[i] = 0;

		avg_load[i] = ((int)load[i] + (int)last_load[i]) / 2;
		last_load[i] = load[i];
	}

	for (i = 0; i < core_limit; i++) {
		if (cpu_online(i) && avg_load[i] > load_threshold &&
		    cpu_is_offline(i + 1)) {
			if (DEBUG)
				pr_info("%s : bringing back cpu%d\n",
					THUNDERPLUG, i);
			/* never touch a core beyond the 8 the arrays cover */
			if (!((i + 1) > 7))
				cpu_up(i + 1);
		} else if (cpu_online(i) && avg_load[i] < load_threshold &&
			   cpu_online(i + 1)) {
			if (DEBUG)
				pr_info("%s : offlining cpu%d\n",
					THUNDERPLUG, i);
			/* originally "!(i+1)==0", which parses as (!(i+1)) == 0 */
			if ((i + 1) != 0)
				cpu_down(i + 1);
		}
	}

	if (tplug_hp_enabled != 0 && !isSuspended)
		queue_delayed_work_on(0, tplug_wq, &tplug_work,
			msecs_to_jiffies(sampling_time));
	else {
		if (!isSuspended)
			cpus_online_all();
		else
			thunderplug_suspend();
	}

}
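
A side note on the guard that both examples originally wrote as if(!(i+1)==0): since ! binds tighter than ==, the expression parses as (!(i+1)) == 0, which by coincidence equals the intended (i+1) != 0. A tiny userspace check of that equivalence:

#include <assert.h>

int main(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* ! binds tighter than ==: !(i+1)==0 means (!(i+1)) == 0 */
		assert((!(i + 1) == 0) == ((i + 1) != 0));
	}
	return 0;
}

So the rewritten guard above changes readability, not behavior.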
Example no. 3
static void __cpuinit tplug_work_fn(struct work_struct *work)
{
    int i, j;
    unsigned int load[6], avg_load[6];
    unsigned int avg_cpu_load;

    for (i = 0; i < core_limit; i++)
    {
        if (cpu_online(i))
            load[i] = get_curr_load(i);
        else
            load[i] = 0;

        avg_load[i] = ((int) load[i] + (int) last_load[i]) / 2;
        last_load[i] = load[i];
    }

    // First, decide whether to bring any CPU online
    // CPU 0 is always online

    avg_cpu_load = avg_load[0];

    for (j = suspend_cpu_num; j < core_limit; j++)
    {
        i = cpuidx[j];
        if (cpu_is_offline(i)) {
            if (avg_cpu_load > load_threshold) {
                if (DEBUG)
                    pr_info("%s : bringing back cpu%d, load avg: %d\n",
                            V4TKPLUG, i, avg_cpu_load);
                last_time[i] = ktime_to_ms(ktime_get());
                cpu_up(i);
                if (DEBUG)
                    print_cpus_all();
                break;
            }
        } else {
            // fold this core into the running load figure, weighted by index
            avg_cpu_load = (avg_cpu_load + avg_load[i]*j)/(j+1);
        }
    }

    // Now check whether any CPU can be put offline
    avg_cpu_load = avg_load[0];

    for (j = suspend_cpu_num; j < core_limit; j++)
    {
        i = cpuidx[j];
        // candidate if the next CPU is already offline or this is the last CPU
        if (cpu_online(i)) {
            if ((j == (core_limit - 1)) || cpu_is_offline(cpuidx[j + 1])) {
                if (avg_cpu_load < CPU_LOAD_LOW_THRESHOLD) {
                    now[i] = ktime_to_ms(ktime_get());
                    // honor the minimum up-time before offlining
                    if ((now[i] - last_time[i]) > MIN_CPU_UP_TIME)
                    {
                        if (DEBUG)
                            pr_info("%s : offlining cpu%d, load avg: %d\n",
                                    V4TKPLUG, i, avg_cpu_load);
                        cpu_down(i);
                        if (DEBUG)
                            print_cpus_all();
                    }
                    break;
                }
            } else {
                avg_cpu_load = (avg_cpu_load + avg_load[i]*j)/(j+1);
            }
        }
    }

    if (tplug_hp_enabled != 0 && !isSuspended)
        queue_delayed_work_on(0, tplug_wq, &tplug_work,
                              msecs_to_jiffies(sampling_time));
    else {
        if (!isSuspended)
            cpus_online_all();
        else
            v4tkplug_suspend();
    }

}
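
The running figure in Example no. 3 updates as avg_cpu_load = (avg_cpu_load + avg_load[i]*j)/(j+1), which gives the newest core's load a weight of j instead of carrying the accumulated mean forward, so later cores dominate the figure. A small standalone comparison against the textbook incremental mean (helper names are made up for illustration):

#include <stdio.h>

/* textbook incremental mean: after folding n+1 samples, holds their average */
static unsigned int incremental_mean(unsigned int mean, unsigned int x, int n)
{
	return (mean * n + x) / (n + 1);
}

/* update used in Example no. 3: the new sample carries the weight j */
static unsigned int v4tk_style_avg(unsigned int avg, unsigned int x, int j)
{
	return (avg + x * j) / (j + 1);
}

int main(void)
{
	unsigned int loads[] = { 80, 20, 20, 20 };
	unsigned int mean = loads[0], avg = loads[0];
	int j;

	for (j = 1; j < 4; j++) {
		mean = incremental_mean(mean, loads[j], j);
		avg = v4tk_style_avg(avg, loads[j], j);
	}
	/* prints 35 (the true average) vs 22 (skewed toward later samples) */
	printf("incremental mean: %u, example-style: %u\n", mean, avg);
	return 0;
}

Whether that skew is intentional hysteresis or an oversight is not clear from the snippet alone.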