/* On return, the cpumask will be altered to indicate which CPUs changed state:
 * CPUs whose state changed will be set in the mask,
 * CPUs whose state is unchanged will be cleared in the mask. */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	int cpu;
	int cpuret = 0;
	int ret = 0;

	if (cpumask_empty(cpus))
		return 0;

	for_each_cpu(cpu, cpus) {
		switch (state) {
		case DOWN:
			cpuret = cpu_down(cpu);
			break;
		case UP:
			cpuret = cpu_up(cpu);
			break;
		}
		if (cpuret) {
			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
					__func__,
					((state == UP) ? "up" : "down"),
					cpu, cpuret);
			if (!ret)
				ret = cpuret;
			if (state == UP) {
				/* clear bits for unchanged cpus, return */
				cpumask_shift_right(cpus, cpus, cpu);
				cpumask_shift_left(cpus, cpus, cpu);
				break;
			} else {
				/* clear bit for unchanged cpu, continue */
				cpumask_clear_cpu(cpu, cpus);
			}
		}
	}

	return ret;
}
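Since the mask-altering contract described above is easy to misuse, here is a hypothetical caller sketch (the wrapper name and the pr_info reporting are assumptions, not part of the RTAS code) showing how the returned mask is meant to be consumed:

/* Hypothetical caller: request DOWN for all CPUs in "cpus", then walk the
 * mask, which on return contains only the CPUs whose state actually changed. */
static int example_offline_cpus(cpumask_var_t cpus)
{
	int cpu;
	int ret = rtas_cpu_state_change_mask(DOWN, cpus);

	for_each_cpu(cpu, cpus)
		pr_info("cpu%d changed state to offline\n", cpu);

	return ret;
}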
static ssize_t __ref store_online(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id;
	int from_nid, to_nid;
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpuid);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		from_nid = cpu_to_node(cpuid);
		ret = cpu_up(cpuid);

		/*
		 * When hot-adding memory to a memoryless node and enabling a
		 * cpu on that node, the node number of the cpu may change
		 * internally.
		 */
		to_nid = cpu_to_node(cpuid);
		if (from_nid != to_nid)
			change_cpu_under_node(cpu, from_nid, to_nid);

		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
Example #3
void disable_nonboot_cpus(void)
{
	int cpu, error;

	error = 0;
	cpus_clear(frozen_cpus);
	printk("Freezing cpus ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == 0)
			continue;
		error = cpu_down(cpu);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
			continue;
		}
		printk("Error taking cpu %d down: %d\n", cpu, error);
	}
	BUG_ON(raw_smp_processor_id() != 0);
	if (error)
		panic("cpus not sleeping");
}
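frozen_cpus is recorded precisely so this operation can be undone on resume; below is a minimal sketch of the matching thaw path, assuming the same old-style cpumask API used above (the cpus_clear()/cpu_set() era):

/* Sketch of the matching resume path: bring back every CPU that
 * disable_nonboot_cpus() took down, then forget the frozen set. */
void enable_nonboot_cpus(void)
{
	int cpu, error;

	printk("Thawing cpus ...\n");
	for_each_cpu_mask(cpu, frozen_cpus) {
		error = cpu_up(cpu);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk("Error taking cpu %d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
}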
Example #4
/* Call with core_control_mutex locked */
static int __ref update_offline_cores(int val)
{
	int cpu = 0;
	int ret = 0;

	if (!core_control_enabled)
		return 0;

	cpus_offlined = msm_thermal_info.core_control_mask & val;

	for_each_possible_cpu(cpu) {
		if (!(cpus_offlined & BIT(cpu)))
			continue;
		if (!cpu_online(cpu))
			continue;
		ret = cpu_down(cpu);
		if (ret)
			pr_err("%s: Unable to offline cpu%d\n",
				KBUILD_MODNAME, cpu);
	}
	return ret;
}
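Since the function itself does not take the lock, every call site must wrap it; a hypothetical caller sketch (the wrapper name is an assumption) honoring the contract stated in the comment above:

/* Hypothetical caller honoring the "call with core_control_mutex locked"
 * contract documented above update_offline_cores(). */
static int example_apply_core_control(int val)
{
	int ret;

	mutex_lock(&core_control_mutex);
	ret = update_offline_cores(val);
	mutex_unlock(&core_control_mutex);

	return ret;
}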
Example #5
static void min_max_constraints_workfunc(struct work_struct *work)
{
	int count = -1;
	bool up = false;
	unsigned int cpu;

	int nr_cpus = num_online_cpus();
	int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return;

	if (is_lp_cluster())
		return;

	if (nr_cpus < min_cpus) {
		up = true;
		count = min_cpus - nr_cpus;
	} else if (nr_cpus > max_cpus && max_cpus >= min_cpus) {
		count = nr_cpus - max_cpus;
	}

	for (;count > 0; count--) {
		if (up) {
			cpu = cpumask_next_zero(0, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpu_up(cpu);
			else
				break;
		} else {
			cpu = cpumask_next(0, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpu_down(cpu);
			else
				break;
		}
	}
}
static ssize_t __ref store_cpucore_max_num_limit(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int input, delta, cpu;

	if (!sscanf(buf, "%u", &input))
		return -EINVAL;

	if (input < 1 || input > 4) {
		pr_err("Must keep input range 1 ~ 4\n");
		return -EINVAL;
	}

	delta = input - num_online_cpus();

	if (delta > 0) {
		cpu = 1;
		while (delta) {
			if (!cpu_online(cpu)) {
				cpu_up(cpu);
				delta--;
			}
			cpu++;
		}
	} else if (delta < 0) {
		cpu = 3;
		while (delta) {
			if (cpu_online(cpu)) {
				cpu_down(cpu);
				delta++;
			}
			cpu--;
		}
	}

	max_num_cpu = input;

	return count;
}
Example #7
static int setup_cpu_watcher(struct notifier_block *notifier,
			      unsigned long event, void *data)
{
	int cpu;
	static struct xenbus_watch cpu_watch = {
		.node = "cpu",
		.callback = handle_vcpu_hotplug_event};

	(void)register_xenbus_watch(&cpu_watch);

	for_each_possible_cpu(cpu) {
		if (vcpu_online(cpu) == 0) {
			(void)cpu_down(cpu);
			set_cpu_present(cpu, false);
		}
	}

	return NOTIFY_DONE;
}

static int __init setup_vcpu_hotplug_event(void)
{
	static struct notifier_block xsn_cpu = {
		.notifier_call = setup_cpu_watcher };

#ifdef CONFIG_X86
	if (!xen_pv_domain() && !xen_pvh_domain())
#else
	if (!xen_domain())
#endif
		return -ENODEV;

	register_xenstore_notifier(&xsn_cpu);

	return 0;
}

arch_initcall(setup_vcpu_hotplug_event);
Example #8
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	if (cpu_online(pr->id))
		cpu_down(pr->id);

	get_online_cpus();
	/*
	 * The cpu might become online again at this point. So we check whether
	 * the cpu has been onlined or not. If the cpu became online, it means
	 * that someone wants to use the cpu. So acpi_processor_handle_eject()
	 * returns -EAGAIN.
	 */
	if (unlikely(cpu_online(pr->id))) {
		put_online_cpus();
		pr_warn("Failed to remove CPU %d, because other task "
			"brought the CPU back online\n", pr->id);
		return -EAGAIN;
	}
	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);
	put_online_cpus();
	return (0);
}
Example #9
static ssize_t store_online(struct sys_device *dev, const char *buf,
			    size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t ret;

	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpu->sysdev.id);
		if (!ret)
			kobject_hotplug(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		ret = cpu_up(cpu->sysdev.id);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret >= 0)
		ret = count;
	return ret;
}
static void hps_early_suspend(struct early_suspend *h)
{
    hps_warn("hps_early_suspend\n");

    mutex_lock(&hps_ctxt.lock);
    hps_ctxt.state = STATE_EARLY_SUSPEND;

    hps_ctxt.rush_boost_enabled_backup = hps_ctxt.rush_boost_enabled;
    hps_ctxt.rush_boost_enabled = 0;

    // Reset the statistics bookkeeping when entering early suspend mode.
    hps_ctxt.up_loads_sum = 0;
    hps_ctxt.up_loads_count = 0;
    hps_ctxt.up_loads_history_index = 0;
    hps_ctxt.up_loads_history[hps_ctxt.es_up_times - 1] = 0;
    hps_ctxt.down_loads_sum = 0;
    hps_ctxt.down_loads_count = 0;
    hps_ctxt.down_loads_history_index = 0;
    hps_ctxt.down_loads_history[hps_ctxt.es_down_times - 1] = 0;
    
    if (hps_ctxt.is_hmp && hps_ctxt.early_suspend_enabled)
    {
        unsigned int cpu;
        for (cpu = hps_ctxt.big_cpu_id_max; cpu >= hps_ctxt.big_cpu_id_min; --cpu)
        {
            if (cpu_online(cpu))
                cpu_down(cpu);
        }
    }
    mutex_unlock(&hps_ctxt.lock);
    atomic_set(&hps_ctxt.is_ondemand, 1);
    hps_warn("state: %u, enabled: %u, early_suspend_enabled: %u, suspend_enabled: %u, rush_boost_enabled: %u\n",
        hps_ctxt.state, hps_ctxt.enabled, hps_ctxt.early_suspend_enabled, hps_ctxt.suspend_enabled, hps_ctxt.rush_boost_enabled);

    return;
}
Example #11
static void __cpuinit tplug_work_fn(struct work_struct *work)
{
	int i;
	unsigned int load[8], avg_load[8];

	switch (endurance_level) {
	case 0:
		core_limit = NR_CPUS;
		break;
	case 1:
		core_limit = NR_CPUS / 2;
		break;
	case 2:
		core_limit = NR_CPUS / 4;
		break;
	default:
		core_limit = NR_CPUS;
		break;
	}

	for(i = 0 ; i < core_limit; i++)
	{
		if(cpu_online(i))
			load[i] = get_curr_load(i);
		else
			load[i] = 0;

		avg_load[i] = ((int) load[i] + (int) last_load[i]) / 2;
		last_load[i] = load[i];
	}

	for (i = 0; i < core_limit; i++)
	{
		if (cpu_online(i) && avg_load[i] > load_threshold && cpu_is_offline(i + 1))
		{
			if (DEBUG)
				pr_info("%s : bringing back cpu%d\n", THUNDERPLUG, i);
			if (!((i + 1) > 7)) {
				last_time[i + 1] = ktime_to_ms(ktime_get());
				cpu_up(i + 1);
			}
		}
		else if (cpu_online(i) && avg_load[i] < load_threshold && cpu_online(i + 1))
		{
			if (DEBUG)
				pr_info("%s : offlining cpu%d\n", THUNDERPLUG, i);
			/* was "if(!(i+1)==0)"; operator precedence obscured the intent */
			if ((i + 1) != 0) {
				now[i + 1] = ktime_to_ms(ktime_get());
				if ((now[i + 1] - last_time[i + 1]) > MIN_CPU_UP_TIME)
					cpu_down(i + 1);
			}
		}
	}
#ifdef CONFIG_USES_MALI_MP2_GPU
	if(gpu_hotplug_enabled) {
		if(DEBUG)
			pr_info("%s: current gpu load %d\n", THUNDERPLUG, get_gpu_load());
		if(get_gpu_load() > gpu_min_load_threshold) {
			if(get_gpu_cores_enabled() < 2) {
				enable_gpu_cores(2);
				if(DEBUG)
					pr_info("%s: gpu1 onlined\n", THUNDERPLUG);
			}
		}
		else {
			if(get_gpu_cores_enabled() > 1) {
				enable_gpu_cores(1);
				if(DEBUG)
					pr_info("%s: gpu1 offlined\n", THUNDERPLUG);
			}
		}
	}
#endif

#ifdef CONFIG_SCHED_HMP
    if(tplug_hp_style == 1 && !isSuspended)
#else
	if(tplug_hp_enabled != 0 && !isSuspended)
#endif
		queue_delayed_work_on(0, tplug_wq, &tplug_work,
			msecs_to_jiffies(sampling_time));
	else {
		if(!isSuspended)
			cpus_online_all();
		else
			thunderplug_suspend();
	}

}
static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
	bool up = false;
	unsigned int cpu = nr_cpu_ids;

	mutex_lock(tegra3_cpu_lock);
	if (mp_policy && !is_lp_cluster()) {
		mutex_unlock(tegra3_cpu_lock);
		return;
	}

	switch (hp_state) {
	case TEGRA_HP_DISABLED:
	case TEGRA_HP_IDLE:
		break;
	case TEGRA_HP_DOWN:
		cpu = tegra_get_slowest_cpu_n();
		if (cpu < nr_cpu_ids) {
			up = false;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, down_delay);
			hp_stats_update(cpu, false);
		} else if (!is_lp_cluster() && !no_lp) {
			if(!clk_set_parent(cpu_clk, cpu_lp_clk)) {
				CPU_DEBUG_PRINTK(CPU_DEBUG_HOTPLUG, " enter LPCPU");
				hp_stats_update(CONFIG_NR_CPUS, true);
				hp_stats_update(0, false);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
			} else
				queue_delayed_work(
					hotplug_wq, &hotplug_work, down_delay);
		}
		break;
	case TEGRA_HP_UP:
		if (is_lp_cluster() && !no_lp) {
			if(!clk_set_parent(cpu_clk, cpu_g_clk)) {
				CPU_DEBUG_PRINTK(CPU_DEBUG_HOTPLUG,
						 " leave LPCPU (%s)", __func__);
				hp_stats_update(CONFIG_NR_CPUS, false);
				hp_stats_update(0, true);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
			}
		} else {
			switch (tegra_cpu_speed_balance()) {
			/* cpu speed is up and balanced - one more on-line */
			case TEGRA_CPU_SPEED_BALANCED:
				cpu = cpumask_next_zero(0, cpu_online_mask);
				if (cpu < nr_cpu_ids) {
					up = true;
					hp_stats_update(cpu, true);
				}
				break;
			/* cpu speed is up, but skewed - remove one core */
			case TEGRA_CPU_SPEED_SKEWED:
				cpu = tegra_get_slowest_cpu_n();
				if (cpu < nr_cpu_ids) {
					up = false;
					hp_stats_update(cpu, false);
				}
				break;
			/* cpu speed is up, but under-utilized - do nothing */
			case TEGRA_CPU_SPEED_BIASED:
			default:
				break;
			}
		}
		queue_delayed_work(
			hotplug_wq, &hotplug_work, up2gn_delay);
		break;
	default:
		pr_err(CPU_HOTPLUG_TAG"%s: invalid tegra hotplug state %d\n",
		       __func__, hp_state);
	}

	mutex_unlock(tegra3_cpu_lock);

	if (system_state > SYSTEM_RUNNING) {
		pr_info(CPU_HOTPLUG_TAG" system is not running\n");
	} else if (cpu < nr_cpu_ids) {
		if (up) {
			updateCurrentCPUTotalActiveTime();
			cpu_up(cpu);
			pr_info(CPU_HOTPLUG_TAG" turn on CPU %d, online CPU 0-3=[%d%d%d%d]\n",
					cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
		} else {
			updateCurrentCPUTotalActiveTime();
			cpu_down(cpu);

			pr_info(CPU_HOTPLUG_TAG" turn off CPU %d, online CPU 0-3=[%d%d%d%d]\n",
					cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
		}
	}
}
Example #13
static void __cpuinit tplug_work_fn(struct work_struct *work)
{
	int i;
	unsigned int load[8], avg_load[8];

	switch (endurance_level) {
	case 0:
		core_limit = 8;
		break;
	case 1:
		core_limit = 4;
		break;
	case 2:
		core_limit = 2;
		break;
	default:
		core_limit = 8;
		break;
	}

	for(i = 0 ; i < core_limit; i++)
	{
		if(cpu_online(i))
			load[i] = get_curr_load(i);
		else
			load[i] = 0;

		avg_load[i] = ((int) load[i] + (int) last_load[i]) / 2;
		last_load[i] = load[i];
	}

	for (i = 0; i < core_limit; i++)
	{
		if (cpu_online(i) && avg_load[i] > load_threshold && cpu_is_offline(i + 1))
		{
			if (DEBUG)
				pr_info("%s : bringing back cpu%d\n", THUNDERPLUG, i);
			if (!((i + 1) > 7))
				cpu_up(i + 1);
		}
		else if (cpu_online(i) && avg_load[i] < load_threshold && cpu_online(i + 1))
		{
			if (DEBUG)
				pr_info("%s : offlining cpu%d\n", THUNDERPLUG, i);
			/* was "if(!(i+1)==0)"; operator precedence obscured the intent */
			if ((i + 1) != 0)
				cpu_down(i + 1);
		}
	}

	if(tplug_hp_enabled != 0 && !isSuspended)
		queue_delayed_work_on(0, tplug_wq, &tplug_work,
			msecs_to_jiffies(sampling_time));
	else {
		if(!isSuspended)
			cpus_online_all();
		else
			thunderplug_suspend();
	}

}
Example #14
static void hotplug_timer(struct work_struct *work)
{
	struct cpu_hotplug_info tmp_hotplug_info[4];
	int i;
	unsigned int load = 0;
	unsigned int cpu_rq_min=0;
	unsigned long nr_rq_min = -1UL;
	unsigned int select_off_cpu = 0;
	enum flag flag_hotplug;

	mutex_lock(&hotplug_lock);

	if (user_lock == 1)
		goto no_hotplug;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(hotplug_cpu_time, i);

		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int)cputime64_sub(cur_idle_time,
							tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int)cputime64_sub(cur_wall_time,
							tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		if (wall_time < idle_time)
			goto no_hotplug;

		tmp_info->load = 100 * (wall_time - idle_time) / wall_time;

		load += tmp_info->load;
		/*find minimum runqueue length*/
		tmp_hotplug_info[i].nr_running = get_cpu_nr_running(i);

		if (i && nr_rq_min > tmp_hotplug_info[i].nr_running) {
			nr_rq_min = tmp_hotplug_info[i].nr_running;

			cpu_rq_min = i;
		}
	}

	for (i = NUM_CPUS - 1; i > 0; --i) {
		if (cpu_online(i) == 0) {
			select_off_cpu = i;
			break;
		}
	}

	/*standalone hotplug*/
	flag_hotplug = standalone_hotplug(load, nr_rq_min, cpu_rq_min);

	/*cpu hotplug*/
	if (flag_hotplug == HOTPLUG_IN && cpu_online(select_off_cpu) == CPU_OFF) {
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d turning on!\n", select_off_cpu);
#endif
		cpu_up(select_off_cpu);
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d on\n", select_off_cpu);
#endif
		hotpluging_rate = CHECK_DELAY * 4;
	} else if (flag_hotplug == HOTPLUG_OUT && cpu_online(cpu_rq_min) == CPU_ON) {
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d turnning off!\n", cpu_rq_min);
#endif
		cpu_down(cpu_rq_min);
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d off!\n", cpu_rq_min);
#endif
		hotpluging_rate = CHECK_DELAY;
	} 

no_hotplug:

	queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);

	mutex_unlock(&hotplug_lock);
}
Example #15
static void set_cpu_config(enum ux500_uc new_uc)
{
	bool update = false;
	int cpu;
	int min_freq, max_freq;

	if (new_uc != current_uc)
		update = true;
	else if ((user_config_updated) && (new_uc == UX500_UC_USER))
		update = true;

	pr_debug("%s: new_usecase=%d, current_usecase=%d, update=%d\n",
		__func__, new_uc, current_uc, update);

	if (!update)
		goto exit;

	/* Cpu hotplug */
	if (!(usecase_conf[new_uc].second_cpu_online) &&
	    (num_online_cpus() > 1))
		cpu_down(1);
	else if ((usecase_conf[new_uc].second_cpu_online) &&
		 (num_online_cpus() < 2))
		cpu_up(1);

	if (usecase_conf[new_uc].max_arm)
		max_freq = usecase_conf[new_uc].max_arm;
	else
		max_freq = system_max_freq;

	if (usecase_conf[new_uc].min_arm)
		min_freq = usecase_conf[new_uc].min_arm;
	else
		min_freq = system_min_freq;

	for_each_online_cpu(cpu)
		set_cpufreq(cpu,
			    min_freq,
			    max_freq);

	/* Kinda doing the job twice, but this is needed for reference keeping */
	if (usecase_conf[new_uc].min_arm)
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					     "usecase",
					     usecase_conf[new_uc].min_arm);
	else
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					     "usecase",
					     PRCMU_QOS_DEFAULT_VALUE);

	/* Cpu idle */
	cpuidle_set_multiplier(usecase_conf[new_uc].cpuidle_multiplier);

	/* L2 prefetch */
	if (usecase_conf[new_uc].l2_prefetch_en)
		outer_prefetch_enable();
	else
		outer_prefetch_disable();

	/* Force cpuidle state */
	cpuidle_force_state(usecase_conf[new_uc].forced_state);

	/* QOS override */
	prcmu_qos_voice_call_override(usecase_conf[new_uc].vc_override);

	current_uc = new_uc;

exit:
	/* It's OK to clear even if new_uc != UX500_UC_USER */
	user_config_updated = false;
}
Example #16
static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
	bool up = false;
	unsigned int cpu = nr_cpu_ids;
	unsigned long now = jiffies;

	mutex_lock(tegra3_cpu_lock);

	switch (hp_state) {
	case TEGRA_HP_DISABLED:
	case TEGRA_HP_IDLE:
		break;
	case TEGRA_HP_DOWN:
		cpu = tegra_get_slowest_cpu_n();
		if (cpu < nr_cpu_ids) {
			up = false;
		} else if (!is_lp_cluster() && !no_lp &&
			   ((now - last_change_time) >= down_delay)) {
				/* start show-p1984, 2012.05.13 */
				if (!cpu_clk) {
					printk(KERN_INFO "[cpu-tegra3]: re setting cpu_clk");
					cpu_clk = clk_get_sys(NULL, "cpu");
				}
				if (!cpu_lp_clk) {
					printk(KERN_INFO "[cpu-tegra3]: re setting cpu_lp_clk");
					cpu_lp_clk = clk_get_sys(NULL, "cpu_lp");
				}
				if (IS_ERR(cpu_clk) || IS_ERR(cpu_lp_clk)) {
					printk(KERN_INFO "[cpu-tegra3]: Error, cpu_clk/cpu_lp_lck not set");
					break;
				}
				/* end show-p1984, 2012.05.13 */
			if(!clk_set_parent(cpu_clk, cpu_lp_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, true);
				hp_stats_update(0, false);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
				break;
			}
		}
		queue_delayed_work(
			hotplug_wq, &hotplug_work, up2gn_delay);
		break;
	case TEGRA_HP_UP:
		if (is_lp_cluster() && !no_lp) {
			if(!clk_set_parent(cpu_clk, cpu_g_clk)) {
				last_change_time = now;
				hp_stats_update(CONFIG_NR_CPUS, false);
				hp_stats_update(0, true);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
			}
		} else {
			switch (tegra_cpu_speed_balance()) {
			/* cpu speed is up and balanced - one more on-line */
			case TEGRA_CPU_SPEED_BALANCED:
				cpu = cpumask_next_zero(0, cpu_online_mask);
				if (cpu < nr_cpu_ids)
					up = true;
				break;
			/* cpu speed is up, but skewed - remove one core */
			case TEGRA_CPU_SPEED_SKEWED:
				cpu = tegra_get_slowest_cpu_n();
				if (cpu < nr_cpu_ids)
					up = false;
				break;
			/* cpu speed is up, but under-utilized - do nothing */
			case TEGRA_CPU_SPEED_BIASED:
			default:
				break;
			}
		}
		queue_delayed_work(
			hotplug_wq, &hotplug_work, up2gn_delay);
		break;
	default:
		pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, hp_state);
	}

	if (!up && ((now - last_change_time) < down_delay))
			cpu = nr_cpu_ids;

	if (cpu < nr_cpu_ids) {
		last_change_time = now;
		hp_stats_update(cpu, up);
	}
	mutex_unlock(tegra3_cpu_lock);

	/* Ignore hotplug during shutdown. This prevents us from doing
	 * work that can fail.
	 */
	if (system_state <= SYSTEM_RUNNING && cpu < nr_cpu_ids) {
		if (up){
			printk(KERN_INFO "cpu_up(%u)+\n",cpu);
			cpu_up(cpu);
			printk(KERN_INFO "cpu_up(%u)-\n",cpu);
		}
		else{
			printk(KERN_INFO "cpu_down(%u)+\n",cpu);
			cpu_down(cpu);
			printk(KERN_INFO "cpu_down(%u)-\n",cpu);
		}
	}
}
Example #17
static void hotplug_timer(struct work_struct *work)
{
	extern unsigned int sysctl_sched_olord_period;
	unsigned int i, load = 0;
	int offline_target = -1, online_target = -1;
	struct cpu_time_info *tmp_info;
	cputime64_t cur_wall_time, cur_idle_time;
	unsigned int idle_time, wall_time;
	
	printk(KERN_INFO "%u\n", sysctl_sched_olord_period);
	
	mutex_lock(&hotplug_lock);
	
	/* Find the target CPUs for online and offline */
	for (i = 0; i < (sizeof cpus / sizeof (int)); i++){

		//printk(KERN_INFO "cpus[%u]: %u\n", i, cpus[i]);

		if(cpu_online(cpus[i])){
			offline_target = cpus[i];
			break;
		}
		else
			online_target = cpus[i];
	}
	
	//printk(KERN_INFO "offline: %d, online %d\n", offline_target, online_target);
	
	
	/* Calculate load */
	tmp_info = &per_cpu(hotplug_cpu_time, offline_target);

	cur_idle_time = get_cpu_idle_time_us(offline_target, &cur_wall_time);

	/* Use cputime64_sub for older kernels */
	//idle_time = (unsigned int)cputime64_sub(cur_idle_time,
	//		tmp_info->prev_cpu_idle);
	idle_time = (unsigned int)(cur_idle_time - tmp_info->prev_cpu_idle);

	tmp_info->prev_cpu_idle = cur_idle_time;

	/* Use cputime64_sub for older kernels */
	//wall_time = (unsigned int)cputime64_sub(cur_wall_time,
	//		tmp_info->prev_cpu_wall);
	wall_time = (cur_wall_time - tmp_info->prev_cpu_wall);

	tmp_info->prev_cpu_wall = cur_wall_time;

	if (wall_time < idle_time)
		goto no_hotplug;

	load = 100 * (wall_time - idle_time) / wall_time;

	//printk(KERN_INFO "Load %u\n", load);
	
	/* Offline */
	if (((load < trans_load_l_inuse)) &&
	    (num_online_cpus() > 1) && (offline_target > 0)) {
		//printk(KERN_INFO "load: %u cpu %u turning off\n", load, offline_target);
		cpu_down(offline_target);
		hotpluging_rate = CHECK_DELAY;
		 
	/* Online */
	} else if (((load > trans_load_h_inuse)) &&
		(num_present_cpus() > num_online_cpus()) &&
		   (online_target != -1)) {
		//printk(KERN_INFO "load: %u cpu %u turning on\n", load, online_target);
		cpu_up(online_target);
		hotpluging_rate = CHECK_DELAY * 10;
	}
		
no_hotplug:

	mutex_unlock(&hotplug_lock);

	/* If we're being removed, don't queue more work */
	if (likely(die == 0))
		queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);

}
Example #18
/*
 * hps algo - hmp
 */
void hps_algo_hmp(void)
{
    unsigned int cpu;
    unsigned int val;
    struct cpumask little_online_cpumask;
    struct cpumask big_online_cpumask;
    unsigned int little_num_base, little_num_limit, little_num_online;
    unsigned int big_num_base, big_num_limit, big_num_online;
    // for logging purposes
    char str1[64];
    char str2[64];
    int i, j;
    char * str1_ptr = str1;
    char * str2_ptr = str2;

    /*
     * run algo or not by hps_ctxt.enabled
     */
    if (!hps_ctxt.enabled)
    {
        atomic_set(&hps_ctxt.is_ondemand, 0);
        return;
    }

    /*
     * calculate cpu loading
     */
    hps_ctxt.cur_loads = 0;
    str1_ptr = str1;
    str2_ptr = str2;

    for_each_possible_cpu(cpu)
    {
        per_cpu(hps_percpu_ctxt, cpu).load = hps_cpu_get_percpu_load(cpu);
        hps_ctxt.cur_loads += per_cpu(hps_percpu_ctxt, cpu).load;

        if (hps_ctxt.cur_dump_enabled)
        {
            if (cpu_online(cpu))
                i = sprintf(str1_ptr, "%4u", 1);
            else
                i = sprintf(str1_ptr, "%4u", 0);
            str1_ptr += i;
            j = sprintf(str2_ptr, "%4u", per_cpu(hps_percpu_ctxt, cpu).load);
            str2_ptr += j;
        }
    }
    hps_ctxt.cur_nr_heavy_task = hps_cpu_get_nr_heavy_task();
    hps_cpu_get_tlp(&hps_ctxt.cur_tlp, &hps_ctxt.cur_iowait);

    /*
     * algo - begin
     */
    mutex_lock(&hps_ctxt.lock);
    hps_ctxt.action = ACTION_NONE;
    atomic_set(&hps_ctxt.is_ondemand, 0);

    /*
     * algo - get boundary
     */
    little_num_limit = min(hps_ctxt.little_num_limit_thermal, hps_ctxt.little_num_limit_low_battery);
    little_num_base = hps_ctxt.little_num_base_perf_serv;
    cpumask_and(&little_online_cpumask, &hps_ctxt.little_cpumask, cpu_online_mask);
    little_num_online = cpumask_weight(&little_online_cpumask);
    //TODO: no need if is_hmp
    big_num_limit = min(hps_ctxt.big_num_limit_thermal, hps_ctxt.big_num_limit_low_battery);
    big_num_base = max(hps_ctxt.cur_nr_heavy_task, hps_ctxt.big_num_base_perf_serv);
    cpumask_and(&big_online_cpumask, &hps_ctxt.big_cpumask, cpu_online_mask);
    big_num_online = cpumask_weight(&big_online_cpumask);
    if (hps_ctxt.cur_dump_enabled)
    {
        hps_debug(" CPU:%s\n", str1);
        hps_debug("LOAD:%s\n", str2);
        hps_debug("loads(%u), hvy_tsk(%u), tlp(%u), iowait(%u), limit_t(%u)(%u), limit_lb(%u)(%u), base_ps(%u)(%u)\n", 
            hps_ctxt.cur_loads, hps_ctxt.cur_nr_heavy_task, hps_ctxt.cur_tlp, hps_ctxt.cur_iowait,
            hps_ctxt.little_num_limit_thermal, hps_ctxt.big_num_limit_thermal,
            hps_ctxt.little_num_limit_low_battery, hps_ctxt.big_num_limit_low_battery,
            hps_ctxt.little_num_base_perf_serv, hps_ctxt.big_num_base_perf_serv);
    }

//ALGO_LIMIT:
    /*
     * algo - thermal, low battery
     */
    if (big_num_online > big_num_limit)
    {
        val =  big_num_online - big_num_limit;
        for (cpu = hps_ctxt.big_cpu_id_max; cpu >= hps_ctxt.big_cpu_id_min; --cpu)
        {
            if (cpumask_test_cpu(cpu, &big_online_cpumask))
            {
                cpu_down(cpu);
                cpumask_clear_cpu(cpu, &big_online_cpumask);
                --big_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_LIMIT_BIG, (unsigned long *)&hps_ctxt.action);
    }
    if (little_num_online > little_num_limit)
    {
        val =  little_num_online - little_num_limit;
        for (cpu = hps_ctxt.little_cpu_id_max; cpu > hps_ctxt.little_cpu_id_min; --cpu)
        {
            if (cpumask_test_cpu(cpu, &little_online_cpumask))
            {
                cpu_down(cpu);
                cpumask_clear_cpu(cpu, &little_online_cpumask);
                --little_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_LIMIT_LITTLE, (unsigned long *)&hps_ctxt.action);
    }
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_BASE:
    /*
     * algo - PerfService, heavy task detect
     */
    BUG_ON(big_num_online > big_num_limit);
    BUG_ON(little_num_online > little_num_limit);
    if ((big_num_online < big_num_base) && (big_num_online < big_num_limit) && (hps_ctxt.state == STATE_LATE_RESUME))
    {
        val =  min(big_num_base, big_num_limit) - big_num_online;
        for (cpu = hps_ctxt.big_cpu_id_min; cpu <= hps_ctxt.big_cpu_id_max; ++cpu)
        {
            if (!cpumask_test_cpu(cpu, &big_online_cpumask))
            {
                cpu_up(cpu);
                cpumask_set_cpu(cpu, &big_online_cpumask);
                ++big_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_BASE_BIG, (unsigned long *)&hps_ctxt.action);
    }
    if ((little_num_online < little_num_base) && (little_num_online < little_num_limit) &&
        (little_num_online + big_num_online < hps_ctxt.little_num_base_perf_serv + hps_ctxt.big_num_base_perf_serv))
    {
        val =  min(little_num_base, little_num_limit) - little_num_online;
        if (big_num_online > hps_ctxt.big_num_base_perf_serv)
            val -= big_num_online - hps_ctxt.big_num_base_perf_serv;
        for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
        {
            if (!cpumask_test_cpu(cpu, &little_online_cpumask))
            {
                cpu_up(cpu);
                cpumask_set_cpu(cpu, &little_online_cpumask);
                ++little_num_online;
                if (--val == 0)
                    break;
            }
        }
        BUG_ON(val);
        set_bit(ACTION_BASE_LITTLE, (unsigned long *)&hps_ctxt.action);
    }
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

    /*
     * update history - tlp
     */
    val = hps_ctxt.tlp_history[hps_ctxt.tlp_history_index];
    hps_ctxt.tlp_history[hps_ctxt.tlp_history_index] = hps_ctxt.cur_tlp;
    hps_ctxt.tlp_sum += hps_ctxt.cur_tlp;
    hps_ctxt.tlp_history_index = (hps_ctxt.tlp_history_index + 1 == hps_ctxt.tlp_times) ? 0 : hps_ctxt.tlp_history_index + 1;
    ++hps_ctxt.tlp_count;
    if (hps_ctxt.tlp_count > hps_ctxt.tlp_times)
    {
        BUG_ON(hps_ctxt.tlp_sum < val);
        hps_ctxt.tlp_sum -= val;
        hps_ctxt.tlp_avg = hps_ctxt.tlp_sum / hps_ctxt.tlp_times;
    }
    else
    {
        hps_ctxt.tlp_avg = hps_ctxt.tlp_sum / hps_ctxt.tlp_count;
    }
    if (hps_ctxt.stats_dump_enabled)
        hps_ctxt_print_algo_stats_tlp(0);

//ALGO_RUSH_BOOST:
    /*
     * algo - rush boost
     */
    if (hps_ctxt.rush_boost_enabled)
    {
        if (hps_ctxt.cur_loads > hps_ctxt.rush_boost_threshold * (little_num_online + big_num_online))
            ++hps_ctxt.rush_count;
        else
            hps_ctxt.rush_count = 0;

        if ((hps_ctxt.rush_count >= hps_ctxt.rush_boost_times) &&
            ((little_num_online + big_num_online) * 100 < hps_ctxt.tlp_avg))
        {
            val = hps_ctxt.tlp_avg / 100 + (hps_ctxt.tlp_avg % 100 ? 1 : 0);
            BUG_ON(!(val > little_num_online + big_num_online));
            if (val > num_possible_cpus())
                val = num_possible_cpus();

            val -= little_num_online + big_num_online;
            if ((val) && (little_num_online < little_num_limit))
            {
                for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
                {
                    if (!cpumask_test_cpu(cpu, &little_online_cpumask))
                    {
                        cpu_up(cpu);
                        cpumask_set_cpu(cpu, &little_online_cpumask);
                        ++little_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_RUSH_BOOST_LITTLE, (unsigned long *)&hps_ctxt.action);
            }
            else if ((val) && (big_num_online < big_num_limit) && (hps_ctxt.state == STATE_LATE_RESUME))
            {
                for (cpu = hps_ctxt.big_cpu_id_min; cpu <= hps_ctxt.big_cpu_id_max; ++cpu)
                {
                    if (!cpumask_test_cpu(cpu, &big_online_cpumask))
                    {
                        cpu_up(cpu);
                        cpumask_set_cpu(cpu, &big_online_cpumask);
                        ++big_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_RUSH_BOOST_BIG, (unsigned long *)&hps_ctxt.action);
            }
        }
    } //if (hps_ctxt.rush_boost_enabled)
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_UP:
    /*
     * algo - cpu up
     */
    if ((little_num_online + big_num_online) < num_possible_cpus())
    {
        /*
         * update history - up
         */
        val = hps_ctxt.up_loads_history[hps_ctxt.up_loads_history_index];
        hps_ctxt.up_loads_history[hps_ctxt.up_loads_history_index] = hps_ctxt.cur_loads;
        hps_ctxt.up_loads_sum += hps_ctxt.cur_loads;
        hps_ctxt.up_loads_history_index = (hps_ctxt.up_loads_history_index + 1 == hps_ctxt.up_times) ? 0 : hps_ctxt.up_loads_history_index + 1;
        ++hps_ctxt.up_loads_count;
        //XXX: use >= or >, which is more beneficial? use >
        if (hps_ctxt.up_loads_count > hps_ctxt.up_times)
        {
            BUG_ON(hps_ctxt.up_loads_sum < val);
            hps_ctxt.up_loads_sum -= val;
        }
        if (hps_ctxt.stats_dump_enabled)
            hps_ctxt_print_algo_stats_up(0);

        if (hps_ctxt.up_loads_count >= hps_ctxt.up_times)
        {
            if (hps_ctxt.up_loads_sum > hps_ctxt.up_threshold * hps_ctxt.up_times * (little_num_online + big_num_online))
            {
                if (little_num_online < little_num_limit)
                {
                    for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
                    {
                        if (!cpumask_test_cpu(cpu, &little_online_cpumask))
                        {
                            cpu_up(cpu);
                            cpumask_set_cpu(cpu, &little_online_cpumask);
                            ++little_num_online;
                            break;
                        }
                    }
                    set_bit(ACTION_UP_LITTLE, (unsigned long *)&hps_ctxt.action);
                }
                else if ((big_num_online < big_num_limit) && (hps_ctxt.state == STATE_LATE_RESUME))
                {
                    for (cpu = hps_ctxt.big_cpu_id_min; cpu <= hps_ctxt.big_cpu_id_max; ++cpu)
                    {
                        if (!cpumask_test_cpu(cpu, &big_online_cpumask))
                        {
                            cpu_up(cpu);
                            cpumask_set_cpu(cpu, &big_online_cpumask);
                            ++big_num_online;
                            break;
                        }
                    }
                    set_bit(ACTION_UP_BIG, (unsigned long *)&hps_ctxt.action);
                }
            }
        } //if (hps_ctxt.up_loads_count >= hps_ctxt.up_times)
    } //if ((little_num_online + big_num_online) < num_possible_cpus())
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_DOWN:
    /*
     * algo - cpu down (inc. quick landing)
     */
    if (little_num_online + big_num_online > 1)
    {
        /*
         * update history - down
         */
        val = hps_ctxt.down_loads_history[hps_ctxt.down_loads_history_index];
        hps_ctxt.down_loads_history[hps_ctxt.down_loads_history_index] = hps_ctxt.cur_loads;
        hps_ctxt.down_loads_sum += hps_ctxt.cur_loads;
        hps_ctxt.down_loads_history_index = (hps_ctxt.down_loads_history_index + 1 == hps_ctxt.down_times) ? 0 : hps_ctxt.down_loads_history_index + 1;
        ++hps_ctxt.down_loads_count;
        //XXX: use >= or >, which is more beneficial? use >
        if (hps_ctxt.down_loads_count > hps_ctxt.down_times)
        {
            BUG_ON(hps_ctxt.down_loads_sum < val);
            hps_ctxt.down_loads_sum -= val;
        }
        if (hps_ctxt.stats_dump_enabled)
            hps_ctxt_print_algo_stats_down(0);

        if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
        {
            unsigned int down_threshold = hps_ctxt.down_threshold * hps_ctxt.down_times;

            val = little_num_online + big_num_online;
            while (hps_ctxt.down_loads_sum < down_threshold * (val - 1))
                --val;
            val = little_num_online + big_num_online - val;

            if ((val) && (big_num_online > big_num_base))
            {
                for (cpu = hps_ctxt.big_cpu_id_max; cpu >= hps_ctxt.big_cpu_id_min; --cpu)
                {
                    if (cpumask_test_cpu(cpu, &big_online_cpumask))
                    {
                        cpu_down(cpu);
                        cpumask_clear_cpu(cpu, &big_online_cpumask);
                        --big_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_DOWN_BIG, (unsigned long *)&hps_ctxt.action);
            }
            else if ((val) && (little_num_online > little_num_base))
            {
                for (cpu = hps_ctxt.little_cpu_id_max; cpu > hps_ctxt.little_cpu_id_min; --cpu)
                {
                    if (cpumask_test_cpu(cpu, &little_online_cpumask))
                    {
                        cpu_down(cpu);
                        cpumask_clear_cpu(cpu, &little_online_cpumask);
                        --little_num_online;
                        if (--val == 0)
                            break;
                    }
                }
                set_bit(ACTION_DOWN_LITTLE, (unsigned long *)&hps_ctxt.action);
            }
        } //if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
    } //if (little_num_online + big_num_online > 1)
    if (hps_ctxt.action)
        goto ALGO_END_WITH_ACTION;

//ALGO_BIG_TO_LITTLE:
    /*
     * algo - b2L
     */
    if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
    {
        if ((little_num_online < little_num_limit) && (big_num_online > big_num_base))
        {
            //find last online big
            for (val = hps_ctxt.big_cpu_id_max; val >= hps_ctxt.big_cpu_id_min; --val)
            {
                if (cpumask_test_cpu(val, &big_online_cpumask))
                    break;
            }
            BUG_ON(val < hps_ctxt.big_cpu_id_min);

            //verify whether b2L should bring one little core online
            if (per_cpu(hps_percpu_ctxt, val).load * CPU_DMIPS_BIG_LITTLE_DIFF / 100 + 
                hps_ctxt.up_loads_sum / hps_ctxt.up_times <= hps_ctxt.up_threshold  * (little_num_online + big_num_online))
            {
                //up 1 little
                for (cpu = hps_ctxt.little_cpu_id_min; cpu <= hps_ctxt.little_cpu_id_max; ++cpu)
                {
                    if (!cpumask_test_cpu(cpu, &little_online_cpumask))
                    {
                        cpu_up(cpu);
                        cpumask_set_cpu(cpu, &little_online_cpumask);
                        ++little_num_online;
                        break;
                    }
                }

                //down 1 big
                cpu_down(val);
                cpumask_clear_cpu(val, &big_online_cpumask); /* was "cpu", which indexes the little core onlined above */
                --big_num_online;
                set_bit(ACTION_BIG_TO_LITTLE, (unsigned long *)&hps_ctxt.action);
            }
        } //if ((little_num_online < little_num_limit) && (big_num_online > big_num_base))
    } //if (hps_ctxt.down_loads_count >= hps_ctxt.down_times)
    if (!hps_ctxt.action)
        goto ALGO_END_WO_ACTION;

    /*
     * algo - end
     */
ALGO_END_WITH_ACTION:
    hps_warn("(%04x)(%u)(%u)action end(%u)(%u)(%u)(%u) (%u)(%u)(%u)(%u)(%u)(%u) (%u)(%u)(%u) (%u)(%u)(%u) (%u)(%u)(%u)(%u)(%u)\n",
        hps_ctxt.action, little_num_online, big_num_online,
        hps_ctxt.cur_loads, hps_ctxt.cur_tlp, hps_ctxt.cur_iowait, hps_ctxt.cur_nr_heavy_task, 
        hps_ctxt.little_num_limit_thermal, hps_ctxt.big_num_limit_thermal,
        hps_ctxt.little_num_limit_low_battery, hps_ctxt.big_num_limit_low_battery,
        hps_ctxt.little_num_base_perf_serv, hps_ctxt.big_num_base_perf_serv,
        hps_ctxt.up_loads_sum, hps_ctxt.up_loads_count, hps_ctxt.up_loads_history_index, 
        hps_ctxt.down_loads_sum, hps_ctxt.down_loads_count, hps_ctxt.down_loads_history_index, 
        hps_ctxt.rush_count, hps_ctxt.tlp_sum, hps_ctxt.tlp_count, hps_ctxt.tlp_history_index, hps_ctxt.tlp_avg);
    hps_ctxt_reset_stas_nolock();
ALGO_END_WO_ACTION:
    mutex_unlock(&hps_ctxt.lock);

    return;
}
static int __ref __cpu_hotplug(bool out_flag, enum hotplug_cmd cmd)
{
	int i = 0;
	int ret = 0;

	if (exynos_dm_hotplug_disabled())
		return 0;

#if defined(CONFIG_SCHED_HMP)
	if (out_flag) {
		if (do_disable_hotplug)
			goto blk_out;

		if (cmd == CMD_BIG_OUT && !in_low_power_mode) {
			for (i = setup_max_cpus - 1; i >= NR_CA7; i--) {
				if (cpu_online(i)) {
					ret = cpu_down(i);
					if (ret)
						goto blk_out;
				}
			}
		} else {
			for (i = setup_max_cpus - 1; i > 0; i--) {
				if (cpu_online(i)) {
					ret = cpu_down(i);
					if (ret)
						goto blk_out;
				}
			}
		}
	} else {
		if (in_suspend_prepared)
			goto blk_out;

		if (cmd == CMD_BIG_IN) {
			if (in_low_power_mode)
				goto blk_out;

			for (i = NR_CA7; i < setup_max_cpus; i++) {
				if (!cpu_online(i)) {
					ret = cpu_up(i);
					if (ret)
						goto blk_out;
				}
			}
		} else {
			if (big_hotpluged && !do_disable_hotplug) {
				for (i = 1; i < NR_CA7; i++) {
					if (!cpu_online(i)) {
						ret = cpu_up(i);
						if (ret)
							goto blk_out;
					}
				}
			} else {
				for (i = 1; i < setup_max_cpus; i++) {
					if (do_hotplug_out && i >= NR_CA7)
						goto blk_out;

					if (!cpu_online(i)) {
						ret = cpu_up(i);
						if (ret)
							goto blk_out;
					}
				}
			}
		}
	}
#else
	if (out_flag) {
		if (do_disable_hotplug)
			goto blk_out;

		for (i = setup_max_cpus - 1; i > 0; i--) {
			if (cpu_online(i)) {
				ret = cpu_down(i);
				if (ret)
					goto blk_out;
			}
		}
	} else {
		if (in_suspend_prepared)
			goto blk_out;

		for (i = 1; i < setup_max_cpus; i++) {
			if (!cpu_online(i)) {
				ret = cpu_up(i);
				if (ret)
					goto blk_out;
			}
		}
	}
#endif

blk_out:
	return ret;
}
Example #20
static void __cpuinit tplug_work_fn(struct work_struct *work)
{
    int i,j;
    unsigned int load[6], avg_load[6];
    unsigned int avg_cpu_load;

    for(i = 0 ; i < core_limit; i++)
    {
        if(cpu_online(i))
            load[i] = get_curr_load(i);
        else
            load[i] = 0;

        avg_load[i] = ((int) load[i] + (int) last_load[i]) / 2;
        last_load[i] = load[i];
    }

    // First, decide whether to bring a CPU online
    // CPU 0 is always online

    avg_cpu_load = avg_load[0];

    for(j = suspend_cpu_num ; j < core_limit; j++)
    {
        i = cpuidx[j];
        if (cpu_is_offline(i)) {
            if (avg_cpu_load > load_threshold) {
                if(DEBUG)
                    pr_info("%s : bringing back cpu%d, load avg: %d\n", V4TKPLUG,i,avg_cpu_load);
                last_time[i] = ktime_to_ms(ktime_get());
                cpu_up(i);
                if(DEBUG) print_cpus_all();
                break;
            }
        } else {
            avg_cpu_load = (avg_cpu_load + avg_load[i]*j)/(j+1);
        }
    }

    // Now check whether any CPU can be put offline
    avg_cpu_load = avg_load[0];

    for(j = suspend_cpu_num; j < core_limit; j++)
    {
        i = cpuidx[j];
        // if the next CPU is already offline or this is the last CPU
        if (cpu_online(i)) {
            if ((j==(core_limit-1) ) || cpu_is_offline(cpuidx[j+1])) {
                if (avg_cpu_load < CPU_LOAD_LOW_THRESHOLD) {
                    now[i] = ktime_to_ms(ktime_get());
                    if((now[i] - last_time[i]) > MIN_CPU_UP_TIME)
                    {
                        if(DEBUG)
                            pr_info("%s : offlining cpu%d, load avg: %d\n", V4TKPLUG,i,avg_cpu_load);
                        cpu_down(i);
                        if(DEBUG) print_cpus_all();
                    }
                    break;
                }
            } else {
                avg_cpu_load = (avg_cpu_load + avg_load[i]*j)/(j+1);
            }
        }
    }

    if(tplug_hp_enabled != 0 && !isSuspended)
        queue_delayed_work_on(0, tplug_wq, &tplug_work,
                              msecs_to_jiffies(sampling_time));
    else {
        if(!isSuspended)
            cpus_online_all();
        else
            v4tkplug_suspend();
    }

}
Example #21
static int cpu_subsys_offline(struct device *dev)
{
	return cpu_down(dev->id);
}
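The online side of the same cpu subsystem is the mirror image; a minimal sketch, assuming the plain cpu_up() variant without the NUMA-node bookkeeping seen in the store_online() example earlier:

/* Sketch of the matching online callback: mirror of cpu_subsys_offline(). */
static int cpu_subsys_online(struct device *dev)
{
	return cpu_up(dev->id);
}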
Example #22
static void __cpuinit intelli_plug_work_fn(struct work_struct *work)
{
	unsigned int nr_run_stat;
	unsigned int cpu_count = 0;
	unsigned int nr_cpus = 0;

	int decision = 0;
	int i;

	if (intelli_plug_active == 1) {
		nr_run_stat = calculate_thread_stats();
#ifdef DEBUG_INTELLI_PLUG
		pr_info("nr_run_stat: %u\n", nr_run_stat);
#endif
		cpu_count = nr_run_stat;
		// detect artificial loads or constant loads
		// using msm rqstats
		nr_cpus = num_online_cpus();
		if (!eco_mode_active && (nr_cpus >= 1 && nr_cpus < 4)) {
			decision = mp_decision();
			if (decision) {
				switch (nr_cpus) {
				case 2:
					cpu_count = 3;
#ifdef DEBUG_INTELLI_PLUG
					pr_info("nr_run(2) => %u\n", nr_run_stat);
#endif
					break;
				case 3:
					cpu_count = 4;
#ifdef DEBUG_INTELLI_PLUG
					pr_info("nr_run(3) => %u\n", nr_run_stat);
#endif
					break;
				}
			}
		}
		/* it's busy.. lets help it a bit */
		if (cpu_count > 2) {
			if (busy_persist_count == 0) {
				sampling_time = BUSY_SAMPLING_MS;
				busy_persist_count = BUSY_PERSISTENCE;
			}
		} else {
			if (busy_persist_count > 0)
				busy_persist_count--;
			else
				sampling_time = DEF_SAMPLING_MS;
		}

		if (!suspended) {
			switch (cpu_count) {
			case 1:
				if (persist_count > 0)
					persist_count--;
				if (persist_count == 0) {
					//take down everyone
					for (i = 3; i > 0; i--)
						cpu_down(i);
				}
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 1: %u\n", persist_count);
#endif
				break;
			case 2:
				persist_count = DUAL_CORE_PERSISTENCE;
				if (!decision)
					persist_count = DUAL_CORE_PERSISTENCE / CPU_DOWN_FACTOR;
				if (nr_cpus < 2) {
					for (i = 1; i < cpu_count; i++)
						cpu_up(i);
				} else {
					for (i = 3; i >  1; i--)
						cpu_down(i);
				}
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 2: %u\n", persist_count);
#endif
				break;
			case 3:
				persist_count = TRI_CORE_PERSISTENCE;
				if (!decision)
					persist_count = TRI_CORE_PERSISTENCE / CPU_DOWN_FACTOR;
				if (nr_cpus < 3) {
					for (i = 1; i < cpu_count; i++)
						cpu_up(i);
				} else {
					for (i = 3; i > 2; i--)
						cpu_down(i);
				}
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 3: %u\n", persist_count);
#endif
				break;
			case 4:
				persist_count = QUAD_CORE_PERSISTENCE;
				if (!decision)
					persist_count = QUAD_CORE_PERSISTENCE / CPU_DOWN_FACTOR;
				if (nr_cpus < 4)
					for (i = 1; i < cpu_count; i++)
						cpu_up(i);
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 4: %u\n", persist_count);
#endif
				break;
			default:
				pr_err("Run Stat Error: Bad value %u\n", nr_run_stat);
				break;
			}
		}
#ifdef DEBUG_INTELLI_PLUG
		else
			pr_info("intelli_plug is suspened!\n");
#endif
	}
	schedule_delayed_work_on(0, &intelli_plug_work,
		msecs_to_jiffies(sampling_time));
}
Example #23
void factory_cpu0_idle_test(void)
{
    int cpu = 0;
#ifdef CONFIG_SMP
    int i = 0;
    int ret = 0;
    int cpu_pwrdn_flag[nr_cpu_ids];
#endif

    spin_lock(&factory_lock);
    cpu = smp_processor_id();
    spin_unlock(&factory_lock);

    printk("[%s]it's cpu%d\n", __func__, cpu);

#ifdef CONFIG_SMP
    mutex_lock(&ftm_cpu_prepare);
    disable_hotplug_policy(true, nr_cpu_ids);
    memset(cpu_pwrdn_flag, 0, nr_cpu_ids * sizeof(int));
    for (i = 1; i < nr_cpu_ids; i++) {
        if (cpu_online(i)) {
            cpu_pwrdn_flag[i] = 1;
            ret = cpu_down(i);
            dcm_info("[%s]cpu_down(cpu%d) return %d, cpu1_killed=%u\n", __func__, i, ret, cpu1_killed);
        } else {
            dcm_info("[%s]no need to power down cpu%d\n", __func__, i);
        }
    }
    mutex_unlock(&ftm_cpu_prepare);
#endif

#ifdef CONFIG_LOCAL_WDT
    mpcore_wk_wdt_stop();
#endif
    mtk_wdt_disable(); // disable watch dog
    
    // this should be set according to the low-power requirement.
#ifdef IDLE_LOW_POWER_TEST
    enable_low_power_settings();
#endif
    local_irq_disable();
    go_to_idle();
    local_irq_enable();
#ifdef IDLE_LOW_POWER_TEST
    disable_low_power_settings();
#endif

#ifdef CONFIG_SMP
    mutex_lock(&ftm_cpu_prepare);
    for (i = 1; i < nr_cpu_ids; i++) {
        if (cpu_pwrdn_flag[i] == 1) {
            ret = cpu_up(i);
            dcm_info("[%s]cpu_up(cpu%d) return %d, cpu1_killed=%u\n", __func__, i, ret, cpu1_killed);
        } else {
            dcm_info("[%s]no need to power up cpu%d\n", __func__, i);
        }
    }
    disable_hotplug_policy(false, nr_cpu_ids);
    mutex_unlock(&ftm_cpu_prepare);
#endif
}
Example #24
static void tegra_auto_cpuplug_work_func(struct work_struct *work)
{
	bool up = false;
	unsigned int cpu = nr_cpu_ids;
	unsigned int min_cpus;

	mutex_lock(tegra3_cpu_lock);
	if (hp_state != TEGRA_HP_DISABLED) {
		switch (last_state) {
		case TEGRA_HP_UP:
			cpu = cpumask_next_zero(0, cpu_online_mask);
			if (cpu < nr_cpu_ids) {
				up = true;
				hp_stats_update(cpu, true);
			}
			break;
		case TEGRA_HP_DOWN:
			cpu = tegra_get_slowest_cpu_n();
			if (cpu < nr_cpu_ids) {
				min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
				if (min_cpus < num_online_cpus()) {
					up = false;
					hp_stats_update(cpu, false);
				} else {
					cpu = nr_cpu_ids;
				}
			} else if (!is_lp_cluster() && !no_lp) {

				/* For some reason this sometimes results in a null
				   pointer dereference. Set the clocks again if this
				   case occurs.
				   start show-p1984, 2012.05.13
				 */
				if (!cpu_clk) {
					printk(KERN_INFO "[cpu-tegra3]: re setting cpu_clk");
					cpu_clk = clk_get_sys(NULL, "cpu");
				}
				if (!cpu_lp_clk) {
					printk(KERN_INFO "[cpu-tegra3]: re setting cpu_lp_clk");
					cpu_lp_clk = clk_get_sys(NULL, "cpu_lp");
				}
				if (IS_ERR(cpu_clk) || IS_ERR(cpu_lp_clk)) {
					printk(KERN_INFO "[cpu-tegra3]: Error, cpu_clk/cpu_lp_lck not set");
					break;
				}
				/* end show-p1984, 2012.05.13 */

				if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
					CPU_DEBUG_PRINTK(CPU_DEBUG_HOTPLUG, " ENTER LPCPU");
					hp_stats_update(CONFIG_NR_CPUS, true);
					hp_stats_update(0, false);
					/* catch-up with governor target speed */
					tegra_cpu_set_speed_cap(NULL);
				} else
					pr_err(CPU_HOTPLUG_TAG" clk_set_parent fail\n");
			}
			break;
		}
	}
	mutex_unlock(tegra3_cpu_lock);

	if (system_state > SYSTEM_RUNNING) {
		pr_info(CPU_HOTPLUG_TAG" SYSTEM is not running\n");
	} else if (cpu < nr_cpu_ids) {
		if (up) {
			updateCurrentCPUTotalActiveTime();
			cpu_up(cpu);
			pr_info(CPU_HOTPLUG_TAG" TURN ON CPU %d, online CPU 0-3=[%d%d%d%d]\n",
					cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
		} else {
			updateCurrentCPUTotalActiveTime();
			cpu_down(cpu);
			pr_info(CPU_HOTPLUG_TAG" TURN OFF CPU %d, online CPU 0-3=[%d%d%d%d]\n",
					cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
		}
	}

	mutex_lock(tegra3_cpu_lock);
	is_plugging = false;
	mutex_unlock(tegra3_cpu_lock);
}
Example #25
static int __ref __cpu_hotplug(bool out_flag, enum hotplug_cmd cmd)
{
	int i = 0;
	int ret = 0;
#if defined(CONFIG_SCHED_HMP)
	int hotplug_out_limit = 0;
#endif

	if (exynos_dm_hotplug_disabled())
		return 0;

#if defined(CONFIG_SCHED_HMP)
	if (out_flag) {
		if (do_disable_hotplug)
			goto blk_out;

		if (cmd == CMD_SLEEP_PREPARE) {
			for (i = setup_max_cpus - 1; i >= NR_CLUST0_CPUS; i--) {
				if (cpu_online(i)) {
					ret = cpu_down(i);
					if (ret)
						goto blk_out;
				}
			}
			for (i = 1; i < nr_sleep_prepare_cpus; i++) {
				if (!cpu_online(i)) {
					ret = cpu_up(i);
					if (ret)
						goto blk_out;
				}
			}
		} else if (cmd == CMD_CLUST1_OUT && !in_low_power_mode) {
			for (i = setup_max_cpus - 1; i >= NR_CLUST0_CPUS; i--) {
				if (cpu_online(i)) {
					ret = cpu_down(i);
					if (ret)
						goto blk_out;
				}
			}
		} else {
			if (cmd == CMD_CLUST0_ONE_OUT) {
				if (!in_low_power_mode)
					goto blk_out;

				for (i = NR_CLUST0_CPUS - 2; i > 0; i--) {
					if (cpu_online(i)) {
						ret = cpu_down(i);
						if (ret)
							goto blk_out;
					}
				}
			} else {
				if (cluster0_hotplug_in)
					hotplug_out_limit = NR_CLUST0_CPUS - 2;

				for (i = setup_max_cpus - 1; i > hotplug_out_limit; i--) {
					if (cpu_online(i)) {
						ret = cpu_down(i);
						if (ret)
							goto blk_out;
					}
				}
			}
		}
	} else {
		if (in_suspend_prepared)
			goto blk_out;

		if (cmd == CMD_CLUST1_IN) {
			if (in_low_power_mode)
				goto blk_out;

			for (i = NR_CLUST0_CPUS; i < setup_max_cpus; i++) {
				if (!cpu_online(i)) {
					ret = cpu_up(i);
					if (ret)
						goto blk_out;
				}
			}
		} else {
			if (cmd == CMD_CLUST0_ONE_IN) {
				for (i = 1; i < NR_CLUST0_CPUS - 1; i++) {
					if (!cpu_online(i)) {
						ret = cpu_up(i);
						if (ret)
							goto blk_out;
					}
				}
			} else if ((cluster1_hotplugged && !do_disable_hotplug) ||
				(cmd == CMD_CLUST0_IN)) {
				for (i = 1; i < NR_CLUST0_CPUS; i++) {
					if (!cpu_online(i)) {
						ret = cpu_up(i);
						if (ret)
							goto blk_out;
					}
				}
			} else {
				if (lcd_is_on) {
					for (i = NR_CLUST0_CPUS; i < setup_max_cpus; i++) {
						if (do_hotplug_out)
							goto blk_out;

						if (!cpu_online(i)) {
							if (i == NR_CLUST0_CPUS)
								set_hmp_boostpulse(100000);

							ret = cpu_up(i);
							if (ret)
								goto blk_out;
						}
					}

					for (i = 1; i < NR_CLUST0_CPUS; i++) {
						if (!cpu_online(i)) {
							ret = cpu_up(i);
							if (ret)
								goto blk_out;
						}
					}
				} else {
					for (i = 1; i < setup_max_cpus; i++) {
						if (do_hotplug_out && i >= NR_CLUST0_CPUS)
							goto blk_out;

						if (!cpu_online(i)) {
							ret = cpu_up(i);
							if (ret)
								goto blk_out;
						}
					}
				}
			}
		}
	}
#else
	if (out_flag) {
		if (do_disable_hotplug)
			goto blk_out;

		for (i = setup_max_cpus - 1; i > 0; i--) {
			if (cpu_online(i)) {
				ret = cpu_down(i);
				if (ret)
					goto blk_out;
			}
		}
	} else {
		if (in_suspend_prepared)
			goto blk_out;

		for (i = 1; i < setup_max_cpus; i++) {
			if (!cpu_online(i)) {
				ret = cpu_up(i);
				if (ret)
					goto blk_out;
			}
		}
	}
#endif

blk_out:
	return ret;
}
static void __ref intelli_plug_work_fn(struct work_struct *work)
{
    unsigned int nr_run_stat;
    unsigned int cpu_count = 0;
    unsigned int nr_cpus = 0;

    int decision = 0;
    int i;

    nr_run_stat = calculate_thread_stats();
    if (debug_intelli_plug)
        pr_info("nr_run_stat: %u\n", nr_run_stat);
    cpu_count = nr_run_stat;
    /* detect artificial loads or constant loads
     * using msm rqstats
     */
    nr_cpus = num_online_cpus();
    if (!eco_mode_active && !strict_mode_active &&
            (nr_cpus >= 1 && nr_cpus < 4)) {
        decision = mp_decision();
        if (decision) {
            switch (nr_cpus) {
            case 2:
                cpu_count = 3;
                if (debug_intelli_plug)
                    pr_info("nr_run(2) => %u\n",
                            nr_run_stat);
                break;
            case 3:
                cpu_count = 4;
                if (debug_intelli_plug)
                    pr_info("nr_run(3) => %u\n",
                            nr_run_stat);
                break;
            }
        }
    }
    /* it's busy.. lets help it a bit */
    if (cpu_count > 2) {
        if (busy_persist_count == 0) {
            sampling_time = busy_sampling_ms;
            busy_persist_count = busy_persistence;
        }
    } else {
        if (busy_persist_count > 0)
            busy_persist_count--;
        else
            sampling_time = def_sampling_ms;
    }

    if (!hotplug_suspended) {
        switch (cpu_count) {
        case 1:
            if (persist_count > 0)
                persist_count--;
            if (persist_count == 0) {
                /* take down everyone */
                for (i = 3; i > 0; i--)
                    cpu_down(i);
            }
            if (debug_intelli_plug)
                pr_info("case 1: %u\n", persist_count);
            break;
        case 2:
            persist_count = dual_core_persistence;
            if (!decision)
                persist_count = dual_core_persistence /
                                cpu_down_factor;
            if (nr_cpus < 2) {
                for (i = 1; i < cpu_count; i++)
                    cpu_up(i);
            } else {
                for (i = 3; i >  1; i--)
                    cpu_down(i);
            }
            if (debug_intelli_plug)
                pr_info("case 2: %u\n", persist_count);
            break;
        case 3:
            persist_count = tri_core_persistence;
            if (!decision)
                persist_count = tri_core_persistence /
                                cpu_down_factor;
            if (nr_cpus < 3) {
                for (i = 1; i < cpu_count; i++)
                    cpu_up(i);
            } else {
                for (i = 3; i > 2; i--)
                    cpu_down(i);
            }
            if (debug_intelli_plug)
                pr_info("case 3: %u\n", persist_count);
            break;
        case 4:
            persist_count = quad_core_persistence;
            if (!decision)
                persist_count = quad_core_persistence /
                                cpu_down_factor;
            if (nr_cpus < 4)
                for (i = 1; i < cpu_count; i++)
                    cpu_up(i);
            if (debug_intelli_plug)
                pr_info("case 4: %u\n", persist_count);
            break;
        default:
            pr_err("Run Stat Error: Bad value %u\n",
                   nr_run_stat);
            break;
        }
    } else if (debug_intelli_plug)
        pr_info("intelli_plug is suspened!\n");

    queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_work,
                          msecs_to_jiffies(sampling_time));
}
Example #27
static void hotplug_timer(struct work_struct *work)
{
	struct cpu_hotplug_info tmp_hotplug_info[4];
	int i;
	unsigned int load = 0;
	unsigned int cpu_rq_min=0;
	unsigned long nr_rq_min = -1UL;
	unsigned int select_off_cpu = 0;
	enum flag flag_hotplug;

	mutex_lock(&hotplug_lock);

	// dynamic hotplug disabled (tegrak): take the second core down
	// if needed and exit without re-arming the timer
	if (!hotplug_on) {
		if (!second_core_on && cpu_online(1) == 1)
			cpu_down(1);
		goto off_hotplug;
	}

	if (user_lock == 1)
		goto no_hotplug;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(hotplug_cpu_time, i);

		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int)cputime64_sub(cur_idle_time,
							tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int)cputime64_sub(cur_wall_time,
							tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		if (wall_time < idle_time)
			goto no_hotplug;

		tmp_info->load = 100 * (wall_time - idle_time) / wall_time;

		load += tmp_info->load;
		/*find minimum runqueue length*/
		tmp_hotplug_info[i].nr_running = get_cpu_nr_running(i);

		if (i && nr_rq_min > tmp_hotplug_info[i].nr_running) {
			nr_rq_min = tmp_hotplug_info[i].nr_running;

			cpu_rq_min = i;
		}
	}

	for (i = NUM_CPUS - 1; i > 0; --i) {
		if (cpu_online(i) == 0) {
			select_off_cpu = i;
			break;
		}
	}

	/* standalone hotplug */
	flag_hotplug = standalone_hotplug(load, nr_rq_min, cpu_rq_min);

	/* never hotplug out CPU 0 */
	if ((cpu_rq_min == 0) && (flag_hotplug == HOTPLUG_OUT))
		goto no_hotplug;

	/*cpu hotplug*/
	if (flag_hotplug == HOTPLUG_IN && cpu_online(select_off_cpu) == CPU_OFF) {
		DBG_PRINT("cpu%d turning on!\n", select_off_cpu);
		cpu_up(select_off_cpu);
		DBG_PRINT("cpu%d on\n", select_off_cpu);
		hotpluging_rate = CHECK_DELAY * 4;
	} else if (flag_hotplug == HOTPLUG_OUT && cpu_online(cpu_rq_min) == CPU_ON) {
		DBG_PRINT("cpu%d turnning off!\n", cpu_rq_min);
		cpu_down(cpu_rq_min);
		DBG_PRINT("cpu%d off!\n", cpu_rq_min);
		hotpluging_rate = CHECK_DELAY;
	} 

no_hotplug:
	//printk("hotplug_timer done.\n");

	queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);
off_hotplug:

	mutex_unlock(&hotplug_lock);
}
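
The sampling loop above uses the standard idle-delta formula: load over a window is the busy fraction of the wall-clock time elapsed since the previous sample. A minimal sketch of just that computation, assuming get_cpu_idle_time_us() semantics (cumulative idle time returned, cumulative wall time written through the second argument):

/* Sketch: percentage load of one CPU over one sampling window, from
 * cumulative idle and wall-clock counters; prev_wall and prev_idle
 * carry state between samples. */
static unsigned int window_load(unsigned long long cur_wall,
				unsigned long long cur_idle,
				unsigned long long *prev_wall,
				unsigned long long *prev_idle)
{
	unsigned long long wall_delta = cur_wall - *prev_wall;
	unsigned long long idle_delta = cur_idle - *prev_idle;

	*prev_wall = cur_wall;
	*prev_idle = cur_idle;

	if (wall_delta == 0 || wall_delta < idle_delta)
		return 0;	/* empty window or clock skew: treat as idle */

	return (unsigned int)(100ULL * (wall_delta - idle_delta) / wall_delta);
}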
Example #28
// Use struct GPT_CONFIG for all IOCTLs:
static long uvvp_hotplug_ioctl(struct file *file,
                            unsigned int cmd, unsigned long arg)
{
    #ifdef Lv_debug
    printk("\r\n******** uvvp_hotplug_ioctl cmd[%d]********\r\n",cmd);
    #endif 
    
    /*
     * 20121101 marc.huang 
     * mark to fix build warning
     */
    //void __user *argp = (void __user *)arg;
    //int __user *p = argp;
    
    int i, j, k, cpu_index, cpu_count;

    switch (cmd) {
        default:
            return -1;

        case UVVP_HOTPLUG_UP_CPU1:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_up(1) ********\r\n");
            cpu_up(1);
            return 0;
        
        case UVVP_HOTPLUG_DOWN_CPU1:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_down(1) ********\r\n");
            cpu_down(1);
            return 0;
        
        case UVVP_HOTPLUG_UP_CPU2:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_up(2) ********\r\n");
            cpu_up(2);
            return 0;
        
        case UVVP_HOTPLUG_DOWN_CPU2:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_down(2) ********\r\n");
            cpu_down(2);
            return 0;
        
        case UVVP_HOTPLUG_UP_CPU3:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_up(3) ********\r\n");
            cpu_up(3);
            return 0;
        
        case UVVP_HOTPLUG_DOWN_CPU3:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_down(3) ********\r\n");
            cpu_down(3);
            return 0;
        
        case UVVP_HOTPLUG_UP_CPU4:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_up(4) ********\r\n");
            cpu_up(4);
            return 0;
        
        case UVVP_HOTPLUG_DOWN_CPU4:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_down(4) ********\r\n");
            cpu_down(4);
            return 0;
        
        case UVVP_HOTPLUG_UP_CPU5:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_up(4) ********\r\n");
            cpu_up(5);
            return 0;
        
        case UVVP_HOTPLUG_DOWN_CPU5:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_down(4) ********\r\n");
            cpu_down(5);
            return 0;
        
        case UVVP_HOTPLUG_UP_CPU6:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_up(6) ********\r\n");
            cpu_up(6);
            return 0;
        
        case UVVP_HOTPLUG_DOWN_CPU6:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_down(6) ********\r\n");
            cpu_down(6);
            return 0;
        
        case UVVP_HOTPLUG_UP_CPU7:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_up(7) ********\r\n");
            cpu_up(7);
            return 0;
        
        case UVVP_HOTPLUG_DOWN_CPU7:
            printk("\r\n******** uvvp_hotplug_ioctl cpu_down(7) ********\r\n");
            cpu_down(7);
            return 0;
        
        case UVVP_HOTPLUG_UP_DBG0:
            printk("\r\n******** uvvp_hotplug_ioctl spm_mtcmos_ctrl_dbg0(STA_POWER_ON) ********\r\n");
            spm_mtcmos_ctrl_dbg0(STA_POWER_ON);
            return 0;
        
        case UVVP_HOTPLUG_DOWN_DBG0:
            printk("\r\n******** uvvp_hotplug_ioctl spm_mtcmos_ctrl_dbg0(STA_POWER_DOWN) ********\r\n");
            spm_mtcmos_ctrl_dbg0(STA_POWER_DOWN);
            return 0;
        
        case UVVP_HOTPLUG_UP_DBG1:
            printk("\r\n******** uvvp_hotplug_ioctl spm_mtcmos_ctrl_dbg1(STA_POWER_ON) ********\r\n");
            spm_mtcmos_ctrl_dbg1(STA_POWER_ON);
            return 0;
        
        case UVVP_HOTPLUG_DOWN_DBG1:
            printk("\r\n******** uvvp_hotplug_ioctl spm_mtcmos_ctrl_dbg1(STA_POWER_DOWN) ********\r\n");
            spm_mtcmos_ctrl_dbg1(STA_POWER_DOWN);
            return 0;
        
        case UVVP_HOTPLUG_STRESS_1_UP_DOWN_CPUS:
            printk("\r\n******** uvvp_hotplug_ioctl stress_test_1 cpu_up/cpu_down(1/2/3/4/5/6/7) ********\r\n");
            
            //0. turn on all the cpus
            for (i = 1; i < 8; ++i)
                cpu_up(i);
            
            for (i = 0; i < STRESS_TEST_1_COUNT; ++i)
            {
                for (j = 1; j < 8; ++j) //cpu_count
                {
                    for (k = 1; k < 8; ++k) //index
                    {
                        cpu_index = k;
                        cpu_count = j;
                        while (cpu_count--)
                        {
                            cpu_down(cpu_index);
                            if (++cpu_index == 8)
                                cpu_index = 1;
                        }
                        msleep(STRESS_TEST_1_DELAY_MS);
                        
                        cpu_index = k;
                        cpu_count = j;
                        while (cpu_count--)
                        {
                            cpu_up(cpu_index);
                            if (++cpu_index == 8)
                                cpu_index = 1;
                        }
                        msleep(STRESS_TEST_1_DELAY_MS);
                    }
                }
            }
                      
            /*            
            //1. turn off 1 cpu at one time
            for (i = 0; i < STRESS_TEST_1_COUNT; ++i)
            {
                for (j = 1; j < 4; ++j)
                {
                    cpu_down(j);
                    msleep(STRESS_TEST_1_DELAY_MS);
                    cpu_up(j);
                    msleep(STRESS_TEST_1_DELAY_MS);
                }
            }
            
            //2. turn off 2 cpus at one time
            for (i = 0; i < STRESS_TEST_1_COUNT; ++i)
            {
                for (j = 1; j < 4; ++j)
                {
                    cpu_down(j);
                    cpu_down( ((j + 1 == 4) ? 1 : j + 1) );
                    msleep(STRESS_TEST_1_DELAY_MS);
                    cpu_up(j);
                    cpu_up( ((j + 1 == 4) ? 1 : j + 1) );
                    msleep(STRESS_TEST_1_DELAY_MS);
                }
            }
            
            //3. turn off 3 cpus at one time
            for (i = 0; i < STRESS_TEST_1_COUNT; ++i)
            {
                for (j = 1; j < 4; ++j)
                {
                    cpu_down(j);
                }
                msleep(STRESS_TEST_1_DELAY_MS);
                
                for (j = 1; j < 4; ++j)
                {
                    cpu_up(j);
                }
                msleep(STRESS_TEST_1_DELAY_MS);
            }
            */
            return 0;
            
        case UVVP_HOTPLUG_STRESS_2_UP_DOWN_CPUS:
            printk("\r\n******** uvvp_hotplug_ioctl stress_test_2 cpu_up/cpu_down(1/2/3) ********\r\n");
            
            for (i = 0; i < STRESS_TEST_2_COUNT; ++i)
            {
                j = jiffies % 7 + 1;
                if (cpu_online(j))
                {
                    printk("@@@@@ %8d: cpu_down(%d) @@@@@\n", i, j);
                    cpu_down(j);
                }
                else
                {
                    printk("@@@@@ %8d: cpu_up(%d) @@@@@\n", i, j);
                    cpu_up(j);
                }
                msleep(STRESS_TEST_2_DELAY_MS);
            }
            
            return 0;
        
    }

    return 0;    
}
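
For completeness, a user-space caller of this ioctl interface might look like the sketch below. The device node path and the numeric command values are assumptions (the real UVVP_HOTPLUG_* constants come from the driver's header), so treat this as a shape, not a drop-in test.

/* Hypothetical user-space client; path and command values assumed. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define UVVP_HOTPLUG_UP_CPU1   1	/* assumed value */
#define UVVP_HOTPLUG_DOWN_CPU1 2	/* assumed value */

int main(void)
{
	int fd = open("/dev/uvvp_hotplug", O_RDWR);	/* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, UVVP_HOTPLUG_DOWN_CPU1, 0) < 0)
		perror("ioctl: down cpu1");
	sleep(1);
	if (ioctl(fd, UVVP_HOTPLUG_UP_CPU1, 0) < 0)
		perror("ioctl: up cpu1");
	close(fd);
	return 0;
}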
Example #29
static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
    bool up = false;
    unsigned int cpu = nr_cpu_ids;
    unsigned long now = jiffies;
    static unsigned long last_change_time;

    mutex_lock(tegra3_cpu_lock);

    switch (hp_state) {
    case TEGRA_HP_DISABLED:
    case TEGRA_HP_IDLE:
        break;
    case TEGRA_HP_DOWN:
        cpu = tegra_get_slowest_cpu_n();
        if (cpu < nr_cpu_ids) {
            up = false;
        } else if (!is_lp_cluster() && !no_lp &&
                   !pm_qos_request(PM_QOS_MIN_ONLINE_CPUS)) {
            if(!clk_set_parent(cpu_clk, cpu_lp_clk)) {
                hp_stats_update(CONFIG_NR_CPUS, true);
                hp_stats_update(0, false);
                /* catch-up with governor target speed */
                tegra_cpu_set_speed_cap(NULL);
                break;
            }
        }
        queue_delayed_work(
            hotplug_wq, &hotplug_work, down_delay);
        break;
    case TEGRA_HP_UP:
        if (is_lp_cluster() && !no_lp) {
            if(!clk_set_parent(cpu_clk, cpu_g_clk)) {
                hp_stats_update(CONFIG_NR_CPUS, false);
                hp_stats_update(0, true);
                /* catch-up with governor target speed */
                tegra_cpu_set_speed_cap(NULL);
            }
        } else {
            switch (tegra_cpu_speed_balance()) {
            /* cpu speed is up and balanced - one more on-line */
            case TEGRA_CPU_SPEED_BALANCED:
                cpu = cpumask_next_zero(0, cpu_online_mask);
                if (cpu < nr_cpu_ids)
                    up = true;
                break;
            /* cpu speed is up, but skewed - remove one core */
            case TEGRA_CPU_SPEED_SKEWED:
                cpu = tegra_get_slowest_cpu_n();
                if (cpu < nr_cpu_ids)
                    up = false;
                break;
            /* cpu speed is up, but under-utilized - do nothing */
            case TEGRA_CPU_SPEED_BIASED:
            default:
                break;
            }
        }
        queue_delayed_work(
            hotplug_wq, &hotplug_work, up2gn_delay);
        break;
    default:
        pr_err("%s: invalid tegra hotplug state %d\n",
               __func__, hp_state);
    }

    if (!up && ((now - last_change_time) < down_delay))
        cpu = nr_cpu_ids;

    if (cpu < nr_cpu_ids) {
        last_change_time = now;
        hp_stats_update(cpu, up);
    }
    mutex_unlock(tegra3_cpu_lock);

    if (cpu < nr_cpu_ids) {
        if (up) {
            printk("cpu_up(%u)+\n",cpu);
            cpu_up(cpu);
            printk("cpu_up(%u)-\n",cpu);
        } else {
            printk("cpu_down(%u)+\n",cpu);
            cpu_down(cpu);
            printk("cpu_down(%u)-\n",cpu);
        }
    }
}
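
What distinguishes this variant is the last_change_time guard: up transitions are applied immediately, while a core is taken down at most once per down_delay window. The asymmetric rate limit in isolation might look like this sketch (names are illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>

/* Sketch: allow "up" transitions immediately, but throttle "down"
 * transitions to at most one per min_interval jiffies; any accepted
 * transition resets the clock. */
static bool transition_allowed(bool up, unsigned long min_interval)
{
	static unsigned long last_change;
	unsigned long now = jiffies;

	if (!up && time_before(now, last_change + min_interval))
		return false;	/* too soon to take another core down */

	last_change = now;
	return true;
}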
Example #30
static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
	bool up = false;
	unsigned int cpu = nr_cpu_ids;

	mutex_lock(tegra3_cpu_lock);

	switch (hp_state) {
	case TEGRA_HP_DISABLED:
	case TEGRA_HP_IDLE:
		break;
	case TEGRA_HP_DOWN:
		cpu = tegra_get_slowest_cpu_n();
		if (cpu < nr_cpu_ids) {
			up = false;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, down_delay);
			hp_stats_update(cpu, false);
		} else if (!is_lp_cluster() && !no_lp) {
			if(!clk_set_parent(cpu_clk, cpu_lp_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, true);
				hp_stats_update(0, false);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
			} else
				queue_delayed_work(
					hotplug_wq, &hotplug_work, down_delay);
		}
		break;
	case TEGRA_HP_UP:
		if (is_lp_cluster() && !no_lp) {
			if(!clk_set_parent(cpu_clk, cpu_g_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, false);
				hp_stats_update(0, true);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
			}
		} else {
			switch (tegra_cpu_speed_balance()) {
			/* cpu speed is up and balanced - one more on-line */
			case TEGRA_CPU_SPEED_BALANCED:
				cpu = cpumask_next_zero(0, cpu_online_mask);
				if (cpu < nr_cpu_ids) {
					up = true;
					hp_stats_update(cpu, true);
				}
				break;
			/* cpu speed is up, but skewed - remove one core */
			case TEGRA_CPU_SPEED_SKEWED:
				cpu = tegra_get_slowest_cpu_n();
				if (cpu < nr_cpu_ids) {
					up = false;
					hp_stats_update(cpu, false);
				}
				break;
			/* cpu speed is up, but under-utilized - do nothing */
			case TEGRA_CPU_SPEED_BIASED:
			default:
				break;
			}
		}
		queue_delayed_work(
			hotplug_wq, &hotplug_work, up2gn_delay);
		break;
	default:
		pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, hp_state);
	}
	mutex_unlock(tegra3_cpu_lock);

	if (cpu < nr_cpu_ids) {
		/*if (up)
			cpu_up(cpu);
		else
			cpu_down(cpu);*/
		if (up) {
			if (num_online_cpus() < hotplug_num)
				cpu_up(cpu);
			else
				printk("tegra_auto_hotplug_work_func: need up, but hotplug_num=%u num_online_cpus()=%u\n",
				       hotplug_num, num_online_cpus());
		} else {
			cpu_down(cpu);
		}
	}
}
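
The added guard only caps the up path against hotplug_num. A symmetrical helper that steers the online count toward a target under the same cap (and never touches CPU 0) might be sketched as follows; hotplug_num is assumed to mean "maximum cores allowed online":

/* Sketch: move the number of online CPUs toward `target`, clamped to
 * `max_online`, always leaving CPU 0 up. Uses the legacy
 * cpu_up()/cpu_down() interface seen throughout these examples. */
static void steer_online_cpus(unsigned int target, unsigned int max_online)
{
	int cpu;

	if (target > max_online)
		target = max_online;
	if (target < 1)
		target = 1;	/* CPU 0 must stay online */

	while (num_online_cpus() < target) {
		cpu = cpumask_next_zero(0, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			break;	/* no more possible CPUs */
		if (cpu_up(cpu))
			break;	/* bring-up failed: stop rather than spin */
	}

	while (num_online_cpus() > target) {
		for (cpu = nr_cpu_ids - 1; cpu > 0; cpu--)
			if (cpu_online(cpu))
				break;
		if (cpu == 0)
			break;	/* only CPU 0 left */
		if (cpu_down(cpu))
			break;	/* take-down failed: stop rather than spin */
	}
}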
}