Example #1
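/*
 * Factory idle test: log the current CPU, online all secondary CPUs,
 * disable the watchdog and backlight, then spawn one thread per CPU
 * (bound to that CPU) that enters WFI, signalling completion when done.
 */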
void factory_cpus_idle_test(void)
{
    int cpu = 0;
    int i = 0;
    char name[16] = {'\0'};
    struct task_struct *thread[nr_cpu_ids];
#ifdef CONFIG_SMP
    int ret = 0;
#endif

    spin_lock(&factory_lock);
    cpu = smp_processor_id();
    spin_unlock(&factory_lock);
    dcm_info("[%s]: it's cpu%d, num_online_cpus=%d\n", __func__, cpu, num_online_cpus());

#ifdef CONFIG_SMP
    mutex_lock(&ftm_cpu_prepare);
    disable_hotplug_policy(true, nr_cpu_ids);
    for (i = 1; i < nr_cpu_ids; i++) {
        ret = cpu_up(i);
        dcm_info("[%s]cpu_up(cpu%d) return %d, cpu1_killed=%u\n", __func__, i, ret, cpu1_killed);
    }
    mutex_unlock(&ftm_cpu_prepare);
#endif

    mtk_wdt_disable(); /* disable watchdog */

    /* turn off backlight */
#if defined(CONFIG_MTK_LEDS)
    mt65xx_leds_brightness_set(MT65XX_LED_TYPE_LCD, 0);
#endif

    for (i = nr_cpu_ids - 1; i >= 0; i--) {
        cpuid[i] = i;
        init_completion(&each_thread_done[i]);
        snprintf(name, sizeof(name), "idle-%d", i);
        thread[i] = kthread_create(cpu_enter_wfi[i], &cpuid[i], "%s", name);
        if (IS_ERR(thread[i])) {
            int ret = PTR_ERR(thread[i]);
            thread[i] = NULL;
            dcm_info("[%s]: kthread_create %s fail(%d)\n", __func__, name, ret);
            return;
        }
        dcm_info("[%s]: kthread_create %s done\n", __func__, name);
        kthread_bind(thread[i], i);
        dcm_info("[%s]: kthread_bind %s done\n", __func__, name);
        wake_up_process(thread[i]);
        dcm_info("[%s]: wake_up_process %s done\n", __func__, name);
        wait_for_completion(&each_thread_done[i]);
    }
    dcm_info("[%s]: cpu%d starts to complete_all all_threads_done\n", __func__, cpu);
    complete_all(&all_threads_done);
}
Example #2
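/* Bring every possible CPU online, then pause offlining briefly and reschedule the decision work. */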
static void __cpuinit hotplug_online_all_work_fn(struct work_struct *work)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		if (likely(!cpu_online(cpu))) {
			cpu_up(cpu);
			pr_info("auto_hotplug: CPU%d up.\n", cpu);
		}
	}
	schedule_delayed_work(&hotplug_unpause_work, HZ);
	schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
}
Example #3
/*
 * We need to make sure each present CPU is online.  The next kernel will scan
 * the device tree and assume primary threads are online and query secondary
 * threads via RTAS to online them if required.  If we don't online primary
 * threads, they will be stuck.  However, we also online secondary threads as we
 * may be using 'cede offline'.  In this case RTAS doesn't see the secondary
 * threads as offline -- and again, these CPUs will be stuck.
 *
 * So, we online all CPUs that should be running, including secondary threads.
 */
static void wake_offline_cpus(void)
{
	int cpu = 0;

	for_each_present_cpu(cpu) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
			       cpu);
			cpu_up(cpu);
		}
	}
}
Example #4
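/*
 * mpdecision entry point: pin the process to core0, initialize the
 * decision servers, start the mp-decision and hotplug threads, and
 * clean up once they exit.
 */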
int main(int argc, char *argv[])
{
	int ret = 0;
	int cpu = 1;
	unsigned int cpu_set = 1; /* Run on core0 */

#ifdef MPCTL_SERVER
	mpctl_server_init();
#endif

	mpdecision_server_init();

	ret = syscall(__NR_sched_setaffinity, 0, sizeof(cpu_set), &cpu_set);
	if (ret < 0) {
		msg("Cannot set cpu affinity: %s\n", strerror(errno));
		return ret;
	}

	core1_status = CORE_DOWN;
	cpu_up(1, core1_status);

	msg("Core1 status: %s\n", core1_status ? "online" : "offline");

	/* Raise priority to the maximum (nice -20) */
	setpriority(PRIO_PROCESS, getpid(), -20);

	/* Command line overrides */
	parse_args(argc, argv);

	/* Enable kernel calculation of rq depth */
	write_file_uint32(RQ_POLL_MS, poll_ms);

	def_timer_fd = open(DEF_TIMER_MS, O_RDONLY);
	if (def_timer_fd < 0) {
		msg("Unable to open deferrable timer file\n");
		return -1;
	}

	pthread_create(&mp_decision, NULL, do_mp_decision, NULL);
	pthread_create(&hotplug_thread, NULL, do_hotplug, (void *)cpu);

	pthread_join(hotplug_thread, NULL);
	pthread_join(mp_decision, NULL);

	close(def_timer_fd);

	mpdecision_server_exit();

#ifdef MPCTL_SERVER
	mpctl_server_exit();
#endif
	return ret;
}
Example #5
static ssize_t __ref store_online_control(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
		case '0': // control via sysfs
			ret = cpu_down(cpu->dev.id);
			if (!ret)
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
			online_control_mode[cpu->dev.id] = ONL_CONT_MODE_SYSFS;
			break;
			
		case '1': // forced online
			ret = cpu_up(cpu->dev.id);
			if (!ret)
				kobject_uevent(&dev->kobj, KOBJ_ONLINE);
			online_control_mode[cpu->dev.id] = ONL_CONT_MODE_ONLINE;
			break;
			
		case '2': // forced offline
			ret = cpu_down(cpu->dev.id);
			if (!ret)
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
			online_control_mode[cpu->dev.id] = ONL_CONT_MODE_OFFLINE;
			break;
			
		case '3': // only allowed for CPU core 4 - force offline but lock it to core 3
			if (cpu->dev.id == ID_CPU_CORE_4)
			{
				ret = cpu_down(cpu->dev.id);
				if (!ret)
					kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
				online_control_mode[cpu->dev.id] = ONL_CONT_MODE_LOCK4_3;
			}
			else
				ret = -EINVAL;
			break;
			
		default:
			ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
Example #6
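/*
 * Sampling worker: when active and not suspended, online or offline CPU1
 * based on the computed run-queue statistics, then reschedule itself at a
 * rate scaled by the number of online CPUs (slower while inactive).
 */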
static void __cpuinit intelli_plug_work_fn(struct work_struct *work)
{
	unsigned int nr_run_stat, sampling_rate, online_cpus;
	unsigned int min_sampling_rate_jiffies = 0;

	if (intelli_plug_active == 1 && !excluded_governor) {
		online_cpus = num_online_cpus();
		nr_run_stat = calculate_thread_stats();

		if (!suspended) {
			switch (nr_run_stat) {
				case 1:
					if (persist_count > 0)
						persist_count--;
					else if (online_cpus == 2)
						cpu_down(1);
#ifdef DEBUG_INTELLI_PLUG
					pr_info("case 1: %u\n", persist_count);
#endif
					break;
				case 2:
					persist_count = DUAL_CORE_PERSISTENCE / CPU_DOWN_FACTOR;
					if (online_cpus == 1)
						cpu_up(1);
#ifdef DEBUG_INTELLI_PLUG
					pr_info("case 2: %u\n", persist_count);
#endif
					break;
				default:
					pr_err("Run Stat Error: Bad value %u\n", nr_run_stat);
					break;
			}
		}

		/* increase the sampling rate dynamically based on online cpus */
		min_sampling_rate_jiffies = msecs_to_jiffies(min_sampling_rate);
		sampling_rate = min_sampling_rate_jiffies * online_cpus;
	} else {
		/* increase the sampling rate for screen-off */
		sampling_rate = msecs_to_jiffies(min_sampling_rate) << 3;
#ifdef DEBUG_INTELLI_PLUG
		pr_info("intelli_plug is suspended!\n");
#endif
	}

#ifdef DEBUG_INTELLI_PLUG
	pr_info("sampling_rate is: %u\n", jiffies_to_msecs(sampling_rate));
#endif
	schedule_delayed_work_on(0, &intelli_plug_work, sampling_rate);
}
Example #7
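/* sysfs store: update the PM QoS minimum-online-CPUs request and online CPU1 if too few CPUs are up. */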
static ssize_t __ref store_cpu_online_min(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	int ret = 0;
	ret = set_pmqos_data(cpu_online_min_qos_array, PM_QOS_CPU_ONLINE_MIN, buf);
	if (ret)
		return ret;

	if (num_online_cpus() < pm_qos_request(PM_QOS_CPU_ONLINE_MIN))
		cpu_up(1);

	return count;
}
Example #8
static int __ref exynos_dm_hotplug_notifier(struct notifier_block *notifier,
					unsigned long pm_event, void *v)
{
	int i, j;

	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		mutex_lock(&thread_lock);
		in_suspend_prepared = true;

		if (!dynamic_hotplug(CMD_NORMAL))
			prev_cmd = CMD_NORMAL;

		exynos_dm_hotplug_disable();
		if (dm_hotplug_task) {
			kthread_stop(dm_hotplug_task);
			dm_hotplug_task = NULL;
		}

		/* Online CPUs 4-7 first, then CPUs 3, 2, 1 (j = 11 - i for i >= 8); CPU0 is left as-is */
		for (i = 4; i < 11; i++) {
			j = i;
			if (j >= 8)
				j = 11 - j;
			if (!cpu_online(j))
				cpu_up(j);
		}

		mutex_unlock(&thread_lock);
		break;

	case PM_POST_SUSPEND:
		mutex_lock(&thread_lock);
		exynos_dm_hotplug_enable();

		dm_hotplug_task =
			kthread_create(on_run, NULL, "thread_hotplug");
		if (IS_ERR(dm_hotplug_task)) {
			mutex_unlock(&thread_lock);
			pr_err("Failed to create thread_hotplug task\n");
			return -EINVAL;
		}

		in_suspend_prepared = false;

		wake_up_process(dm_hotplug_task);
		mutex_unlock(&thread_lock);
		break;
	}

	return NOTIFY_OK;
}
Example #9
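/* Online every tracked CPU from index suspend_cpu_num up to core_limit. */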
static inline void cpus_online_all(void)
{
    unsigned int cpu, j;

    for (j = suspend_cpu_num; j < core_limit; j++) {
        cpu = cpuidx[j];
        if (cpu_is_offline(cpu))
            cpu_up(cpu);
    }

    pr_info("%s: all cpus were onlined\n", V4TKPLUG);
    if (DEBUG)
        print_cpus_all();
Example #10
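/* Resume path: bring each CPU recorded in frozen_cpus back online, panicking if any fails. */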
void enable_nonboot_cpus(void)
{
	int cpu, error;

	printk("Thawing cpus ...\n");
	for_each_cpu_mask(cpu, frozen_cpus) {
		error = cpu_up(cpu);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk("Error taking cpu %d up: %d\n", cpu, error);
		panic("Not enough cpus");
	}
}
Example #11
static void cpufreq_replug_thread(struct work_struct *cpufreq_replug_work)
{
	int cpu;

	/* For UX500 platform, PRCMU need to update CPU1 policy */
	msleep(3000);

	for_each_cpu_not(cpu, cpu_online_mask) {
		if (cpu == 0)
			continue;
		cpu_up(cpu);
	}
}
Example #12
static inline void online_all_fn(struct work_struct *work)
{
	unsigned int cpu;

	for_each_cpu_not_adj(cpu, cpu_online_mask) {
		if (cpu == 0)
			continue;
		cpu_up(cpu);
#if DEBUG
		pr_info("CPU%u up.\n", cpu);
		pr_info("CPU(s) running: %u\n", num_online_cpus());
#endif
	}
}
Example #13
static void __ref bcl_handle_hotplug(struct work_struct *work)
{
	int ret = 0, _cpu = 0;

	mutex_lock(&bcl_hotplug_mutex);
	if (cpumask_empty(bcl_cpu_online_mask))
		bcl_update_online_mask();
#ifndef CONFIG_LGE_PM
	if (bcl_soc_state == BCL_LOW_THRESHOLD
		|| bcl_vph_state == BCL_LOW_THRESHOLD)
#else
	if (bcl_vph_state == BCL_LOW_THRESHOLD)
#endif
		bcl_hotplug_request = bcl_soc_hotplug_mask;
	else if (bcl_ibat_state == BCL_HIGH_THRESHOLD)
		bcl_hotplug_request = bcl_hotplug_mask;
	else
		bcl_hotplug_request = 0;

	for_each_possible_cpu(_cpu) {
		if ((!(bcl_hotplug_mask & BIT(_cpu))
			&& !(bcl_soc_hotplug_mask & BIT(_cpu)))
			|| !(cpumask_test_cpu(_cpu, bcl_cpu_online_mask)))
			continue;

		if (bcl_hotplug_request & BIT(_cpu)) {
			if (!cpu_online(_cpu))
				continue;
			ret = cpu_down(_cpu);
			if (ret)
				pr_err("Error %d offlining core %d\n",
					ret, _cpu);
			else
				pr_info("Set Offline CPU:%d\n", _cpu);
		} else {
			if (cpu_online(_cpu))
				continue;
			ret = cpu_up(_cpu);
			if (ret)
				pr_err("Error %d onlining core %d\n",
					ret, _cpu);
			else
				pr_info("Allow Online CPU:%d\n", _cpu);
		}
	}

	mutex_unlock(&bcl_hotplug_mutex);
}
Example #14
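/*
 * Thermal core control: above the limit temperature, offline one masked
 * core per invocation; once the temperature falls below the hysteresis
 * point, re-online one previously offlined core per invocation.
 */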
static void __ref do_core_control(long temp)
{
	int i = 0;
	int ret = 0;

	if (!core_control_enabled)
		return;

	mutex_lock(&core_control_mutex);
	if (msm_thermal_info.core_control_mask &&
		temp >= msm_thermal_info.core_limit_temp_degC) {
		for (i = num_possible_cpus(); i > 0; i--) {
			if (!(msm_thermal_info.core_control_mask & BIT(i)))
				continue;
			if (cpus_offlined & BIT(i) && !cpu_online(i))
				continue;
			pr_info("%s: Set Offline: CPU%d Temp: %ld\n",
					KBUILD_MODNAME, i, temp);
			ret = cpu_down(i);
			if (ret)
				pr_err("%s: Error %d offline core %d\n",
					KBUILD_MODNAME, ret, i);
			cpus_offlined |= BIT(i);
			break;
		}
	} else if (msm_thermal_info.core_control_mask && cpus_offlined &&
		temp <= (msm_thermal_info.core_limit_temp_degC -
			msm_thermal_info.core_temp_hysteresis_degC)) {
		for (i = 0; i < num_possible_cpus(); i++) {
			if (!(cpus_offlined & BIT(i)))
				continue;
			cpus_offlined &= ~BIT(i);
			pr_info("%s: Allow Online CPU%d Temp: %ld\n",
					KBUILD_MODNAME, i, temp);
			/*
			 * If this core is already online, then bring up the
			 * next offlined core.
			 */
			if (cpu_online(i))
				continue;
			ret = cpu_up(i);
			if (ret)
				pr_err("%s: Error %d online core %d\n",
						KBUILD_MODNAME, ret, i);
			break;
		}
	}
	mutex_unlock(&core_control_mutex);
}
Example #15
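/* Online or offline one CPU on request, respecting the configured min/max limits and refusing while the LP cluster is active. */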
static int update_core_config(unsigned int cpunumber, bool up)
{
	int ret = -EINVAL;
	unsigned int nr_cpus = num_online_cpus();
	int max_cpus = tegra_cpq_max_cpus();
	int min_cpus = tegra_cpq_min_cpus();

#if CPUQUIET_DEBUG_VERBOSE
	pr_info(CPUQUIET_TAG "%s\n", __func__);
#endif

	if (cpq_state == TEGRA_CPQ_DISABLED || cpunumber >= nr_cpu_ids)
		return ret;

	/*
	 * Sync with tegra_cpuquiet_work_func: if we are currently switching
	 * to LP mode and an up request comes in, we could end up with more
	 * than one core up while the governor is stopped and we are not in
	 * LP mode.
	 */
	if (!mutex_trylock(&hotplug_lock)) {
#if CPUQUIET_DEBUG_VERBOSE
		pr_info(CPUQUIET_TAG "%s failed to get hotplug_lock\n", __func__);
#endif
		return -EBUSY;
	}

	if (up) {
		if (is_lp_cluster()) {
			ret = -EBUSY;
		} else {
			if (nr_cpus < max_cpus) {
				show_status("UP", 0, cpunumber);
				ret = cpu_up(cpunumber);
			}
		}
	} else {
		if (is_lp_cluster()) {
			ret = -EBUSY;
		} else {
			if (nr_cpus > 1 && nr_cpus > min_cpus) {
				show_status("DOWN", 0, cpunumber);
				ret = cpu_down(cpunumber);
			}
		}
	}

	mutex_unlock(&hotplug_lock);

	return ret;
}
Example #16
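/* Online the first offline secondary CPU found, then reschedule the decision work. */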
static void hotplug_online_single_work_fn(struct work_struct *work)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                if (cpu) {
                        if (!cpu_online(cpu)) {
                                cpu_up(cpu);
                                pr_info("auto_hotplug: CPU%d up.\n", cpu);
                                break;
                        }
                }
        }
        schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
}
Example #17
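/* Online every possible CPU, then hold off offlining decisions for two seconds. */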
static void hotplug_online_all_work_fn(struct work_struct *work)
{
        int cpu;
        for_each_possible_cpu(cpu) {
                if (likely(!cpu_online(cpu))) {
                        cpu_up(cpu);
                        pr_info("auto_hotplug: CPU%d up.\n", cpu);
                }
        }
        /*
         * Pause for 2 seconds before even considering offlining a CPU
         */
        schedule_delayed_work(&hotplug_unpause_work, HZ * 2);
        schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
}
Example #18
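/* Resume handler: clear the suspended flag, online CPU1 immediately for a faster wake-up, and restart the sampling work. */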
static void __cpuinit sleepy_plug_resume(struct power_suspend *handler)
{
	mutex_lock(&sleepy_plug_mutex);
	/* keep cores awake long enough for faster wake up */
	suspended = false;
	mutex_unlock(&sleepy_plug_mutex);

	/* wake up everyone */
	num_of_active_cores = 2;

	cpu_up(1);

	queue_delayed_work_on(0, sleepy_plug_wq, &sleepy_plug_work,
		msecs_to_jiffies(10));
}
Example #19
static int enable_fast_hotplug(const char *val, const struct kernel_param *kp)
{
	int cpu;
	int ret = param_set_bool(val, kp);
	if (!fast_hotplug_enabled) {
		pr_info(HOTPLUG_INFO_TAG "Fast hotplug disabled\n");
		mutex_lock(&mutex);
		flush_workqueue(hotplug_wq);
		for_each_possible_cpu(cpu) {
			if (cpu == 0)
				continue;
			cpu_up(cpu);
		}
		}
		is_sleeping = true;
		mutex_unlock(&mutex);
	} else {
Example #20
/* Iterate through possible CPUs and bring online the first offline found */
static inline void up_one(void)
{
	unsigned int cpu;

	/* All CPUs are online, return */
	if (num_online_cpus() == max_online)
		goto out;

	cpu = cpumask_next_zero(0, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		cpu_up(cpu);
out:
	down_timer = 0;
	up_timer = 0;
}
Example #21
/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
{
	unsigned int cpu;

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}
Example #22
/*
 *	Start up the first thread on a CPU.
 *	First thread is specified for the master CPU.
 */
void
cpu_launch_first_thread(
	register thread_t	th)
{
	register int	mycpu;

	mycpu = cpu_number();

#if	MACH_ASSERT
	if (watchacts & WA_BOOT)
		printf("cpu_launch_first_thread(%x) cpu=%d\n", th, mycpu);
#endif	/* MACH_ASSERT */

	cpu_up(mycpu);

	start_timer(&kernel_timer[mycpu]);

	/*
	 * Block all interrupts for choose_thread.
	 */
	(void) splhigh();

	if (th == THREAD_NULL) {
	    th = cpu_to_processor(mycpu)->idle_thread;
		if (th == THREAD_NULL || !rem_runq(th))
		    panic("cpu_launch_first_thread");
	}

	rtclock_reset();		/* start realtime clock ticking */
	PMAP_ACTIVATE_KERNEL(mycpu);

	thread_machine_set_current(th);
	thread_lock(th);
	th->state &= ~TH_UNINT;
	thread_unlock(th);
	timer_switch(&th->system_timer);

	PMAP_ACTIVATE_USER(th->top_act, mycpu);

	assert(mycpu == cpu_number());

        /* The following is necessary to keep things balanced */
        disable_preemption();

	load_context(th);
	/*NOTREACHED*/
}
Example #23
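/* Performance boost: online every CPU while the boost is on; on release, fall back to the normal sampling work. */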
void __ref intelli_plug_perf_boost(bool on)
{
	unsigned int cpu;

	if (intelli_plug_active) {
		flush_workqueue(intelliplug_wq);
		if (on) {
			for_each_possible_cpu(cpu) {
				if (!cpu_online(cpu))
					cpu_up(cpu);
			}
		} else {
			queue_delayed_work_on(0, intelliplug_wq,
				&intelli_plug_work,
				msecs_to_jiffies(sampling_time));
		}
	}
}
Example #24
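/* Resume path: online each CPU recorded in frozen_cpus, then clear the mask. */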
void enable_nonboot_cpus(void)
{
    int cpu, error;

    printk("Enabling non-boot CPUs  ...\n");

    for_each_cpu_mask ( cpu, frozen_cpus )
    {
        if ( (error = cpu_up(cpu)) )
        {
            BUG_ON(error == -EBUSY);
            printk("Error taking CPU%d up: %d\n", cpu, error);
        }
    }

    cpus_clear(frozen_cpus);
}
Example #25
static void __ref enable_little_cluster(void)
{
	unsigned int cpu;
	unsigned int num_up = 0;

	for_each_present_cpu(cpu) {
		if (is_little_cpu(cpu) && !cpu_online(cpu)) {
			cpu_up(cpu);
			num_up++;
		}
	}

	if (!little_cluster_enabled)
		pr_info("cluster_plug: %d little cpus enabled\n", num_up);

	little_cluster_enabled = true;
}
Example #26
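/* Late-resume handler: reset the persistence counter, restart run-queue sampling, online all secondary CPUs, and reschedule the decision work. */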
static void __cpuinit intelli_plug_late_resume(struct early_suspend *handler)
{
	int i;

	mutex_lock(&intelli_plug_mutex);
	/* keep cores awake long enough for faster wake up */
	persist_count = DUAL_CORE_PERSISTENCE;
	suspended = false;
	mutex_unlock(&intelli_plug_mutex);

	start_rq_work();

	for (i = 1; i < CPUS_AVAILABLE; i++) {
		cpu_up(i);
	}
	schedule_delayed_work_on(0, &intelli_plug_work, msecs_to_jiffies(10));
}
Example #27
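/* Bring one offline secondary CPU online (first found) and reschedule the decision work. */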
static void hotplug_online_single_work_fn(struct work_struct *work)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu) {
			if (!cpu_online(cpu)) {
				cpu_up(cpu);
				if (debug)
					pr_info("auto_hotplug: CPU%d up.\n", cpu);
				break;
			}
		}
	}
	schedule_delayed_work_on(0, &hotplug_decision_work, min_sampling_rate);
}
Example #28
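/* Battery current limiting: offline the masked CPUs while battery voltage is below the low threshold; restore them once it recovers. */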
static void __ref bcl_handle_hotplug(void)
{
	int ret = 0, _cpu = 0;

	mutex_lock(&bcl_hotplug_mutex);

	if (bcl_vph_state == BCL_LOW_THRESHOLD)
		bcl_hotplug_request = bcl_hotplug_mask;
	else
		bcl_hotplug_request = 0;

	if (bcl_hotplug_request == prev_hotplug_request)
		goto handle_hotplug_exit;

	for_each_possible_cpu(_cpu) {
		if (!(bcl_hotplug_mask & BIT(_cpu)))
			continue;

		if (bcl_hotplug_request & BIT(_cpu)) {
			if (!cpu_online(_cpu))
				continue;
			ret = cpu_down(_cpu);
			if (ret)
				pr_err("Error %d offlining core %d\n",
					ret, _cpu);
			else
				pr_info("Set Offline CPU:%d\n", _cpu);
		} else {
			if (cpu_online(_cpu))
				continue;
			ret = cpu_up(_cpu);
			if (ret)
				pr_err("Error %d onlining core %d\n",
					ret, _cpu);
			else
				pr_info("Allow Online CPU:%d\n", _cpu);
		}
	}

	prev_hotplug_request = bcl_hotplug_request;
handle_hotplug_exit:
	mutex_unlock(&bcl_hotplug_mutex);
}
Example #29
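/* Apply the requested cpu_core_state[] settings to CPUs 1-3, forcing G mode before onlining while on the LP cluster. */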
static void cpu_core_state_workfunc(struct work_struct *work)
{
	int i = 0;
	int cpu = 0;

	for (i = 0; i < 3; i++) {
		cpu = i + 1;
		if (cpu_core_state[i] == 0 && cpu_online(cpu)){
			show_status("DOWN", 0, cpu);
			cpu_down(cpu);
		} else if (cpu_core_state[i] == 1 && !cpu_online(cpu)){
			if (is_lp_cluster())
				tegra_cpuquiet_force_gmode();

			show_status("UP", 0, cpu);
			cpu_up(cpu);
		}
	}
}
Example #30
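/* Enforce the min/max online-CPU constraints by onlining or offlining one core at a time. */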
static void min_max_constraints_workfunc(struct work_struct *work)
{
	int count = -1;
	bool up = false;
	unsigned int cpu;

	int nr_cpus = num_online_cpus();
	int max_cpus = tegra_cpq_max_cpus();
	int min_cpus = tegra_cpq_min_cpus();

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return;

	if (is_lp_cluster())
		return;

	if (nr_cpus < min_cpus) {
		up = true;
		count = min_cpus - nr_cpus;
	} else if (nr_cpus > max_cpus && max_cpus >= min_cpus) {
		count = nr_cpus - max_cpus;
	}

	for (; count > 0; count--) {
		if (up) {
			cpu = best_core_to_turn_up();
			if (cpu < nr_cpu_ids) {
				show_status("UP", 0, cpu);
				cpu_up(cpu);
			} else
				break;
		} else {
			cpu = cpumask_next(0, cpu_online_mask);
			if (cpu < nr_cpu_ids) {
				show_status("DOWN", 0, cpu);
				cpu_down(cpu);
			} else
				break;
		}
	}
}