Example #1
File: cpuidle.c  Project: 710leo/LVS
/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!sys_dev)
		return -EINVAL;
	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	poll_idle_init(dev);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(sys_dev))) {
		module_put(cpuidle_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}
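The kernel-doc above requires cpuidle_lock to be held by the caller. For context, here is a minimal sketch of the public wrapper that supplies that locking, modeled on the kernel's cpuidle_register_device() of this era (treat the details as illustrative, not authoritative):

#include <linux/mutex.h>
#include <linux/cpuidle.h>

/* Sketch: the locking wrapper around the internal helper above. */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);		/* satisfies the helper's precondition */

	ret = __cpuidle_register_device(dev);
	if (ret) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);		/* enable now that registration succeeded */
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);
	return 0;
}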
Example #2
/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!dev)
		return -EINVAL;
	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	ret = cpuidle_add_sysfs(cpu_dev);
	if (ret)
		goto err_sysfs;

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		goto err_coupled;

	dev->registered = 1;
	return 0;

err_coupled:
	cpuidle_remove_sysfs(cpu_dev);
	wait_for_completion(&dev->kobj_unregister);
err_sysfs:
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(cpuidle_driver->owner);
	return ret;
}
Example #3
static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev =
				per_cpu(cpuidle_devices, hotcpu);

	if (dev && cpuidle_get_driver()) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_enable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_disable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		default:
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}
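The callback above only reacts to hotplug events once it is hooked into the CPU notifier chain. A hedged sketch of that registration step (the notifier_block name and init function are hypothetical; register_cpu_notifier() was the pre-cpuhp-state API for this):

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* Hypothetical wiring of the callback above into CPU hotplug notifications. */
static struct notifier_block powernv_cpuidle_cpu_nb = {
	.notifier_call = powernv_cpuidle_add_cpu_notifier,
};

static int __init powernv_cpuidle_notifier_init(void)
{
	register_cpu_notifier(&powernv_cpuidle_cpu_nb);
	return 0;
}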
Example #4
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);

	if (action == CPU_ONLINE && pr) {
		if (pr->flags.need_hotplug_init) {
			struct cpuidle_driver *idle_driver =
				cpuidle_get_driver();

			printk(KERN_INFO "Will online and init hotplugged "
			       "CPU: %d\n", pr->id);
			WARN(acpi_processor_start(pr), "Failed to start CPU:"
				" %d\n", pr->id);
			pr->flags.need_hotplug_init = 0;
			if (idle_driver && !strcmp(idle_driver->name,
						   "intel_idle")) {
				intel_idle_cpu_init(pr->id);
			}
		} else {
			acpi_processor_ppc_has_changed(pr, 0);
			acpi_processor_cst_has_changed(pr);
			acpi_processor_reevaluate_tstate(pr, action);
			acpi_processor_tstate_has_changed(pr);
		}
	}
	if (action == CPU_DEAD && pr) {
		acpi_processor_reevaluate_tstate(pr, action);
	}
	return NOTIFY_OK;
}
Example #5
/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or no driver
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int i, dead_state = -1;
	int power_usage = -1;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (s->power_usage < power_usage && s->enter_dead) {
			power_usage = s->power_usage;
			dead_state = i;
		}
	}

	if (dead_state != -1)
		return drv->states[dead_state].enter_dead(dev, dead_state);

	return -ENODEV;
}
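cpuidle_play_dead() only returns when it could not park the CPU (no driver, or no state with an enter_dead hook), so the arch off-lining path needs a fallback. A sketch modeled on the x86 play_dead path of this era (function names illustrative):

/* Sketch: prefer a cpuidle enter_dead state, fall back to a plain halt loop. */
static void arch_play_dead_sketch(void)
{
	play_dead_common();		/* generic "this CPU is going away" housekeeping */

	if (cpuidle_play_dead())	/* non-zero: no driver or no enter_dead state */
		hlt_play_dead();	/* hypothetical halt-loop fallback */
}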
Example #6
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct device *cpu_dev;
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!dev)
		return;
	cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	if (!cpu_dev)
		return;
	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(cpu_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}
Example #7
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_state *target_state;
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	target_state = &drv->states[next_state];

	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);

	entered_state = target_state->enter(dev, drv, next_state);

	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
				(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	}

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
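The non-zero error returns matter to the caller: the arch idle loop treats them as "cpuidle unusable right now" and falls back to its default idle routine. A minimal sketch of such a call site (the fallback name is hypothetical):

/* Sketch: idle-loop call site honouring the -ENODEV/-EBUSY returns above. */
static void idle_loop_body_sketch(void)
{
	if (cpuidle_idle_call())
		default_idle_sketch();	/* hypothetical arch default idle */
}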
Example #8
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (need_resched()) {
		dev->last_residency = 0;
		local_irq_enable();
		entered_state = next_state;
		goto exit;
	}

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	trace_power_end_rcuidle(dev->cpu);

exit:
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
Example #9
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME:  Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		cpuidle_pause_and_lock();
		/* Protect against cpu-hotplug */
		get_online_cpus();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_cx(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		put_online_cpus();
		cpuidle_resume_and_unlock();
	}

	return 0;
}
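acpi_processor_cst_has_changed() is normally triggered by an ACPI notification on the processor object. A hedged sketch of that dispatch, shaped like the processor driver's notify handler of this era (details illustrative):

/* Sketch: _CST-change notification dispatch into the function above. */
static void acpi_processor_notify_sketch(struct acpi_device *device, u32 event)
{
	struct acpi_processor *pr = acpi_driver_data(device);

	switch (event) {
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);	/* re-read _CST, rebuild C-states */
		break;
	default:
		break;
	}
}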
Example #10
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	poll_idle_init(cpuidle_get_driver());

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}
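The kernel-doc above says external callers must bracket this with cpuidle_pause_and_lock()/cpuidle_resume_and_unlock(); examples #3 and #12 follow that rule. A minimal sketch of the external-caller pattern:

#include <linux/cpuidle.h>

/* Sketch: refreshing one CPU's idle states from outside the cpuidle core. */
static void refresh_cpuidle_device_sketch(struct cpuidle_device *dev)
{
	cpuidle_pause_and_lock();	/* stop the idle path and take cpuidle_lock */
	cpuidle_disable_device(dev);
	/* ... update this CPU's state table here ... */
	cpuidle_enable_device(dev);
	cpuidle_resume_and_unlock();
}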
Example #11
static int __init acpi_processor_init(void)
{
	int result = 0;

	if (acpi_disabled)
		return 0;

	memset(&errata, 0, sizeof(errata));

#ifdef CONFIG_SMP
	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt)))
		madt = NULL;
#endif
#ifdef CONFIG_ACPI_PROCFS
	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
	if (!acpi_processor_dir)
		return -ENOMEM;
#endif
	/*
	 * Check whether the system is listed in the DMI table. If it is,
	 * OSPM should not use mwait for CPU-states.
	 */
	dmi_check_system(processor_idle_dmi_table);

	if (!cpuidle_register_driver(&acpi_idle_driver)) {
		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
		       acpi_idle_driver.name);
	} else {
		printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
		       cpuidle_get_driver()->name);
	}

	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		goto out_cpuidle;

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	acpi_processor_throttling_init();

	return 0;

out_cpuidle:
	cpuidle_unregister_driver(&acpi_idle_driver);

#ifdef CONFIG_ACPI_PROCFS
	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
#endif

	return result;
}
Example #12
int pseries_notify_cpuidle_add_cpu(int cpu)
{
	struct cpuidle_device *dev =
			per_cpu_ptr(pseries_cpuidle_devices, cpu);
	if (dev && cpuidle_get_driver()) {
		cpuidle_disable_device(dev);
		cpuidle_enable_device(dev);
	}
	return 0;
}
Example #13
/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
    int ret;
    struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
    struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

    if (!sys_dev)
        return -EINVAL;
    if (!try_module_get(cpuidle_driver->owner))
        return -EINVAL;

    init_completion(&dev->kobj_unregister);

    /*
     * cpuidle driver should set the dev->power_specified bit
     * before registering the device if the driver provides
     * power_usage numbers.
     *
     * For those devices whose ->power_specified is not set,
     * we fill in power_usage with decreasing values as the
     * cpuidle code has an implicit assumption that state Cn
     * uses less power than C(n-1).
     *
     * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
     * a power value of -1.  So we use -2, -3, etc, for other
     * c-states.
     */
    if (!dev->power_specified) {
        int i;
        for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
            dev->states[i].power_usage = -1 - i;
    }

    per_cpu(cpuidle_devices, dev->cpu) = dev;
    list_add(&dev->device_list, &cpuidle_detected_devices);
    ret = cpuidle_add_sysfs(sys_dev);
    if (ret)
        goto err_sysfs;

    ret = cpuidle_coupled_register_device(dev);
    if (ret)
        goto err_coupled;

    dev->registered = 1;
    return 0;

err_coupled:
    cpuidle_remove_sysfs(sys_dev);
    wait_for_completion(&dev->kobj_unregister);
err_sysfs:
    list_del(&dev->device_list);
    per_cpu(cpuidle_devices, dev->cpu) = NULL;
    module_put(cpuidle_driver->owner);
    return ret;
}
Example #14
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		dev->last_residency = 0;
		/* give the governor an opportunity to reflect on the outcome */
		if (cpuidle_curr_governor->reflect)
			cpuidle_curr_governor->reflect(dev, next_state);
		local_irq_enable();
		return 0;
	}

	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
				   &dev->cpu);

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
				   &dev->cpu);

	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
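CPUIDLE_FLAG_TIMER_STOP is how a driver tells this loop that the CPU's local timer dies in a given state, so the core brackets entry/exit with the clockevents broadcast notifications shown above. A hedged sketch of declaring such a state (driver name, latencies and enter callbacks are hypothetical):

#include <linux/module.h>
#include <linux/cpuidle.h>

static int example_enter_wfi(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index);
static int example_enter_deep(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index);

static struct cpuidle_driver example_idle_driver = {
	.name  = "example_idle",
	.owner = THIS_MODULE,
	.states = {
		[0] = {
			.name   = "WFI",
			.desc   = "shallow idle, local timer keeps ticking",
			.flags  = CPUIDLE_FLAG_TIME_VALID,
			.exit_latency     = 1,
			.target_residency = 1,
			.enter  = example_enter_wfi,
		},
		[1] = {
			.name   = "DEEP",
			.desc   = "deep idle, local timer stops",
			.flags  = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP,
			.exit_latency     = 300,
			.target_residency = 1000,
			.enter  = example_enter_deep,
		},
	},
	.state_count = 2,
};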
Example #15
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	
	hrtimer_peek_ahead_timers();
#endif

	
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	trace_power_end_rcuidle(dev->cpu);
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		
		dev->states_usage[entered_state].time +=
				(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
Example #16
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		dev->last_residency = 0;
		/* give the governor an opportunity to reflect on the outcome */
		if (cpuidle_curr_governor->reflect)
			cpuidle_curr_governor->reflect(dev, next_state);
		local_irq_enable();
		return 0;
	}

	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (need_resched()) {
		dev->last_residency = 0;
		local_irq_enable();
		entered_state = next_state;
		goto exit;
	}

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

exit:
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
Example #17
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;


	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		cpuidle_pause_and_lock();
		
		get_online_cpus();

		
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			cpuidle_disable_device(&_pr->power.dev);
		}

		
		acpi_processor_setup_cpuidle_states(pr);

		
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				acpi_processor_setup_cpuidle_cx(_pr);
				cpuidle_enable_device(&_pr->power.dev);
			}
		}
		put_online_cpus();
		cpuidle_resume_and_unlock();
	}

	return 0;
}
Example #18
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev || !dev->enabled)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}
Example #19
void update_smt_snooze_delay(int cpu, int residency)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (cpuidle_state_table != dedicated_states)
		return;

	if (residency < 0) {
		/* Disable the Nap state on that cpu */
		if (dev)
			dev->states_usage[1].disable = 1;
	} else
		if (drv)
			drv->states[1].target_residency = residency;
}
Example #20
/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or no driver
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}
Example #21
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}
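This is the teardown counterpart to __cpuidle_register_device() in examples #2 and #13. A hedged sketch of a driver exit path pairing the per-CPU and driver unregistration (all identifiers hypothetical, assuming the devices came from alloc_percpu()):

#include <linux/percpu.h>
#include <linux/cpuidle.h>

static struct cpuidle_driver example_idle_driver;		/* registered at init time */
static struct cpuidle_device __percpu *example_idle_devices;	/* from alloc_percpu() */

static void __exit example_idle_exit(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpuidle_unregister_device(per_cpu_ptr(example_idle_devices, cpu));

	cpuidle_unregister_driver(&example_idle_driver);
	free_percpu(example_idle_devices);
}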
Example #22
static void cpuidle_general_output(void)
{
	char *tmp;

	tmp = cpuidle_get_driver();
	if (!tmp) {
		printf(_("Could not determine cpuidle driver\n"));
		return;
	}

	printf(_("CPUidle driver: %s\n"), tmp);
	free(tmp);

	tmp = cpuidle_get_governor();
	if (!tmp) {
		printf(_("Could not determine cpuidle governor\n"));
		return;
	}

	printf(_("CPUidle governor: %s\n"), tmp);
	free(tmp);
}
Example #23
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);

	if (action == CPU_ONLINE && pr) {
		/* CPU got physically hotplugged and onlined the first time:
		 * Initialize missing things
		 */
		if (pr->flags.need_hotplug_init) {
			struct cpuidle_driver *idle_driver =
				cpuidle_get_driver();

			printk(KERN_INFO "Will online and init hotplugged "
			       "CPU: %d\n", pr->id);
			WARN(acpi_processor_start(pr), "Failed to start CPU:"
				" %d\n", pr->id);
			pr->flags.need_hotplug_init = 0;
			if (idle_driver && !strcmp(idle_driver->name,
						   "intel_idle")) {
				intel_idle_cpu_init(pr->id);
			}
		/* Normal CPU soft online event */
		} else {
			acpi_processor_ppc_has_changed(pr, 0);
			acpi_processor_hotplug(pr);
			acpi_processor_reevaluate_tstate(pr, action);
			acpi_processor_tstate_has_changed(pr);
		}
	}
	if (action == CPU_DEAD && pr) {
		/* invalidate the flag.throttling after one CPU is offline */
		acpi_processor_reevaluate_tstate(pr, action);
	}
	return NOTIFY_OK;
}
Example #24
static int __init acpi_processor_init(void)
{
	int result = 0;

	if (acpi_disabled)
		return 0;

	memset(&errata, 0, sizeof(errata));

	if (!cpuidle_register_driver(&acpi_idle_driver)) {
		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
			acpi_idle_driver.name);
	} else {
		printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
			cpuidle_get_driver()->name);
	}

	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		goto out_cpuidle;

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	acpi_processor_throttling_init();

	return 0;

out_cpuidle:
	cpuidle_unregister_driver(&acpi_idle_driver);

	return result;
}
Example #25
static int __acpi_processor_start(struct acpi_device *device)
{
	struct acpi_processor *pr = acpi_driver_data(device);
	acpi_status status;
	int result = 0;

	if (!pr)
		return -ENODEV;

	if (pr->flags.need_hotplug_init)
		return 0;

#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr, 0);
	acpi_processor_load_module(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);

	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
		acpi_processor_power_init(pr);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						   &processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto err_power_exit;
	}

	dev_dbg(&device->dev, "registered as cooling_device%d\n",
		pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result) {
		dev_err(&device->dev,
			"Failed to create sysfs link 'thermal_cooling'\n");
		goto err_thermal_unregister;
	}
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result) {
		dev_err(&pr->cdev->device,
			"Failed to create sysfs link 'device'\n");
		goto err_remove_sysfs_thermal;
	}

	status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
					     acpi_processor_notify, device);
	if (ACPI_SUCCESS(status))
		return 0;

	sysfs_remove_link(&pr->cdev->device.kobj, "device");
 err_remove_sysfs_thermal:
	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
 err_thermal_unregister:
	thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
	acpi_processor_power_exit(pr);
	return result;
}
Example #26
static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;
	int result = 0;
	struct sys_device *sysdev;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		kfree(pr);
		return -ENOMEM;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

#ifdef CONFIG_SMP
	if (pr->id >= setup_max_cpus && pr->id != 0)
		return 0;
#endif

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
			"for the processor\n");
		result = -ENODEV;
		goto err_free_cpumask;
	}
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;

	sysdev = get_cpu_sysdev(pr->id);
	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
		result = -EFAULT;
		goto err_free_cpumask;
	}

#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr, 0);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);


	if (cpuidle_get_driver() == &acpi_idle_driver)
		acpi_processor_power_init(pr, device);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						&processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto err_power_exit;
	}

	dev_dbg(&device->dev, "registered as cooling_device%d\n",
		 pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result) {
		printk(KERN_ERR PREFIX "Create sysfs link\n");
		goto err_thermal_unregister;
	}
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result) {
		printk(KERN_ERR PREFIX "Create sysfs link\n");
		goto err_remove_sysfs;
	}

	return 0;

err_remove_sysfs:
	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:
	thermal_cooling_device_unregister(pr->cdev);
err_power_exit:
	acpi_processor_power_exit(pr, device);
err_free_cpumask:
	free_cpumask_var(pr->throttling.shared_cpu_map);

	return result;
}
Example #27
void update_smt_snooze_delay(int snooze)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();
	if (drv)
		drv->states[0].target_residency = snooze;
}