Example No. 1
/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	ret = cpuidle_add_sysfs(cpu_dev);
	if (ret)
		goto err_sysfs;

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		goto err_coupled;

	dev->registered = 1;
	return 0;

err_coupled:
	cpuidle_remove_sysfs(cpu_dev);
	wait_for_completion(&dev->kobj_unregister);
err_sysfs:
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(cpuidle_driver->owner);
	return ret;
}
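The kernel-doc above says cpuidle_lock must be held by the caller. A minimal sketch of a conforming caller, modeled on cpuidle_register_device(); the enable step and error handling are assumptions:

/* Hedged sketch: take cpuidle_lock around the __register call, as the
 * comment above requires; modeled on cpuidle_register_device(). */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);	/* contract of __cpuidle_register_device() */
	ret = __cpuidle_register_device(dev);
	if (!ret)
		ret = cpuidle_enable_device(dev);	/* assumed enable step */
	mutex_unlock(&cpuidle_lock);

	return ret;
}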
Example No. 2
static void __init imx6q_opp_init(void)
{
	struct device_node *np;
	struct device *cpu_dev = get_cpu_device(0);

	if (!cpu_dev) {
		pr_warn("failed to get cpu0 device\n");
		return;
	}
	np = of_node_get(cpu_dev->of_node);
	if (!np) {
		pr_warn("failed to find cpu0 node\n");
		return;
	}

	if (of_init_opp_table(cpu_dev)) {
		pr_warn("failed to init OPP table\n");
		goto put_node;
	}

	imx6q_opp_check_speed_grading(cpu_dev);

put_node:
	of_node_put(np);
}
Example No. 3
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct device *cpu_dev;
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!dev)
		return;
	cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	if (!cpu_dev)
		return;
	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(cpu_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}
Example No. 4
static int omap_cpufreq_probe(struct platform_device *pdev)
{
	mpu_dev = get_cpu_device(0);
	if (!mpu_dev) {
		pr_warn("%s: unable to get the MPU device\n", __func__);
		return -EINVAL;
	}

	mpu_reg = regulator_get(mpu_dev, "vcc");
	if (IS_ERR(mpu_reg)) {
		pr_warn("%s: unable to get MPU regulator\n", __func__);
		mpu_reg = NULL;
	} else {
		/* 
		 * Ensure physical regulator is present.
		 * (e.g. could be dummy regulator.)
		 */
		if (regulator_get_voltage(mpu_reg) < 0) {
			pr_warn("%s: physical regulator not present for MPU\n",
				__func__);
			regulator_put(mpu_reg);
			mpu_reg = NULL;
		}
	}

	return cpufreq_register_driver(&omap_driver);
}
Example No. 5
/**
 * mips_cdmm_bus_discover() - Discover the devices on the CDMM bus.
 * @bus:	CDMM bus information, must already be set up.
 */
static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
{
	void __iomem *cdmm;
	u32 acsr;
	unsigned int drb, type, size, rev;
	struct mips_cdmm_device *dev;
	unsigned int cpu = smp_processor_id();
	int ret = 0;
	int id = 0;

	/* Skip the first block if it's reserved for more registers */
	drb = bus->drbs_reserved;
	cdmm = bus->regs;

	/* Discover devices */
	bus->discovered = true;
	pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
	for (; drb < bus->drbs; drb += size + 1) {
		acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
		type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
		size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
		rev  = (acsr & CDMM_ACSR_DEVREV)  >> CDMM_ACSR_DEVREV_SHIFT;

		if (!type)
			continue;

		pr_info("cdmm%u-%u: @%u (%#x..%#x), type 0x%02x, rev %u\n",
			cpu, id, drb, drb * CDMM_DRB_SIZE,
			(drb + size + 1) * CDMM_DRB_SIZE - 1,
			type, rev);

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			break;

		dev->cpu = cpu;
		dev->res.start = bus->phys + drb * CDMM_DRB_SIZE;
		dev->res.end = bus->phys +
				(drb + size + 1) * CDMM_DRB_SIZE - 1;
		dev->res.flags = IORESOURCE_MEM;
		dev->type = type;
		dev->rev = rev;
		dev->dev.parent = get_cpu_device(cpu);
		dev->dev.bus = &mips_cdmm_bustype;
		dev->dev.id = atomic_inc_return(&mips_cdmm_next_id);
		dev->dev.release = mips_cdmm_release;

		dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
		++id;
		ret = device_register(&dev->dev);
		if (ret) {
			put_device(&dev->dev);
			kfree(dev);
		}
	}
}
Example No. 6
static int intel_epb_offline(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpuhp_tasks_frozen)
		sysfs_unmerge_group(&cpu_dev->kobj, &intel_epb_attr_group);

	intel_epb_save();
	return 0;
}
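For context, a hedged sketch of the matching online callback, which presumably restores the saved EPB value and re-merges the sysfs group (modeled on arch/x86/kernel/cpu/intel_epb.c; intel_epb_restore() is assumed as the counterpart of intel_epb_save()):

static int intel_epb_online(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	intel_epb_restore();		/* assumed counterpart of intel_epb_save() */
	if (!cpuhp_tasks_frozen)
		sysfs_merge_group(&cpu_dev->kobj, &intel_epb_attr_group);

	return 0;
}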
Example No. 7
static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;

			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
								!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}
Example No. 8
static int hb_cpufreq_driver_init(void)
{
	struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
	struct device *cpu_dev;
	struct clk *cpu_clk;
	struct device_node *np;
	int ret;

	if ((!of_machine_is_compatible("calxeda,highbank")) &&
		(!of_machine_is_compatible("calxeda,ecx-2000")))
		return -ENODEV;

	for_each_child_of_node(of_find_node_by_path("/cpus"), np)
		if (of_get_property(np, "operating-points", NULL))
			break;

	if (!np) {
		pr_err("failed to find highbank cpufreq node\n");
		return -ENOENT;
	}

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get highbank cpufreq device\n");
		ret = -ENODEV;
		goto out_put_node;
	}

	cpu_dev->of_node = np;

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		ret = PTR_ERR(cpu_clk);
		pr_err("failed to get cpu0 clock: %d\n", ret);
		goto out_put_node;
	}

	ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb);
	if (ret) {
		pr_err("failed to register clk notifier: %d\n", ret);
		goto out_put_node;
	}

	/* Instantiate cpufreq-cpu0 */
	platform_device_register_full(&devinfo);

out_put_node:
	of_node_put(np);
	return ret;
}
module_init(hb_cpufreq_driver_init);

MODULE_AUTHOR("Mark Langsdorf <*****@*****.**>");
MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
MODULE_LICENSE("GPL");
Example No. 9
static int resources_available(void)
{
	struct device *cpu_dev;
	struct regulator *cpu_reg;
	struct clk *cpu_clk;
	int ret = 0;
	const char *name;

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		return -ENODEV;
	}

	cpu_clk = clk_get(cpu_dev, NULL);
	ret = PTR_ERR_OR_ZERO(cpu_clk);
	if (ret) {
		/*
		 * If cpu's clk node is present, but clock is not yet
		 * registered, we should try deferring probe.
		 */
		if (ret == -EPROBE_DEFER)
			dev_dbg(cpu_dev, "clock not ready, retry\n");
		else
			dev_err(cpu_dev, "failed to get clock: %d\n", ret);

		return ret;
	}

	clk_put(cpu_clk);

	name = find_supply_name(cpu_dev);
	/* Platform doesn't require regulator */
	if (!name)
		return 0;

	cpu_reg = regulator_get_optional(cpu_dev, name);
	ret = PTR_ERR_OR_ZERO(cpu_reg);
	if (ret) {
		/*
		 * If cpu's regulator supply node is present, but regulator is
		 * not yet registered, we should try deferring probe.
		 */
		if (ret == -EPROBE_DEFER)
			dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
		else
			dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);

		return ret;
	}

	regulator_put(cpu_reg);
	return 0;
}
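The deferred-probe comments above only take effect if the caller propagates the error code. A sketch of how a probe routine might use resources_available(), loosely modeled on cpufreq-dt; the driver name dt_cpufreq_driver is a placeholder:

static int dt_cpufreq_probe(struct platform_device *pdev)
{
	int ret;

	ret = resources_available();
	if (ret)
		return ret;	/* -EPROBE_DEFER bubbles up and probe is retried */

	return cpufreq_register_driver(&dt_cpufreq_driver);
}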
Example No. 10
static int add_sysfs_objects(struct sleep_data *sleep_info)
{
	int err = 0;
	int i = 0;
	const int attr_count = 3;

	struct attribute **attribs =
		kzalloc(sizeof(struct attribute *) * attr_count, GFP_KERNEL);

	if (!attribs)
		return -ENOMEM;

	atomic_set(&sleep_info->timer_expired, 0);
	atomic_set(&sleep_info->timer_val_ms, INT_MAX);

	attribs[0] = MSM_SLEEP_RW_ATTRIB(timer_val_ms);
	attribs[1] = MSM_SLEEP_RO_ATTRIB(timer_expired);
	attribs[2] = NULL;

	for (i = 0; i < attr_count - 1 ; i++) {
		if (!attribs[i])
			goto release_attribs;
	}

	sleep_info->attr_group = kzalloc(sizeof(struct attribute_group),
						GFP_KERNEL);
	if (!sleep_info->attr_group)
		goto release_attribs;
	sleep_info->attr_group->attrs = attribs;
	sleep_info->kobj = kobject_create_and_add("sleep-stats",
			&get_cpu_device(0)->kobj);
	if (!sleep_info->kobj)
		goto release_attr_group;

	err = sysfs_create_group(sleep_info->kobj, sleep_info->attr_group);
	if (err)
		kobject_put(sleep_info->kobj);
	else
		kobject_uevent(sleep_info->kobj, KOBJ_ADD);

	if (!err)
		return err;

release_attr_group:
	kfree(sleep_info->attr_group);
release_attribs:
	for (i = 0; i < attr_count - 1 ; i++)
		if (attribs[i])
			kfree(attribs[i]);
	kfree(attribs);

	return -ENOMEM;
}
Example No. 11
void cpu_remove_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_remove_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
}
Example No. 12
int cpu_add_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_create_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
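A hedged usage sketch for the helper above: define a per-CPU attribute with DEVICE_ATTR_RO() and register it on every possible CPU; the attribute name foo and its show routine are hypothetical:

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", dev->id);	/* CPU device id is the CPU number */
}
static DEVICE_ATTR_RO(foo);			/* defines dev_attr_foo */

static int __init foo_init(void)
{
	/* shows up as /sys/devices/system/cpu/cpuN/foo */
	return cpu_add_dev_attr(&dev_attr_foo);
}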
Example No. 13
static void disable_hotplug_cpu(int cpu)
{
	if (!cpu_is_hotpluggable(cpu))
		return;
	lock_device_hotplug();
	if (cpu_online(cpu))
		device_offline(get_cpu_device(cpu));
	if (!cpu_online(cpu) && cpu_present(cpu)) {
		xen_arch_unregister_cpu(cpu);
		set_cpu_present(cpu, false);
	}
	unlock_device_hotplug();
}
Example No. 14
void cpu_remove_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		sysfs_remove_group(&dev->kobj, attrs);
	}

	mutex_unlock(&cpu_mutex);
}
Example No. 15
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
				policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev);
		return ret;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	if (arm_bL_ops->get_transition_latency)
		policy->cpuinfo.transition_latency =
			arm_bL_ops->get_transition_latency(cpu_dev);
	else
		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}
Example No. 16
static int cpuid_cpu_offline(unsigned int cpu)
{
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev)
		return -ENODEV;
	if (info->kobj.parent) {
		sysfs_remove_group(&info->kobj, &cpuregs_attr_group);
		kobject_del(&info->kobj);
	}

	return 0;
}
Example No. 17
static void __exit
err_inject_exit(void)
{
	int i;
	struct device *sys_dev;

#ifdef ERR_INJ_DEBUG
	printk(KERN_INFO "Exit error injection driver.\n");
#endif
	for_each_online_cpu(i) {
		sys_dev = get_cpu_device(i);
		sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
	}
	unregister_hotcpu_notifier(&err_inject_cpu_notifier);
}
Example No. 18
static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	put_cluster_clk_and_freq_table(cpu_dev);
	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);

	return 0;
}
Example No. 19
void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
{
	struct device *cpu_dev;
	int cpu;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		dev_pm_opp_of_remove_table(cpu_dev);
	}
}
Example No. 20
int cpu_add_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;
	int ret;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		ret = sysfs_create_group(&dev->kobj, attrs);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
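And the group-based variant: a hypothetical attribute_group wired through cpu_add_dev_attr_group() (and later cpu_remove_dev_attr_group()); all names are placeholders:

static struct attribute *foo_attrs[] = {
	&dev_attr_foo.attr,	/* from a DEVICE_ATTR_*() definition as above */
	NULL,			/* attribute arrays are NULL-terminated */
};

static struct attribute_group foo_attr_group = {
	.name  = "foo",		/* optional subdirectory under cpuN/ */
	.attrs = foo_attrs,
};

static int __init foo_group_init(void)
{
	return cpu_add_dev_attr_group(&foo_attr_group);
}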
Example No. 21
static int __init beagle_opp_init(void)
{
	int r = 0;

	if (!machine_is_omap3_beagle())
		return 0;

	/* Initialize the omap3 opp table if not already created. */
	r = omap3_opp_init();
	if (r < 0 && (r != -EEXIST)) {
		pr_err("%s: opp default init failed\n", __func__);
		return r;
	}

	/* Custom OPP enabled for all xM versions */
	if (cpu_is_omap3630()) {
		struct device *mpu_dev, *iva_dev;

		mpu_dev = get_cpu_device(0);
		iva_dev = omap_device_get_by_hwmod_name("iva");

		if (!mpu_dev || IS_ERR(iva_dev)) {
			pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n",
				__func__, mpu_dev, iva_dev);
			return -ENODEV;
		}
		/* Enable MPU 1GHz and lower opps */
		r = dev_pm_opp_enable(mpu_dev, 800000000);
		/* TODO: MPU 1GHz needs SR and ABB */

		/* Enable IVA 800MHz and lower opps */
		r |= dev_pm_opp_enable(iva_dev, 660000000);
		/* TODO: DSP 800MHz needs SR and ABB */
		if (r) {
			pr_err("%s: failed to enable higher opp %d\n",
				__func__, r);
			/*
			 * Cleanup - disable the higher freqs - we don't care
			 * about the results
			 */
			dev_pm_opp_disable(mpu_dev, 800000000);
			dev_pm_opp_disable(iva_dev, 660000000);
		}
	}
	return 0;
}
Example No. 22
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *str;
	int cpu_id = ptr_to_cpu(v);
	struct device *cpu_dev = get_cpu_device(cpu_id);
	struct clk *cpu_clk;
	unsigned long freq = 0;

	if (!cpu_online(cpu_id)) {
		seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
		goto done;
	}

	str = (char *)__get_free_page(GFP_KERNEL);
	if (!str)
		goto done;

	seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
			   cpu_id);
	} else {
		freq = clk_get_rate(cpu_clk);
	}
	if (freq)
		seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
			   freq / 1000000, (freq / 10000) % 100);

	seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100);

	seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
	seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
	seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
	seq_printf(m, arc_platform_smp_cpuinfo());

	free_page((unsigned long)str);
done:
	seq_printf(m, "\n");

	return 0;
}
Example No. 23
File: node.c Project: jthatch12/STi
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}
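A hedged sketch of the registration counterpart, which would create the two symlinks removed above (modeled on drivers/base/node.c):

int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	/* node -> cpu link, then the reverse cpu -> node link */
	ret = sysfs_create_link(&node_devices[nid]->dev.kobj, &obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}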
Example No. 24
int cpuquiet_register_driver(struct cpuquiet_driver *drv)
{
	int err = -EBUSY;
	unsigned int cpu;
	struct device *dev;

	if (!drv)
		return -EINVAL;

#ifdef CONFIG_CPUQUIET_STATS
	stats = kzalloc(nr_cpu_ids * sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;
#endif

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_CPUQUIET_STATS
		u64 cur_jiffies = get_jiffies_64();
		stats[cpu].last_update = cur_jiffies;
		if (cpu_online(cpu))
			stats[cpu].up_down_count = 1;
#endif
		dev = get_cpu_device(cpu);
		if (dev) {
			cpuquiet_add_dev(dev, cpu);
#ifdef CONFIG_CPUQUIET_STATS
			cpuquiet_cpu_kobject_init(&stats[cpu].cpu_kobject,
					&ktype_cpu_stats, "stats", cpu);
#endif
		}
	}

	mutex_lock(&cpuquiet_lock);
	if (!cpuquiet_curr_driver) {
		err = 0;
		cpuquiet_curr_driver = drv;
		cpuquiet_switch_governor(cpuquiet_get_first_governor());
	}
	mutex_unlock(&cpuquiet_lock);

	return err;
}
Example No. 25
static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			timed_topology_update(1);
			find_and_online_cpu_nid(cpu);
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}
Example No. 26
static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
Example No. 27
static int cpuid_cpu_online(unsigned int cpu)
{
	int rc;
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev) {
		rc = -ENODEV;
		goto out;
	}
	rc = kobject_add(&info->kobj, &dev->kobj, "regs");
	if (rc)
		goto out;
	rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
	if (rc)
		kobject_del(&info->kobj);
out:
	return rc;
}
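These two callbacks pair naturally with the CPU hotplug state machine. A plausible registration using the dynamic state; the state name string is an assumption:

static int __init cpuinfo_regs_init(void)
{
	int ret;

	/* run cpuid_cpu_online()/cpuid_cpu_offline() as CPUs come and go */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
				cpuid_cpu_online, cpuid_cpu_offline);
	return ret < 0 ? ret : 0;	/* DYN returns the state number on success */
}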
Example No. 28
static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *sys_dev;

	sys_dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		err_inject_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		err_inject_remove_dev(sys_dev);
		break;
	}

	return NOTIFY_OK;
}
Example No. 29
static void __exit pseries_energy_cleanup(void)
{
	int cpu;
	struct device *cpu_dev;

	if (!sysfs_entries)
		return;

	/* Remove the sysfs files */
	device_remove_file(cpu_subsys.dev_root, &attr_cpu_activate_hint_list);
	device_remove_file(cpu_subsys.dev_root, &attr_cpu_deactivate_hint_list);

	for_each_possible_cpu(cpu) {
		cpu_dev = get_cpu_device(cpu);
		sysfs_remove_file(&cpu_dev->kobj,
				&attr_percpu_activate_hint.attr);
		sysfs_remove_file(&cpu_dev->kobj,
				&attr_percpu_deactivate_hint.attr);
	}
}
Example No. 30
static int init_rq_attribs(void)
{
	int err;

	rq_info.rq_avg = 0;
	rq_info.attr_group = &rq_attr_group;

	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
	rq_info.kobj = kobject_create_and_add("rq-stats",
			&get_cpu_device(0)->kobj);
	if (!rq_info.kobj)
		return -ENOMEM;

	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
	if (err)
		kobject_put(rq_info.kobj);
	else
		kobject_uevent(rq_info.kobj, KOBJ_ADD);

	return err;
}