void dual_boost(unsigned int boost_on)
{
	if (boost_on)
	{	
		if (is_dual_locked != 0)
			return;

#ifndef DUALBOOST_DEFERED_QUEUE
		cpu_hotplug_driver_lock();
		if (cpu_is_offline(NON_BOOT_CPU))
		{
			ssize_t ret;
			struct sys_device *cpu_sys_dev;
		
			ret = cpu_up(NON_BOOT_CPU); // it takes 60ms
			if (!ret)
			{
				cpu_sys_dev = get_cpu_sysdev(NON_BOOT_CPU);
				if (cpu_sys_dev)
				{
					kobject_uevent(&cpu_sys_dev->kobj, KOBJ_ONLINE);
					stall_mpdecision = 1;
				}
			}
		}
		cpu_hotplug_driver_unlock();
#else	
		if (cpu_is_offline(NON_BOOT_CPU))
			schedule_work_on(BOOT_CPU, &dvfs_hotplug_work);
#endif
		is_dual_locked = 1;
	}
	else
	{
		if (stall_mpdecision == 1)
		{
			struct sys_device *cpu_sys_dev;

#ifdef DUALBOOST_DEFERED_QUEUE
			flush_work(&dvfs_hotplug_work);
#endif
			cpu_hotplug_driver_lock();	
			cpu_sys_dev = get_cpu_sysdev(NON_BOOT_CPU);
			if (cpu_sys_dev)
			{
				kobject_uevent(&cpu_sys_dev->kobj, KOBJ_ONLINE);
				stall_mpdecision = 0;
			}
			cpu_hotplug_driver_unlock();
		}
		
		is_dual_locked = 0;
	}
}
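A note on the state dual_boost() manipulates: the flags and CPU ids are defined elsewhere in the board file. A minimal sketch of what they plausibly look like on a dual-core SoC (the values are assumptions, not taken from the source):

#define BOOT_CPU	0	/* assumption: core that always stays online */
#define NON_BOOT_CPU	1	/* assumption: the hot-pluggable second core */

static int is_dual_locked;	/* nonzero while the second core is forced online */
static int stall_mpdecision;	/* asks the userspace mpdecision daemon to pause */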
Example #2
static ssize_t __ref store_online(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpu->dev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		ret = cpu_up(cpu->dev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
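This is the stock drivers/base/cpu.c handler. It is exposed to userspace as the per-CPU online attribute, roughly as sketched below (the show handler and mode bits are assumptions); a write such as echo 0 > /sys/devices/system/cpu/cpu1/online ends up here.

/* Sketch of the attribute hookup assumed above; show_online() and the 0644
 * mode are assumptions. */
static ssize_t show_online(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}
static DEVICE_ATTR(online, 0644, show_online, store_online);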
Example #3
static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
				 const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t ret = 0;
#if defined(CONFIG_MACH_LGE_C1_BOARD_SPR)
	if (!g_slate_status)
#endif
	{
		cpu_hotplug_driver_lock();
		switch (buf[0]) {
		case '0':
			ret = cpu_down(cpu->sysdev.id);
			if (!ret)
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
			break;
		case '1':
			ret = cpu_up(cpu->sysdev.id);
			if (!ret)
				kobject_uevent(&dev->kobj, KOBJ_ONLINE);
			break;
		default:
			ret = -EINVAL;
		}
		cpu_hotplug_driver_unlock();

		if (ret >= 0)
			ret = count;
	}
	return ret;
}
Example #4
static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
				 const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t ret;

#ifdef CONFIG_HUAWEI_KERNEL
	/* If the syscore shutdown hook has run, the system is restarting;
	 * ignore writes to this sysfs file. */
	if (sys_shutdown)
	{
		return count;
	}
#endif
	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpu->sysdev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		ret = cpu_up(cpu->sysdev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
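The CONFIG_HUAWEI_KERNEL guard above relies on a sys_shutdown flag that is raised once the system begins restarting. A hedged sketch of how such a flag could be maintained with a syscore shutdown hook (the helper and ops names are hypothetical):

/* Hypothetical sketch, requires <linux/syscore_ops.h>: raise sys_shutdown
 * from the syscore shutdown callback so late sysfs writes are ignored. */
static int sys_shutdown;

static void cpu_online_syscore_shutdown(void)
{
	sys_shutdown = 1;
}

static struct syscore_ops cpu_online_syscore_ops = {
	.shutdown = cpu_online_syscore_shutdown,
};

static int __init cpu_online_syscore_init(void)
{
	register_syscore_ops(&cpu_online_syscore_ops);
	return 0;
}
core_initcall(cpu_online_syscore_init);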
Example #5
static void dvfs_hotplug_callback(struct work_struct *unused)
{
	cpu_hotplug_driver_lock();
	if (cpu_is_offline(NON_BOOT_CPU))
	{	
		cpu_up(NON_BOOT_CPU); // it takes 60ms
	}
	cpu_hotplug_driver_unlock();
}
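This callback is the deferred-queue half of dual_boost() above (the DUALBOOST_DEFERED_QUEUE path). A minimal sketch of the wiring implied by the call sites, with a hypothetical helper to show the scheduling side:

/* Sketch: the work item dual_boost() schedules; request_second_core() is a
 * hypothetical helper showing the non-blocking bring-up path. */
static DECLARE_WORK(dvfs_hotplug_work, dvfs_hotplug_callback);

static void request_second_core(void)
{
	if (cpu_is_offline(NON_BOOT_CPU))
		schedule_work_on(BOOT_CPU, &dvfs_hotplug_work);
	/* the caller later runs flush_work(&dvfs_hotplug_work) before
	 * releasing the boost, as dual_boost() does above */
}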
Example #6
File: cpu.c Project: Andiry/pmfs
static int cpu_subsys_offline(struct device *dev)
{
	int ret;

	cpu_hotplug_driver_lock();
	ret = cpu_down(dev->id);
	cpu_hotplug_driver_unlock();
	return ret;
}
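In the kernels where this sits next to cpu_subsys_online() (Example #16 below), both callbacks hang off the CPU bus type so the driver core can route device_online()/device_offline() to them. A rough sketch from memory of that era, with other fields elided:

/* Rough sketch of the CPU bus_type hookup; the field set is from memory and
 * may differ between kernel versions. */
struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
#ifdef CONFIG_HOTPLUG_CPU
	.online = cpu_subsys_online,
	.offline = cpu_subsys_offline,
#endif
};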
Example #7
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	struct device_node *dn;
	unsigned long drc_index;
	char *cpu_name;
	int rc;

	cpu_hotplug_driver_lock();
	rc = strict_strtoul(buf, 0, &drc_index);
	if (rc) {
		rc = -EINVAL;
		goto out;
	}

	dn = dlpar_configure_connector(drc_index);
	if (!dn) {
		rc = -EINVAL;
		goto out;
	}

	/* configure-connector reports cpus as living in the base
	 * directory of the device tree.  CPUs actually live in the
	 * cpus directory so we need to fixup the full_name.
	 */
	cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
			   GFP_KERNEL);
	if (!cpu_name) {
		dlpar_free_cc_nodes(dn);
		rc = -ENOMEM;
		goto out;
	}

	sprintf(cpu_name, "/cpus%s", dn->full_name);
	kfree(dn->full_name);
	dn->full_name = cpu_name;

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		dlpar_free_cc_nodes(dn);
		rc = -EINVAL;
		goto out;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		dlpar_release_drc(drc_index);
		dlpar_free_cc_nodes(dn);
		goto out;
	}

	rc = dlpar_online_cpu(dn);
out:
	cpu_hotplug_driver_unlock();

	return rc ? rc : count;
}
Example #8
static ssize_t __ref store_online(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	struct device *dev3;
	ssize_t ret;

	// AP: this sysfs attribute only takes effect when the control mode is sysfs mode
	if (online_control_mode[cpu->dev.id] != ONL_CONT_MODE_SYSFS)
		return count;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpu->dev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		// if lock4_3 mode is active, the fourth core follows core 3 offline
		if ((cpu->dev.id == ID_CPU_CORE_3) &&
			(online_control_mode[ID_CPU_CORE_4] == ONL_CONT_MODE_LOCK4_3))
		{
			dev3 = get_cpu_device(ID_CPU_CORE_4);
			ret = cpu_down(ID_CPU_CORE_4);
			if (!ret)
				kobject_uevent(&dev3->kobj, KOBJ_OFFLINE);
		}
		break;
	case '1':
		ret = cpu_up(cpu->dev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);

		// if lock4_3 mode is active, the fourth core follows core 3 online
		if ((cpu->dev.id == ID_CPU_CORE_3) &&
			(online_control_mode[ID_CPU_CORE_4] == ONL_CONT_MODE_LOCK4_3))
		{
			dev3 = get_cpu_device(ID_CPU_CORE_4);
			ret = cpu_up(ID_CPU_CORE_4);
			if (!ret)
				kobject_uevent(&dev3->kobj, KOBJ_ONLINE);
		}
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
Example #9
static ssize_t __ref store_online_control(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) 
	{
		case '0': // control via sysfs
			ret = cpu_down(cpu->dev.id);
			if (!ret)
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
			online_control_mode[cpu->dev.id] = ONL_CONT_MODE_SYSFS;
			break;
			
		case '1': // forced online
			ret = cpu_up(cpu->dev.id);
			if (!ret)
				kobject_uevent(&dev->kobj, KOBJ_ONLINE);
			online_control_mode[cpu->dev.id] = ONL_CONT_MODE_ONLINE;
			break;
			
		case '2': // forced offline
			ret = cpu_down(cpu->dev.id);
			if (!ret)
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
			online_control_mode[cpu->dev.id] = ONL_CONT_MODE_OFFLINE;
			break;
			
		case '3': // only allowed for CPU core 4 - force offline but lock it to core 3
			if (cpu->dev.id == ID_CPU_CORE_4)
			{
				ret = cpu_down(cpu->dev.id);
				if (!ret)
					kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
				online_control_mode[cpu->dev.id] = ONL_CONT_MODE_LOCK4_3;
			}
			else
				ret = -EINVAL;
			break;
			
		default:
			ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
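Both of the handlers above lean on board-specific bookkeeping that is not shown. A hedged sketch of the declarations they appear to assume (the enum ordering, CPU ids, and array sizing are guesses based only on the call sites):

/* Assumed bookkeeping for store_online()/store_online_control(); values
 * are guesses derived from the call sites above. */
enum {
	ONL_CONT_MODE_SYSFS,	/* '0': normal control through the online attribute */
	ONL_CONT_MODE_ONLINE,	/* '1': forced online */
	ONL_CONT_MODE_OFFLINE,	/* '2': forced offline */
	ONL_CONT_MODE_LOCK4_3,	/* '3': fourth core follows core 3 */
};

#define ID_CPU_CORE_3	2	/* assumption: third core has cpu id 2 */
#define ID_CPU_CORE_4	3	/* assumption: fourth core has cpu id 3 */

static int online_control_mode[NR_CPUS];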
Example #10
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	struct device_node *dn, *parent;
	unsigned long drc_index;
	int rc;

	cpu_hotplug_driver_lock();
	rc = strict_strtoul(buf, 0, &drc_index);
	if (rc) {
		rc = -EINVAL;
		goto out;
	}

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		rc = -ENODEV;
		goto out;
	}

	dn = dlpar_configure_connector(drc_index, parent);
	if (!dn) {
		rc = -EINVAL;
		goto out;
	}

	of_node_put(parent);

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		dlpar_free_cc_nodes(dn);
		rc = -EINVAL;
		goto out;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		dlpar_release_drc(drc_index);
		dlpar_free_cc_nodes(dn);
		goto out;
	}

	rc = dlpar_online_cpu(dn);
out:
	cpu_hotplug_driver_unlock();

	return rc ? rc : count;
}
Example #11
static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
				 const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpu->sysdev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		ret = cpu_up(cpu->sysdev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

#if defined (CONFIG_KOR_MODEL_SHV_E120S) || defined (CONFIG_KOR_MODEL_SHV_E120K) || defined (CONFIG_KOR_MODEL_SHV_E120L) \
|| defined (CONFIG_KOR_MODEL_SHV_E160S) || defined (CONFIG_KOR_MODEL_SHV_E160K) || defined(CONFIG_KOR_MODEL_SHV_E160L)  \
|| defined (CONFIG_USA_MODEL_SGH_I757) || defined (CONFIG_USA_MODEL_SGH_I577) || defined (CONFIG_USA_MODEL_SGH_T769) || defined(CONFIG_CAN_MODEL_SGH_I577R) || defined (CONFIG_CAN_MODEL_SGH_I757M)
		if (!ret && cpu->sysdev.id == NON_BOOT_CPU)
		{
			if (buf[0] == '0') // cpu1 offline
			{
				cpufreq_set_limit(UNI_PRO_START);
			}
			else if (buf[0] == '1') // cpu1 online
			{
				cpufreq_set_limit(UNI_PRO_STOP);
			}
		}
#endif

	if (ret >= 0)
		ret = count;
	return ret;
}
Example #12
static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	const u32 *drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	drc_index = of_get_property(dn, "ibm,my-drc-index", NULL);
	if (!drc_index) {
		of_node_put(dn);
		return -EINVAL;
	}

	cpu_hotplug_driver_lock();
	rc = dlpar_offline_cpu(dn);
	if (rc) {
		of_node_put(dn);
		rc = -EINVAL;
		goto out;
	}

	rc = dlpar_release_drc(*drc_index);
	if (rc) {
		of_node_put(dn);
		goto out;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		dlpar_acquire_drc(*drc_index);
		goto out;
	}

	of_node_put(dn);
out:
	cpu_hotplug_driver_unlock();
	return rc ? rc : count;
}
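On pseries these two DLPAR handlers back the cpu probe/release sysfs files; they are typically plugged into the platform's machdep callbacks roughly as below (a sketch from memory of that kernel era; the initcall wrapper name is illustrative):

/* Sketch: hooking the DLPAR handlers into ppc_md; the wrapper name is
 * illustrative and the exact hookup may differ by version. */
static int __init pseries_dlpar_init(void)
{
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;

	return 0;
}
machine_device_initcall(pseries, pseries_dlpar_init);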
Example #13
static void dvfs_hotplug_callback(struct work_struct *unused)
{
	cpu_hotplug_driver_lock();
	if (cpu_is_offline(NON_BOOT_CPU))
	{
		ssize_t ret;
		struct sys_device *cpu_sys_dev;
	
		ret = cpu_up(NON_BOOT_CPU); // it takes 60ms
		if (!ret)
		{
			cpu_sys_dev = get_cpu_sysdev(NON_BOOT_CPU);
			if (cpu_sys_dev)
			{
				kobject_uevent(&cpu_sys_dev->kobj, KOBJ_ONLINE);
				stall_mpdecision = 1;
			}
		}
	}
	cpu_hotplug_driver_unlock();
}
Example #14
static ssize_t __ref store_online(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id;
	int from_nid, to_nid;
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpuid);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		from_nid = cpu_to_node(cpuid);
		ret = cpu_up(cpuid);

		/*
		 * When hot adding memory to memoryless node and enabling a cpu
		 * on the node, node number of the cpu may internally change.
		 */
		to_nid = cpu_to_node(cpuid);
		if (from_nid != to_nid)
			change_cpu_under_node(cpu, from_nid, to_nid);

		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
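The change_cpu_under_node() call covers the case where bringing a CPU online also populates a previously memoryless node. Roughly, and from memory of drivers/base/cpu.c of that era, it re-registers the CPU's node linkage:

/* Rough sketch of change_cpu_under_node(); details may differ from the
 * actual drivers/base/cpu.c implementation. */
static void change_cpu_under_node(struct cpu *cpu,
				  unsigned int from_nid, unsigned int to_nid)
{
	int cpuid = cpu->dev.id;

	unregister_cpu_under_node(cpuid, from_nid);
	register_cpu_under_node(cpuid, to_nid);
	cpu->node_id = to_nid;
}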
Example #15
static ssize_t store_run_queue_avg(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	if (buf[0] == '1')
	{	
		if (is_dual_locked != 0)
			return count;

		cpufreq_set_limit(DVFS_START);
#if 1
		cpu_hotplug_driver_lock();
		if (cpu_is_offline(NON_BOOT_CPU))
		{	
			cpu_up(NON_BOOT_CPU); // it takes 60ms
		}
		cpu_hotplug_driver_unlock();
#else	
		if (cpu_is_offline(NON_BOOT_CPU))
			schedule_work_on(0, &dvfs_hotplug_work);
#endif
		stall_mpdecision = 1;	
		is_dual_locked = 1;
	}
	else
	{
		if (is_dual_locked == 0)
		{
			stall_mpdecision = 0;
			return count;
		}

		cpufreq_set_limit(DVFS_STOP);

		stall_mpdecision = 0;
		is_dual_locked = 0;
	}

	return count;
}
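Unlike the earlier examples, this handler hangs off a plain kobject attribute. A minimal sketch of how it might be registered (the attribute name, show handler, and parent kobject are assumptions):

/* Hypothetical registration; show_run_queue_avg(), the 0644 mode and the
 * kernel_kobj parent are assumptions. */
static struct kobj_attribute run_queue_avg_attr =
	__ATTR(run_queue_avg, 0644, show_run_queue_avg, store_run_queue_avg);

static int __init run_queue_avg_init(void)
{
	return sysfs_create_file(kernel_kobj, &run_queue_avg_attr.attr);
}
late_initcall(run_queue_avg_init);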
Example #16
File: cpu.c Project: Andiry/pmfs
static int __ref cpu_subsys_online(struct device *dev)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = dev->id;
	int from_nid, to_nid;
	int ret;

	cpu_hotplug_driver_lock();

	from_nid = cpu_to_node(cpuid);
	ret = cpu_up(cpuid);
	/*
	 * When hot adding memory to memoryless node and enabling a cpu
	 * on the node, node number of the cpu may internally change.
	 */
	to_nid = cpu_to_node(cpuid);
	if (from_nid != to_nid)
		change_cpu_under_node(cpu, from_nid, to_nid);

	cpu_hotplug_driver_unlock();
	return ret;
}