/* Board file init function */
int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
{
	unsigned int cpu;
	int ret = 0;

	BUG_ON((nr_devs < num_possible_cpus()) || !data);

	for_each_possible_cpu(cpu) {
		struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);
		ret = msm_spm_dev_init(dev, &data[cpu]);
		if (ret < 0) {
			pr_warn("%s():failed CPU:%u ret:%d\n", __func__,
					cpu, ret);
			break;
		}
	}

	return ret;
}
Example 2
asmlinkage int bfin_clone(struct pt_regs *regs)
{
    unsigned long clone_flags;
    unsigned long newsp;

#ifdef __ARCH_SYNC_CORE_DCACHE
    if (current->rt.nr_cpus_allowed == num_possible_cpus())
        set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif

    /* syscall2 puts clone_flags in r0 and usp in r1 */
    clone_flags = regs->r0;
    newsp = regs->r1;
    if (!newsp)
        newsp = rdusp();
    else
        newsp -= 12;
    return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}
Example 3
static int get_target_raw_data_length(unsigned long *info)
{
	/* re-enable the commented-out branch below once target info is implemented on all platforms */
	unsigned long length;

#if defined(PX_SOC_MMP3) || defined(PX_SOC_ARMADA610) || defined(PX_SOC_PXA920) || defined(PX_SOC_BG2)
	length = get_arm_target_raw_data_length();
	/* On platforms where target info is not implemented, the target info only contains each CPU's main ID */
//#elif defined(PX_SOC_ARMADA370)
//	length = num_possible_cpus();
#else
	length = num_possible_cpus();
#endif

	if (copy_to_user(info, &length, sizeof(unsigned long)) != 0) {
		return -EFAULT;
	}

	return 0;
}
Example 4
static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			timed_topology_update(1);
			find_and_online_cpu_nid(cpu);
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}
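A note on the final check in the example above: the CPU iterators stop at nr_cpu_ids, so "cpu == num_possible_cpus()" only detects an exhausted loop when the possible CPU IDs are contiguous from 0. A minimal sketch of the same "no CPU matched" test written against that sentinel (the wrapper function name is invented; the hard-ID lookup is the one the example itself uses):

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <asm/smp.h>	/* get_hard_smp_processor_id() on powerpc */

/* Sketch only (powerpc context, mirroring the example above): detect that
 * a for_each_*_cpu() loop found no match by testing against nr_cpu_ids,
 * the value the iterator actually stops at. */
static void report_missing_cpu(u32 thread)
{
	unsigned int cpu;

	for_each_present_cpu(cpu)
		if (get_hard_smp_processor_id(cpu) == thread)
			break;

	if (cpu >= nr_cpu_ids)
		pr_warn("Could not find cpu to online with physical id 0x%x\n",
			thread);
}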
Example 5
int run_parallel(const char *desc, uint32_t loops, const cpumask_t *cpumask,
		 int step, void *data,
		 int (*func)(struct time_bench_record *record, void *data)
	)
{
	struct time_bench_sync sync;
	struct time_bench_cpu *cpu_tasks;
	size_t size;

	/* Allocate records for every CPU */
	size = sizeof(*cpu_tasks) * num_possible_cpus();
	cpu_tasks = kzalloc(size, GFP_KERNEL);
	if (!cpu_tasks)
		return 0; /* cannot run without the per-CPU records */

	time_bench_run_concurrent(loops, step, data,
				  cpumask, &sync, cpu_tasks, func);
	time_bench_print_stats_cpumask(desc, cpu_tasks, cpumask);

	kfree(cpu_tasks);
	return 1;
}
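For orientation, a hypothetical caller of the helper above; only run_parallel()'s signature and the time_bench types come from the listing, while the callback name, loop count, and body are invented for illustration:

/* Sketch: a benchmark callback matching the func pointer type expected by
 * run_parallel(), plus a trivial caller running it on all online CPUs. */
static int my_bench_loop(struct time_bench_record *record, void *data)
{
	/* the timed work would go here; non-zero return means success */
	return 1;
}

static void my_bench_run(void)
{
	run_parallel("my_bench", 100000, cpu_online_mask, 0, NULL,
		     my_bench_loop);
}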
Example 6
static ssize_t
cpunum_ceiling_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t n)
{
	int val, bit, on;

	if (sscanf(buf, "%d", &val) > 0) {
		bit = val / 2;
		on = val % 2;
		if (bit >= num_possible_cpus() || bit < 0)
		    return -EINVAL;
		if (on)
		    cpunum_max |= (1 << bit);
		else
		    cpunum_max &= ~(1 << bit);
		sysfs_notify(kobj, NULL, "cpunum_ceiling");
		return n;
	}
	return -EINVAL;
}
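The store handler above packs a CPU number and an on/off flag into a single integer: val / 2 selects the bit and val % 2 sets or clears it. A small illustrative encoder (the helper name is invented, not part of the original driver):

/* Illustration only: build a value in the format cpunum_ceiling_store()
 * parses above.  E.g. 7 (= 3*2 + 1) sets the bit for CPU 3, and
 * 6 (= 3*2 + 0) clears it. */
static inline int cpunum_ceiling_encode(int cpu, int on)
{
	return cpu * 2 + !!on;
}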
Example 7
static void __init setup_per_cpu_areas(void)
{
	unsigned long size, i;
	char *ptr;
	unsigned long nr_possible_cpus = num_possible_cpus();

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
	if (size < PERCPU_ENOUGH_ROOM)
		size = PERCPU_ENOUGH_ROOM;
#endif
	ptr = alloc_bootmem(size * nr_possible_cpus);

	for_each_possible_cpu(i) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
		ptr += size;
	}
}
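For context on what the loop above sets up: __per_cpu_offset[i] records how far CPU i's private copy of the per-CPU section sits from the link-time addresses, so resolving a per-CPU variable is just base address plus that CPU's offset. A conceptual sketch of that lookup (not the kernel's actual per_cpu() macro):

/* Conceptual sketch only: resolve a per-CPU variable's address for a given
 * CPU using the offsets filled in by setup_per_cpu_areas() above. */
static void *per_cpu_addr_sketch(void *linked_addr, unsigned int cpu)
{
	return (char *)linked_addr + __per_cpu_offset[cpu];
}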
Example 8
static void __exit msm_idle_stats_exit(void)
{
	unsigned int nr_cpus = num_possible_cpus();
	int i;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	cdev_del(&msm_idle_stats_cdev);

	for (i = nr_cpus - 1; i >= 0; i--)
		device_destroy(
			msm_idle_stats_class, msm_idle_stats_dev_nr + i);

	class_destroy(msm_idle_stats_class);
	unregister_chrdev_region(msm_idle_stats_dev_nr, nr_cpus);

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: done\n", __func__);
}
Example 9
static void intelli_plug_early_suspend(struct early_suspend *handler)
{
    int i = 0;
    int num_of_active_cores = 0;

    if (atomic_read(&intelli_plug_active) == 1) {
        flush_workqueue(intelliplug_wq);
        num_of_active_cores = num_possible_cpus();

        mutex_lock(&intelli_plug_mutex);
        hotplug_suspended = true;
        mutex_unlock(&intelli_plug_mutex);

        /* put rest of the cores to sleep! */
        for (i = num_of_active_cores - 1; i > 0; i--) {
            cpu_down(i);
        }
    }
}
Example 10
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();
	unsigned int cpu = smp_processor_id();
	int i;

	smp_store_cpu_info(cpu);

	/* are we trying to boot more cores than exist? */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*
	 * Initialise the SCU if there is more than one CPU and let
	 * them know where to start.
	 */
	if (max_cpus > 1) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		scu_enable(scu_base_addr());

		/*
		 * Write the address of secondary startup into the
		 * system-wide flags register. The boot monitor waits
		 * until it receives a soft interrupt, and then the
		 * secondary CPU branches to this address.
		 */
		__raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)),
			     S5P_VA_SYSRAM);
	}
}
Example 11
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu, err;
	unsigned int ncores = num_possible_cpus();

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (cpu == smp_processor_id())
			continue;

		if (!smp_enable_ops[cpu])
			continue;

		err = smp_enable_ops[cpu]->prepare_cpu(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}
Example 12
static int s3c_rtc_resume(struct platform_device *pdev)
{
	s3c_rtc_enable(pdev, 1);

	if (device_may_wakeup(&pdev->dev) && wake_en) {
		disable_irq_wake(s3c_rtc_alarmno);
		wake_en = false;
	}
	{
		int i, count = num_possible_cpus();

		for (i = count - 1; i > 0; i--)
			if (!cpu_online(i))
				cpu_up(i);

		wake_lock(&down_cpu_wakelock);
		hrtimer_start(&down_cpu_timer, ktime_set(3, 0), HRTIMER_MODE_REL);
	}

	return 0;
}
Example 13
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, cpu_pmu);
		}
	}
}
Example 14
static void mt_hotplug_mechanism_power_suspend(struct power_suspend *h)
{
    HOTPLUG_INFO("mt_hotplug_mechanism_power_suspend");

    if (g_enable)
    {
        int i = 0;
        
        hp_disable_cpu_hp(1);

        for (i = (num_possible_cpus() - 1); i > 0; i--)
        {
            if (cpu_online(i))
                cpu_down(i);
        }
    }

    g_cur_state = STATE_ENTER_POWER_SUSPEND;

    return;
}
Example 15
void mtfs_tracefile_fini_arch(void)
{
	int i;
	int j;

	for (i = 0; i < num_possible_cpus(); i++) {
		for (j = 0; j < MTFS_TCD_TYPE_MAX; j++) {
			if (mtfs_trace_console_buffers[i][j] != NULL) {
				kfree(mtfs_trace_console_buffers[i][j]);
				mtfs_trace_console_buffers[i][j] = NULL;
			}
		}
	}

	for (i = 0; mtfs_trace_data[i] != NULL; i++) {
		kfree(mtfs_trace_data[i]);
		mtfs_trace_data[i] = NULL;
	}

	//mtfs_fini_rwsem(&mtfs_tracefile_sem);
}
Example 16
int __init exynos4_init(void)
{
	unsigned int value;
	unsigned int tmp;
	unsigned int i;

	printk(KERN_INFO "EXYNOS4: Initializing architecture\n");

	/* set idle function */
	pm_idle = exynos4_idle;

	/*
	 * on exynos4x12, CMU reset system power register should to be set 0x0
	 */
	if (!soc_is_exynos4210()) {
		for (i = 0; i < ARRAY_SIZE(exynos4_pmu_init_zero); i++)
			__raw_writel(0x0, exynos4_pmu_init_zero[i]);
	}

	/* set sw_reset function */
	s5p_reset_hook = exynos4_sw_reset;

	/* Disable auto wakeup from power off mode */
	for (i = 0; i < num_possible_cpus(); i++) {
		tmp = __raw_readl(S5P_ARM_CORE_OPTION(i));
		tmp &= ~S5P_CORE_OPTION_DIS;
		__raw_writel(tmp, S5P_ARM_CORE_OPTION(i));
	}

	if (soc_is_exynos4212() || soc_is_exynos4412()) {
		value = __raw_readl(S5P_AUTOMATIC_WDT_RESET_DISABLE);
		value &= ~S5P_SYS_WDTRESET;
		__raw_writel(value, S5P_AUTOMATIC_WDT_RESET_DISABLE);
		value = __raw_readl(S5P_MASK_WDT_RESET_REQUEST);
		value &= ~S5P_SYS_WDTRESET;
		__raw_writel(value, S5P_MASK_WDT_RESET_REQUEST);
	}

	return sysdev_register(&exynos4_sysdev);
}
Example 17
void flowctrl_cpu_suspend_enter(unsigned int cpuid)
{
	unsigned int reg;
	int i;

	reg = flowctrl_read_cpu_csr(cpuid);
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		/* clear wfe bitmap */
		reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP;
		/* clear wfi bitmap */
		reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP;
		/* pwr gating on wfe */
		reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid;
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		/* clear wfe bitmap */
		reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
		/* clear wfi bitmap */
		reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
		/* pwr gating on wfi */
		reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
		break;
	}
	reg |= FLOW_CTRL_CSR_INTR_FLAG;			/* clear intr flag */
	reg |= FLOW_CTRL_CSR_EVENT_FLAG;		/* clear event flag */
	reg |= FLOW_CTRL_CSR_ENABLE;			/* pwr gating */
	flowctrl_write_cpu_csr(cpuid, reg);

	for (i = 0; i < num_possible_cpus(); i++) {
		if (i == cpuid)
			continue;
		reg = flowctrl_read_cpu_csr(i);
		reg |= FLOW_CTRL_CSR_EVENT_FLAG;
		reg |= FLOW_CTRL_CSR_INTR_FLAG;
		flowctrl_write_cpu_csr(i, reg);
	}
}
Example 18
void spm_mcdi_init_core_mux(void)
{
    int i = 0;
    // set SPM_MP_CORE0_AUX
    spm_write(SPM_PCM_EVENT_VECTOR2, spm_read(SPM_PCM_EVENT_VECTOR2)&0xfe00ffff);
    spm_write(SPM_PCM_EVENT_VECTOR3, spm_read(SPM_PCM_EVENT_VECTOR3)&0xfe00ffff); 
    
    for (i = (num_possible_cpus() - 1); i > 0; i--)
    {
        if (cpu_online(i) == 0)
        {
            switch (i)
            {
                case 1: //for 72, only core1 hotplug out
                    spm_write(SPM_PCM_EVENT_VECTOR3,
                              spm_read(SPM_PCM_EVENT_VECTOR3) | SPM_PCM_HOTPLUG);
                    break;
                default:
                    break;
                //cpu_down(i);
            }
        }
    }
    
}
Example 19
static bool processor_physically_present(acpi_handle handle)
{
	int cpuid, type;
	u32 acpi_id;
	acpi_status status;
	acpi_object_type acpi_type;
	unsigned long long tmp;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return false;

	switch (acpi_type) {
	case ACPI_TYPE_PROCESSOR:
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status))
			return false;
		acpi_id = object.processor.proc_id;
		break;
	case ACPI_TYPE_DEVICE:
		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
		if (ACPI_FAILURE(status))
			return false;
		acpi_id = tmp;
		break;
	default:
		return false;
	}

	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
	cpuid = acpi_get_cpuid(handle, type, acpi_id);

	if ((cpuid == -1) && (num_possible_cpus() > 1))
		return false;

	return true;
}
Example 20
static int __devinit smp_bgq_kick_cpu(int nr)
{
	struct device_node *np;
	int tid;
	const char *enable_method;

	if (nr < 0 || nr >= num_possible_cpus())
		return -ENOENT;

	np = of_get_cpu_node(nr, &tid);
	if (!np)
		return -ENODEV;

	enable_method = of_get_property(np, "enable-method", NULL);
	if (!enable_method) {
		pr_err("CPU%d has no enable-method\n", nr);
		return -ENOENT;
	}
	pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);

	if (strcmp(enable_method, "kexec") != 0) {
		pr_err("CPU%d: This kernel does not support the \"%s\"\n",
		       nr, enable_method);
		return -EINVAL;
	}

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.	After we set
	 * cpu_start, the processor will continue on to
	 * secondary_start
	 */
	paca[nr].cpu_start = 1;

	/* barrier so other CPU can see it */
	smp_mb();

	return 0;
}
Example 21
void ilockdep_release(struct ilockdep_map *lock,
	unsigned long ip, void *lock_addr)
{
	unsigned int i;
	unsigned long flags;
	struct task_struct *curr = current;

	raw_local_irq_save(flags);
	for (i = 0; i < CONFIG_DEBUG_ILOCKDEP_NUM; i++) {
		if (curr->ilockdep_lock.held_locks[i].key == lock_addr) {
			curr->ilockdep_lock.held_locks[i].key = NULL;
			curr->ilockdep_lock.held_locks[i].cpu =
				num_possible_cpus() + 1;
			curr->ilockdep_lock.held_locks[i].name = NULL;
			curr->ilockdep_lock.held_locks[i].ip = 0;
			curr->ilockdep_lock.depth--;
			break;
		}
	}
	raw_local_irq_restore(flags);
	return;
}
Example 22
static void __init setup_per_cpu_areas(void)
{
#ifndef __LINSCHED__
	unsigned long size, i;
	char *ptr;
	unsigned long nr_possible_cpus = num_possible_cpus();

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
	ptr = alloc_bootmem_pages(size * nr_possible_cpus);

	for_each_possible_cpu(i) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
		ptr += size;
	}
#else
	/* Manually declare all necessary per-cpu areas. As noted in
	 * percpu.h, this is ugly and does not scale at all, and probably
	 * needs rethinking eventually. Perhaps the Linux version of
	 * per-cpu areas could be used again if someone works out how to
	 * port it to user space.
	 */
	ALLOC_PER_CPU_MEM(runqueues);
	ALLOC_PER_CPU_MEM(phys_domains);
	ALLOC_PER_CPU_MEM(sched_group_phys);
	ALLOC_PER_CPU_MEM(current_task);
	/* more per_cpu variables would be added here... */
	int cpu_id;
	for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) {
		INIT_PER_CPU_MEM(runqueues, cpu_id);
		INIT_PER_CPU_MEM(phys_domains, cpu_id);
		INIT_PER_CPU_MEM(sched_group_phys, cpu_id);
		INIT_PER_CPU_MEM(current_task, cpu_id);
		/* more per_cpu variables would be added here... */
	}
#endif /* __LINSCHED__ */
}
Example 23
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
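Once the lock array is sized and initialised as above, a bucket's lock is chosen by masking the bucket hash with locks_mask (size - 1, a power of two). A sketch of that lookup, mirroring rhashtable's own bucket-lock helper:

/* Sketch: pick the spinlock guarding a given bucket hash from the locks[]
 * array allocated by alloc_bucket_locks() above. */
static inline spinlock_t *bucket_lock_sketch(const struct bucket_table *tbl,
					     unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}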
Example 24
/* Error injection interface */
static ssize_t mce_write(struct file *filp, const char __user *ubuf,
			 size_t usize, loff_t *off)
{
	struct delayed_mce *dm;
	struct mce m;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/*
	 * There are some cases where real MSR reads could slip
	 * through.
	 */
	if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA))
		return -EIO;

	if ((unsigned long)usize > sizeof(struct mce))
		usize = sizeof(struct mce);
	if (copy_from_user(&m, ubuf, usize))
		return -EFAULT;

	if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
		return -EINVAL;

	dm = kmalloc(sizeof(struct delayed_mce), GFP_KERNEL);
	if (!dm)
		return -ENOMEM;

	/*
	 * Need to give user space some time to set everything up,
	 * so do it a jiffie or two later everywhere.
	 * Should we use a hrtimer here for better synchronization?
	 */
	memcpy(&dm->m, &m, sizeof(struct mce));
	setup_timer(&dm->timer, raise_mce, (unsigned long)dm);
	dm->timer.expires = jiffies + 2;
	add_timer_on(&dm->timer, m.extcpu);
	return usize;
}
Example 25
int __devinit msm_thermal_init(struct msm_thermal_data *pdata)
{
	int ret = 0;

	BUG_ON(!pdata);
	tsens_get_max_sensor_num(&max_tsens_num);
	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));

	if (create_sensor_id_map())
		return -EINVAL;
	if (check_sensor_id(msm_thermal_info.sensor_id))
		return -EINVAL;

	enabled = 1;

	INIT_DELAYED_WORK(&check_temp_work, check_temp);
	schedule_delayed_work(&check_temp_work, 0);

	if (num_possible_cpus() > 1)
		register_cpu_notifier(&msm_thermal_cpu_notifier);

	return ret;
}
Example 26
int cfs_tracefile_init_arch(void)
{
	int    i;
	int    j;
	struct cfs_trace_cpu_data *tcd;

	/* initialize trace_data */
	memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
	for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
		cfs_trace_data[i] =
			kmalloc(sizeof(union cfs_trace_data_union) *
				num_possible_cpus(), GFP_KERNEL);
		if (!cfs_trace_data[i])
			goto out;
	}

	/* arch related info initialized */
	cfs_tcd_for_each(tcd, i, j) {
		spin_lock_init(&tcd->tcd_lock);
		tcd->tcd_pages_factor = pages_factor[i];
		tcd->tcd_type = i;
		tcd->tcd_cpu = j;
	}

	return 0;

out:
	/* allocation failed above: tear down any partially allocated trace
	 * data (as the full libcfs source does) and report out-of-memory */
	cfs_tracefile_fini_arch();
	return -ENOMEM;
}
Example 27
static int stat_open(struct inode *inode, struct file *file)
{
	unsigned size = 4096 * (1 + num_possible_cpus() / 32);
	char *buf;
	struct seq_file *m;
	int res;

	/* don't ask for more than the kmalloc() max size, currently 128 KB */
	if (size > 128 * 1024)
		size = 128 * 1024;
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	res = single_open(file, show_stat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}
Example 28
static int edp_debugfs_show(struct seq_file *s, void *data)
{
	unsigned int max_nr_cpus = num_possible_cpus();
	int th_idx;

	if (max_nr_cpus != 2 && max_nr_cpus != 4) {
		seq_printf(s, "Unsupported number of CPUs\n");
		return 0;
	}

	tegra_get_edp_limit(&th_idx);
	seq_printf(s, "-- VDD_CPU %sEDP table (%umA = %umA - %umA) --\n",
		   edp_limits == edp_default_limits ? "**default** " : "",
		   regulator_cur - edp_reg_override_mA,
		   regulator_cur, edp_reg_override_mA);
	if (max_nr_cpus == 2)
		edp_show_2core_edp_table(s, th_idx);
	else if (max_nr_cpus == 4)
		edp_show_4core_edp_table(s, th_idx);

	seq_printf(s, "-- VDD_CPU %sPower EDP table --\n",
		   power_edp_limits == power_edp_default_limits ?
		   "**default** " : "");
	if (max_nr_cpus == 2)
		edp_show_2core_power_table(s);
	else if (max_nr_cpus == 4)
		edp_show_4core_power_table(s);

	if (system_edp_limits) {
		seq_printf(s, "\n-- System EDP table --\n");
		if (max_nr_cpus == 2)
			edp_show_2core_system_table(s);
		else if (max_nr_cpus == 4)
			edp_show_4core_system_table(s);
	}
	return 0;
}
Example 29
static void __cpuinit intelli_plug_late_resume(struct early_suspend *handler)
{
	int num_of_active_cores;
	int i;

	mutex_lock(&intelli_plug_mutex);
	/* keep cores awake long enough for faster wake up */
	persist_count = BUSY_PERSISTENCE;
	suspended = false;
	mutex_unlock(&intelli_plug_mutex);

	/* wake up everyone */
	if (eco_mode_active)
		num_of_active_cores = 2;
	else
		num_of_active_cores = num_possible_cpus();

	for (i = 1; i < num_of_active_cores; i++) {
		cpu_up(i);
	}

	queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_work,
		msecs_to_jiffies(10));
}
Example 30
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu, ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/* are we trying to boot more cores than exist? */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}