Example #1
/*
 * Can we actually use the console at this time on this cpu?
 *
 * Console drivers may assume that per-cpu resources have
 * been allocated. So unless they're explicitly marked as
 * being able to cope (CON_ANYTIME) don't call them until
 * this CPU is officially up.
 */
static inline int can_use_console(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (!cpu_active(cpu) && cpu_hotplug_inprogress())
		return 0;
#endif
	return cpu_online(cpu) || have_callable_console();
}
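For context, a caller would typically consult this helper right before flushing buffered messages to the registered consoles. The sketch below is illustrative only: flush_console_if_possible() is a hypothetical wrapper, not the printk code that actually calls can_use_console().

static void flush_console_if_possible(void)
{
	/*
	 * smp_processor_id() is assumed to be stable here, i.e. the
	 * (hypothetical) caller runs with preemption disabled.
	 */
	if (!can_use_console(smp_processor_id()))
		return;		/* defer until this CPU is officially up */

	/* ... write pending log records to the registered consoles ... */
}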
Example #2
static void idle_exit(int cpu)
{
	struct sleep_data *sleep_info = &core_sleep_info;
	struct hrtimer *timer = &per_cpu(core_sleep_timer, cpu);

	/*
	 * Re-arm the pinned core-sleep timer only while this CPU is both
	 * online and active, and only if a valid, non-expired timeout is
	 * configured.
	 */
	if (sleep_info && cpu_online(cpu) && cpu_active(cpu)) {
		if (atomic_read(&sleep_info->timer_val_ms) != INT_MAX &&
			atomic_read(&sleep_info->timer_val_ms) &&
			!atomic_read(&sleep_info->timer_expired))
			hrtimer_start(timer,
				ktime_set(0,
				atomic_read(&sleep_info->timer_val_ms) * NSEC_PER_MSEC),
				HRTIMER_MODE_REL_PINNED);
	}
}
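A driver like this usually has a matching idle-entry path that stops the pinned timer again. The function below is a hypothetical counterpart, reconstructed only from the names used in the example above; it is a sketch, not the driver's actual code.

static void idle_enter(int cpu)
{
	struct hrtimer *timer = &per_cpu(core_sleep_timer, cpu);

	/*
	 * Stop the per-cpu core-sleep timer so it cannot fire while the
	 * core is idle; hrtimer_cancel() also waits for a callback that
	 * is already running to finish.
	 */
	hrtimer_cancel(timer);
}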
Example #3
static int twd_cpufreq_transition(struct notifier_block *nb,
	unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/*
	 * The twd clock events must be reprogrammed to account for the new
	 * frequency.  The timer is local to a cpu, so cross-call to the
	 * changing cpu.
	 *
	 * Only wait for it to finish if the CPU is active, to avoid a
	 * deadlock when cpu1 is spinning on while (!cpu_active(cpu1))
	 * during the bring-up of that CPU.
	 */
	if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
		smp_call_function_single(freqs->cpu, twd_update_frequency,
					 NULL, cpu_active(freqs->cpu));

	return NOTIFY_OK;
}
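To take effect, a transition callback like this has to be hooked into cpufreq. The registration sketch below uses the standard cpufreq notifier API; the init-function name and the core_initcall() placement are assumptions, not taken from the example.

static struct notifier_block twd_cpufreq_nb = {
	.notifier_call = twd_cpufreq_transition,
};

static int __init twd_cpufreq_init_sketch(void)
{
	/* Be told about every completed CPU frequency transition */
	return cpufreq_register_notifier(&twd_cpufreq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(twd_cpufreq_init_sketch);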
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU announces that it started the rendezvous handler by
 * decrementing the count, we reset data.count and set the data.gate flag,
 * allowing all the CPUs to proceed with the work. As each CPU disables
 * interrupts, it'll decrement data.count once. We wait until it hits 0 and
 * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
 * are waiting for that flag to be cleared. Once it's cleared, each
 * CPU goes through the transition of updating MTRRs. The CPU vendors
 * may each do it differently, so we call the mtrr_if->set() callback
 * and let them take care of it. When they're done, they again decrement
 * data.count and wait for data.gate to be set. When we finish, we wait
 * for data.count to hit 0 and toggle the data.gate flag. Everyone then
 * enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;
	int cpu;

#ifdef CONFIG_SMP
	/*
	 * If this cpu is not yet active, we are in the cpu online path. There
	 * can be no stop_machine() in parallel, as stop_machine() ensures this
	 * by using get_online_cpus(). We can skip taking the stop_cpus_mutex,
	 * as we don't need it and also we can't afford to block while waiting
	 * for the mutex.
	 *
	 * If this cpu is active, we need to prevent stop_machine() happening
	 * in parallel by taking the stop_cpus_mutex.
	 *
	 * Also, this is called either in the cpu online path or in a context
	 * where cpu hotplug is prevented. So checking the active status of
	 * raw_smp_processor_id() is safe.
	 */
	if (cpu_active(raw_smp_processor_id()))
		mutex_lock(&stop_cpus_mutex);
#endif

	preempt_disable();

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	for_each_online_cpu(cpu) {
		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);

		if (cpu == smp_processor_id())
			continue;

		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
	}


	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Do our MTRR business */

	/*
	 * HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot CPU's MTRRs has been saved, and we want
	 * to replicate it across all the APs.
	 * If we're doing that @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else if (!mtrr_aps_delayed_init)
		mtrr_if->set_all();

	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/*
	 * Wait here for everyone to have seen the gate change, so that
	 * we're the last ones to touch 'data'.
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
	preempt_enable();
#ifdef CONFIG_SMP
	if (cpu_active(raw_smp_processor_id()))
		mutex_unlock(&stop_cpus_mutex);
#endif
}
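The per-CPU handler that set_mtrr() kicks off via stop_one_cpu_nowait() is not shown in this example. The sketch below reconstructs its side of the count/gate handshake purely from the protocol described in the comment above; it is not copied from the kernel and the function name is made up.

static int mtrr_rendezvous_sketch(void *info)
{
	struct set_mtrr_data *data = info;
	unsigned long flags;

	atomic_dec(&data->count);		/* announce: handler started */
	while (!atomic_read(&data->gate))
		cpu_relax();

	local_irq_save(flags);
	atomic_dec(&data->count);		/* announce: interrupts are off */
	while (atomic_read(&data->gate))
		cpu_relax();

	/* Update this CPU's MTRRs (boot-time set_all() path omitted here) */
	if (data->smp_reg != ~0U)
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);

	atomic_dec(&data->count);		/* announce: MTRRs written */
	while (!atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);		/* final ack before 'data' is released */
	local_irq_restore(flags);
	return 0;
}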
static int start_dvfs(void)
{
	u32 reg, cpu_rate;
	unsigned long flags;

	if (dvfs_core_is_active)
		return 0;

	spin_lock_irqsave(&mxc_dvfs_core_lock, flags);

	clk_enable(dvfs_clk);

	/* get current working point */
	cpu_rate = clk_get_rate(cpu_clk);
	curr_op = cpu_op_nr - 1;
	do {
		if (cpu_rate <= cpu_op_tbl[curr_op].cpu_rate)
			break;
	} while (--curr_op >= 0);
	old_op = curr_op;

	dvfs_load_config(curr_op);

	if (curr_op == 0)
		maxf = 1;
	else
		maxf = 0;
	if (curr_op == (cpu_op_nr - 1))
		minf = 1;
	else
		minf = 0;

	/* config reg GPC_CNTR */
	reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);

	reg &= ~MXC_GPCCNTR_GPCIRQM;
	/* GPCIRQ=1, select ARM IRQ */
	reg |= MXC_GPCCNTR_GPCIRQ_ARM;
	/* ADU=1, select ARM domain */
	if (!cpu_is_mx6())
		reg |= MXC_GPCCNTR_ADU;
	__raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

	/* Set PREDIV bits */
	reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
	reg = (reg & ~(dvfs_data->prediv_mask));
	reg |= (dvfs_data->prediv_val) << (dvfs_data->prediv_offset);
	__raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);

	/* Enable DVFS interrupt */
	reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
	/* FSVAIM=0 */
	reg = (reg & ~MXC_DVFSCNTR_FSVAIM);

	/* Set MAXF, MINF */
	reg = (reg & ~(MXC_DVFSCNTR_MAXF_MASK
				| MXC_DVFSCNTR_MINF_MASK));
	reg |= 1 << MXC_DVFSCNTR_MAXF_OFFSET;

	/* Select ARM domain */
	reg |= MXC_DVFSCNTR_DVFIS;
	/* Enable DVFS frequency adjustment interrupt */
	reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
	/* Set load tracking buffer register source */
	reg = (reg & ~MXC_DVFSCNTR_LTBRSR_MASK);
	reg |= DVFS_LTBRSR;
	/* Set DIV3CK */
	reg = (reg & ~(dvfs_data->div3ck_mask));
	reg |= (dvfs_data->div3ck_val) << (dvfs_data->div3ck_offset);
	__raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);

	/* Enable DVFS */
	if (cpu_is_mx6()) {
		unsigned long cpu_wfi = 0;
		int num_cpus = num_possible_cpus() - 1;	/* highest possible CPU id */
		reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_EMAC);
		/* Need to enable DVFS tracking for each core that is active */
		do {
			if (cpu_active(num_cpus))
				set_bit(num_cpus, &cpu_wfi);
		} while (num_cpus--);
		reg |= cpu_wfi << 9;
		__raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_EMAC);
	} else {
		reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
		reg |= MXC_DVFSCNTR_DVFEN;
		__raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
	}

	dvfs_core_is_active = 1;

	spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);

	printk(KERN_DEBUG "DVFS is started\n");
	return 0;
}
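The open-coded countdown loop above can also be written with the standard cpumask iterator, which makes the intent (one bit per active CPU) more obvious. This is a sketch of an alternative, not the driver's code:

	unsigned long cpu_wfi = 0;
	int cpu;

	/* Mark every CPU that is currently active in the wfi mask */
	for_each_possible_cpu(cpu)
		if (cpu_active(cpu))
			set_bit(cpu, &cpu_wfi);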
Example #6
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu % threads_per_core != 0)
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure callin-map entry is 0 (can be leftover from a
	 * CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu) || !cpu_active(cpu))
		cpu_relax();

	return 0;
}
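The two handshakes __cpu_up() waits on (cpu_callin_map and the online/active bits) are completed on the new CPU itself. The sketch below shows roughly what that side looks like; it is heavily simplified, the function name is made up, and it assumes that in this kernel generation the active bit is set by a CPU_STARTING notifier during notify_cpu_starting().

void start_secondary_sketch(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_callin_map[cpu] = 1;	/* __cpu_up() polls this */
	smp_mb();			/* make the callin visible to __cpu_up() */

	/*
	 * CPU_STARTING notifiers run here; the scheduler's notifier is
	 * assumed to mark this CPU active.
	 */
	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);	/* __cpu_up() also waits for this */

	local_irq_enable();
	cpu_idle();			/* enter the idle loop, never returns */
}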