Example #1
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 * 
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so 
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE 
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 * 
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement data.count once.
 * We wait until it hits 0 and proceed. We set the data.gate flag and reset
 * data.count. Meanwhile, the other CPUs are waiting for that flag to be set.
 * Once it's set, each CPU goes through the transition of updating MTRRs.
 * The CPU vendors may each do it differently, so we call the mtrr_if->set()
 * callback and let them take care of it. When they're done, they again
 * decrement data.count and wait for data.gate to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);
	/* make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/*  Start the ball rolling on other CPUs  */
	if (smp_call_function(ipi_handler, &data, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/* do our MTRR business */

	/* HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate it across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);

	/* wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/*
	 * Wait here for everyone to have seen the gate change
	 * So we're the last ones to touch 'data'
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
}
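
The kernel-doc above spells out a three-phase count/gate rendezvous. Below is a minimal user-space sketch of that handshake, assuming C11 <stdatomic.h> and POSIX threads; it is an illustration of the protocol only, not the kernel's ipi_handler. The names rendezvous_data, worker() and NWORKERS are invented for this sketch, sequentially consistent atomics stand in for atomic_set() plus smp_wmb(), an empty busy-wait stands in for cpu_relax(), and a printf() stands in for the vendor-specific MTRR update.

/*
 * User-space illustration (not kernel code) of the count/gate rendezvous
 * described in the comment above: one initiator, NWORKERS workers.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 3

struct rendezvous_data {
	atomic_int count;		/* plays the role of data.count */
	atomic_int gate;		/* plays the role of data.gate  */
};

static struct rendezvous_data data;

static void *worker(void *arg)
{
	long id = (long)arg;

	/* "disable interrupts" step: check in, then wait for the gate to open */
	atomic_fetch_sub(&data.count, 1);
	while (atomic_load(&data.gate) == 0)
		;				/* stands in for cpu_relax() */

	printf("worker %ld: doing its private update\n", id);

	/* done: check in again and wait for the gate to close */
	atomic_fetch_sub(&data.count, 1);
	while (atomic_load(&data.gate) == 1)
		;

	/* final check-in so the initiator is the last one to look at 'data' */
	atomic_fetch_sub(&data.count, 1);
	return NULL;
}

int main(void)				/* the initiator, like set_mtrr() */
{
	pthread_t tid[NWORKERS];
	long i;

	atomic_store(&data.count, NWORKERS);
	atomic_store(&data.gate, 0);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);

	/* phase 1: wait until every worker has checked in */
	while (atomic_load(&data.count) != 0)
		;

	/* phase 2: reset count, open the gate, do our own update */
	atomic_store(&data.count, NWORKERS);
	atomic_store(&data.gate, 1);
	printf("initiator: doing its private update\n");
	while (atomic_load(&data.count) != 0)
		;

	/* phase 3: close the gate and wait to be the last one touching 'data' */
	atomic_store(&data.count, NWORKERS);
	atomic_store(&data.gate, 0);
	while (atomic_load(&data.count) != 0)
		;

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Built with cc -pthread, every worker performs its update only while the gate is open, and the initiator's final spin is the last access to 'data', mirroring the sequence above; the sequentially consistent atomics make the explicit write barrier unnecessary in this sketch.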
Example #2
static void __init synchronize_tsc_ap(void)
{
	int i;

	/*
	 * Not every cpu is online at the time
	 * this gets called, so we first wait for the BP to
	 * finish SMP initialization:
	 */
	while (!atomic_read(&tsc_start_flag))
		mb();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc_count_start);
		while (atomic_read(&tsc_count_start) != num_booting_cpus())
			mb();

		rdtscll(tsc_values[smp_processor_id()]);
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		atomic_inc(&tsc_count_stop);
		while (atomic_read(&tsc_count_stop) != num_booting_cpus())
			mb();
	}
}
Example #3
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. Each CPU announces that it has started the rendezvous handler by
 * decrementing the count; once it hits 0, we reset data.count and set the
 * data.gate flag, allowing all the CPUs to proceed with the work. As each
 * CPU disables interrupts, it'll decrement data.count once. We wait until it
 * hits 0 and proceed. We clear the data.gate flag and reset data.count.
 * Meanwhile, the other CPUs are waiting for that flag to be cleared. Once
 * it's cleared, each CPU goes through the transition of updating MTRRs.
 * The CPU vendors may each do it differently, so we call the mtrr_if->set()
 * callback and let them take care of it. When they're done, they again
 * decrement data.count and wait for data.gate to be set.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;
	int cpu;

#ifdef CONFIG_SMP
	/*
	 * If this cpu is not yet active, we are in the cpu online path. There
	 * can be no stop_machine() in parallel, as stop_machine() ensures this
	 * by using get_online_cpus(). We can skip taking the stop_cpus_mutex,
	 * as we don't need it and also we can't afford to block while waiting
	 * for the mutex.
	 *
	 * If this cpu is active, we need to prevent stop_machine() happening
	 * in parallel by taking the stop cpus mutex.
	 *
	 * Also, this is called in the context of cpu online path or in the
	 * context where cpu hotplug is prevented. So checking the active status
	 * of the raw_smp_processor_id() is safe.
	 */
	if (cpu_active(raw_smp_processor_id()))
		mutex_lock(&stop_cpus_mutex);
#endif

	preempt_disable();

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	for_each_online_cpu(cpu) {
		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);

		if (cpu == smp_processor_id())
			continue;

		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
	}


	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Do our MTRR business */

	/*
	 * HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate it across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else if (!mtrr_aps_delayed_init)
		mtrr_if->set_all();

	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/*
	 * Wait here for everyone to have seen the gate change
	 * So we're the last ones to touch 'data'
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
	preempt_enable();
#ifdef CONFIG_SMP
	if (cpu_active(raw_smp_processor_id()))
		mutex_unlock(&stop_cpus_mutex);
#endif
}
Example #4
File: main.c    Project: ANFS/ANFS-kernel
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. Each CPU announces that it has started the rendezvous handler by
 * decrementing the count; once it hits 0, we reset data.count and set the
 * data.gate flag, allowing all the CPUs to proceed with the work. As each
 * CPU disables interrupts, it'll decrement data.count once. We wait until it
 * hits 0 and proceed. We clear the data.gate flag and reset data.count.
 * Meanwhile, the other CPUs are waiting for that flag to be cleared. Once
 * it's cleared, each CPU goes through the transition of updating MTRRs.
 * The CPU vendors may each do it differently, so we call the mtrr_if->set()
 * callback and let them take care of it. When they're done, they again
 * decrement data.count and wait for data.gate to be set.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;
	int cpu;

	preempt_disable();

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	for_each_online_cpu(cpu) {
		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);

		if (cpu == smp_processor_id())
			continue;

		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
	}


	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Do our MTRR business */

	/*
	 * HACK!
	 *
	 * We use this same function to initialize the mtrrs during boot,
	 * resume, runtime cpu online and on an explicit request to set a
	 * specific MTRR.
	 *
	 * During boot or suspend, the state of the boot cpu's mtrrs has been
	 * saved, and we want to replicate that across all the cpus that come
	 * online (either at the end of boot or resume or during a runtime cpu
	 * online). If we're doing that, @reg is set to something special and on
	 * this cpu we still do mtrr_if->set_all(). During boot/resume, this
	 * is unnecessary if at this point we are still on the cpu that started
	 * the boot/resume sequence. But there is no guarantee that we are still
	 * on the same cpu. So we do mtrr_if->set_all() on this cpu as well to be
	 * sure that we are in sync with everyone else.
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else
		mtrr_if->set_all();

	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/*
	 * Wait here for everyone to have seen the gate change
	 * So we're the last ones to touch 'data'
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
	preempt_enable();
}
Example #5
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. Each CPU announces that it has started the rendezvous handler by
 * decrementing the count; once it hits 0, we reset data.count and set the
 * data.gate flag, allowing all the CPUs to proceed with the work. As each
 * CPU disables interrupts, it'll decrement data.count once. We wait until it
 * hits 0 and proceed. We clear the data.gate flag and reset data.count.
 * Meanwhile, the other CPUs are waiting for that flag to be cleared. Once
 * it's cleared, each CPU goes through the transition of updating MTRRs.
 * The CPU vendors may each do it differently, so we call the mtrr_if->set()
 * callback and let them take care of it. When they're done, they again
 * decrement data.count and wait for data.gate to be set.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;
	int cpu;

	preempt_disable();

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	for_each_online_cpu(cpu) {
		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);

		if (cpu == smp_processor_id())
			continue;

		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
	}


	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Do our MTRR business */

	/*
	 * HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate it across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else if (!mtrr_aps_delayed_init)
		mtrr_if->set_all();

	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/*
	 * Wait here for everyone to have seen the gate change
	 * So we're the last ones to touch 'data'
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
	preempt_enable();
}
Example #6
static void __init synchronize_tsc_bp(void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	unsigned long one_usec;
	int buggy = 0;

	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	/* convert from kcyc/sec to cyc/usec */
	one_usec = cpu_khz / 1000;

	atomic_set(&tsc_start_flag, 1);
	wmb();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/*
		 * Wait for all APs to check in.  The APs themselves loop on
		 * '== num_cpus' and are released by the atomic_inc() below.
		 */
		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_stop, 0);
		wmb();
		/*
		 * this lets the APs save their current TSC:
		 */
		atomic_inc(&tsc_count_start);

		rdtscll(tsc_values[smp_processor_id()]);
		/*
		 * We clear the TSC in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/*
		 * Wait for all APs to leave the synchronization point:
		 */
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_start, 0);
		wmb();
		atomic_inc(&tsc_count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc_values[i];
			sum += t0;
		}
	}
	avg = sum;
	do_div(avg, num_booting_cpus());

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;
		delta = tsc_values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/*
		 * We report bigger than 2 microseconds clock differences.
		 */
		if (delta > 2*one_usec) {
			long realdelta;
			if (!buggy) {
				buggy = 1;
				printk("\n");
			}
			realdelta = delta;
			do_div(realdelta, one_usec);
			if (tsc_values[i] < avg)
				realdelta = -realdelta;

			printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
		}

		sum += delta;
	}
	if (!buggy)
		printk("passed.\n");
}
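
Examples #2 and #6 are the two halves of the same construction: the APs and the BP meet at a two-counter start/stop barrier once per loop iteration. The following is a minimal user-space sketch of that barrier, assuming C11 atomics and POSIX threads; NCPUS, ap_thread(), now_ns() and the local NR_LOOPS macro are names invented for this sketch, clock_gettime(CLOCK_MONOTONIC) stands in for rdtscll(), plain busy-waits stand in for mb()/wmb(), and the TSC zeroing and skew fix-up are left out.

/*
 * User-space illustration (not kernel code) of the start/stop barrier
 * used by synchronize_tsc_bp()/synchronize_tsc_ap(): the main thread is
 * the "BP", the others are "APs", and a monotonic clock replaces the TSC.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NCPUS    4			/* one "BP" plus three "APs" */
#define NR_LOOPS 5

static atomic_int count_start, count_stop;
static long long samples[NCPUS];

static long long now_ns(void)		/* stands in for rdtscll() */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void *ap_thread(void *arg)
{
	long id = (long)arg;
	int i;

	for (i = 0; i < NR_LOOPS; i++) {
		/* check in, then wait for the BP to join ('== num_cpus') */
		atomic_fetch_add(&count_start, 1);
		while (atomic_load(&count_start) != NCPUS)
			;

		samples[id] = now_ns();	/* the rdtscll() moment */

		atomic_fetch_add(&count_stop, 1);
		while (atomic_load(&count_stop) != NCPUS)
			;
	}
	return NULL;
}

int main(void)				/* plays the BP */
{
	pthread_t tid[NCPUS - 1];
	long i;
	int loop;

	for (i = 0; i < NCPUS - 1; i++)
		pthread_create(&tid[i], NULL, ap_thread, (void *)(i + 1));

	for (loop = 0; loop < NR_LOOPS; loop++) {
		/* wait for all APs, reset the *other* counter, then join them */
		while (atomic_load(&count_start) != NCPUS - 1)
			;
		atomic_store(&count_stop, 0);
		atomic_fetch_add(&count_start, 1);

		samples[0] = now_ns();

		/* wait for all APs to leave, then release them for the next loop */
		while (atomic_load(&count_stop) != NCPUS - 1)
			;
		atomic_store(&count_start, 0);
		atomic_fetch_add(&count_stop, 1);
	}

	for (i = 0; i < NCPUS - 1; i++)
		pthread_join(tid[i], NULL);
	for (i = 0; i < NCPUS; i++)
		printf("cpu %ld sampled at %lld ns\n", i, samples[i]);
	return 0;
}

The detail the sketch preserves is that the counter for the next rendezvous is zeroed before the releasing increment of the current one, which is what lets the same two counters be reused safely on every iteration; the kernel code enforces that ordering with wmb(), while the sketch relies on sequentially consistent atomics.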