Example #1
PRIVATE void estimate_cpu_freq(void)
{
	u64_t tsc_delta;
	u64_t cpu_freq;

	irq_hook_t calib_cpu;

	/* set the probe; we use the legacy timer, IRQ 0 */
	put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	/* enable interrupts so the PIC timer can deliver the ticks we count */
	intr_enable();

	/* loop for some time to get a sample */
	while(probe_ticks < PROBE_TICKS) {
		intr_enable();
	}

	intr_disable();
	/* just in case we are in an SMP single cpu fallback mode */
	BKL_LOCK();

	/* remove the probe */
	rm_irq_handler(&calib_cpu);

	tsc_delta = sub64(tsc1, tsc0);

	cpu_freq = mul64(div64u64(tsc_delta, PROBE_TICKS - 1), make64(system_hz, 0));
	cpu_set_freq(cpuid, cpu_freq);
	cpu_info[cpuid].freq = div64u(cpu_freq, 1000000);
	BOOT_VERBOSE(cpu_print_freq(cpuid));
}
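The last few lines pack the frequency estimate into MINIX's 64-bit helper macros (sub64, div64u64, mul64, make64, div64u). A minimal sketch of the same arithmetic with native 64-bit integers; the PROBE_TICKS and clock-rate values below are assumptions for illustration, they are not given in this listing:

#include <stdint.h>
#include <stdio.h>

#define PROBE_TICKS	5		/* ticks sampled by the probe (assumed value) */
#define SYSTEM_HZ	60		/* clock tick rate (assumed value)            */

/*
 * The TSC advanced by tsc_delta over (PROBE_TICKS - 1) tick intervals, so
 * cycles per tick times ticks per second gives cycles per second.
 */
static uint64_t cpu_freq_from_probe(uint64_t tsc_delta)
{
	return (tsc_delta / (PROBE_TICKS - 1)) * SYSTEM_HZ;
}

int main(void)
{
	/* a ~2 GHz CPU advances its TSC about 33,333,333 cycles per 60 Hz tick */
	uint64_t tsc_delta = 4ULL * 33333333ULL;	/* four intervals sampled */

	printf("estimated CPU frequency: %llu MHz\n",
	    (unsigned long long)(cpu_freq_from_probe(tsc_delta) / 1000000));
	return 0;
}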
Example #2
/*
 * Tell another cpu about a task to do and return only after that cpu acks
 * that the task is finished. If the target cpu is still working on a task
 * sent to it by some other cpu, wait for that one to finish first.
 */
static void smp_schedule_sync(struct proc * p, unsigned task)
{
	unsigned cpu = p->p_cpu;
	unsigned mycpu = cpuid;

	assert(cpu != mycpu);
	/*
	 * if some other cpu already made a request to the target cpu, wait
	 * until that request is done before proceeding
	 */
	if (sched_ipi_data[cpu].flags != 0) {
		BKL_UNLOCK();
		while (sched_ipi_data[cpu].flags != 0) {
			if (sched_ipi_data[mycpu].flags) {
				BKL_LOCK();
				smp_sched_handler();
				BKL_UNLOCK();
			}
		}
		BKL_LOCK();
	}

	sched_ipi_data[cpu].data = (u32_t) p;
	sched_ipi_data[cpu].flags |= task;
	__insn_barrier();
	arch_send_smp_schedule_ipi(cpu);

	/* wait until the destination cpu finishes its job */
	BKL_UNLOCK();
	while (sched_ipi_data[cpu].flags != 0) {
		if (sched_ipi_data[mycpu].flags) {
			BKL_LOCK();
			smp_sched_handler();
			BKL_UNLOCK();
		}
	}
	BKL_LOCK();
}
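The sender above spins on sched_ipi_data[cpu].flags, so the handshake only completes once the target cpu clears that field after doing the requested work. Below is a minimal sketch of the receiving side; the struct layout, the CONFIG_MAX_CPUS value and the handler body are assumptions inferred from how this excerpt uses the fields, not the actual MINIX definitions.

#include <stdint.h>

#define CONFIG_MAX_CPUS	8			/* assumed upper bound */

struct sched_ipi_data_sketch {
	volatile uint32_t flags;		/* task bits requested of this cpu  */
	volatile uint32_t data;			/* argument, here a struct proc ptr */
};

static struct sched_ipi_data_sketch sched_ipi_data[CONFIG_MAX_CPUS];

/*
 * Runs on the target cpu (e.g. from the scheduling IPI or while spinning on
 * the BKL): do the requested work, then clear .flags so the sender's busy
 * wait in smp_schedule_sync() can return.
 */
static void smp_sched_handler_sketch(unsigned mycpu)
{
	uint32_t flags = sched_ipi_data[mycpu].flags;

	if (flags == 0)
		return;				/* nothing was requested of us */

	/* ... act on the request encoded in flags and data ... */

	__sync_synchronize();			/* publish the work before the ack */
	sched_ipi_data[mycpu].flags = 0;	/* ack: the sender stops spinning  */
}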
Example #3
PRIVATE int calib_cpu_handler(irq_hook_t * UNUSED(hook))
{
	u64_t tsc;

	probe_ticks++;
	read_tsc_64(&tsc);


	if (probe_ticks == 1) {
		tsc0 = tsc;
	}
	else if (probe_ticks == PROBE_TICKS) {
		tsc1 = tsc;
	}

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	return 1;
}
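This handler is the other half of example #1: installed on CLOCK_IRQ, it records the TSC at the first and at the last sampled tick. A sketch of the file-scope state the two functions share is below; the types and the PROBE_TICKS value are assumptions, not the MINIX originals.

#include <stdint.h>

#define PROBE_TICKS	5			/* clock ticks to sample (assumed)  */

static volatile unsigned probe_ticks;		/* bumped once per clock tick       */
static uint64_t tsc0;				/* TSC at the first sampled tick    */
static uint64_t tsc1;				/* TSC at the PROBE_TICKS-th tick   */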
Example #4
void wait_for_APs_to_finish_booting(void)
{
	unsigned n = 0;
	int i;

	/* check how many cpus are actually alive */
	for (i = 0 ; i < ncpus ; i++) {
		if (cpu_test_flag(i, CPU_IS_READY))
			n++;
	}
	if (n != ncpus)
		printf("WARNING only %d out of %d cpus booted\n", n, ncpus);

	/* we must let the other CPUs run in kernel mode first */
	BKL_UNLOCK();
	while (ap_cpus_booted != (n - 1))
		arch_pause();
	/* now we have to take the lock again as we continue execution */
	BKL_LOCK();
}
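For the wait loop above to terminate, each application processor must both set its CPU_IS_READY flag and bump ap_cpus_booted once it is done booting. A sketch of that AP-side step follows; everything in it (the flag array, the setter, the atomic increment) is an assumption about what the AP side would look like, since the listing only shows the BSP side.

#include <stdint.h>

#define CONFIG_MAX_CPUS	8			/* assumed upper bound */
#define CPU_IS_READY	1u			/* assumed flag bit    */

static volatile uint32_t cpu_flags[CONFIG_MAX_CPUS];	/* stand-in for the cpu flag words */
static volatile unsigned ap_cpus_booted;		/* APs that finished booting       */

/* Hypothetical AP-side step: make cpu_test_flag(cpu, CPU_IS_READY) true and
 * let the BSP's wait loop count one more booted AP. */
static void ap_announce_booted(unsigned cpu)
{
	cpu_flags[cpu] |= CPU_IS_READY;
	__sync_fetch_and_add(&ap_cpus_booted, 1);
}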
Example #5
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we are
	 * accounting for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += !(!(succ == 0));

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));

#ifdef CONFIG_SMP
		/*
		 * At the time we received the scheduling IPI we might already have
		 * been waiting for the BKL and thus missed it, while the sending
		 * cpu is in turn waiting for us to handle its request. That would
		 * live-lock the two cpus.
		 *
		 * Therefore we always check if there is one pending and if so,
		 * we handle it straight away so the other cpu can continue and
		 * we do not deadlock.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
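The piecewise ex64hi/ex64lo comparison in the #else branch exists because u64_t was a two-word struct in MINIX at the time. With a native 64-bit type the same quantum accounting reduces to the following sketch (the function name is illustrative):

#include <stdint.h>

/* Deduct the cycles just consumed from the time left in the quantum,
 * clamping at zero when the quantum is fully used up. */
static uint64_t charge_quantum(uint64_t cpu_time_left, uint64_t tsc_delta)
{
	if (tsc_delta < cpu_time_left)
		return cpu_time_left - tsc_delta;
	return 0;
}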
Example #6
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we are
	 * accounting for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += !(!(succ == 0));

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
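Unlike example #5, this version does not drain a pending scheduling IPI after taking the BKL. The per-cpu bkl_ticks / bkl_tries / bkl_succ counters it updates lend themselves to a simple contention report; a hypothetical sketch follows (the reporting function and the counter types are assumptions, not part of MINIX):

#include <stdint.h>
#include <stdio.h>

#define CONFIG_MAX_CPUS	8			/* assumed upper bound */

static uint64_t bkl_ticks[CONFIG_MAX_CPUS];	/* TSC cycles spent waiting for the BKL  */
static uint64_t bkl_tries[CONFIG_MAX_CPUS];	/* BKL acquisitions attempted            */
static uint64_t bkl_succ[CONFIG_MAX_CPUS];	/* acquisitions that found the lock free */

static void print_bkl_stats(unsigned ncpus)
{
	for (unsigned cpu = 0; cpu < ncpus; cpu++) {
		if (bkl_tries[cpu] == 0)
			continue;
		printf("cpu %u: %llu tries, %llu uncontended, avg wait %llu cycles\n",
		    cpu,
		    (unsigned long long)bkl_tries[cpu],
		    (unsigned long long)bkl_succ[cpu],
		    (unsigned long long)(bkl_ticks[cpu] / bkl_tries[cpu]));
	}
}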