Example 1
0
/*
 * Per-CPU local APIC timer tick: record a profiling sample and, when the
 * profiling countdown expires, resynchronize the APIC timer divisor with
 * the (possibly updated) multiplier and run the process-time accounting.
 *
 * Runs with interrupts masked; all state touched here is per-CPU, so no
 * additional locking is needed.
 */
inline void smp_local_timer_interrupt(struct pt_regs *regs)
{
    int cpu = smp_processor_id();

    /*
     * The profiling function is SMP safe. (nothing can mess
     * around with "current", and the profiling counters are
     * updated with atomic operations). This is especially
     * useful with a profiling multiplier != 1
     *
     * Only kernel-mode hits are recorded here.
     */
    if (!user_mode(regs))
        x86_do_profile(regs->rip);

    /* Countdown not yet expired: nothing more to do this tick. */
    if (--prof_counter[cpu] > 0)
        return;

    /*
     * The multiplier may have changed since the last time we got
     * to this point as a result of the user writing to
     * /proc/profile. In this case we need to adjust the APIC
     * timer accordingly.
     *
     * Interrupts are already masked off at this point.
     */
    prof_counter[cpu] = prof_multiplier[cpu];
    if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
        __setup_APIC_LVTT(calibration_result / prof_counter[cpu]);
        prof_old_multiplier[cpu] = prof_counter[cpu];
    }

#ifdef CONFIG_SMP
    update_process_times(user_mode(regs));
#endif

    /*
     * We take the 'long' return path, and there every subsystem
     * grabs the appropriate locks (kernel lock/ irq lock).
     *
     * we might want to decouple profiling from the 'long path',
     * and do the profiling totally in assembly.
     *
     * Currently this isn't too much of an issue (performance wise),
     * we can take more than 100K local irqs per second on a 100 MHz P5.
     */
}
Example 2
0
/*
 * Per-CPU local APIC timer tick (per_cpu-accessor variant): take a
 * profiling sample for the interrupted context, and when the per-CPU
 * profiling countdown expires, reprogram the APIC timer if the
 * multiplier changed and run process-time accounting.
 *
 * NOTE(review): presumably invoked from the local APIC timer interrupt
 * path with interrupts masked (the comment below asserts this) — all
 * state touched is per-CPU, so no extra locking appears here.
 */
inline void smp_local_timer_interrupt(struct pt_regs * regs)
{
	int cpu = smp_processor_id();

	/* Record a profiling hit for the interrupted context (user or
	 * kernel — x86_do_profile() takes the full regs here). */
	x86_do_profile(regs);

	/* prof_counter ticks down once per local timer interrupt; a
	 * "profiled" tick is delivered each time it reaches zero. */
	if (--per_cpu(prof_counter, cpu) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile. In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
		if (per_cpu(prof_counter, cpu) !=
					per_cpu(prof_old_multiplier, cpu)) {
			/* Multiplier changed: rescale the APIC timer period
			 * so the profiled tick rate matches the new value. */
			__setup_APIC_LVTT(
					calibration_result/
					per_cpu(prof_counter, cpu));
			per_cpu(prof_old_multiplier, cpu) =
						per_cpu(prof_counter, cpu);
		}

#ifdef CONFIG_SMP
		/* On SMP the per-CPU accounting is driven from here; on UP
		 * it is handled elsewhere, hence the conditional. */
		update_process_times(user_mode(regs));
#endif
	}

	/*
	 * We take the 'long' return path, and there every subsystem
	 * grabs the appropriate locks (kernel lock/ irq lock).
	 *
	 * we might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
	 *
	 * Currently this isn't too much of an issue (performance wise),
	 * we can take more than 100K local irqs per second on a 100 MHz P5.
	 */
}