/*
 * profile_tick - per-tick profiling entry point.
 *
 * For CPU-profiling ticks, invoke the generic profile hook first.
 * Then, for ticks taken in kernel mode on a CPU selected by
 * prof_cpu_mask, record a profile hit at the interrupted PC.
 */
void profile_tick(int type, struct pt_regs *regs)
{
	if (type == CPU_PROFILING)
		profile_hook(regs);

	/* Only kernel-mode samples are fed into the histogram. */
	if (user_mode(regs))
		return;

	/* Skip CPUs excluded via the profiling CPU mask. */
	if (cpu_isset(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
/* * The profiling function is SMP safe. (nothing can mess * around with "current", and the profiling counters are * updated with atomic operations). This is especially * useful with a profiling multiplier != 1 */ static inline void ppc64_do_profile(struct pt_regs *regs) { unsigned long nip; extern unsigned long prof_cpu_mask; profile_hook(regs); if (user_mode(regs)) return; if (!prof_buffer) return; nip = instruction_pointer(regs); /* * Only measure the CPUs specified by /proc/irq/prof_cpu_mask. * (default is all CPUs.) */ if (!((1<<smp_processor_id()) & prof_cpu_mask)) return; nip -= (unsigned long)_stext; nip >>= prof_shift; /* * Don't ignore out-of-bounds EIP values silently, * put them into the last histogram slot, so if * present, they will show up as a sharp peak. */ if (nip > prof_len-1) nip = prof_len-1; atomic_inc((atomic_t *)&prof_buffer[nip]); }
/*
 * do_profile - currently a no-op.
 *
 * The profiling code below was temporarily disabled because of the
 * architectural changes to the profiling infrastructure in kernel 2.6.12.
 */
static inline void do_profile(struct pt_regs *regs)
{
#if 0
	unsigned long pc;
	pc = regs->pc;

	profile_hook(regs);

	if (prof_buffer && current->pid) {
		extern int _stext;
		pc -= (unsigned long)&_stext;
		pc >>= prof_shift;
		if (pc < prof_len)
			++prof_buffer[pc];
		else
			/*
			 * Don't ignore out-of-bounds PC values silently,
			 * put them into the last histogram slot, so if
			 * present, they will show up as a sharp peak.
			 */
			++prof_buffer[prof_len - 1];
	}
#endif
}