/* This must be safe from any context. Writing here is safe
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * cpu_mode is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel/user (and xen)
 * enter events whenever cpu_mode changes
 */
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
		      int cpu_mode, unsigned long event)
{
	struct task_struct * task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_cpu_mode != cpu_mode) {
		cpu_buf->last_cpu_mode = cpu_mode;
		add_code(cpu_buf, cpu_mode);
	}

	/* notice a task switch */
	/* if not processing other domain samples */
	if ((cpu_buf->last_task != task) &&
	    (current_domain == COORDINATOR_DOMAIN)) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	if (pc == IBS_FETCH_CODE || pc == IBS_OP_CODE)
		add_code(cpu_buf, cpu_mode);

	add_sample(cpu_buf, pc, event);
	return 1;
}
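/*
 * For reference, a minimal sketch of the ring-buffer helpers the
 * functions here rely on, modeled on oprofile's classic cpu_buffer
 * implementation; the field names (head_pos, tail_pos, buffer,
 * buffer_size, eip, event) are assumptions, not taken from this file.
 * The writer only ever advances head_pos and the reader only advances
 * tail_pos, which is what makes the "safe from any context" claim
 * above hold without locking.
 */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	if (tail > head)
		return (tail - head) - 1;

	/* one slot always stays empty to distinguish full from empty */
	return tail + (b->buffer_size - head) - 1;
}

static void increment_head(struct oprofile_cpu_buffer *b)
{
	unsigned long new_head = b->head_pos + 1;

	/* make the slot contents visible before publishing the new head */
	wmb();

	b->head_pos = (new_head < b->buffer_size) ? new_head : 0;
}

static inline void add_sample(struct oprofile_cpu_buffer *cpu_buf,
			      unsigned long pc, unsigned long event)
{
	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];

	entry->eip = pc;
	entry->event = event;
	increment_head(cpu_buf);
}

static inline void add_code(struct oprofile_cpu_buffer *buffer,
			    unsigned long value)
{
	/* an ESCAPE_CODE eip marks the event word as a control code */
	add_sample(buffer, ESCAPE_CODE, value);
}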
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];

#ifdef CONFIG_CA_CSS
	if (pc == 0)
		return;
#endif

	if (!cpu_buf->tracing)
		return;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/* broken frame can give an eip with the same value as an escape code,
	 * abort the trace if we get it */
	if (pc == ESCAPE_CODE) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
		return;
	}

#ifdef CONFIG_CA_CSS
	/* Use -1 for ca_css record */
	add_sample(cpu_buf, pc, -1);
#else
	add_sample(cpu_buf, pc, 0);
#endif
}
/* This must be safe from any context. Writing here is safe
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * cpu_mode is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel/user (and xen)
 * enter events whenever cpu_mode changes
 */
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
		      int cpu_mode, unsigned long event)
{
	struct task_struct * task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	WARN_ON(cpu_mode > CPU_MODE_XEN);

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_cpu_mode != cpu_mode) {
		cpu_buf->last_cpu_mode = cpu_mode;
		add_code(cpu_buf, cpu_mode);
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	add_sample(cpu_buf, pc, event);
	return 1;
}
/* This must be safe from any context. Writing here is safe
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct * task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	add_sample(cpu_buf, pc, event);
	return 1;
}
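/*
 * For context, a sketch of how the buffer reader can decode the
 * stream written above; it is modeled on oprofile's buffer-sync
 * logic, and the CPU_IS_KERNEL constant and the assumption that
 * control codes are numerically small while task pointers are large
 * come from that model, not from this file.  An ESCAPE_CODE eip
 * marks the following event word as a control code rather than a
 * sample.
 */
static void decode_entry(struct op_sample *s, int *in_kernel,
			 struct task_struct **task, int *in_trace)
{
	if (s->eip != ESCAPE_CODE)
		return;		/* ordinary (pc, event) sample */

	if (s->event == CPU_IS_USER || s->event == CPU_IS_KERNEL)
		*in_kernel = s->event;			/* mode switch */
	else if (s->event == CPU_TRACE_BEGIN)
		*in_trace = 1;				/* backtrace follows */
	else
		*task = (struct task_struct *)s->event;	/* task switch */
}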
int oprofile_add_domain_switch(int32_t domain_id)
{
	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];

	/* should have space for switching into and out of domain
	   (2 slots each) plus one sample and one cpu mode switch */
	if (((nr_available_slots(cpu_buf) < 6) &&
	     (domain_id != COORDINATOR_DOMAIN)) ||
	    (nr_available_slots(cpu_buf) < 2))
		return 0;

	add_code(cpu_buf, CPU_DOMAIN_SWITCH);
	add_sample(cpu_buf, domain_id, 0);

	current_domain = domain_id;

	return 1;
}
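/*
 * Reading of the slot check above: switching away from the
 * coordinator domain reserves the 2-slot switch record written here
 * plus, in the worst case, the 2-slot switch back, one sample and one
 * cpu-mode code in between (2 + 2 + 1 + 1 = 6).  Switching back to
 * COORDINATOR_DOMAIN only needs the 2-slot record itself, hence the
 * separate < 2 test.
 */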
static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	if (nr_available_slots(cpu_buf) < 4) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}
/*
 * This serves to add an escape code to indicate switching into
 * user space during tracing across the system call boundary
 */
int oprofile_syscall_trace_boundary(void)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf || !cpu_buf->tracing)
		return 0;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	/* Set buffer state to user to prevent traces from being
	 * filtered out */
	cpu_buf->last_is_kernel = 0;
	add_code(cpu_buf, CPU_IS_USER);
	return 1;
}
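/*
 * Hypothetical call site (not part of this file): an architecture's
 * syscall-entry trace hook could mark the kernel->user transition
 * before continuing the backtrace into the user stack, so the sync
 * code attributes the entries that follow to user space.  The hook
 * name and shape here are assumptions; oprofile_ops.backtrace and
 * backtrace_depth are used as in oprofile_add_ibs_sample() below.
 */
static void trace_across_syscall(struct pt_regs *regs)
{
	if (!oprofile_syscall_trace_boundary())
		return;		/* not tracing, or no room left */

	oprofile_ops.backtrace(regs, backtrace_depth);
}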
void oprofile_add_ibs_sample(struct pt_regs *const regs,
			     unsigned int *const ibs_sample, int ibs_code)
{
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
		/* we can't backtrace since we lost the source of this event */
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;
		if (cpu_buf->last_task != task) {
			cpu_buf->last_task = task;
			add_code(cpu_buf, (unsigned long)task);
		}
	}

	add_code(cpu_buf, ibs_code);
	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
	}

	if (backtrace_depth)
		oprofile_ops.backtrace(regs, backtrace_depth);
}
static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
			  unsigned long pc, int is_kernel,
			  unsigned int *ibs, int ibs_code)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;
		if (cpu_buf->last_task != task) {
			cpu_buf->last_task = task;
			add_code(cpu_buf, (unsigned long)task);
		}
	}

	add_code(cpu_buf, ibs_code);
	add_sample(cpu_buf, ibs[0], ibs[1]);
	add_sample(cpu_buf, ibs[2], ibs[3]);
	add_sample(cpu_buf, ibs[4], ibs[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		add_sample(cpu_buf, ibs[6], ibs[7]);
		add_sample(cpu_buf, ibs[8], ibs[9]);
		add_sample(cpu_buf, ibs[10], ibs[11]);
	}

	return 1;
}
static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
			  unsigned long pc, int cpu_mode,
			  unsigned int *ibs, int ibs_code)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < 14) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_cpu_mode != cpu_mode) {
		cpu_buf->last_cpu_mode = cpu_mode;
		add_code(cpu_buf, cpu_mode);
	}

	/* notice a task switch */
	/* if not processing other domain samples */
	if ((cpu_buf->last_task != task) &&
	    (current_domain == COORDINATOR_DOMAIN)) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	add_code(cpu_buf, ibs_code);
	add_sample(cpu_buf, ibs[0], ibs[1]);
	add_sample(cpu_buf, ibs[2], ibs[3]);
	add_sample(cpu_buf, ibs[4], ibs[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		add_sample(cpu_buf, ibs[6], ibs[7]);
		add_sample(cpu_buf, ibs[8], ibs[9]);
		add_sample(cpu_buf, ibs[10], ibs[11]);
	}

	return 1;
}
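/*
 * Note on the bare 14 above: counting this function's own writes, the
 * worst case is one cpu-mode code, one task-switch code, the ibs_code
 * and six samples, i.e. 9 slots, so the reservation carries headroom.
 * The other variant spells the same bound MAX_IBS_SAMPLE_SIZE; that
 * the literal here matches that constant is an inference, not
 * something stated in this file.
 */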
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/* broken frame can give an eip with the same value as an escape code,
	 * abort the trace if we get it */
	if (pc == ESCAPE_CODE) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
		return;
	}

	add_sample(cpu_buf, pc, 0);
}