static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(delta))
                goto out;

        atomic_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tracing_max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        atomic_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        preempt_enable_notrace();
}
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return;

        local_save_flags(flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(flags))
                return;

        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
        struct trace_array_cpu *data;
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;
        int pc;

        if (likely(!tracer_enabled))
                return;

        tracing_record_cmdline(p);
        tracing_record_cmdline(current);

        if ((wakeup_rt && !rt_task(p)) ||
                        p->prio >= wakeup_prio ||
                        p->prio >= current->prio)
                return;

        pc = preempt_count();
        disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
        if (unlikely(disabled != 1))
                goto out;

        /* interrupts should be off from try_to_wake_up */
        __raw_spin_lock(&wakeup_lock);

        /* check for races. */
        if (!tracer_enabled || p->prio >= wakeup_prio)
                goto out_locked;

        /* reset the trace */
        __wakeup_reset(wakeup_trace);

        wakeup_cpu = task_cpu(p);
        wakeup_current_cpu = wakeup_cpu;
        wakeup_prio = p->prio;

        wakeup_task = p;
        get_task_struct(wakeup_task);

        local_save_flags(flags);

        data = wakeup_trace->data[wakeup_cpu];
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

        /*
         * We must be careful in using CALLER_ADDR2. But since wake_up
         * is not called by an assembly function (whereas schedule is)
         * it should be safe to use it here.
         */
        trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
        __raw_spin_unlock(&wakeup_lock);
out:
        atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

out:
        preempt_enable_notrace();
}
void
handle_pre_generic (void *event_data, void *data)
{
        if (errorcount || sorrycount)
                return;

        tree fndecl = (tree) event_data;

        printf("BranchCover: processing:%s\n", input_filename);
        trace_function(fndecl);
}
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;
        int pc;

        if (likely(!tracer_enabled))
                return;

        tracing_record_cmdline(p);
        tracing_record_cmdline(current);

        if (likely(!rt_task(p)) ||
                        p->prio >= wakeup_prio ||
                        p->prio >= current->prio)
                return;

        pc = preempt_count();
        disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
        if (unlikely(disabled != 1))
                goto out;

        /* interrupts should be off from try_to_wake_up */
        __raw_spin_lock(&wakeup_lock);

        /* check for races. */
        if (!tracer_enabled || p->prio >= wakeup_prio)
                goto out_locked;

        /* reset the trace */
        __wakeup_reset(wakeup_trace);

        wakeup_cpu = task_cpu(p);
        wakeup_prio = p->prio;

        wakeup_task = p;
        get_task_struct(wakeup_task);

        local_save_flags(flags);

        wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
        trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
                       CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
        __raw_spin_unlock(&wakeup_lock);
out:
        atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int resched;
        int cpu;
        int pc;

        if (likely(!wakeup_task))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();

        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (unlikely(disabled != 1))
                goto out;

        local_irq_save(flags);
        __raw_spin_lock(&wakeup_lock);

        if (unlikely(!wakeup_task))
                goto unlock;

        /*
         * The task can't disappear because it needs to
         * wake up first, and we have the wakeup_lock.
         */
        if (task_cpu(wakeup_task) != cpu)
                goto unlock;

        trace_function(tr, ip, parent_ip, flags, pc);

unlock:
        __raw_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);

out:
        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int resched;
        int cpu;
        int pc;

        if (likely(!wakeup_task))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();

        cpu = raw_smp_processor_id();
        if (cpu != wakeup_current_cpu)
                goto out_enable;

        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (unlikely(disabled != 1))
                goto out;

        local_irq_save(flags);
        trace_function(tr, ip, parent_ip, flags, pc);
        local_irq_restore(flags);

out:
        atomic_dec(&data->disabled);
out_enable:
        ftrace_preempt_enable(resched);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                /*
                 * So far tracing doesn't support multiple buffers, so
                 * we make an explicit call for now.
                 */
                if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
                        pstore_ftrace_call(ip, parent_ip);
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
        struct task_struct *next)
{
        struct trace_array_cpu *data;
        cycle_t T0, T1, delta;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        tracing_record_cmdline(prev);

        if (unlikely(!tracer_enabled))
                return;

        /*
         * When we start a new trace, we set wakeup_task to NULL
         * and then set tracer_enabled = 1. We want to make sure
         * that another CPU does not see the tracer_enabled = 1
         * and the wakeup_task with an older task, that might
         * actually be the same as next.
         */
        smp_rmb();

        if (next != wakeup_task)
                return;

        pc = preempt_count();

        /* disable local data, not wakeup_cpu data */
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
        if (likely(disabled != 1))
                goto out;

        local_irq_save(flags);
        __raw_spin_lock(&wakeup_lock);

        /* We could race with grabbing wakeup_lock */
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;

        /* The task we are waiting for is waking up */
        data = wakeup_trace->data[wakeup_cpu];

        trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
        tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        if (!report_latency(delta))
                goto out_unlock;

        if (likely(!is_tracing_stopped())) {
                tracing_max_latency = delta;
                update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
        }

out_unlock:
        __wakeup_reset(wakeup_trace);
        __raw_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
out:
        atomic_dec(&wakeup_trace->data[cpu]->disabled);
}