static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}
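
/*
 * Not part of the original file: a minimal sketch of how a probe like
 * probe_sched_wakeup() gets attached, assuming the register/unregister
 * helpers that DECLARE_TRACE() generates for the sched_wakeup
 * tracepoint (probe plus private-data pointer, matching the
 * "void *ignore" signature above). The function name here is
 * illustrative, not taken from this file.
 */
static int sketch_sched_wakeup_register(void)
{
	int ret;

	/* Attach the probe; it then fires on every sched_wakeup event. */
	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
	if (ret)
		pr_info("wakeup trace: could not register sched_wakeup probe\n");

	return ret;
}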
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
	    p->prio >= wakeup_prio ||
	    p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
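
/*
 * Not part of the original file: probe_wakeup() above pins the traced
 * task with get_task_struct(), so the reset path it calls under
 * wakeup_lock must drop that reference. A hedged sketch of what a
 * reset such as __wakeup_reset() is expected to do, assuming the
 * wakeup_task, wakeup_cpu and wakeup_prio globals used above:
 */
static void sketch_wakeup_reset(struct trace_array *tr)
{
	/* Mark that no wakeup is currently being traced. */
	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);	/* pairs with get_task_struct() */

	wakeup_task = NULL;
}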