/*
 * Record a new max latency when an irqs/preempt-off critical section
 * ends: if the elapsed time beats the current maximum, snapshot the
 * trace for this CPU.
 */
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	atomic_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	atomic_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
/*
 * Mark the start of an irqs/preempt-off critical section on this CPU
 * and record the timestamp the latency will be measured from.
 */
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
/*
 * sched_wakeup probe: start timing the wakeup latency of a newly woken
 * task that outranks both the current trace target and the current task.
 */
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
/*
 * platform_cpu_die: shutdown a CPU
 * @cpu: the CPU being shut down
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	struct wd_api *wd_api = NULL;

	HOTPLUG_INFO("platform_cpu_die, cpu: %d\n", cpu);

	get_wd_api(&wd_api);
	if (wd_api)
		wd_api->wd_cpu_hot_plug_off_notify(cpu);

#ifdef CONFIG_MTK_SCHED_TRACERS
	trace_cpu_hotplug(cpu, 0, per_cpu(last_event_ts, cpu));
	per_cpu(last_event_ts, cpu) = ns2usecs(ftrace_now(cpu));
#endif

	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower(cpu);
	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(cpu);

	if (spurious)
		HOTPLUG_INFO("platform_do_lowpower, spurious wakeup call, cpu: %d, spurious: %d\n",
			     cpu, spurious);
}
static void function_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}
/*
 * sched_wakeup probe: start timing the wakeup latency of a newly woken
 * RT task that outranks both the current trace target and the current task.
 */
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);
	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
/*
 * sched_switch probe: the task we are waiting for is being scheduled in;
 * compute its wakeup latency and record a new max trace if it qualifies.
 */
static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
			  struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}