/* * Collect the trace on the current cpu and write it into the ftrace buffer. * * pre: bts_tracer_lock must be locked */ static void trace_bts_cpu(void *arg) { struct trace_array *tr = (struct trace_array *) arg; const struct bts_trace *trace; unsigned char *at; if (unlikely(!tr)) return; if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled))) return; if (unlikely(!this_tracer)) return; ds_suspend_bts(this_tracer); trace = ds_read_bts(this_tracer); if (!trace) goto out; for (at = trace->ds.top; (void *)at < trace->ds.end; at += trace->ds.size) trace_bts_at(trace, at); for (at = trace->ds.begin; (void *)at < trace->ds.top; at += trace->ds.size) trace_bts_at(trace, at); out: ds_resume_bts(this_tracer); }
/*
 * CPU hotplug notifier: set up or tear down the per-cpu BTS tracer as
 * cpus come and go.
 *
 * Returns NOTIFY_DONE in all cases.
 */
static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		/* The notification is sent with interrupts enabled. */
		if (trace_hw_branches_enabled) {
			bts_trace_init_cpu(cpu);

			/*
			 * A freshly initialized tracer runs; suspend it
			 * again if tracing is globally suspended.
			 */
			if (trace_hw_branches_suspended &&
			    likely(per_cpu(hwb_tracer, cpu)))
				ds_suspend_bts(per_cpu(hwb_tracer, cpu));
		}
		break;

	case CPU_DOWN_PREPARE:
		/* The notification is sent with interrupts enabled. */
		if (likely(per_cpu(hwb_tracer, cpu))) {
			ds_release_bts(per_cpu(hwb_tracer, cpu));
			per_cpu(hwb_tracer, cpu) = NULL;
		}
		break;

	default:
		/* Other hotplug events are of no interest here. */
		break;
	}

	return NOTIFY_DONE;
}
/*
 * Collect the trace on the current cpu and write it into the ftrace
 * buffer identified by @arg.
 *
 * @arg: the struct trace_array to record into (forwarded to
 *       trace_bts_at() for each record).
 */
static void trace_bts_cpu(void *arg)
{
	struct trace_array *tr = (struct trace_array *)arg;
	const struct bts_trace *trace;
	unsigned char *at;

	/*
	 * Guard against a NULL trace array before forwarding it to
	 * trace_bts_at() — the sibling variant of this collector
	 * performs the same check.
	 */
	if (!tr)
		return;

	if (!this_tracer)
		return;

	/* Pause the hardware while we read out the buffer. */
	ds_suspend_bts(this_tracer);
	trace = ds_read_bts(this_tracer);
	if (!trace)
		goto out;

	/* Oldest records first: top..end, then begin..top. */
	for (at = trace->ds.top; (void *)at < trace->ds.end;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);

out:
	ds_resume_bts(this_tracer);
}
static void bts_trace_stop(struct trace_array *tr) { int cpu; get_online_cpus(); for_each_online_cpu(cpu) if (likely(per_cpu(hwb_tracer, cpu))) ds_suspend_bts(per_cpu(hwb_tracer, cpu)); trace_hw_branches_suspended = 1; put_online_cpus(); }
/*
 * Snapshot the per-cpu BTS buffers into the ftrace buffer before
 * iteration: pause tracing on every cpu, collect, then resume.
 */
static void trace_bts_prepare(struct trace_iterator *iter)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(hwb_tracer, cpu)))
			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
	/*
	 * We need to collect the trace on the respective cpu since ftrace
	 * implicitly adds the record for the current cpu.
	 * Once that is more flexible, we could collect the data from any cpu.
	 */
	on_each_cpu(trace_bts_cpu, iter->tr, 1);
	/*
	 * BUGFIX: the second pass must resume the tracers it suspended
	 * above; it previously called ds_suspend_bts() again, leaving
	 * every tracer doubly suspended and branch tracing dead after
	 * the first snapshot.
	 */
	for_each_online_cpu(cpu)
		if (likely(per_cpu(hwb_tracer, cpu)))
			ds_resume_bts(per_cpu(hwb_tracer, cpu));
	put_online_cpus();
}
static void trace_bts_prepare(struct trace_iterator *iter) { int cpu; get_online_cpus(); for_each_online_cpu(cpu) if (likely(per_cpu(tracer, cpu))) ds_suspend_bts(per_cpu(tracer, cpu)); on_each_cpu(trace_bts_cpu, iter->tr, 1); for_each_online_cpu(cpu) if (likely(per_cpu(tracer, cpu))) ds_resume_bts(per_cpu(tracer, cpu)); put_online_cpus(); }
/*
 * Adapt the void ds_suspend_bts() to an int-returning signature.
 * Always reports success (0).
 *
 * NOTE(review): presumably used where a callback returning int is
 * required (e.g. alongside a ds_resume_bts counterpart) — confirm at
 * the call sites, which are not visible in this chunk.
 */
static int ds_suspend_bts_wrap(struct bts_tracer *tracer)
{
	ds_suspend_bts(tracer);
	return 0;
}