/*
 * Collect the trace on the current cpu and write it into the ftrace buffer.
 *
 * pre: bts_tracer_lock must be locked
 */
static void trace_bts_cpu(void *arg)
{
	struct trace_array *tr = (struct trace_array *) arg;
	const struct bts_trace *trace;
	unsigned char *at;

	if (unlikely(!tr))
		return;

	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
		return;

	if (unlikely(!this_tracer))
		return;

	ds_suspend_bts(this_tracer);
	trace = ds_read_bts(this_tracer);
	if (!trace)
		goto out;

	for (at = trace->ds.top; (void *)at < trace->ds.end;
	     at += trace->ds.size)
		trace_bts_at(trace, at);

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     at += trace->ds.size)
		trace_bts_at(trace, at);

out:
	ds_resume_bts(this_tracer);
}
/*
 * Collect the trace on the current cpu and write it into the ftrace buffer
 * of the trace_array passed in as argument.
 */
static void trace_bts_cpu(void *arg)
{
	struct trace_array *tr = (struct trace_array *) arg;
	const struct bts_trace *trace;
	unsigned char *at;

	if (!this_tracer)
		return;

	ds_suspend_bts(this_tracer);
	trace = ds_read_bts(this_tracer);
	if (!trace)
		goto out;

	for (at = trace->ds.top; (void *)at < trace->ds.end;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);

out:
	ds_resume_bts(this_tracer);
}
/*
 * Resume branch tracing on all online cpus and clear the suspended flag.
 */
static void bts_trace_start(struct trace_array *tr)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(hwb_tracer, cpu)))
			ds_resume_bts(per_cpu(hwb_tracer, cpu));
	trace_hw_branches_suspended = 0;
	put_online_cpus();
}
/*
 * Suspend branch tracing on all online cpus, collect the buffered trace on
 * each cpu into the iterator's trace_array, then resume tracing.
 */
static void trace_bts_prepare(struct trace_iterator *iter)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (likely(per_cpu(tracer, cpu)))
			ds_suspend_bts(per_cpu(tracer, cpu));
	on_each_cpu(trace_bts_cpu, iter->tr, 1);
	for_each_online_cpu(cpu)
		if (likely(per_cpu(tracer, cpu)))
			ds_resume_bts(per_cpu(tracer, cpu));
	put_online_cpus();
}
/*
 * Wrapper that gives ds_resume_bts() an int return type (always 0).
 */
static int ds_resume_bts_wrap(struct bts_tracer *tracer)
{
	ds_resume_bts(tracer);
	return 0;
}
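/*
 * A minimal sketch, not part of the excerpt above, of how callbacks like
 * bts_trace_start() and trace_bts_prepare() are typically wired into ftrace
 * via a struct tracer and register_tracer().  The .init, .reset, .stop, and
 * .close handlers named here are hypothetical placeholders for code not
 * shown in this excerpt.
 */
static struct tracer bts_tracer __read_mostly =
{
	.name	= "hw-branch-tracer",
	.init	= bts_trace_init,	/* placeholder: set up per-cpu BTS tracers */
	.reset	= bts_trace_reset,	/* placeholder: tear the tracers down */
	.start	= bts_trace_start,	/* resume tracing, see above */
	.stop	= bts_trace_stop,	/* placeholder: suspend tracing */
	.open	= trace_bts_prepare,	/* snapshot the BTS buffers, see above */
	.close	= trace_bts_close,	/* placeholder: drop the snapshot */
};

static __init int init_bts_trace(void)
{
	return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);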