/*
 * Tracer init callback for the MT65xx monitor tracer: publish the
 * trace array, discard any stale per-CPU trace data, and start
 * recording.  Always reports success.
 */
static int mt65xx_mon_trace_init(struct trace_array *tr)
{
	mt65xx_mon_trace = tr;
	/* The reset API changed in 3.10 to take the embedded trace buffer. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	tracing_reset_online_cpus(tr);
#else
	tracing_reset_online_cpus(&tr->trace_buffer);
#endif
	tracing_start_mt65xx_mon_record();
	return 0;
}
/*
 * Pretty much the same than for the function tracer from which the selftest
 * has been borrowed.
 */
__init int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* A boot-time ftrace filter would skew this selftest: just pass. */
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		/* Hang detected: disable further selftests and fail this one. */
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
/*
 * Tracer init callback for the sched_switch tracer: publish the trace
 * array, discard stale per-CPU trace data, and enable context-switch
 * recording.  Always reports success.
 */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}
/*
 * Tracer init callback for the BTS tracer: clear stale per-CPU trace
 * data, then arm branch-trace collection.  Always reports success.
 */
static int bts_trace_init(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	bts_trace_start(tr);
	return 0;
}
/*
 * Start the stack tracer: reset the per-CPU trace data, arm the
 * sampling timers, and flip the enable flag.  The whole sequence is
 * serialized under sample_timer_lock against concurrent start/stop.
 */
static void start_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	tracing_reset_online_cpus(tr);
	start_stack_timers();
	tracer_enabled = 1;
	mutex_unlock(&sample_timer_lock);
}
/*
 * Tracer init callback for the MT65xx monitor tracer: publish the
 * trace array, discard stale per-CPU trace data, and start recording.
 * Always reports success.
 */
static int mt65xx_mon_trace_init(struct trace_array *tr)
{
	mt65xx_mon_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_mt65xx_mon_record();
	return 0;
}
/*
 * Clear any stale per-CPU trace data, then (re)arm BTS collection on
 * every CPU that can possibly come online by running the per-CPU start
 * routine on each one (waiting for completion).
 */
static void bts_trace_start(struct trace_array *tr)
{
	int cpu_id;

	tracing_reset_online_cpus(tr);

	for_each_possible_cpu(cpu_id)
		smp_call_function_single(cpu_id, bts_trace_start_cpu, NULL, 1);
}
/*
 * Tracer init callback for kmemtrace: publish the trace array, discard
 * stale per-CPU trace data, and attach the allocator probes.  Always
 * reports success.
 */
static int kmem_trace_init(struct trace_array *tr)
{
	kmemtrace_array = tr;
	tracing_reset_online_cpus(tr);
	kmemtrace_start_probes();
	return 0;
}
/*
 * Tracer init callback for the power tracer: publish the trace array,
 * flag tracing as enabled, register with the power-tracing hooks, then
 * clear stale per-CPU trace data.  Always reports success.
 */
static int power_trace_init(struct trace_array *tr)
{
	power_trace = tr;

	trace_power_enabled = 1;
	tracing_power_register();

	tracing_reset_online_cpus(tr);
	return 0;
}
/*
 * Selftest for the function graph tracer: register the graph return
 * callback together with a watchdog entry callback, let it trace for a
 * short while, then verify that entries actually reached the buffer.
 * Returns 0 on pass, -1 (or the registration error) on failure.
 */
int trace_selftest_startup_function_graph(struct tracer *trace,
					  struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the tracer's init() callback: clear stale per-CPU data
	 * and attach a watchdog entry handler so a hang can be detected
	 * and recovered from.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second to let entries accumulate. */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		/* Hang detected: disable further selftests and fail. */
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* Check that the trace buffer actually collected entries. */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing; the function tracer already did. */

out:
	/* Stop graph tracing entirely if the selftest failed. */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
/*
 * Common setup for the irqsoff family of latency tracers: force the
 * latency output format (remembering the caller's flag so it can be
 * restored on reset), zero the max-latency watermark, publish the
 * trace array, and start tracing.
 */
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);
	start_irqsoff_tracer(tr);
}
/*
 * Fully reset the wakeup tracer: throw away buffered per-CPU trace
 * data, then clear the tracer's internal wakeup bookkeeping while
 * holding wakeup_lock with local interrupts disabled.
 */
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long irq_flags;

	tracing_reset_online_cpus(tr);

	local_irq_save(irq_flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(irq_flags);
}
/*
 * Tracer init callback for the boot tracer.  Publishes the trace array
 * (even when it is NULL) and, when one is present, clears stale
 * per-CPU data and hooks up sched-switch recording.  Always reports
 * success.
 */
static int boot_trace_init(struct trace_array *tr)
{
	boot_trace = tr;

	if (tr) {
		tracing_reset_online_cpus(tr);
		tracing_sched_switch_assign_trace(tr);
	}

	return 0;
}
/*
 * Tracer start callback for the function tracer: discard previously
 * collected entries so a fresh trace starts from an empty buffer.
 */
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
/*
 * Iterator close callback for the BTS tracer: discard the per-CPU
 * trace data once the reader is done with it.
 */
static void trace_bts_close(struct trace_iterator *iter)
{
	tracing_reset_online_cpus(iter->tr);
}
/*
 * Tracer start callback for the persistent tracer: discard previously
 * collected per-CPU entries so recording starts from a clean buffer.
 */
static void persistent_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}