#include <linux/fs.h> #include <linux/debugfs.h> #include <linux/ftrace.h> #include "trace.h" /* Our two options */ enum { TRACE_NOP_OPT_ACCEPT = 0x1, TRACE_NOP_OPT_REFUSE = 0x2 }; /* Options for the tracer (see trace_options file) */ static struct tracer_opt nop_opts[] = { /* Option that will be accepted by set_flag callback */ { TRACER_OPT(test_nop_accept, TRACE_NOP_OPT_ACCEPT) }, /* Option that will be refused by set_flag callback */ { TRACER_OPT(test_nop_refuse, TRACE_NOP_OPT_REFUSE) }, { } /* Always set a last empty entry */ }; static struct tracer_flags nop_flags = { /* You can check your flags value here when you want. */ .val = 0, /* By default: all flags disabled */ .opts = nop_opts }; static struct trace_array *ctx_trace; static void start_nop_trace(struct trace_array *tr) {
/*
 * function tracer plumbing (older variant, no .flags on ftrace_ops).
 * NOTE(review): chunk starts mid-initializer — the struct whose
 * .func = function_trace_call member opens this fragment is declared
 * before the visible region (presumably `trace_ops`; confirm upstream).
 */
	.func = function_trace_call,
};

/* Callback variant that also records a stack trace per function entry */
static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our two options: bit flags stored in func_flags.val */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	/* Stack-trace option only makes sense when stacktraces are built in */
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

/*
 * NOTE(review): body truncated below; ftrace_function_enabled,
 * trace_flags and trace_ops are defined outside the visible region.
 */
static void tracing_start_function_trace(void)
{
	/* Keep tracing disabled while the callback is being switched */
	ftrace_function_enabled = 0;
	/* Honor the preempt-only iter flag by swapping in the filtered callback */
	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
/*
 * kmemtrace: traces slab allocator events (alloc shown below; chunk is
 * truncated mid-prototype of kmemtrace_alloc()).
 */
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"

/* Select an alternative, minimalistic output than the original one */
#define TRACE_KMEM_OPT_MINIMAL 0x1

static struct tracer_opt kmem_opts[] = {
	/* Default disable the minimalistic output */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags kmem_tracer_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = kmem_opts
};

/* trace_array kmemtrace events are written to; set elsewhere — out of view */
static struct trace_array *kmemtrace_array;

/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
/*
 * function tracer plumbing (newer variant: global-filter ftrace_ops and
 * an optional pstore-backed option).
 * NOTE(review): chunk truncated — tracing_start_function_trace() body
 * continues past the visible region; TRACE_FUNC_OPT_STACK /
 * TRACE_FUNC_OPT_PSTORE are defined outside it as well.
 */

/* Plain per-function callback, registered against the global filter */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

/* Callback variant that also records a stack trace per function entry */
static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	/* Stack-trace option only makes sense when stacktraces are built in */
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
#ifdef CONFIG_PSTORE_FTRACE
	/* Persist function trace into pstore across reboots */
	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(void)
{
	/* Keep tracing disabled while setup completes (rest of body out of view) */
	ftrace_function_enabled = 0;
/*
 * blktrace integration with the ftrace framework: state, options and
 * tracepoint (un)registration hooks for the block I/O tracer.
 */
#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

/* Sequence number handed out to userspace blktrace sessions */
static unsigned int blktrace_seq __read_mostly = 1;

/* trace_array used when blktrace output goes through ftrace */
static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

/* Select an alternative, minimalistic output than the original one */
#define TRACE_BLK_OPT_CLASSIC 0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Default disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags blk_tracer_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

/* Forward declarations; definitions are outside the visible region */
static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*