void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_wakeup;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        struct ring_buffer *buffer = tr->buffer;

        event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->prev_pid = curr->pid;
        entry->prev_prio = curr->prio;
        entry->prev_state = curr->state;
        entry->next_pid = wakee->pid;
        entry->next_prio = wakee->prio;
        entry->next_state = wakee->state;
        entry->next_cpu = task_cpu(wakee);

        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(tr->buffer, flags, 6, pc);
        ftrace_trace_userstack(tr->buffer, flags, pc);
}
static void probe_power_mark(struct power_trace *it, unsigned int type,
                             unsigned int level)
{
        struct ftrace_event_call *call = &event_power;
        struct ring_buffer_event *event;
        struct trace_power *entry;
        struct trace_array_cpu *data;
        struct trace_array *tr = power_trace;

        if (!trace_power_enabled)
                return;

        memset(it, 0, sizeof(struct power_trace));
        it->state = level;
        it->type = type;
        it->stamp = ktime_get();
        preempt_disable();
        it->end = it->stamp;
        data = tr->data[smp_processor_id()];

        event = trace_buffer_lock_reserve(tr, TRACE_POWER,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->state_data = *it;
        if (!filter_check_discard(call, entry, tr->buffer, event))
                trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
        preempt_enable();
}
void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_context_switch;
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->prev_pid = prev->pid;
        entry->prev_prio = prev->prio;
        entry->prev_state = prev->state;
        entry->next_pid = next->pid;
        entry->next_prio = next->prio;
        entry->next_state = next->state;
        entry->next_cpu = task_cpu(next);

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, flags, pc);
}
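/*
 * A minimal sketch (not part of the original source) of how a
 * sched_switch tracepoint probe would drive tracing_sched_switch_trace();
 * the wakeup path is wired up analogously through
 * tracing_sched_wakeup_trace().  The module-local state assumed here
 * (ctx_trace, tracer_enabled) is hypothetical and stands in for the
 * tracer's private trace_array and its on/off flag.
 */
static void probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                               struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu, pc;

        if (!tracer_enabled)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        /* Emit the event only if tracing is not disabled on this CPU. */
        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

        local_irq_restore(flags);
}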
/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
                                   unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        struct ftrace_event_call *call = &event_kmem_alloc;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_alloc_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type = TRACE_KMEM_ALLOC;
        entry->type_id = type_id;
        entry->call_site = call_site;
        entry->ptr = ptr;
        entry->bytes_req = bytes_req;
        entry->bytes_alloc = bytes_alloc;
        entry->gfp_flags = gfp_flags;
        entry->node = node;

        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
                                  unsigned long call_site,
                                  const void *ptr)
{
        struct ftrace_event_call *call = &event_kmem_free;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_free_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type = TRACE_KMEM_FREE;
        entry->type_id = type_id;
        entry->call_site = call_site;
        entry->ptr = ptr;

        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}
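/*
 * Sketch (assumed, not from the original source) of how slab tracepoint
 * probes would forward into the two helpers above.  KMEMTRACE_TYPE_KMALLOC
 * is taken to be one of the kmemtrace_type_id values, and -1 marks an
 * allocation with no NUMA node hint.
 */
static void kmemtrace_kmalloc(unsigned long call_site, const void *ptr,
                              size_t bytes_req, size_t bytes_alloc,
                              gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}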
void tracing_mt65xx_mon_manual_stop(struct trace_array *tr,
                                    unsigned long flags, int pc)
{
#if 0
        struct ftrace_event_call *call = &event_mt65xx_mon;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
        struct ring_buffer *buffer = tr->buffer;
#else
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
#endif
        struct ring_buffer_event *event;
        struct mt65xx_mon_entry *entry;
        unsigned int idx = 0;

        event = trace_buffer_lock_reserve(buffer, TRACE_MT65XX_MON_TYPE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);

        /* Manual stop: the monitor stays disabled after the log is taken. */
        mtk_mon->disable();
        entry->log = idx = mtk_mon->mon_log((void *)&entry->field);
        entry->cpu = raw_smp_processor_id();

#if 0
        if (!filter_check_discard(call, entry, buffer, event))
#endif
                trace_buffer_unlock_commit(buffer, event, flags, pc);
}
void tracing_mt65xx_mon_function(struct trace_array *tr,
                                 struct task_struct *prev,
                                 struct task_struct *next,
                                 unsigned long flags, int pc)
{
#if 0
        struct ftrace_event_call *call = &event_mt65xx_mon;
#endif
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct mt65xx_mon_entry *entry;
        unsigned int idx = 0;

        event = trace_buffer_lock_reserve(buffer, TRACE_MT65XX_MON_TYPE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);

        /* Pause the monitor around the snapshot, then re-enable it. */
        mtk_mon->disable();
        entry->log = idx = mtk_mon->mon_log((void *)&entry->field);
        entry->cpu = raw_smp_processor_id();
        mtk_mon->enable();

#if 0
        if (!filter_check_discard(call, entry, buffer, event))
#endif
                trace_buffer_unlock_commit(buffer, event, flags, pc);
}
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
        struct ftrace_event_call *call = &event_boot_ret;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_boot_ret *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->boot_ret = *bt;
        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
        preempt_enable();
}
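/*
 * Sketch (assumed, not from the original source) of the call site: an
 * initcall wrapper in the do_one_initcall() style would time fn(), fill
 * a struct boot_trace_ret, and hand it to trace_boot_ret().  The .result
 * and .duration fields are assumed members of that struct.
 */
static int __init run_traced_initcall(initcall_t fn)
{
        struct boot_trace_ret ret;
        ktime_t calltime, rettime;

        calltime = ktime_get();
        ret.result = fn();
        rettime = ktime_get();
        /* ns >> 10 gives a cheap approximation of microseconds. */
        ret.duration = (unsigned long long)
                ktime_to_ns(ktime_sub(rettime, calltime)) >> 10;
        trace_boot_ret(&ret, fn);
        return ret.result;
}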
static void probe_power_end(struct power_trace *it)
{
        struct ftrace_event_call *call = &event_power;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_power *entry;
        struct trace_array_cpu *data;
        struct trace_array *tr = power_trace;

        if (!trace_power_enabled)
                return;

        buffer = tr->buffer;

        preempt_disable();
        it->end = ktime_get();
        data = tr->data[smp_processor_id()];

        event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->state_data = *it;
        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
        preempt_enable();
}
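/*
 * Sketch (assumed, not shown above) of the matching start-side probe:
 * it only stamps the caller-provided struct power_trace; the event is
 * emitted later by probe_power_end() once the end time is known.
 */
static void probe_power_start(struct power_trace *it, unsigned int type,
                              unsigned int level)
{
        if (!trace_power_enabled)
                return;

        memset(it, 0, sizeof(struct power_trace));
        it->state = level;
        it->type = type;
        it->stamp = ktime_get();
}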
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
        struct ftrace_event_call *call = &event_branch;
        struct trace_array *tr = branch_tracer;
        struct ring_buffer_event *event;
        struct trace_branch *entry;
        struct ring_buffer *buffer;
        unsigned long flags;
        int cpu, pc;
        const char *p;

        /*
         * I would love to save just the ftrace_likely_data pointer, but
         * this code can also be used by modules. Ugly things can happen
         * if the module is unloaded, and then we go and read the
         * pointer.  This is slower, but much safer.
         */
        if (unlikely(!tr))
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;

        pc = preempt_count();
        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
                                          sizeof(*entry), flags, pc);
        if (!event)
                goto out;

        entry = ring_buffer_event_data(event);

        /* Strip off the path, only save the file */
        p = f->file + strlen(f->file);
        while (p >= f->file && *p != '/')
                p--;
        p++;

        strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
        strncpy(entry->file, p, TRACE_FILE_SIZE);
        entry->func[TRACE_FUNC_SIZE] = 0;
        entry->file[TRACE_FILE_SIZE] = 0;
        entry->line = f->line;
        entry->correct = val == expect;

        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);

 out:
        atomic_dec(&tr->data[cpu]->disabled);
        local_irq_restore(flags);
}
void trace_hw_branch(u64 from, u64 to)
{
        struct ftrace_event_call *call = &event_hw_branch;
        struct trace_array *tr = hw_branch_trace;
        struct ring_buffer_event *event;
        struct ring_buffer *buf;
        struct hw_branch_entry *entry;
        unsigned long irq1;
        int cpu;

        if (unlikely(!tr))
                return;

        if (unlikely(!trace_hw_branches_enabled))
                return;

        local_irq_save(irq1);
        cpu = raw_smp_processor_id();
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;

        buf = tr->buffer;
        event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, from);
        entry->ent.type = TRACE_HW_BRANCHES;
        entry->from = from;
        entry->to = to;
        if (!filter_check_discard(call, entry, buf, event))
                trace_buffer_unlock_commit(buf, event, 0, 0);

 out:
        atomic_dec(&tr->data[cpu]->disabled);
        local_irq_restore(irq1);
}