void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid		= prev->pid;
	entry->prev_prio	= prev->prio;
	entry->prev_state	= prev->state;
	entry->next_pid		= next->pid;
	entry->next_prio	= next->prio;
	entry->next_state	= next->state;
	entry->next_cpu		= task_cpu(next);

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void probe_power_mark(struct power_trace *it, unsigned int type,
			     unsigned int level)
{
	struct ftrace_event_call *call = &event_power;
	struct ring_buffer_event *event;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	memset(it, 0, sizeof(struct power_trace));
	it->state = level;
	it->type = type;
	it->stamp = ktime_get();
	preempt_disable();
	it->end = it->stamp;
	data = tr->data[smp_processor_id()];

	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->state_data = *it;
	if (!filter_check_discard(call, entry, tr->buffer, event))
		trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
	preempt_enable();
}
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid		= curr->pid;
	entry->prev_prio	= curr->prio;
	entry->prev_state	= curr->state;
	entry->next_pid		= wakee->pid;
	entry->next_prio	= wakee->prio;
	entry->next_state	= wakee->state;
	entry->next_cpu		= task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);

#ifdef CONFIG_STM_WAKEUP
	stm_sched_wakeup(entry->prev_pid, entry->prev_prio, entry->prev_state,
			 entry->next_pid, entry->next_prio, entry->next_state,
			 entry->next_cpu);
#endif
}
void tracing_mt65xx_mon_manual_stop(struct trace_array *tr,
				    unsigned long flags, int pc)
{
#if 0
	struct ftrace_event_call *call = &event_mt65xx_mon;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	struct ring_buffer *buffer = tr->buffer;
#else
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
#endif
	struct ring_buffer_event *event;
	struct mt65xx_mon_entry *entry;
	unsigned int idx = 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_MT65XX_MON_TYPE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);

	mtk_mon->disable();
	entry->log = idx = mtk_mon->mon_log((void *)&entry->field);
	entry->cpu = raw_smp_processor_id();

#if 0
	if (!filter_check_discard(call, entry, buffer, event))
#endif
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
void trace_hw_branch(u64 from, u64 to)
{
	struct trace_array *tr = hw_branch_trace;
	struct ring_buffer_event *event;
	struct hw_branch_entry *entry;
	unsigned long irq1;
	int cpu;

	if (unlikely(!tr))
		return;

	if (unlikely(!trace_hw_branches_enabled))
		return;

	local_irq_save(irq1);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, from);
	entry->ent.type = TRACE_HW_BRANCHES;
	entry->from = from;
	entry->to   = to;
	trace_buffer_unlock_commit(tr, event, 0, 0);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(irq1);
}
void tracing_mt65xx_mon_function(struct trace_array *tr,
				 struct task_struct *prev,
				 struct task_struct *next,
				 unsigned long flags, int pc)
{
#if 0
	struct ftrace_event_call *call = &event_mt65xx_mon;
#endif
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct mt65xx_mon_entry *entry;
	unsigned int idx = 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_MT65XX_MON_TYPE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);

	mtk_mon->disable();
	entry->log = idx = mtk_mon->mon_log((void *)&entry->field);
	entry->cpu = raw_smp_processor_id();
	mtk_mon->enable();

#if 0
	if (!filter_check_discard(call, entry, buffer, event))
#endif
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
	struct ftrace_event_call *call = &event_boot_ret;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_boot_ret *entry;
	struct trace_array *tr = boot_trace;

	if (!tr || !pre_initcalls_finished)
		return;

	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->boot_ret = *bt;
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
	preempt_enable();
}
static void probe_power_end(struct power_trace *it)
{
	struct ftrace_event_call *call = &event_power;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	buffer = tr->buffer;

	preempt_disable();
	it->end = ktime_get();
	data = tr->data[smp_processor_id()];

	event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->state_data = *it;
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
	preempt_enable();
}
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_call *entry;
	struct trace_array *tr = boot_trace;

	if (!tr || !pre_initcalls_finished)
		return;

	/* Get its name now since this function could
	 * disappear because it is in the .init section. */
	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->boot_call = *bt;
	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
	preempt_enable();
}
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(buffer, event, 0, pc);
	}
}
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);
	what |= MASK_TC_BIT(rw, FLUSH);
	what |= MASK_TC_BIT(rw, FUA);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
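/*
 * Minimal sketch of the reserve/fill/commit pattern shared by the tracers
 * above, for reference only.  TRACE_FOO, struct foo_entry and event_foo are
 * hypothetical placeholders, and the helper signatures are assumed to match
 * the buffer-based variants used in tracing_sched_switch_trace() above;
 * this is not an additional tracer from the source.
 */
static void tracing_foo_trace(struct trace_array *tr, u64 value,
			      unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_foo;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct foo_entry *entry;

	/* Reserve space for one entry; bail out if the buffer has no room. */
	event = trace_buffer_lock_reserve(buffer, TRACE_FOO,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;

	/* Fill the type-specific payload. */
	entry = ring_buffer_event_data(event);
	entry->value = value;

	/* Commit the event unless the event filter discards it. */
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}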