/*
 * probe_mt65xx_mon_tracepoint - sched_switch probe feeding the MT65xx
 * hardware-monitor tracer.
 * @ignore: tracepoint callback private data (unused)
 * @prev:   task being switched out (may be NULL)
 * @next:   task being switched in (may be NULL)
 *
 * Records the pid<->comm mappings of the tasks involved, then — serialized
 * under mt65xx_mon_spinlock with IRQs off — emits one monitor sample into
 * the tracer's per-cpu buffer unless that cpu's buffer is disabled.
 */
static void probe_mt65xx_mon_tracepoint(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	/* Tracer must be registered, enabled, and not stopped. */
	if (unlikely(!mt65xx_mon_ref))
		return;
	if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
		return;

	/* Cache cmdlines so pids can be resolved to names at output time. */
	if (prev)
		tracing_record_cmdline(prev);
	if (next)
		tracing_record_cmdline(next);
	tracing_record_cmdline(current);

	pc = preempt_count();
	/* local_irq_save(flags); */
	/*
	 * The spinlock both disables local IRQs and serializes writers on
	 * all CPUs (the plain IRQ-off variant above was replaced by it).
	 */
	spin_lock_irqsave(&mt65xx_mon_spinlock, flags);
	cpu = raw_smp_processor_id();
	/* Per-cpu trace data moved behind trace_buffer in kernel 3.10. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	data = mt65xx_mon_trace->data[cpu];
#else
	data = per_cpu_ptr(mt65xx_mon_trace->trace_buffer.data, cpu);
#endif
	if (likely(!atomic_read(&data->disabled)))
		tracing_mt65xx_mon_function(mt65xx_mon_trace, prev, next, flags, pc);
	spin_unlock_irqrestore(&mt65xx_mon_spinlock, flags);
	/* local_irq_restore(flags); */
}
static void probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) { struct trace_array_cpu *data; unsigned long flags; int cpu; int pc; if (unlikely(!sched_ref)) return; tracing_record_cmdline(prev); tracing_record_cmdline(next); if (!tracer_enabled || sched_stopped) return; pc = preempt_count(); local_irq_save(flags); cpu = raw_smp_processor_id(); data = ctx_trace->data[cpu]; if (likely(!atomic_read(&data->disabled))) tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); local_irq_restore(flags); }
/*
 * probe_wakeup - wakeup probe for the wakeup-latency tracer.
 * @rq:      runqueue the task is woken on
 * @p:       task being woken
 * @success: wakeup success flag from the scheduler
 *
 * If @p outranks (lower prio value) both the currently-traced wakeup and
 * the current task, the trace state is reset and @p becomes the new task
 * whose wakeup-to-schedule latency is being measured.
 */
static void probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Skip tasks we don't care about: non-RT tasks when tracing RT
	 * wakeups only, or tasks that don't beat the current candidate
	 * or the running task in priority.
	 */
	if ((wakeup_rt && !rt_task(p)) ||
	    p->prio >= wakeup_prio ||
	    p->prio >= current->prio)
		return;

	pc = preempt_count();

	/* Guard against recursion on this cpu; only the first entry proceeds. */
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/* Hold a reference so the task can't vanish while we trace it. */
	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	/* Timestamp on the woken task's cpu; latency is measured from here. */
	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
/*
 * probe_mt65xx_mon_tracepoint - sched_switch probe feeding the MT65xx
 * hardware-monitor tracer (pre-3.10 per-cpu data layout only).
 * @ignore: tracepoint callback private data (unused)
 * @prev:   task being switched out (may be NULL)
 * @next:   task being switched in (may be NULL)
 *
 * Records the pid<->comm mappings of the tasks involved, then — serialized
 * under mt65xx_mon_spinlock with IRQs off — emits one monitor sample into
 * the tracer's per-cpu buffer unless that cpu's buffer is disabled.
 */
static void probe_mt65xx_mon_tracepoint(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	/* Tracer must be registered, enabled, and not stopped. */
	if (unlikely(!mt65xx_mon_ref))
		return;
	if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
		return;

	/* Cache cmdlines so pids can be resolved to names at output time. */
	if (prev)
		tracing_record_cmdline(prev);
	if (next)
		tracing_record_cmdline(next);
	tracing_record_cmdline(current);

	pc = preempt_count();
	/* local_irq_save(flags); */
	/*
	 * The spinlock both disables local IRQs and serializes writers on
	 * all CPUs (the plain IRQ-off variant above was replaced by it).
	 */
	spin_lock_irqsave(&mt65xx_mon_spinlock, flags);
	cpu = raw_smp_processor_id();
	data = mt65xx_mon_trace->data[cpu];
	if (likely(!atomic_read(&data->disabled)))
		tracing_mt65xx_mon_function(mt65xx_mon_trace, prev, next, flags, pc);
	spin_unlock_irqrestore(&mt65xx_mon_spinlock, flags);
	/* local_irq_restore(flags); */
}
/*
 * probe_sched_switch - minimal sched_switch probe.
 * @ignore: tracepoint callback private data (unused)
 * @prev:   task being switched out
 * @next:   task being switched in
 *
 * Only keeps the pid<->comm cache current for both tasks; no event is
 * written here.
 */
static void probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	if (likely(sched_ref)) {
		tracing_record_cmdline(prev);
		tracing_record_cmdline(next);
	}
}
/*
 * probe_wakeup - wakeup probe for the wakeup-latency tracer (RT tasks only
 * in this variant: non-RT wakeups are skipped unconditionally).
 * @rq:      runqueue the task is woken on
 * @p:       task being woken
 * @success: wakeup success flag from the scheduler
 *
 * If RT task @p outranks (lower prio value) both the currently-traced
 * wakeup and the current task, the trace state is reset and @p becomes the
 * new task whose wakeup-to-schedule latency is being measured.
 */
static void probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Skip non-RT tasks, and tasks that don't beat the current
	 * candidate or the running task in priority.
	 */
	if (likely(!rt_task(p)) ||
	    p->prio >= wakeup_prio ||
	    p->prio >= current->prio)
		return;

	pc = preempt_count();

	/* Guard against recursion on this cpu; only the first entry proceeds. */
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	/* Hold a reference so the task can't vanish while we trace it. */
	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	/* Timestamp on the woken task's cpu; latency is measured from here. */
	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
static void probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) { struct trace_array_cpu *data; unsigned long flags; int cpu, pc; if (unlikely(!sched_ref)) return; tracing_record_cmdline(current); if (!tracer_enabled || sched_stopped) return; pc = preempt_count(); local_irq_save(flags); cpu = raw_smp_processor_id(); data = ctx_trace->data[cpu]; if (likely(!atomic_read(&data->disabled))) tracing_sched_wakeup_trace(ctx_trace, wakee, current, flags, pc); local_irq_restore(flags); }
/*
 * probe_sched_wakeup - minimal wakeup probe.
 * @ignore: tracepoint callback private data (unused)
 * @wakee:  task being woken
 *
 * Only keeps the waker's pid<->comm cache entry current; no event is
 * written here.
 */
static void probe_sched_wakeup(void *ignore, struct task_struct *wakee)
{
	if (likely(sched_ref))
		tracing_record_cmdline(current);
}
/*
 * probe_mt65xx_mon_manual_tracepoint - manual start/stop control for the
 * MT65xx hardware-monitor tracer.
 * @ignore:       tracepoint callback private data (unused)
 * @manual_start: 1 to start monitoring, 0 to stop; any other value is
 *                ignored
 *
 * START only arms the hardware monitor; STOP flushes the collected monitor
 * data into the tracer's per-cpu buffer (IRQs disabled around the write).
 */
static void probe_mt65xx_mon_manual_tracepoint(void *ignore, unsigned int manual_start)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	/* Tracer must be registered, enabled, and not stopped. */
	if (unlikely(!mt65xx_mon_ref))
		return;
	if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
		return;
	/* Only 0 (stop) and 1 (start) are valid requests. */
	if((manual_start != 0) && (manual_start != 1))
		return;

	tracing_record_cmdline(current);

	if(manual_start == is_manual_start) /* if already started or stopped */
		return;

	if(manual_start == 1) /* for START operation, only enable mt65xx monitor */
	{
		mtk_mon->enable();
		return;
	}
	else /* for STOP operation. log monitor data into buffer */
	{
		pc = preempt_count();
		local_irq_save(flags);
		cpu = raw_smp_processor_id();
		/* Per-cpu trace data moved behind trace_buffer in kernel 3.10. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
		data = mt65xx_mon_trace->data[cpu];
#else
		data = per_cpu_ptr(mt65xx_mon_trace->trace_buffer.data, cpu);
#endif
		if (likely(!atomic_read(&data->disabled)))
			tracing_mt65xx_mon_manual_stop(mt65xx_mon_trace, flags, pc);
		local_irq_restore(flags);
	}
}
/*
 * probe_mt65xx_mon_manual_tracepoint - manual start/stop control for the
 * MT65xx hardware-monitor tracer (pre-3.10 per-cpu data layout only).
 * @ignore:       tracepoint callback private data (unused)
 * @manual_start: 1 to start monitoring, 0 to stop; any other value is
 *                ignored
 *
 * START only arms the hardware monitor; STOP flushes the collected monitor
 * data into the tracer's per-cpu buffer (IRQs disabled around the write).
 */
static void probe_mt65xx_mon_manual_tracepoint(void *ignore, unsigned int manual_start)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	/* Tracer must be registered, enabled, and not stopped. */
	if (unlikely(!mt65xx_mon_ref))
		return;
	if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
		return;
	/* Only 0 (stop) and 1 (start) are valid requests. */
	if ((manual_start != 0) && (manual_start != 1))
		return;

	tracing_record_cmdline(current);

	if (manual_start == is_manual_start)/* if already started or stopped */
		return;

	if (manual_start == 1) { /* for START operation, only enable mt65xx monitor */
		mtk_mon->enable();
		return;
	} else { /* for STOP operation. log monitor data into buffer */
		pc = preempt_count();
		local_irq_save(flags);
		cpu = raw_smp_processor_id();
		data = mt65xx_mon_trace->data[cpu];
		if (likely(!atomic_read(&data->disabled)))
			tracing_mt65xx_mon_manual_stop(mt65xx_mon_trace, flags, pc);
		local_irq_restore(flags);
	}
}
/*
 * probe_wakeup_sched_switch - sched_switch probe for the wakeup-latency
 * tracer: closes the latency measurement when the traced task is scheduled.
 * @rq:   runqueue the switch happens on
 * @prev: task being switched out
 * @next: task being switched in
 *
 * When @next is the task whose wakeup is being traced, computes the
 * wakeup-to-schedule delta against the timestamp taken in the wakeup probe
 * and, if it exceeds the current maximum, records a new max-latency trace.
 * The trace state is reset either way.
 */
static void notrace probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	/*
	 * NOTE(review): sibling probe_wakeup variants annotate this test
	 * unlikely(); likely() here is a branch hint only (same logic) —
	 * confirm the intended annotation against upstream.
	 */
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	/* Latency = now minus the timestamp taken at wakeup time. */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	/* New maximum: snapshot the trace unless tracing is stopped. */
	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	/* Fold the request direction and per-flag category bits into @what. */
	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);
	what |= MASK_TC_BIT(rw, FLUSH);
	what |= MASK_TC_BIT(rw, FUA);

	pid = tsk->pid;
	/* Honour the user's action/pid/sector filters. */
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	/*
	 * Ftrace path: reserve an event in the ring buffer and jump into
	 * the shared field-filling code below.
	 */
	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		/* Variable-length payload follows the fixed header. */
		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
/*
 * __blk_add_trace - worker for the various blk_add_trace*() types
 * (BARRIER-era variant, pre FLUSH/FUA flags).
 *
 * Fills out a blk_io_trace structure and places it either in the ftrace
 * ring buffer (when the blk tracer plugin is enabled) or in blktrace's
 * relay per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	/* Fold the request direction and per-flag category bits into @what. */
	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	/* Honour the user's action/pid/sector filters. */
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	/*
	 * Ftrace path: reserve an event in the ring buffer and jump into
	 * the shared field-filling code below.
	 */
	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/* IRQs off while reserving relay space, so an irq can't step on us. */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/* cpu/pid duplicated here for the trace_event->bin() output. */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		/* Variable-length payload follows the fixed header. */
		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}