Example #1
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

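	/* Record the entry unless the per-event filter discards it */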
	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event,
						   irq_flags, pc);
}
Example #2
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
Example #3
void
tracing_mt65xx_mon_function(struct trace_array *tr,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
#if 0
    struct ftrace_event_call *call = &event_mt65xx_mon;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
    struct ring_buffer *buffer = tr->buffer;
#else
    struct ring_buffer *buffer = tr->trace_buffer.buffer;
#endif
    struct ring_buffer_event *event;
    struct mt65xx_mon_entry *entry;
    unsigned int idx = 0;

    event = trace_buffer_lock_reserve(buffer, TRACE_MT65XX_MON_TYPE,
                      sizeof(*entry), flags, pc);
    if (!event)
        return;

    entry = ring_buffer_event_data(event);

    /* Pause the monitor while its current log is copied into the reserved entry */
    mtk_mon->disable();
    entry->log = idx = mtk_mon->mon_log((void *)&entry->field);
    entry->cpu = raw_smp_processor_id();
    mtk_mon->enable();

#if 0
    if (!filter_check_discard(call, entry, buffer, event))
#endif
        trace_buffer_unlock_commit(buffer, event, flags, pc);
}
Example #4
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_ret *entry;
	struct trace_array *tr = boot_trace;

	if (!tr || !pre_initcalls_finished)
		return;

	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	entry->boot_ret = *bt;
	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
	preempt_enable();
}
Example #5
void trace_hw_branch(u64 from, u64 to)
{
	struct ftrace_event_call *call = &event_hw_branch;
	struct trace_array *tr = hw_branch_trace;
	struct ring_buffer_event *event;
	struct ring_buffer *buf;
	struct hw_branch_entry *entry;
	unsigned long irq1;
	int cpu;

	if (unlikely(!tr))
		return;

	if (unlikely(!trace_hw_branches_enabled))
		return;

	local_irq_save(irq1);
	cpu = raw_smp_processor_id();
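	/* The per-cpu disabled counter doubles as a recursion guard: only trace when we are the first user */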
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	buf = tr->buffer;
	event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, from);
	entry->ent.type = TRACE_HW_BRANCHES;
	entry->from = from;
	entry->to   = to;
	if (!filter_check_discard(call, entry, buf, event))
		trace_buffer_unlock_commit(buf, event, 0, 0);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(irq1);
}
Example #6
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
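	/* Try the read buffer first; if it is empty, swap it with the write buffer and retry */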
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	return NULL;

event:
	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}
Example #7
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_free_entry *entry;
	struct ring_buffer_event *event;

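	/* Reserve space for the entry; silently drop the record if the ring buffer is full */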
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_FREE;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;

	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}
Example #8
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

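	/* The entry is variable-sized: the header plus one word per syscall argument */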
	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
Example #9
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);
	what |= MASK_TC_BIT(rw, FLUSH);
	what |= MASK_TC_BIT(rw, FUA);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
Example #10
static void
probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
	struct trace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct trace_array_cpu *data;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	struct ring_buffer *buffer;
	unsigned long flags;
	int pc;
	const char *p;

	if (current->trace_recursion & TRACE_BRANCH_BIT)
		return;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer.  This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	raw_local_irq_save(flags);
	current->trace_recursion |= TRACE_BRANCH_BIT;
	data = this_cpu_ptr(tr->trace_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	pc = preempt_count();
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	entry	= ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->data.file + strlen(f->data.file);
	while (p >= f->data.file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->constant = f->constant;
	entry->line = f->data.line;
	entry->correct = val == expect;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

 out:
	current->trace_recursion &= ~TRACE_BRANCH_BIT;
	raw_local_irq_restore(flags);
}
Example #11
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}