Example #1
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
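	/* Disable preemption recursion-safely; remember if NEED_RESCHED was set. */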
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
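	/* Per-CPU recursion guard: trace only if we are the first in on this CPU. */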
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}
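
All of these examples lean on the same two helpers. For reference, here is a minimal sketch of how ftrace_preempt_disable()/ftrace_preempt_enable() were implemented in kernels of this era (kernel/trace/trace.h): preemption is toggled with the notrace variants, and the enable side avoids calling schedule() when NEED_RESCHED was already set on entry, since the callback may be running from inside the scheduler itself.

static inline int ftrace_preempt_disable(void)
{
	int resched;

	/* Remember whether a reschedule was already pending ... */
	resched = need_resched();
	/* ... then disable preemption without recursing into the tracer. */
	preempt_disable_notrace();

	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		/* NEED_RESCHED was set on entry: do not schedule from here. */
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}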
Example #2

u64 notrace trace_clock_local(void)
{
	u64 clock;
	int resched;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	resched = ftrace_preempt_disable();
	clock = sched_clock();
	ftrace_preempt_enable(resched);

	return clock;
}
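
The comment above is the operative constraint: trace_clock_local() timestamps are only comparable when both are taken on the same CPU. A hypothetical sketch of safe usage (measure_local() and do_work() are made-up names, not kernel API):

/* Hypothetical helper: time a callback with trace_clock_local().
 * Preemption stays disabled so both timestamps come from the same
 * CPU; a cross-CPU delta would be meaningless for this clock. */
static u64 measure_local(void (*do_work)(void))
{
	u64 t0, t1;

	preempt_disable();
	t0 = trace_clock_local();
	do_work();
	t1 = trace_clock_local();
	preempt_enable();

	return t1 - t0;
}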
Example #3

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, ip, parent_ip, flags, pc);

 unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);

	ftrace_preempt_enable(resched);
}
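
For context, a callback like wakeup_tracer_call() is wired into the function tracer through a struct ftrace_ops; a minimal sketch in the single-callback style of these kernels (start_func_tracer()/stop_func_tracer() are assumed names):

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};

/* Assumed helpers: hook/unhook the callback when the tracer starts and stops. */
static void start_func_tracer(void)
{
	register_ftrace_function(&trace_ops);
}

static void stop_func_tracer(void)
{
	unregister_ftrace_function(&trace_ops);
}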
Example #4

A later variant of the same callback: instead of taking wakeup_lock on every hit, it first compares the current CPU against wakeup_current_cpu, so CPUs that are not running the woken task bail out before touching any shared state.
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
    struct trace_array *tr = wakeup_trace;
    struct trace_array_cpu *data;
    unsigned long flags;
    long disabled;
    int resched;
    int cpu;
    int pc;

    if (likely(!wakeup_task))
        return;

    pc = preempt_count();
    resched = ftrace_preempt_disable();

    cpu = raw_smp_processor_id();
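    /* Only the CPU running the woken task needs to trace; everyone else bails. */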
    if (cpu != wakeup_current_cpu)
        goto out_enable;

    data = tr->data[cpu];
    disabled = atomic_inc_return(&data->disabled);
    if (unlikely(disabled != 1))
        goto out;

    local_irq_save(flags);

    trace_function(tr, ip, parent_ip, flags, pc);

    local_irq_restore(flags);

out:
    atomic_dec(&data->disabled);
out_enable:
    ftrace_preempt_enable(resched);
}
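
The wakeup_current_cpu variable checked above is maintained elsewhere, by probes on the scheduler's wakeup and migration events. A sketch of that bookkeeping (hook signatures varied across kernel versions, so treat the prototype as an assumption):

static int wakeup_current_cpu;

/* Keep wakeup_current_cpu in sync when the traced task migrates, so
 * the hot tracer callback above can filter with a plain compare
 * instead of taking wakeup_lock. */
static void
probe_wakeup_migrate_task(struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}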