Example 1
/* File read handler: report the current sample period, converted to microseconds. */
static ssize_t
sysprof_sample_read(struct file *filp, char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
    char buf[MAX_LONG_DIGITS];
    int r;

    r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period));

    return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
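For context, this handler relies on a nsecs_to_usecs() helper and a MAX_LONG_DIGITS buffer size defined elsewhere in the source tree. A minimal sketch of what they are assumed to look like (a plain nanosecond-to-microsecond division and a buffer wide enough for a decimal long, sign, newline, and NUL) is given below; the actual definitions may differ:

/*
 * Assumed support definitions (sketch only, not the original code):
 * convert nanoseconds to microseconds and size the text buffer.
 */
#define MAX_LONG_DIGITS 22  /* 19 digits of a 64-bit long + sign + '\n' + NUL */

static unsigned long nsecs_to_usecs(unsigned long nsecs)
{
    return nsecs / 1000;
}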
Example 2
/* Return the monotonic clock in microseconds, never stepping backwards. */
static
pgm_time_t
pgm_clock_update (void)
{
	struct timespec		clock_now;
	pgm_time_t		now;
	static pgm_time_t	last = 0;

	clock_gettime (CLOCK_MONOTONIC, &clock_now);
	now = secs_to_usecs (clock_now.tv_sec) + nsecs_to_usecs (clock_now.tv_nsec);
	if (PGM_UNLIKELY(now < last))
		return last;
	else
		return last = now;
}
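The function above depends on the pgm_time_t type, the secs_to_usecs()/nsecs_to_usecs() conversion macros, and the PGM_UNLIKELY() branch hint. The sketch below shows plausible definitions inferred from the call sites; OpenPGM's own headers may define them differently:

#include <stdint.h>

/* Assumed support definitions (sketch only): a 64-bit microsecond counter,
 * second/nanosecond to microsecond scaling, and a compiler branch hint. */
typedef uint64_t pgm_time_t;
#define secs_to_usecs(t)	((pgm_time_t)(t) * 1000000UL)
#define nsecs_to_usecs(t)	((pgm_time_t)(t) / 1000UL)
#define PGM_UNLIKELY(expr)	__builtin_expect (!!(expr), 0)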
Example 3
static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
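The one decision point not shown above is report_latency(), which gates whether the measured delta is recorded as a new maximum. A hedged sketch of that logic, as it is commonly implemented in the kernel's wakeup tracer (compare against tracing_thresh if one is set, otherwise against the current tracing_max_latency); the exact kernel version at hand may differ:

/* Sketch: record only if delta meets the threshold or beats the current max. */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}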