Example #1
File: main.c Project: janfj/dd-wrt
static void __init do_initcalls(void)
{
	initcall_t *call;
	int count = preempt_count();

	for (call = __initcall_start; call < __initcall_end; call++) {
		ktime_t t0, t1, delta;
		char *msg = NULL;
		char msgbuf[40];
		int result;
		wbd222_wdt_touch();

		if (initcall_debug) {
			printk("Calling initcall 0x%p", *call);
			print_fn_descriptor_symbol(": %s()",
					(unsigned long) *call);
			printk("\n");
			t0 = ktime_get();
		}

		result = (*call)();

		if (initcall_debug) {
			t1 = ktime_get();
			delta = ktime_sub(t1, t0);

			printk("initcall 0x%p", *call);
			print_fn_descriptor_symbol(": %s()",
					(unsigned long) *call);
			printk(" returned %d.\n", result);

			printk("initcall 0x%p ran for %Ld msecs: ",
				*call, (unsigned long long)delta.tv64 >> 20);
			print_fn_descriptor_symbol("%s()\n",
				(unsigned long) *call);
		}

		if (result && result != -ENODEV && initcall_debug) {
			sprintf(msgbuf, "error code %d", result);
			msg = msgbuf;
		}
		if (preempt_count() != count) {
			msg = "preemption imbalance";
			preempt_count() = count;
		}
		if (irqs_disabled()) {
			msg = "disabled interrupts";
			local_irq_enable();
		}
		if (msg) {
			printk(KERN_WARNING "initcall at 0x%p", *call);
			print_fn_descriptor_symbol(": %s()",
					(unsigned long) *call);
			printk(": returned with %s\n", msg);
		}
	}

	/* Make sure there is no pending stuff from the initcall sequence */
	flush_scheduled_work();
}
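A side note on the fixup code above: the assignment preempt_count() = count; compiles only on older kernels, where preempt_count() expanded to a writable lvalue. A minimal sketch of the same reset on a tree that provides preempt_count_set() (an assumption about the kernel version, not something used in the examples on this page):

#include <linux/preempt.h>
#include <linux/printk.h>

/* Sketch only: restore a preempt count that an initcall left unbalanced.
 * Assumes a kernel that provides preempt_count_set(); not an upstream helper. */
static void reset_preempt_count(int expected)
{
	if (preempt_count() != expected) {
		pr_warn("preemption imbalance: %d, resetting to %d\n",
			preempt_count(), expected);
		preempt_count_set(expected);
	}
}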
Example #2
static void __init do_initcalls(void)
{
    initcall_t *call;
    int count = preempt_count();

    for (call = __initcall_start; call < __initcall_end; call++) {
        char *msg;

        if (initcall_debug) {
            printk(KERN_DEBUG "Calling initcall 0x%p", *call);
            print_fn_descriptor_symbol(": %s()", (unsigned long) *call);
            printk("\n");
        }

        (*call)();

        msg = NULL;
        if (preempt_count() != count) {
            msg = "preemption imbalance";
            preempt_count() = count;
        }
        if (irqs_disabled()) {
            msg = "disabled interrupts";
            local_irq_enable();
        }
        if (msg) {
            printk(KERN_WARNING "error in initcall at 0x%p: "
                   "returned with %s\n", *call, msg);
        }
    }

    /* Make sure there is no pending stuff from the initcall sequence */
    flush_scheduled_work();
}
Example #3
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_init(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}
Example #4
static void __init _do_initcalls(int count, initcall_t *strt, initcall_t *end)
{
	initcall_t *call;

	for (call = strt; call < end; call++) {
		char *msg;

		if (initcall_debug) {
			printk(KERN_DEBUG "Calling initcall 0x%p", *call);
			print_symbol(": %s()", (unsigned long) *call);
			printk("\n");
		}

		(*call)();

		msg = NULL;
		if (preempt_count() != count) {
			msg = "preemption imbalance";
			preempt_count() = count;
		}
		if (irqs_disabled()) {
			msg = "disabled interrupts";
			local_irq_enable();
		}
		if (msg) {
			printk("error in initcall at 0x%p: "
			       "returned with %s\n", *call, msg);
		}
	}
}
Example #5
static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
{
	unsigned long saved_preempt_count = preempt_count();
	int expected_failure = 0;
#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_DEBUG_RT_MUTEXES)
	int saved_lock_count = current->lock_count;
#endif

	WARN_ON(irqs_disabled());

	testcase_fn();
	/*
	 * Filter out expected failures:
	 */
#ifndef CONFIG_PROVE_LOCKING
	if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected)
		expected_failure = 1;
	if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected)
		expected_failure = 1;
	if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected)
		expected_failure = 1;
	if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected)
		expected_failure = 1;
#endif
	if (debug_locks != expected) {
		if (expected_failure) {
			expected_testcase_failures++;
			printk("failed|");
		} else {
			unexpected_testcase_failures++;

			printk("FAILED|");
			dump_stack();
		}
	} else {
		testcase_successes++;
		printk("  ok  |");
	}
	testcase_total++;

	if (debug_locks_verbose)
		printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
			lockclass_mask, debug_locks, expected);
	/*
	 * Some tests (e.g. double-unlock) might corrupt the preemption
	 * count, so restore it:
	 */
	preempt_count() = saved_preempt_count;
#ifdef CONFIG_TRACE_IRQFLAGS
	if (softirq_count())
		current->softirqs_enabled = 0;
	else
		current->softirqs_enabled = 1;
#endif

	reset_locks();
#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_DEBUG_RT_MUTEXES)
	current->lock_count = saved_lock_count;
#endif
}
Example #6
static void __init do_one_initcall(initcall_t fn)
{
	int count = preempt_count();
	int result;
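	/* msgbuf is not declared in this snippet; in this version of init/main.c
	 * it is a file-scope buffer (roughly: static char msgbuf[64];). */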

	if (initcall_debug)
		result = do_one_initcall_debug(fn);
	else
		result = fn();

	msgbuf[0] = 0;

	if (result && result != -ENODEV && initcall_debug)
		sprintf(msgbuf, "error code %d ", result);

	if (preempt_count() != count) {
		strncat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
		preempt_count() = count;
	}
	if (irqs_disabled()) {
		strncat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();
	}
	if (msgbuf[0]) {
		print_fn_descriptor_symbol(KERN_WARNING "initcall %s", fn);
		printk(" returned with %s\n", msgbuf);
	}
}
Example #7
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += cnt;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
Example #8
static int test_init(void)
{
	int ret;
	int cpu;

	printk(KERN_INFO "[init] Can you feel me?\n");

	ret = preempt_count();
	pr_info("ret = %d\n", ret);

	preempt_disable();

	ret = preempt_count();
	pr_info("ret = %d\n", ret);

	preempt_enable();

	ret = preempt_count();
	pr_info("ret = %d\n", ret);

	cpu = get_cpu();
	pr_info("cpu = %d\n", cpu);
	put_cpu();

	return 0;
}
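On a kernel built with CONFIG_PREEMPT_COUNT, the second value printed above should be exactly one higher than the first and third, because preempt_disable() increments the count. A minimal self-check along those lines, assuming a tree that defines PREEMPT_DISABLE_OFFSET (1 with CONFIG_PREEMPT_COUNT, 0 without):

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/preempt.h>

/* Sketch: verify that preempt_disable()/preempt_enable() move preempt_count()
 * by exactly PREEMPT_DISABLE_OFFSET. */
static int __init preempt_count_selfcheck(void)
{
	int before = preempt_count();
	int during;

	preempt_disable();
	during = preempt_count();
	preempt_enable();

	WARN_ON(during - before != PREEMPT_DISABLE_OFFSET);
	return 0;
}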
Example #9
static void __init do_initcalls(void)
{
    initcall_t *call;
    int count = preempt_count();

    for (call = __initcall_start; call < __initcall_end; call++) {
        char *msg = NULL;
        char msgbuf[40];
        int result;

        if (initcall_debug) {
            printk("Calling initcall 0x%p", *call);
            print_fn_descriptor_symbol(": %s()",
                                       (unsigned long) *call);
            printk("\n");
        }

        result = (*call)();

        if (result && result != -ENODEV && initcall_debug) {
            sprintf(msgbuf, "error code %d", result);
            msg = msgbuf;
        }
        if (preempt_count() != count) {
            msg = "preemption imbalance";
            preempt_count() = count;
        }
        if (irqs_disabled()) {
            msg = "disabled interrupts";
            local_irq_enable();
        }
#ifdef CONFIG_PREEMPT_RT
        if (irqs_disabled()) {
            msg = "disabled hard interrupts";
            local_irq_enable();
        }
#endif
        if (msg) {
            printk(KERN_WARNING "initcall at 0x%p", *call);
            print_fn_descriptor_symbol(": %s()",
                                       (unsigned long) *call);
            printk(": returned with %s\n", msg);
        }
    }

    /* Make sure there is no pending stuff from the initcall sequence */
    flush_scheduled_work();
}
Example #10
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_free(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}
Example #11
static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}
Example #12
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
Example #13
static inline void procfile_context_print(void)
{
    printk_d("preemptible 0x%x\n", preemptible());
    printk_d("in_atomic_preempt_off 0x%x\n", in_atomic_preempt_off());
    printk_d("in_atomic 0x%x\n", in_atomic());
    printk_d("in_nmi 0x%lx\n", in_nmi());
    printk_d("in_serving_softirq 0x%lx\n", in_serving_softirq());
    printk_d("in_interrupt 0x%lx\n", in_interrupt());
    printk_d("in_softirq 0x%lx\n", in_softirq());
    printk_d("in_irq 0x%lx\n", in_irq());
    printk_d("preempt_count 0x%x\n", preempt_count());
    printk_d("irqs_disabled 0x%x\n", irqs_disabled());
    if(current) {
        printk_d("task->comm %s\n", current->comm);
        printk_d("task->flags 0x%x\n", current->flags);
        printk_d("task->state %lu\n", current->state);
        printk_d("task->usage %d\n", atomic_read(&(current->usage)));
        printk_d("task->prio %d\n", current->prio);
        printk_d("task->static_prio %d\n", current->static_prio);
        printk_d("task->normal_prio %d\n", current->normal_prio);
        printk_d("task->rt_priority %d\n", current->rt_priority);
        printk_d("task->policy %d\n", current->policy);
        printk_d("task->pid %d\n", current->pid);
        printk_d("task->tgid %d\n", current->tgid);
    }
    else
        printk_d("task pointer NULL\n");
}
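The raw preempt_count value printed above packs several fields into a single word. A sketch that splits it apart, assuming the standard PREEMPT/SOFTIRQ/HARDIRQ/NMI mask and shift macros (found in linux/hardirq.h or linux/preempt.h, depending on the kernel version):

#include <linux/hardirq.h>
#include <linux/printk.h>

/* Sketch: decode a preempt_count() snapshot into its packed fields. */
static void print_preempt_count_fields(unsigned long pc)
{
	pr_info("preempt depth %lu, softirq %lu, hardirq %lu, nmi %lu\n",
		pc & PREEMPT_MASK,
		(pc & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT,
		(pc & HARDIRQ_MASK) >> HARDIRQ_SHIFT,
		(pc & NMI_MASK) >> NMI_SHIFT);
}

Called, for example, as print_preempt_count_fields(preempt_count()) right after the printouts above.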
Example #14
/* leave me here to see if it catches a weird crash */
static void ramster_check_irq_counts(void)
{
	static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt;
	int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt;

	cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT;
	if (cur_hardirq_cnt > last_hardirq_cnt) {
		last_hardirq_cnt = cur_hardirq_cnt;
		if (!(last_hardirq_cnt&(last_hardirq_cnt-1)))
			pr_err("RAMSTER TESTING RRP hardirq_count=%d\n",
				last_hardirq_cnt);
	}
	cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT;
	if (cur_softirq_cnt > last_softirq_cnt) {
		last_softirq_cnt = cur_softirq_cnt;
		if (!(last_softirq_cnt&(last_softirq_cnt-1)))
			pr_err("RAMSTER TESTING RRP softirq_count=%d\n",
				last_softirq_cnt);
	}
	cur_preempt_cnt = preempt_count() & PREEMPT_MASK;
	if (cur_preempt_cnt > last_preempt_cnt) {
		last_preempt_cnt = cur_preempt_cnt;
		if (!(last_preempt_cnt&(last_preempt_cnt-1)))
			pr_err("RAMSTER TESTING RRP preempt_count=%d\n",
				last_preempt_cnt);
	}
}
Example #15
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
Example #16
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
Example #17
/* Kretprobe handler */
static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
}
Example #18
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
Example #19
static void
probe_mt65xx_mon_tracepoint(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!mt65xx_mon_ref))
		return;

	if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
		return;

	if (prev)
		tracing_record_cmdline(prev);
	if (next)
		tracing_record_cmdline(next);
	tracing_record_cmdline(current);

	pc = preempt_count();
	//local_irq_save(flags);
	spin_lock_irqsave(&mt65xx_mon_spinlock, flags);
	cpu = raw_smp_processor_id();
	data = mt65xx_mon_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_mt65xx_mon_function(mt65xx_mon_trace, prev, next, flags, pc);
	spin_unlock_irqrestore(&mt65xx_mon_spinlock, flags);
	//local_irq_restore(flags);
}
Example #20
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON(1);
		return 0;
#else
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_free(head, &rcuhead_debug_descr);
		return 1;
#endif
	default:
		return 0;
	}
}
Example #21
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 */
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_activate(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}
Example #22
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
    struct trace_array_cpu *data;
    int cpu = smp_processor_id();
    unsigned long flags;
    long disabled;
    int pc;

    if (likely(!tracer_enabled))
        return;

    tracing_record_cmdline(p);
    tracing_record_cmdline(current);

    if ((wakeup_rt && !rt_task(p)) ||
            p->prio >= wakeup_prio ||
            p->prio >= current->prio)
        return;

    pc = preempt_count();
    disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
    if (unlikely(disabled != 1))
        goto out;

    /* interrupts should be off from try_to_wake_up */
    __raw_spin_lock(&wakeup_lock);

    /* check for races. */
    if (!tracer_enabled || p->prio >= wakeup_prio)
        goto out_locked;

    /* reset the trace */
    __wakeup_reset(wakeup_trace);

    wakeup_cpu = task_cpu(p);
    wakeup_current_cpu = wakeup_cpu;
    wakeup_prio = p->prio;

    wakeup_task = p;
    get_task_struct(wakeup_task);

    local_save_flags(flags);

    data = wakeup_trace->data[wakeup_cpu];
    data->preempt_timestamp = ftrace_now(cpu);
    tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

    /*
     * We must be careful in using CALLER_ADDR2. But since wake_up
     * is not called by an assembly function (whereas schedule is)
     * it should be safe to use it here.
     */
    trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
    __raw_spin_unlock(&wakeup_lock);
out:
    atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Example #23
static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
Example #24
static void
probe_mt65xx_mon_tracepoint(void *ignore, struct task_struct *prev, struct task_struct *next)
{
    struct trace_array_cpu *data;
    unsigned long flags;
    int cpu;
    int pc;

    if (unlikely(!mt65xx_mon_ref))
        return;

    if (!mt65xx_mon_enabled || mt65xx_mon_stopped)
        return;

    if (prev)
        tracing_record_cmdline(prev);
    if (next)
        tracing_record_cmdline(next);
    tracing_record_cmdline(current);

    pc = preempt_count();
    /* local_irq_save(flags); */
    spin_lock_irqsave(&mt65xx_mon_spinlock, flags);
    cpu = raw_smp_processor_id();
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
    data = mt65xx_mon_trace->data[cpu];
#else
    data = per_cpu_ptr(mt65xx_mon_trace->trace_buffer.data, cpu);
#endif

    if (likely(!atomic_read(&data->disabled)))
        tracing_mt65xx_mon_function(mt65xx_mon_trace, prev, next, flags, pc);
    spin_unlock_irqrestore(&mt65xx_mon_spinlock, flags);
    /* local_irq_restore(flags); */
}
Example #25
/*
 * Do the printing; return non-zero if the task should be rescheduled.
 */
static int jiq_print(void *ptr)
{
	struct clientdata *data = ptr;
	int len = data->len;
	char *buf = data->buf;
	unsigned long j = jiffies;

	if (len > LIMIT) { 
		wake_up_interruptible(&jiq_wait);
		return 0;
	}

	if (len == 0)
		len = sprintf(buf, "    time  delta preempt   pid cpu command\n");
	else
		len = 0;

	/* intr_count is only exported since 1.3.5, but 1.99.4 is needed anyways */
	len += sprintf(buf + len, "%9li  %4li     %3i %5i %3i %s\n",
			j, j - data->jiffies,
			preempt_count(), current->pid, smp_processor_id(),
			current->comm);

	data->len += len;
	data->buf += len;
	data->jiffies = j;
	return 1;
}
Example #26
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
Example #27
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}
Example #28
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
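For context, callers normally reach __local_bh_disable_ip() through the inline wrapper below, which passes SOFTIRQ_DISABLE_OFFSET as cnt (quoted from memory from include/linux/bottom_half.h, so the exact form may differ between kernel versions):

static inline void local_bh_disable(void)
{
	__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}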
Example #29
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
Example #30
RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
{
#ifdef CONFIG_PREEMPT
    Assert(hThread == NIL_RTTHREAD);
# ifdef preemptible
    return preemptible();
# else
    return preempt_count() == 0 && !in_atomic() && !irqs_disabled();
# endif
#else
    int32_t c;

    Assert(hThread == NIL_RTTHREAD);
    c = g_acPreemptDisabled[smp_processor_id()];
    AssertMsg(c >= 0 && c < 32, ("%d\n", c));
    if (c != 0)
        return false;
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
    if (in_atomic())
        return false;
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 28)
    if (irqs_disabled())
        return false;
# else
    if (!ASMIntAreEnabled())
        return false;
# endif
    return true;
#endif
}