static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

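	/*
	 * Record the current preempt count for the trace entry, then disable
	 * preemption with the notrace variant so this disable is not traced.
	 */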
	pc = preempt_count();
	preempt_disable_notrace();

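	/* Per-context recursion guard: give up if the tracer re-entered itself. */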
	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
Example #2
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
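	/* atomic_inc_return() == 1 means we are the only tracer on this CPU. */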
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
Example #3
u64 notrace trace_clock_local(void)
{
	u64 clock;

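	/*
	 * Disable preemption so the sched_clock() read happens on one CPU;
	 * the notrace variants keep the tracer from recursing into itself.
	 */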
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
Example #4
File: time.c  Project: Camedpuffer/linux
u64 xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	u64 ret;

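	/* Stay on this CPU while reading its pvclock time info. */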
	preempt_disable_notrace();
	src = &__this_cpu_read(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}
Example #5
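An older revision of the same function, predating the cycle_t -> u64 and __get_cpu_var() -> this_cpu ops conversions: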
cycle_t xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	preempt_disable_notrace();
	src = &__get_cpu_var(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}
Example #6

void mt_sched_monitor_switch(int on)
{
	int cpu;

	/* Take the mutex before disabling preemption: mutex_lock() may sleep. */
	mutex_lock(&mt_sched_mon_lock);
	preempt_disable_notrace();
	for_each_possible_cpu(cpu) {
		printk("[mtprof] sched monitor on CPU#%d switch from %d to %d\n",
		       cpu, per_cpu(mtsched_mon_enabled, cpu), on);
		per_cpu(mtsched_mon_enabled, cpu) = on; /* 0x1 || 0x2, IRQ & Preempt */
	}
	preempt_enable_notrace();
	mutex_unlock(&mt_sched_mon_lock);
}
Example #7

/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
asmlinkage void __sched notrace preempt_schedule_context(void)
{
	enum ctx_state prev_ctx;

	if (likely(!preemptible()))
		return;

	/*
	 * Need to disable preemption in case user_exit() is traced
	 * and the tracer calls preempt_enable_notrace() causing
	 * an infinite recursion.
	 */
	preempt_disable_notrace();
	prev_ctx = exception_enter();
	preempt_enable_no_resched_notrace();

	preempt_schedule();

	preempt_disable_notrace();
	exception_exit(prev_ctx);
	preempt_enable_notrace();
}
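For reference, a sketch of how preempt_enable_notrace() ends up calling this function on kernels of this era, assuming CONFIG_CONTEXT_TRACKING is set (based on include/linux/preempt.h; the exact macros vary by version):

/* sketch -- assumes CONFIG_CONTEXT_TRACKING; details vary across versions */
#define __preempt_schedule_context() preempt_schedule_context()

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)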
Example #8

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
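A hypothetical usage sketch (do_work() is a stand-in): timing a section of code on one CPU. The delta is only meaningful when both reads execute on the same CPU, per the comment above.

	u64 t0, delta_ns;

	t0 = trace_clock_local();
	do_work();				/* hypothetical workload */
	delta_ns = trace_clock_local() - t0;	/* sched_clock() counts nanoseconds */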
Example #9
notrace unsigned int debug_smp_processor_id(void)
{
	unsigned long preempt_count = preempt_count();
	int this_cpu = raw_smp_processor_id();

	if (likely(preempt_count))
		goto out;

	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable_notrace();

	if (!printk_ratelimit())
		goto out_enable;

	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
			"code: %s/%d\n",
			preempt_count() - 1, current->comm, current->pid);
	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

#ifdef CONFIG_PANIC_ON_SMP_PROCESS_ID_BUG
	panic("smp_processor_id() used in preemptible code; fix the caller, then disable CONFIG_PANIC_ON_SMP_PROCESS_ID_BUG\n");
#endif

out_enable:
	preempt_enable_no_resched_notrace();
out:
	return this_cpu;
}
Example #10
File: tsc.c  Project: EMCAntimatter/linux
void cyc2ns_read_begin(struct cyc2ns_data *data)
{
	int seq, idx;

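	/*
	 * Preemption stays disabled on return; the caller must pair this
	 * with cyc2ns_read_end().
	 */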
	preempt_disable_notrace();

	do {
		seq = this_cpu_read(cyc2ns.seq.sequence);
		idx = seq & 1;

		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);

	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
}
Example #11
// ARM10C 20130824
// FIXME: with regard to notrace, what exactly is a profiling function?
notrace unsigned int debug_smp_processor_id(void)
{
	unsigned long preempt_count = preempt_count(); /* e.g. 0x4000_0001 */
	int this_cpu = raw_smp_processor_id();

	/* likely() tells the compiler this condition is expected to be true. */
	if (likely(preempt_count))
		goto out;

// 2013/08/24: stopped here

	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable_notrace();

	if (!printk_ratelimit())
		goto out_enable;

	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
			"code: %s/%d\n",
			preempt_count() - 1, current->comm, current->pid);
	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

out_enable:
	preempt_enable_no_resched_notrace();
out:
	return this_cpu;
}
Example #12
File: baselib.c  Project: joelagnel/ktap
/* don't go through tstring interning for printf; use the buffer directly */
static int ktap_lib_printf(ktap_state *ks)
{
	struct trace_seq *seq;

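	/* The per-CPU print buffer is only safe to use with preemption off. */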
	preempt_disable_notrace();

	seq = kp_percpu_data(KTAP_PERCPU_DATA_BUFFER);
	trace_seq_init(seq);

	if (kp_strfmt(ks, seq))
		goto out;	/* don't return with preemption still disabled */

	seq->buffer[seq->len] = '\0';
	kp_transport_write(ks, seq->buffer, seq->len + 1);

 out:
	preempt_enable_notrace();
	return 0;
}
Example #13
notrace static unsigned int check_preemption_disabled(const char *what1,
							const char *what2)
{
	int this_cpu = raw_smp_processor_id();

	if (likely(preempt_count()))
		goto out;

	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable_notrace();

	if (!printk_ratelimit())
		goto out_enable;

	printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
		what1, what2, preempt_count() - 1, current->comm, current->pid);

	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

out_enable:
	preempt_enable_no_resched_notrace();
out:
	return this_cpu;
}
Example #14
File: lib_base.c  Project: feng-tao/ktap
/* don't go through string interning in printf; use the buffer directly */
static int kplib_printf(ktap_state_t *ks)
{
	struct trace_seq *seq;

	preempt_disable_notrace();

	seq = kp_this_cpu_print_buffer(ks);
	trace_seq_init(seq);

	if (kp_str_fmt(ks, seq))
		goto out;

	seq->buffer[seq->len] = '\0';
	kp_transport_write(ks, seq->buffer, seq->len + 1);

 out:
	preempt_enable_notrace();
	return 0;
}
Example #15
File: clock.c  Project: 7799/linux
/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

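	/*
	 * Keep preemption off so the smp_processor_id() comparison below
	 * remains stable while the clock is read.
	 */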
	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}
Example #16
File: vm.c  Project: joelagnel/ktap
static void ktap_concat(ktap_state *ks, int start, int end)
{
	int i, len = 0;
	StkId top = ks->ci->u.l.base;
	ktap_string *ts;
	char *ptr, *buffer;

	for (i = start; i <= end; i++) {
		if (!ttisstring(top + i)) {
			kp_error(ks, "cannot concat non-string\n");
			setnilvalue(top + start);
			return;
		}

		len += rawtsvalue(top + i)->tsv.len;
	}

	if (len >= KTAP_PERCPU_BUFFER_SIZE) {
		kp_error(ks, "Error: too long string concatenation\n");
		return;
	}

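	/* The per-CPU scratch buffer requires preemption to stay off. */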
	preempt_disable_notrace();

	buffer = kp_percpu_data(KTAP_PERCPU_DATA_BUFFER);
	ptr = buffer;

	for (i = start; i <= end; i++) {
		int len = rawtsvalue(top + i)->tsv.len;
		strncpy(ptr, svalue(top + i), len);
		ptr += len;
	}
	ts = kp_tstring_newlstr(ks, buffer, len);
	setsvalue(top + start, ts);

	preempt_enable_notrace();
}