u64 sched_clock_cpu(int cpu)
{
	u64 now, clock, this_clock, remote_clock;
	struct sched_clock_data *scd;

	if (sched_clock_stable)
		return sched_clock();

	scd = cpu_sdc(cpu);

	/*
	 * Normally this is not called in NMI context - but if it is,
	 * trying to do any locking here is totally lethal.
	 */
	if (unlikely(in_nmi()))
		return scd->clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		this_clock = __update_sched_clock(my_scd, now);
		remote_clock = scd->clock;

		/*
		 * Use the opportunity that we have both locks
		 * taken to couple the two clocks: we take the
		 * larger time as the latest time for both
		 * runqueues. (this creates monotonic movement)
		 */
		if (likely((s64)(remote_clock - this_clock) < 0)) {
			clock = this_clock;
			scd->clock = clock;
		} else {
			/*
			 * Should be rare, but possible:
			 */
			clock = remote_clock;
			my_scd->clock = remote_clock;
		}

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		clock = __update_sched_clock(scd, now);
	}

	__raw_spin_unlock(&scd->lock);

	return clock;
}
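/*
 * Minimal userspace sketch (not kernel code) of the coupling rule used above:
 * the signed difference tolerates counter wraparound, and the larger of the
 * two readings wins so observed time never moves backwards. The function name
 * couple_clocks() is illustrative, not taken from the kernel.
 */
#include <stdint.h>

static uint64_t couple_clocks(uint64_t this_clock, uint64_t remote_clock)
{
	/* remote is strictly behind: keep the local reading */
	if ((int64_t)(remote_clock - this_clock) < 0)
		return this_clock;
	/* remote is ahead (or equal): adopt it so both views stay monotonic */
	return remote_clock;
}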
/*
 * TSC-warp measurement loop running on both CPUs:
 */
static __cpuinit void check_tsc_warp(void)
{
	cycles_t start, now, prev, end;
	int i;

	rdtsc_barrier();
	start = get_cycles();
	rdtsc_barrier();
	/*
	 * The measurement runs for 20 msecs:
	 */
	end = start + tsc_khz * 20ULL;
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		__raw_spin_lock(&sync_lock);
		prev = last_tsc;
		rdtsc_barrier();
		now = get_cycles();
		rdtsc_barrier();
		last_tsc = now;
		__raw_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			__raw_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			nr_warps++;
			__raw_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start),
		"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
		now-start, end-start);
}
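/*
 * Hedged userspace analogue of the warp check above: two threads repeatedly
 * publish a shared "last" timestamp under a lock and flag any observation
 * where the previously published value is ahead of the current reading.
 * clock_gettime(CLOCK_MONOTONIC_RAW) stands in for the raw TSC, a pthread
 * mutex for the raw spinlock, and the warp comparison is kept inside the lock
 * for brevity; all names here are illustrative.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_ns;
static uint64_t max_warp_ns;

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void *warp_check(void *arg)
{
	uint64_t prev, now;
	int i;

	(void)arg;
	for (i = 0; i < 1000000; i++) {
		pthread_mutex_lock(&sync_lock);
		prev = last_ns;
		now = now_ns();
		last_ns = now;
		/* a "warp": the previously stored timestamp is ahead of us */
		if (prev > now && prev - now > max_warp_ns)
			max_warp_ns = prev - now;
		pthread_mutex_unlock(&sync_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, warp_check, NULL);
	warp_check(NULL);
	pthread_join(t, NULL);
	printf("max warp: %llu ns\n", (unsigned long long)max_warp_ns);
	return 0;
}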
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
void __cpuinit rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
	__raw_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	__raw_spin_unlock(&timebase_lock);
}
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * in order to update a remote cpu's clock based on our
		 * unstable raw time rebase it against:
		 *	tick_raw	(offset between raw counters)
		 *	tick_gtod	(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);

		__update_sched_clock(scd, now, &clock);

		__raw_spin_unlock(&scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		__update_sched_clock(scd, now, NULL);
		clock = scd->clock;
		__raw_spin_unlock(&scd->lock);
	}

	return clock;
}
static inline void rgb_unlock(char *name, rgb_s *lock)
{
#if DEBUG
	printf("%s unlock %s\n", name, lock->name);
#endif
#if MUTEX
	pthread_mutex_unlock(&lock->lock);
#elif SPIN
	__raw_spin_unlock(&lock->lock);
#elif TSPIN
	pthread_spin_unlock(&lock->lock);
#endif
}
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
void __cpuinit rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	__raw_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	__raw_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}
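/*
 * Userspace sketch of the give/take handoff implemented by
 * rtas_give_timebase()/rtas_take_timebase() above: one side publishes a value
 * under a lock, the other spins until it appears, consumes it, and clears it
 * to acknowledge. A pthread mutex and a plain busy-wait stand in for the raw
 * spinlock and barrier(); give_value()/take_value() are illustrative names.
 */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t timebase_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint64_t timebase;

static void give_value(uint64_t val)
{
	pthread_mutex_lock(&timebase_lock);
	timebase = val;				/* publish */
	pthread_mutex_unlock(&timebase_lock);

	while (timebase)			/* wait for the peer to consume it */
		;
}

static uint64_t take_value(void)
{
	uint64_t val;

	while (!timebase)			/* wait for the peer to publish */
		;

	pthread_mutex_lock(&timebase_lock);
	val = timebase;
	timebase = 0;				/* ack: tell the giver we are done */
	pthread_mutex_unlock(&timebase_lock);

	return val;
}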
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, ip, parent_ip, flags, pc);

unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

out:
	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}
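/*
 * Sketch of the recursion/nesting guard used by the tracer callbacks above:
 * an atomic "disabled" counter is bumped on entry and the traced body only
 * runs when this caller is the sole one (count == 1). C11 atomics stand in
 * for the kernel's per-cpu atomic_t; trace_enter()/trace_exit() are
 * illustrative names.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_long disabled;

static bool trace_enter(void)
{
	/* true only for the first, non-nested entry on this counter */
	return atomic_fetch_add(&disabled, 1) + 1 == 1;
}

static void trace_exit(void)
{
	atomic_fetch_sub(&disabled, 1);
}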
/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__update_sched_clock(scd, now);
	__raw_spin_unlock(&scd->lock);
}
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	if (!regs)
		return;

	//if (kexec_should_crash(current))
	//	crash_kexec(regs);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(signr);
}
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	s64 mult, delta_gtod, delta_raw;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now, NULL);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock 2 jiffies.
	 */
	delta_gtod = now_gtod - scd->tick_gtod;
	delta_raw = now - scd->tick_raw;

	if ((long)delta_raw > 0) {
		mult = delta_gtod << MULTI_SHIFT;
		do_div(mult, delta_raw);
		scd->multi = mult;
		if (scd->multi > MAX_MULTI)
			scd->multi = MAX_MULTI;
		else if (scd->multi < MIN_MULTI)
			scd->multi = MIN_MULTI;
	} else
		scd->multi = 1 << MULTI_SHIFT;

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	scd->tick_jiffies = now_jiffies;
	__raw_spin_unlock(&scd->lock);
}
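/*
 * Standalone sketch of the fixed-point ratio computed above: the
 * gtod-delta / raw-delta ratio is kept as a multiplier shifted left by
 * MULTI_SHIFT and clamped to a sane range. The EX_* shift and clamp values
 * below are illustrative stand-ins, not the kernel's actual MULTI_SHIFT,
 * MIN_MULTI and MAX_MULTI definitions.
 */
#include <stdint.h>

#define EX_MULTI_SHIFT	15
#define EX_MIN_MULTI	((int64_t)1 << (EX_MULTI_SHIFT - 1))	/* ratio 0.5 */
#define EX_MAX_MULTI	((int64_t)2 << EX_MULTI_SHIFT)		/* ratio 2.0 */

static int64_t compute_multi(int64_t delta_gtod, int64_t delta_raw)
{
	int64_t mult;

	if (delta_raw <= 0)
		return (int64_t)1 << EX_MULTI_SHIFT;	/* no raw progress: ratio 1.0 */

	mult = (delta_gtod << EX_MULTI_SHIFT) / delta_raw;
	if (mult > EX_MAX_MULTI)
		mult = EX_MAX_MULTI;
	else if (mult < EX_MIN_MULTI)
		mult = EX_MIN_MULTI;

	return mult;
}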
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now = sched_clock();
	now_gtod = ktime_to_ns(ktime_get());

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock 2 jiffies.
	 */
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__raw_spin_unlock(&scd->lock);
}
u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	__raw_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	__raw_spin_unlock(&trace_clock_struct.lock);

out:
	raw_local_irq_restore(flags);

	return now;
}
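/*
 * Userspace sketch of the clamping rule in trace_clock_global() above: a
 * local clock reading is compared against the last globally published value
 * under a lock and nudged forward if it fell behind, so consumers never see
 * the global clock move backwards. Hypothetical names throughout; a pthread
 * mutex stands in for the raw spinlock.
 */
#include <pthread.h>
#include <stdint.h>

static struct {
	pthread_mutex_t lock;
	uint64_t prev_time;
} trace_clock = { PTHREAD_MUTEX_INITIALIZER, 0 };

static uint64_t global_clock(uint64_t local_now)
{
	uint64_t now = local_now;

	pthread_mutex_lock(&trace_clock.lock);
	/* the local clock fell behind the last published value: nudge forward */
	if ((int64_t)(now - trace_clock.prev_time) < 0)
		now = trace_clock.prev_time + 1;
	trace_clock.prev_time = now;
	pthread_mutex_unlock(&trace_clock.lock);

	return now;
}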
void _remote_spin_unlock(_remote_spinlock_t *lock)
{
	__raw_spin_unlock((raw_spinlock_t *)(*lock));
}
static void unlock_rtas(unsigned long flags)
{
	__raw_spin_unlock(&rtas.lock);
	local_irq_restore(flags);
	preempt_enable();
}
void __lockfunc __release_kernel_lock(void)
{
	__raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}
void _raw_spin_unlock(spinlock_t *lock)
{
	debug_spin_unlock(lock);
	__raw_spin_unlock(&lock->raw_lock);
}
static inline void __unlock_kernel(void)
{
	__raw_spin_unlock(&kernel_flag);
	preempt_enable();
}
static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
			  struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	__raw_spin_unlock(&lock->raw_lock);
}
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}