/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(raw_smp_processor_id());

	return sched_clock();
}
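A minimal sketch of how a caller honours that rule: disable preemption so both local_clock() reads happen on the same CPU. The wrapper name and do_work() are illustrative stand-ins, not kernel API.

/*
 * Hedged usage sketch: timing a code section with local_clock().
 * preempt_disable()/preempt_enable() pin us to one CPU, so the two
 * timestamps are comparable per the rule above.
 */
static void time_section_sketch(void)
{
	u64 t0, t1;

	preempt_disable();
	t0 = local_clock();
	do_work();		/* hypothetical workload, not kernel API */
	t1 = local_clock();
	preempt_enable();

	pr_info("section took %llu ns\n", (unsigned long long)(t1 - t0));
}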
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(cpu);

	return sched_clock();
}
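To illustrate the warning: only deltas between reads that used the same cpu argument are meaningful. A hedged sketch of the safe pattern; the function name is illustrative, not kernel API.

/*
 * Hedged sketch of the rule in the warning above: t1 - t0 is well
 * defined here because both reads used the same cpu argument, while
 * cpu_clock(i) - cpu_clock(j) for i != j may be negative due to the
 * bounded inter-cpu drift.
 */
static s64 cpu_clock_delta_sketch(int cpu)
{
	u64 t0 = cpu_clock(cpu);
	u64 t1 = cpu_clock(cpu);	/* same cpu argument: monotonic */

	return (s64)(t1 - t0);
}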
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;

	u64_stats_update_begin(&irqtime->sync);
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		irqtime->hardirq_time += delta;
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		irqtime->softirq_time += delta;

	u64_stats_update_end(&irqtime->sync);
}
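The u64_stats_update_begin()/u64_stats_update_end() pair above pairs with a lock-free read side that retries if it races with a writer (this matters on 32-bit, where a u64 read can tear). A sketch assuming the same per-cpu cpu_irqtime variable used by the writer; the helper name is illustrative.

/*
 * Hedged sketch of the matching read side: snapshot hardirq + softirq
 * time and retry until the snapshot was not torn by a concurrent
 * update. Assumes the per-cpu struct irqtime cpu_irqtime from above.
 */
static u64 irq_time_read_sketch(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->softirq_time + irqtime->hardirq_time;
	} while (u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}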
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	u64 clock;
	unsigned long flags;

	flags = hard_local_irq_save_notrace();
	clock = sched_clock_cpu(smp_processor_id());
	hard_local_irq_restore_notrace(flags);

	return clock;
}
/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	u64 clock;
	unsigned long flags;

	flags = hard_local_irq_save_notrace();
	clock = sched_clock_cpu(cpu);
	hard_local_irq_restore_notrace(flags);

	return clock;
}
/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(smp_processor_id());
	local_irq_restore(flags);

	return clock;
}
void update_rq_clock(struct rq *rq)
{
	long delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
u64 local_clock(void)
{
	return sched_clock_cpu(0);
}
u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}
/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
unsigned long long cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}