Code example #1
File: clock.c Project: 7799/linux
/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(raw_smp_processor_id());

	return sched_clock();
}
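
As the comment says, local_clock() deltas are only meaningful when both timestamps come from the same CPU. A minimal usage sketch, assuming a kernel context where preemption can be disabled around the measured region (do_work() is a placeholder, not a kernel function):

static void time_section(void)
{
	u64 t0, t1;

	preempt_disable();		/* stay on one CPU so the delta is monotonic */
	t0 = local_clock();
	do_work();			/* placeholder for the code being timed */
	t1 = local_clock();
	preempt_enable();

	pr_info("section took %llu ns\n", (unsigned long long)(t1 - t0));
}
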
Code example #2
File: cputime.c Project: sombree/Hulk-Kernel-V2
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
Code example #3
File: clock.c Project: 7799/linux
/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(cpu);

	return sched_clock();
}
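
The warning applies whenever a timestamp taken on one CPU is compared with one taken on another, e.g. when work migrates between CPUs. A defensive sketch, assuming a kernel context (t_submit, t_done and the clamping policy are illustrative, not kernel code):

	u64 t_submit, t_done;
	s64 latency;

	t_submit = cpu_clock(raw_smp_processor_id());	/* taken on CPU i */
	/* ... the work is handed off and completes on another CPU ... */
	t_done = cpu_clock(raw_smp_processor_id());	/* taken on CPU j, possibly != i */

	latency = (s64)(t_done - t_submit);
	if (latency < 0)	/* cpu_clock(i) vs cpu_clock(j) can go backwards */
		latency = 0;	/* clamp rather than report a negative duration */
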
Code example #4
File: cputime.c Project: acton393/linux
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;

	u64_stats_update_begin(&irqtime->sync);
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time but still wants to run.
	 */
	if (hardirq_count())
		irqtime->hardirq_time += delta;
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		irqtime->softirq_time += delta;

	u64_stats_update_end(&irqtime->sync);
}
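
The u64_stats_update_begin()/u64_stats_update_end() pair lets readers take a consistent snapshot of the two counters without locking the writer out. A reader-side sketch, assuming the struct irqtime / per-cpu cpu_irqtime layout used above (read_hardirq_time_ns() itself is not a kernel function):

static u64 read_hardirq_time_ns(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 val;

	do {
		seq = u64_stats_fetch_begin(&irqtime->sync);
		val = irqtime->hardirq_time;
	} while (u64_stats_fetch_retry(&irqtime->sync, seq));

	return val;
}
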
Code example #5
File: sched_clock.c Project: Asailer/LinSched
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);
	return clock;
}
Code example #6
/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	u64 clock;
	unsigned long flags;

	flags = hard_local_irq_save_notrace();
	clock = sched_clock_cpu(smp_processor_id());
	hard_local_irq_restore_notrace(flags);

	return clock;
}
Code example #7
/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	u64 clock;
	unsigned long flags;

	flags = hard_local_irq_save_notrace();
	clock = sched_clock_cpu(cpu);
	hard_local_irq_restore_notrace(flags);

	return clock;
}
Code example #8
/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(smp_processor_id());
	local_irq_restore(flags);

	return clock;
}
Code example #9
File: smallercore.c Project: tobsan/parparse
void update_rq_clock(struct rq *rq)
{
	long delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
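
update_rq_clock() advances rq->clock by however much time sched_clock_cpu() reports has passed on that runqueue's CPU since the last update. A sketch of the usual caller pattern, assuming the runqueue lock is held as in the scheduler core (the surrounding code is schematic, not a specific kernel function):

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);		/* refresh rq->clock before using it */
	now = rq->clock;		/* or rq->clock_task for task accounting */
	/* ... scheduling decisions based on 'now' ... */
	raw_spin_unlock(&rq->lock);
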
Code example #10
u64 notrace trace_clock_global(void)
{
    unsigned long flags;
    int this_cpu;
    u64 now;

    local_irq_save(flags);

    this_cpu = raw_smp_processor_id();
    now = sched_clock_cpu(this_cpu);
    /*
     * If in an NMI context then don't risk lockups and return the
     * cpu_clock() time:
     */
    if (unlikely(in_nmi()))
        goto out;

    arch_spin_lock(&trace_clock_struct.lock);

    /*
     * TODO: if this happens often then maybe we should reset
     * my_scd->clock to prev_time+1, to make sure
     * we start ticking with the local clock from now on?
     */
    if ((s64)(now - trace_clock_struct.prev_time) < 0)
        now = trace_clock_struct.prev_time + 1;

    trace_clock_struct.prev_time = now;

    arch_spin_unlock(&trace_clock_struct.lock);

out:
    local_irq_restore(flags);

    return now;
}
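
trace_clock_global() keeps one globally monotonic timeline by taking the per-cpu clock and clamping it so it never falls behind the last published value. A standalone userspace analogue of that clamp, for illustration only (global_clock(), raw_ns() and prev_time are our names, not kernel or libc API):

#include <pthread.h>
#include <stdint.h>
#include <time.h>

static pthread_mutex_t clock_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t prev_time;

static uint64_t raw_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

uint64_t global_clock(void)
{
	uint64_t now = raw_ns();

	pthread_mutex_lock(&clock_lock);
	if ((int64_t)(now - prev_time) < 0)	/* fell behind the last published time? */
		now = prev_time + 1;		/* clamp, as trace_clock_global() does */
	prev_time = now;
	pthread_mutex_unlock(&clock_lock);

	return now;
}
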
Code example #11
u64 local_clock(void)
{
	return sched_clock_cpu(0);
}
Code example #12
u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}
Code example #13
/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
Code example #14
unsigned long long cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}