static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Use the opportunity that we have both locks
	 * taken to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
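The coupling step above is a lock-free "advance to the larger timestamp" update. A minimal user-space sketch of the same retry technique, using C11 atomics in place of the kernel's cmpxchg64 (the names and types here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdint.h>

/*
 * Advance *clk to at least 'candidate' without ever moving it backwards.
 * Mirrors the retry loop above: compare with the same signed-difference
 * test, and re-read the current value whenever the CAS loses a race.
 */
static uint64_t clock_couple(_Atomic uint64_t *clk, uint64_t candidate)
{
	uint64_t cur = atomic_load(clk);

	while ((int64_t)(candidate - cur) > 0) {
		if (atomic_compare_exchange_weak(clk, &cur, candidate))
			return candidate;
		/* cur now holds the freshly observed value; retry */
	}
	return cur;	/* already at or past 'candidate' */
}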
Example #2
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Use the opportunity that we have both locks
	 * taken to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
Example #3
u64 sched_clock_cpu(int cpu)
{
	u64 now, clock, this_clock, remote_clock;
	struct sched_clock_data *scd;

	if (sched_clock_stable)
		return sched_clock();

	scd = cpu_sdc(cpu);

	/*
	 * Normally this is not called in NMI context - but if it is,
	 * trying to do any locking here is totally lethal.
	 */
	if (unlikely(in_nmi()))
		return scd->clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		this_clock = __update_sched_clock(my_scd, now);
		remote_clock = scd->clock;

		/*
		 * Use the opportunity that we have both locks
		 * taken to couple the two clocks: we take the
		 * larger time as the latest time for both
		 * runqueues. (this creates monotonic movement)
		 */
		if (likely((s64)(remote_clock - this_clock) < 0)) {
			clock = this_clock;
			scd->clock = clock;
		} else {
			/*
			 * Should be rare, but possible:
			 */
			clock = remote_clock;
			my_scd->clock = remote_clock;
		}

		__raw_spin_unlock(&my_scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		clock = __update_sched_clock(scd, now);
	}

	__raw_spin_unlock(&scd->lock);

	return clock;
}
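lock_double_clock() is used above but not shown in these examples. A sketch consistent with that usage (an assumption, not code taken from the examples) takes the two per-CPU locks in a fixed address order, so two CPUs coupling each other's clocks can never deadlock:

/*
 * Sketch only: acquire both clock locks in address order so concurrent
 * cross-CPU updates always take them in the same order (no ABBA
 * deadlock). Both locks are released by the caller, as in the example
 * above.
 */
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}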
Example #4
static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the (initial) unstable->stable transition continuous.
	 */
	raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);

	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
			scd->tick_gtod, gtod_offset,
			scd->tick_raw,  raw_offset);

	static_branch_enable(&__sched_clock_stable);
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}
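The offset computed here is what keeps the reported clock continuous across the switch. Assuming the stable read path returns sched_clock() + raw_offset and the unstable path stays close to tick_gtod + gtod_offset (an assumption about the read side, which is not shown in these examples), the algebra works out as follows:

/*
 * raw_offset = (tick_gtod + gtod_offset) - tick_raw
 *   =>  tick_raw + raw_offset == tick_gtod + gtod_offset
 *
 * So at the instant the static branch is flipped, the stable formula
 * (raw time + raw_offset) and the unstable formula (gtod time +
 * gtod_offset) evaluate to the same value and the clock does not jump.
 */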
Example #5
/*
 * We just idled for delta_ns nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
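For illustration, a hypothetical caller (everything in this sketch, including pm_timer_read_ns(), is a stand-in and not taken from the examples): an idle driver that measured the sleep length with an external timer reports it on wakeup, with interrupts still disabled, so the per-CPU clock advances by the measured delta rather than by whatever the unreliable sched_clock() did while the CPU was idle.

/* Hypothetical caller sketch; pm_timer_read_ns() stands in for whatever
 * PM timer was used to measure the idle period. */
static void example_idle_exit(u64 sleep_start_ns)
{
	u64 slept_ns = pm_timer_read_ns() - sleep_start_ns;

	/* irqs are still disabled here, as required above */
	sched_clock_idle_wakeup_event(slept_ns);
}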
Example #6
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * In order to update a remote cpu's clock based on our
		 * unstable raw time, rebase it against:
		 *   tick_raw		(offset between raw counters)
		 *   tick_gtod		(tick offset between cpus)
		 */
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);

		__update_sched_clock(scd, now, &clock);

		__raw_spin_unlock(&scd->lock);

	} else {
		__raw_spin_lock(&scd->lock);
		__update_sched_clock(scd, now, NULL);
		clock = scd->clock;
		__raw_spin_unlock(&scd->lock);
	}

	return clock;
}
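The four add/subtract lines above amount to a single rebase expression. Reading it out as a helper (a restatement of the code above, not an additional kernel interface):

/*
 * now' = now + (scd->tick_raw - my_scd->tick_raw)
 *            + (my_scd->tick_gtod - scd->tick_gtod)
 *
 * The tick_raw difference carries both the offset between the two raw
 * counters and the difference in when the two CPUs last ticked; the
 * tick_gtod term removes that tick-time difference again, leaving the
 * local timestamp expressed in the remote CPU's raw time base.
 */
static u64 rebase_to_remote(u64 now,
			    struct sched_clock_data *my_scd,
			    struct sched_clock_data *scd)
{
	return now - my_scd->tick_raw + scd->tick_raw
		   + my_scd->tick_gtod - scd->tick_gtod;
}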
Example #7
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__update_sched_clock(scd, now);
	__raw_spin_unlock(&scd->lock);
}
Example #8
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Update these values even if sched_clock_stable(), because it can
	 * become unstable at any point in time at which point we need some
	 * values to fall back on.
	 *
	 * XXX arguably we can skip this if we expose tsc_clocksource_reliable
	 */
	scd = this_scd();
	scd->tick_raw  = sched_clock();
	scd->tick_gtod = ktime_get_ns();

	if (!sched_clock_stable() && likely(sched_clock_running))
		sched_clock_local(scd);
}
Example #9
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	s64 mult, delta_gtod, delta_raw;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now, NULL);
	/*
	 * Update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod on top of that
	 * would advance the clock by 2 jiffies.
	 */
	delta_gtod = now_gtod - scd->tick_gtod;
	delta_raw = now - scd->tick_raw;

	if ((long)delta_raw > 0) {
		mult = delta_gtod << MULTI_SHIFT;
		do_div(mult, delta_raw);
		scd->multi = mult;
		if (scd->multi > MAX_MULTI)
			scd->multi = MAX_MULTI;
		else if (scd->multi < MIN_MULTI)
			scd->multi = MIN_MULTI;
	} else
		scd->multi = 1 << MULTI_SHIFT;

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	scd->tick_jiffies = now_jiffies;
	__raw_spin_unlock(&scd->lock);
}
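The multiplier block above is a clamped fixed-point ratio: the observed gtod/raw rate since the last tick, stored with MULTI_SHIFT fractional bits. A small user-space sketch of the same computation (the SK_MULTI_SHIFT/SK_MIN_MULTI/SK_MAX_MULTI values below are illustrative stand-ins, not the kernel's constants, and plain division replaces do_div()):

#include <stdint.h>

#define SK_MULTI_SHIFT	15
#define SK_MIN_MULTI	((int64_t)1 << (SK_MULTI_SHIFT - 1))	/* 0.5x */
#define SK_MAX_MULTI	((int64_t)2 << SK_MULTI_SHIFT)		/* 2.0x */

/* Ratio of gtod progress to raw-clock progress in fixed point, clamped
 * so one bad sample cannot make the derived clock race or stall. */
static int64_t compute_multi(int64_t delta_gtod, int64_t delta_raw)
{
	int64_t mult;

	if (delta_raw <= 0)
		return (int64_t)1 << SK_MULTI_SHIFT;	/* fall back to 1.0x */

	mult = (delta_gtod << SK_MULTI_SHIFT) / delta_raw;
	if (mult > SK_MAX_MULTI)
		mult = SK_MAX_MULTI;
	else if (mult < SK_MIN_MULTI)
		mult = SK_MIN_MULTI;
	return mult;
}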
Example #10
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}
Example #11
static void __clear_sched_clock_stable(struct work_struct *work)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the stable->unstable transition continuous.
	 *
	 * Trouble is, this is typically called from the TSC watchdog
	 * timer, which is by definition late. This means the tick
	 * values can already be screwy.
	 *
	 * Still do what we can.
	 */
	gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);

	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, gtod_offset,
			scd->tick_raw,  raw_offset);

	static_branch_disable(&__sched_clock_stable);
	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
}
Example #12
void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now = sched_clock();
	now_gtod = ktime_to_ns(ktime_get());

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now);
	/*
	 * Update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod on top of that
	 * would advance the clock by 2 jiffies.
	 */
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__raw_spin_unlock(&scd->lock);
}
Example #13
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: the local and the remote clock values need to
	 * be read out atomically, as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32-bit, as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of the low 32-bit and the
	 * high 32-bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce an atomic readout on 32-bit, otherwise the
	 * update on the remote cpu can hit in between the readout of
	 * the low 32-bit and the high 32-bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64-bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32-bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both locks
	 * taken to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
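The remote_clock readout in the 32-bit branch above uses cmpxchg64() purely for its return value. Restated as a tiny helper (a sketch of the trick, not an interface from these examples):

/*
 * Atomic 64-bit read on a 32-bit machine: cmpxchg64(ptr, 0, 0) either
 * sees *ptr != 0 and stores nothing, or sees 0 and "replaces" it with 0,
 * which changes nothing either; in both cases it returns the full 64-bit
 * value obtained in one atomic operation, which two separate 32-bit
 * loads could not guarantee.
 */
static u64 read_u64_atomic(u64 *ptr)
{
	return cmpxchg64(ptr, 0, 0);
}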