Example #1
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec64 next_sync;

	struct xen_platform_op op;
	struct timespec64 now;
	struct timekeeper *tk = priv;
	static bool settime64_supported = true;
	int ret;

	now.tv_sec = tk->xtime_sec;
	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec64_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

again:
	if (settime64_supported) {
		op.cmd = XENPF_settime64;
		op.u.settime64.mbz = 0;
		op.u.settime64.secs = now.tv_sec;
		op.u.settime64.nsecs = now.tv_nsec;
		op.u.settime64.system_time = xen_clocksource_read();
	} else {
		op.cmd = XENPF_settime32;
		op.u.settime32.secs = now.tv_sec;
		op.u.settime32.nsecs = now.tv_nsec;
		op.u.settime32.system_time = xen_clocksource_read();
	}

	ret = HYPERVISOR_platform_op(&op);

	if (ret == -ENOSYS && settime64_supported) {
		settime64_supported = false;
		goto again;
	}
	if (ret < 0)
		return NOTIFY_BAD;

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}
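A note on usage: a callback like the one above only runs if the timekeeping core actually invokes it, which happens once it is wrapped in a notifier_block and registered with pvclock_gtod_register_notifier(). The sketch below shows that wiring under the assumption that the function above is in scope; the init helper name is hypothetical and not taken from this listing.

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

static void __init xen_pvclock_gtod_setup(void)	/* hypothetical helper */
{
	/* Only the initial domain may set the Xen wallclock. */
	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}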
Example #2
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec next_sync;

	struct xen_platform_op op;
	struct timespec now;

	now = __current_kernel_time();

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now.tv_sec;
	op.u.settime.nsecs = now.tv_nsec;
	op.u.settime.system_time = xen_clocksource_read();

	(void)HYPERVISOR_dom0_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}
Example #3
static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
Example #4
/*
 * Xen sched_clock implementation.  Returns the number of unstolen
 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
 * states.
 */
unsigned long long xen_sched_clock(void)
{
	struct vcpu_runstate_info state;
	cycle_t now;
	u64 ret;
	s64 offset;

	/*
	 * Ideally sched_clock should be called on a per-cpu basis
	 * anyway, so preempt should already be disabled, but that's
	 * not current practice at the moment.
	 */
	preempt_disable();

	now = xen_clocksource_read();

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	offset = now - state.state_entry_time;
	if (offset < 0)
		offset = 0;

	ret = state.time[RUNSTATE_blocked] +
		state.time[RUNSTATE_running] +
		offset;

	preempt_enable();

	return ret;
}
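The runstate snapshot used above also carries the data for the complementary figure, i.e. stolen time: what the VCPU spent runnable-but-not-running or offline. A minimal sketch, reusing get_runstate_snapshot() from this example; the helper name is hypothetical and not part of the original code.

static u64 xen_stolen_time_sketch(void)	/* hypothetical helper */
{
	struct vcpu_runstate_info state;

	get_runstate_snapshot(&state);

	/*
	 * Everything that is not RUNNING or BLOCKED was taken away by the
	 * hypervisor: time spent waiting on the run queue or offline.
	 */
	return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
}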
Example #5
static int xen_set_wallclock(unsigned long now)
{
	struct xen_platform_op op;
	int rc;

	/* do nothing for domU */
	if (!xen_initial_domain())
		return -1;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now;
	op.u.settime.nsecs = 0;
	op.u.settime.system_time = xen_clocksource_read();

	rc = HYPERVISOR_dom0_op(&op);
	WARN(rc != 0, "XENPF_settime failed: now=%ld\n", now);

	return rc;
}
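For context, on x86 the wallclock write path reaches a function like this through the platform ops table. The sketch below shows how the hook would typically be installed in kernels of the era where set_wallclock took a plain seconds value; the init helper name is hypothetical.

static void __init xen_wallclock_setup_sketch(void)	/* hypothetical helper */
{
	x86_platform.set_wallclock = xen_set_wallclock;
}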
Example #6
static u64 xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}
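This one-line wrapper exists because the clocksource framework wants a read callback that takes a struct clocksource pointer, while xen_clocksource_read() takes no arguments. Below is a minimal sketch of how such a read hook plugs into a clocksource registration; the field values are assumptions, not values copied from this project.

static struct clocksource xen_clocksource __read_mostly = {
	.name	= "xen",
	.rating	= 400,
	.read	= xen_clocksource_get_cycles,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init xen_clocksource_setup_sketch(void)	/* hypothetical helper */
{
	/* Xen system time is already in nanoseconds, so register at 1 GHz. */
	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
}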
Example #7
/*
  Get a hypervisor absolute time.  In theory we could maintain an
  offset between the kernel's time and the hypervisor's time, and
  apply that to a kernel's absolute timeout.  Unfortunately the
  hypervisor and kernel times can drift even if the kernel is using
  the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}
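The absolute value matters because Xen's SCHEDOP_poll hypercall expects its timeout in absolute Xen system time rather than as a relative delay. Here is a sketch of a caller that converts a relative delta with get_abs_timeout() before polling an event channel; the surrounding function is a hypothetical illustration, not code from this project.

static void poll_evtchn_sketch(evtchn_port_t port, unsigned long delta)	/* hypothetical */
{
	struct sched_poll poll;

	poll.nr_ports = 1;
	poll.timeout = get_abs_timeout(delta);	/* absolute deadline, in ns */
	set_xen_guest_handle(poll.ports, &port);

	HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}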
Example #8
File: time.c Project: avagin/linux
static u64 xen_sched_clock(void)
{
	return xen_clocksource_read() - xen_sched_clock_offset;
}
static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}
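In this later variant, sched_clock is simply the Xen clocksource minus a boot-time offset, so scheduler timestamps start near zero instead of at whatever value the domain's system time happened to hold. A minimal sketch of how that offset would be latched during time init; the function name is hypothetical, and how the sched_clock hook itself gets installed varies between kernel versions.

static u64 xen_sched_clock_offset __read_mostly;

static void __init xen_sched_clock_setup_sketch(void)	/* hypothetical helper */
{
	/* Capture "now" once at boot; xen_sched_clock() then counts from 0. */
	xen_sched_clock_offset = xen_clocksource_read();
}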