Code example #1
File: arch_timer.c  Project: 0x000000FF/Linux4Edison
/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer was armed and either schedule a corresponding
 * soft timer or inject directly if already expired.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	cycle_t cval, now;
	u64 ns;

	if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
		!(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
		return;

	cval = timer->cntv_cval;
	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

	BUG_ON(timer_is_armed(timer));

	if (cval <= now) {
		/*
		 * Timer has already expired while we were not
		 * looking. Inject the interrupt and carry on.
		 */
		kvm_timer_inject_irq(vcpu);
		return;
	}

	ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
	timer_arm(timer, ns);
}
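
Examples #1, #3, #4, #5, #8 and #9 all call the older two-argument form of cyclecounter_cyc2ns(). In those trees it is a small inline helper (in include/linux/clocksource.h in older kernels; treat the exact location as an assumption and check your tree). As a minimal sketch, the conversion is a plain multiply-and-shift:

static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;

	/* scale cycles to nanoseconds with the counter's mult/shift pair */
	ret = (ret * cc->mult) >> cc->shift;

	return ret;
}

So in example #1, timer_arm() receives (cval - now) guest-timer cycles converted to nanoseconds at the rate encoded by mult and shift.
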
Code example #2
File: arch_timer.c  Project: raoy1990/linux
/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	u64 ns;
	cycle_t cval, now;

	BUG_ON(timer_is_armed(timer));

	/*
	 * No need to schedule a background timer if the guest timer has
	 * already expired, because kvm_vcpu_block will return before putting
	 * the thread to sleep.
	 */
	if (kvm_timer_should_fire(vcpu))
		return;

	/*
	 * If the timer is not capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(vcpu))
		return;

	/*  The timer has not yet expired, schedule a background timer */
	cval = timer->cntv_cval;
	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

	ns = cyclecounter_cyc2ns(timecounter->cc,
				 cval - now,
				 timecounter->mask,
				 &timecounter->frac);
	timer_arm(timer, ns);
}
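
Example #2 targets a newer tree where cyclecounter_cyc2ns() grew two extra parameters. A sketch of that variant, believed consistent with include/linux/timecounter.h but worth verifying against your kernel version, threads the sub-nanosecond remainder through *frac so that repeated conversions do not lose precision:

static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      u64 cycles, u64 mask, u64 *frac)
{
	u64 ns = (u64)cycles;

	/* add back the fractional nanoseconds left over from the last call */
	ns = (ns * cc->mult) + *frac;
	/* mask retains the bits below cc->shift as the new remainder */
	*frac = ns & mask;
	return ns >> cc->shift;
}
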
Code example #3
File: en_clock.c  Project: yangliang913/fastsocket
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	u64 ns;

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	/* Using shift to make calculation more accurate. Since current HW
	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
	 * register, the biggest shift when calculating using u64, is 14
	 * (max_cycles * multiplier < 2^64)
	 */
	mdev->cycles.shift = 14;
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);

	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;
}
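
The shift = 14 comment above can be sanity-checked with back-of-the-envelope arithmetic, assuming clocksource_khz2mult() computes roughly mult = (10^6 << shift) / khz (approximate values, for illustration only):

/*
 * mult  = clocksource_khz2mult(427000, 14)
 *       ~ (1000000ULL << 14) / 427000   ~ 38370
 * worst = max_cycles * mult
 *       ~ (2^48) * 38370                ~ 1.08e19  <  2^64 ~ 1.84e19
 *
 * With shift = 15 the multiplier doubles (~76740) and the worst case
 * (~2.16e19) no longer fits in 64 bits, so 14 really is the largest
 * safe shift for a 427 MHz clock read through a 48-bit register.
 */
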
Code example #4
File: hrt.c  Project: FrozenCow/FIRE-ICE
static inline u64 get_arch_time(struct timecounter *tc)
{
	cycle_t value;
	const struct cyclecounter *cc = tc->cc;

	value = cc->read(cc);
	return cyclecounter_cyc2ns(cc, value);
}
Code example #5
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
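
The delta > mask / 2 test is how timecounter_cyc2time() disambiguates wraparound: a delta in the upper half of the counter range is taken to mean the timestamp is older than tc->cycle_last rather than far in the future. A toy walkthrough with a hypothetical 8-bit counter:

/*
 * Assume cc->mask = 0xFF and tc->cycle_last = 0xF0.
 *
 * cycle_tstamp = 0x10 (just ahead; the counter wrapped):
 *   delta = (0x10 - 0xF0) & 0xFF = 0x20  <= 0x7F
 *   -> treated as 0x20 cycles in the future:
 *      nsec = tc->nsec + cyclecounter_cyc2ns(cc, 0x20)
 *
 * cycle_tstamp = 0xE0 (just behind):
 *   delta = (0xE0 - 0xF0) & 0xFF = 0xF0  >  0x7F
 *   -> recomputed backwards: (0xF0 - 0xE0) & 0xFF = 0x10, and
 *      nsec = tc->nsec - cyclecounter_cyc2ns(cc, 0x10)
 */
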
Code example #6
File: en_clock.c  Project: forgivemyheart/linux
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tstamp *tstamp = &priv->tstamp;
	u64 ns;
	u64 frac = 0;
	u32 dev_freq;

	mlx5e_timestamp_init_config(tstamp);
	dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
	if (!dev_freq) {
		mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}
	rwlock_init(&tstamp->lock);
	tstamp->cycles.read = mlx5e_read_internal_timer;
	tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
	tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
						   tstamp->cycles.shift);
	tstamp->nominal_c_mult = tstamp->cycles.mult;
	tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
	tstamp->mdev = priv->mdev;

	timecounter_init(&tstamp->clock, &tstamp->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	tstamp->overflow_period = ns;

	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
	if (tstamp->overflow_period)
		schedule_delayed_work(&tstamp->overflow_work, 0);
	else
		mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");

	/* Configure the PHC */
	tstamp->ptp_info = mlx5e_ptp_clock_info;
	snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");

	tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
					 &priv->mdev->pdev->dev);
	if (IS_ERR(tstamp->ptp)) {
		mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(tstamp->ptp));
		tstamp->ptp = NULL;
	}
}
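
For a feel of the overflow-watchdog arithmetic in example #6, the division reduces to a simple jiffies expression (illustrative algebra, not driver output):

/*
 * Let wrap_ns = cyclecounter_cyc2ns(&cycles, mask, ...) be the time it
 * takes the 41-bit counter to wrap. Then:
 *
 *   overflow_period = wrap_ns / (NSEC_PER_SEC / 2 / HZ)
 *                   = wrap_ns * 2 * HZ / NSEC_PER_SEC
 *
 * i.e. a jiffies count proportional to the wrap time (numerically about
 * twice the wrap period expressed in jiffies). It is used as the delay
 * of the self-rescheduling overflow_work set up above.
 */
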
Code example #7
File: en_clock.c  Project: anandab/akaros
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	panic("Disabled");
#if 0 // AKAROS_PORT
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;
	uint64_t ns, zero = 0;

	rwlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	/* Using shift to make calculation more accurate. Since current HW
	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
	 * register, the biggest shift when calculating using u64, is 14
	 * (max_cycles * multiplier < 2^64)
	 */
	mdev->cycles.shift = 14;
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 epoch_nsec());
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else {
		mlx4_info(mdev, "registered PHC clock\n");
	}

#endif
}
Code example #8
File: en_clock.c  Project: sagark/linux-pf-profile
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;
	u64 ns;

	/* mlx4_en_init_timestamp is called for each netdev.
	 * mdev->ptp_clock is common for all ports, skip initialization if
	 * was done for other port.
	 */
	if (mdev->ptp_clock)
		return;

	rwlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else {
		mlx4_info(mdev, "registered PHC clock\n");
	}

}
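
Example #8 replaces the hard-coded shift = 14 of examples #3 and #7 with freq_to_shift(), deriving the shift from the reported core clock instead of assuming 427 MHz. The real helper lives in the same en_clock.c; the sketch below is purely illustrative (hypothetical name, not the driver's code) and shows the contract such a helper must satisfy, namely finding the largest shift whose worst-case product still fits in 64 bits:

/* Illustrative only: pick the largest shift such that max_cycles * mult
 * cannot overflow u64, with mult ~ (10^6 << shift) / freq_khz as in
 * clocksource_khz2mult(). */
static u32 example_freq_to_shift(u32 freq_khz, u64 max_cycles)
{
	u32 shift;

	for (shift = 32; shift > 0; shift--) {
		u64 mult = div_u64(1000000ULL << shift, freq_khz);

		/* overflow-safe form of: max_cycles * mult < 2^64 */
		if (mult && max_cycles <= div64_u64(ULLONG_MAX, mult))
			return shift;
	}
	return 0;
}

With freq_khz = 427000 and max_cycles = CLOCKSOURCE_MASK(48), this search lands on 14, matching the old hard-coded value.
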
Code example #9
/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:         Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}
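
For context, timecounter_read_delta() is the static building block behind the exported timecounter_read(), which accumulates the delta into tc->nsec. A sketch believed consistent with kernel/time/timecounter.c (same kernel-version caveat as above):

u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
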
Code example #10
File: timecounter.c  Project: 020gzh/linux
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec = tc->nsec, frac = tc->frac;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as old time stamp instead.
	 */
	if (delta > tc->cc->mask / 2) {
		delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
	} else {
		nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
	}

	return nsec;
}
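
Example #10 is the same function as example #5 after the fractional-nanosecond rework. The backwards helper it calls is private to kernel/time/timecounter.c; a sketch of it (hedged, verify against your tree) subtracts the stored fraction before shifting so that forward and backward conversions over the same delta stay consistent:

static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
			       u64 cycles, u64 mask, u64 frac)
{
	u64 ns = (u64)cycles;

	/* undo the "+ frac" of the forward conversion before shifting */
	ns = ((ns * cc->mult) - frac) >> cc->shift;

	return ns;
}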