/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
 * expiry time which is less than base->offset. Nothing wrong
 * with that; just avoid calling into the tick code, which
 * now objects to negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= cpu_base->expires_next.tv64)
		return 0;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return 0;

	/*
	 * Clockevents returns -ETIME when the expiry time is already in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		cpu_base->expires_next = expires;
	return res;
}
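For reference, the guard that every example here relies on is, in kernels of
this era, a plain state-flag test on the timer itself. A sketch (the actual
helper lives in include/linux/hrtimer.h):

/*
 * Returns nonzero while the timer's callback is executing, i.e. while
 * HRTIMER_STATE_CALLBACK is set in timer->state.
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_CALLBACK;
}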
Example #2
/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);
	int basenum = base->index;

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	} else {
		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			goto again;
		}
	}
	return new_base;
}
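switch_hrtimer_base() falls back to the current CPU when the chosen target
cannot take the timer; hrtimer_check_target() makes that call. A sketch of the
contemporaneous check, assuming the hres_active and expires_next fields seen
in the examples above:

/*
 * A remote CPU is not a suitable target when the timer would expire before
 * that CPU's next programmed event, since we cannot reprogram a remote
 * clock event device.
 */
static int hrtimer_check_target(struct hrtimer *timer,
				struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}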
Example #3
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
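The -1 return value ("callback is running") is usually handled by retrying
until the callback has finished, which is essentially what hrtimer_cancel()
does:

/*
 * Spin until the running callback (if any) completes;
 * hrtimer_try_to_cancel() returns -1 only while the callback executes.
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}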
Example #4
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timer. Allow reprogramming of the event device
		 */
		enqueue_hrtimer(timer, new_base, 1);
	}
}
Example #5
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as STATE_MIGRATE, not INACTIVE, otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU.
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		enqueue_hrtimer(timer, new_base);

		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
}
Example #6
void sobliv_on_exit_top_m(struct task_struct* t)
{
	if (budget_precisely_tracked(t)) {
		if (tsk_rt(t)->budget.timer.armed) {

			if (!is_running(t)) {
				/* The time at which we started draining budget while
				 * suspended is recorded in suspend_timestamp, which was
				 * set either when 't' entered the top-m while suspended
				 * or when 't' blocked. */
				lt_t suspend_cost;
				BUG_ON(!tsk_rt(t)->budget.suspend_timestamp);
				suspend_cost = litmus_clock() -
						tsk_rt(t)->budget.suspend_timestamp;
				TRACE_TASK(t, "budget consumed while suspended: %llu\n",
								suspend_cost);
				get_exec_time(t) += suspend_cost;

				/* timer should have fired before now */
				if (get_exec_time(t) + 1000000/10 > get_exec_cost(t)) {
					TRACE_TASK(t,
						"budget overrun while suspended by over 1/10 "
						"millisecond! timer should have already fired!\n");
					WARN_ON(1);
				}
			}

			TRACE_TASK(t, "stops draining budget\n");
			/* the callback will handle it if it is executing */
			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) {
				/* TODO: record a timestamp if the task isn't running */
				cancel_enforcement_timer(t);
			}
			else {
				TRACE_TASK(t,
					"within callback context. skipping operation.\n");
			}
		}
		else {
			TRACE_TASK(t, "was not draining budget\n");
		}
	}
}
Example #7
/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(timer->expires, base->offset);
	int res;

	WARN_ON_ONCE(timer->expires.tv64 < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
 * expiry time which is less than base->offset. Nothing wrong
 * with that; just avoid calling into the tick code, which
 * now objects to negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME when the expiry time is already in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}
Example #8
void sobliv_on_enter_top_m(struct task_struct* t)
{
	if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
		if (tsk_rt(t)->budget.timer.armed)
			TRACE_TASK(t, "budget timer already armed.\n");
		else {
			/* if we're blocked, then record the time at which we
			   started measuring */
			if (!is_running(t))
				tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

			/* the callback will handle it if it is executing */
			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) {
				arm_enforcement_timer(t, 0);
			}
			else {
				TRACE_TASK(t,
					"within callback context. deferring timer arm.\n");
			}
		}
	}
}
Example #9
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);
	int basenum = base->index;

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		this_cpu = smp_processor_id();

		if (cpu != this_cpu && (hrtimer_check_target(timer, new_base)
			|| !cpu_online(cpu))) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			cpu = smp_processor_id();
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	}
	return new_base;
}