Example #1
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_hrtimer_deactivate(timer);

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);

		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
}
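For context, a hedged sketch of how a CPU-hotplug path could drive this helper: take both per-CPU bases, migrate every clock base, then let the local expiry code pick up timers that are already overdue, exactly as the comment above describes. The migrate_hrtimers() name and the hrtimer_peek_ahead_timers() call are assumptions modeled on the kernel's CPU-offline path, not part of this example.

static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	/* Illustrative only: drain a dead CPU's timers into this CPU's bases. */
	BUG_ON(cpu_online(scpu));

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);

	/* The offline path is globally serialized, so taking both locks is safe. */
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);

	/* Run the expiry check now to sort out timers that already expired. */
	hrtimer_peek_ahead_timers();
	local_irq_enable();
}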
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		unsigned long state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_hrtimer_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		/*
		 * We must preserve the CALLBACK state flag here,
		 * otherwise we could move the timer base in
		 * switch_hrtimer_base.
		 */
		state = timer->state & HRTIMER_STATE_CALLBACK;
		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}
Example #3
static void __run_hrtimer(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_hrtimer_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	spin_unlock(&cpu_base->lock);
	restart = fn(timer);
	spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}
	timer->state &= ~HRTIMER_STATE_CALLBACK;
}
Example #4
/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

		/* Timer is expired, act upon the callback mode */
		switch(timer->cb_mode) {
		case HRTIMER_CB_IRQSAFE_PERCPU:
		case HRTIMER_CB_IRQSAFE_UNLOCKED:
			/*
			 * This is solely for the sched tick emulation with
			 * dynamic tick support to ensure that we do not
			 * restart the tick right on the edge and end up with
			 * the tick timer in the softirq ! The calling site
			 * takes care of this. Also used for hrtimer sleeper !
			 */
			debug_hrtimer_deactivate(timer);
			return 1;
		case HRTIMER_CB_SOFTIRQ:
			/*
			 * Move everything else into the softirq pending list !
			 */
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			timer->state = HRTIMER_STATE_PENDING;
			return 1;
		default:
			BUG();
		}
	}
	return 0;
}
Example #5
static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base, int dcpu)
{
	struct hrtimer *timer;
	struct rb_node *node;
	int raise = 0;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_hrtimer_deactivate(timer);

		/*
		 * Should not happen. Per CPU timers should be
		 * canceled _before_ the migration code is called
		 */
		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
			__remove_hrtimer(timer, old_base,
					 HRTIMER_STATE_INACTIVE, 0);
			WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
			     timer, timer->function, dcpu);
			continue;
		}

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timer. Allow reprogramming of the event device
		 */
		enqueue_hrtimer(timer, new_base, 1);

#ifdef CONFIG_HIGH_RES_TIMERS
		/*
		 * Happens with high res enabled when the timer was
		 * already expired and the callback mode is
		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
		 * enqueue code does not move them to the soft irq
		 * pending list for performance/latency reasons, but
		 * in the migration state, we need to do that
		 * otherwise we end up with a stale timer.
		 */
		if (timer->state == HRTIMER_STATE_MIGRATE) {
			timer->state = HRTIMER_STATE_PENDING;
			list_add_tail(&timer->cb_entry,
				      &new_base->cpu_base->cb_pending);
			raise = 1;
		}
#endif
		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
	return raise;
}
static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
{
	spin_lock_irq(&cpu_base->lock);

	while (!list_empty(&cpu_base->cb_pending)) {
		enum hrtimer_restart (*fn)(struct hrtimer *);
		struct hrtimer *timer;
		int restart;

		timer = list_entry(cpu_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		debug_hrtimer_deactivate(timer);
		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart == HRTIMER_RESTART) {
			BUG_ON(hrtimer_active(timer));
			/*
			 * Enqueue the timer, allow reprogramming of the event
			 * device
			 */
			enqueue_hrtimer(timer, timer->base, 1);
		} else if (hrtimer_active(timer)) {
			/*
			 * If the timer was rearmed on another CPU, reprogram
			 * the event device.
			 */
			struct hrtimer_clock_base *base = timer->base;

			if (base->first == &timer->node &&
			    hrtimer_reprogram(timer, base)) {
				/*
				 * Timer is expired. Thus move it from tree to
				 * pending list again.
				 */
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
			}
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}
/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

		/* Timer is expired, act upon the callback mode */
		switch(timer->cb_mode) {
		case HRTIMER_CB_IRQSAFE_NO_RESTART:
			debug_hrtimer_deactivate(timer);
			/*
			 * We can call the callback from here. No restart
			 * happens, so no danger of recursion
			 */
			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
			return 1;
		case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
			/*
			 * This is solely for the sched tick emulation with
			 * dynamic tick support to ensure that we do not
			 * restart the tick right on the edge and end up with
			 * the tick timer in the softirq ! The calling site
			 * takes care of this.
			 */
			debug_hrtimer_deactivate(timer);
			return 1;
		case HRTIMER_CB_IRQSAFE:
		case HRTIMER_CB_SOFTIRQ:
			/*
			 * Move everything else into the softirq pending list !
			 */
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			timer->state = HRTIMER_STATE_PENDING;
			return 1;
		default:
			BUG();
		}
	}
	return 0;
}
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_hrtimer_deactivate(timer);
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timer. Allow reprogramming of the event device
		 */
		enqueue_hrtimer(timer, new_base, 1);
	}
}
Example #9
static void __run_hrtimer(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	debug_hrtimer_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);

	fn = timer->function;
	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
		/*
		 * Used for scheduler timers, avoid lock inversion with
		 * rq->lock and tasklist_lock.
		 *
		 * These timers are required to deal with enqueue expiry
		 * themselves and are not allowed to migrate.
		 */
		spin_unlock(&cpu_base->lock);
		restart = fn(timer);
		spin_lock(&cpu_base->lock);
	} else
		restart = fn(timer);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
	 * reprogramming of the event hardware. This happens at the end of this
	 * function anyway.
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base, 0);
	}
	timer->state &= ~HRTIMER_STATE_CALLBACK;
}
Example #10
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_hrtimer_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}
static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}
Example #12
static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
{
	spin_lock_irq(&cpu_base->lock);

	while (!list_empty(&cpu_base->cb_pending)) {
		enum hrtimer_restart (*fn)(struct hrtimer *);
		struct hrtimer *timer;
		int restart;
		int emulate_hardirq_ctx = 0;

		timer = list_entry(cpu_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		debug_hrtimer_deactivate(timer);
		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		/*
		 * A timer might have been added to the cb_pending list
		 * when it was migrated during a cpu-offline operation.
		 * Emulate hardirq context for such timers.
		 */
		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
		    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
			emulate_hardirq_ctx = 1;

		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		if (unlikely(emulate_hardirq_ctx)) {
			local_irq_disable();
			restart = fn(timer);
			local_irq_enable();
		} else
			restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart == HRTIMER_RESTART) {
			BUG_ON(hrtimer_active(timer));
			/*
			 * Enqueue the timer, allow reprogramming of the event
			 * device
			 */
			enqueue_hrtimer(timer, timer->base, 1);
		} else if (hrtimer_active(timer)) {
			/*
			 * If the timer was rearmed on another CPU, reprogram
			 * the event device.
			 */
			struct hrtimer_clock_base *base = timer->base;

			if (base->first == &timer->node &&
			    hrtimer_reprogram(timer, base)) {
				/*
				 * Timer is expired. Thus move it from tree to
				 * pending list again.
				 */
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
			}
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}
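The cb_pending list drained here is normally processed from softirq context; a minimal sketch of such a driver, assuming the HRTIMER_SOFTIRQ number and the per-CPU hrtimer_bases variable used in the examples above:

/* Illustrative only: the softirq entry point that drains cb_pending. */
static void run_hrtimer_softirq(struct softirq_action *h)
{
	run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
}

/* Registered once at init time, e.g.: open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); */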