Example #1
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start_expires(&ts->sched_timer,
					      HRTIMER_MODE_ABS_PINNED);
			/* Check if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(
				hrtimer_get_expires(&ts->sched_timer), 0))
				break;
		}
		/* Reread time and update jiffies */
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}
Example #2
/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED);
		/* Check if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled) {
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
		printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
	}
#endif
}
Example #3
/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		hrtimer_set_expires(&ts->sched_timer, next);
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();

	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
}
Example #4
/**
 * hrtimer_start - (re)start a relative timer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, raise;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}
	hrtimer_set_expires(timer, tim);

	timer_stats_hrtimer_set_start_info(timer);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 */
	enqueue_hrtimer(timer, new_base,
			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));

	/*
	 * The timer may be expired and moved to the cb_pending
	 * list. We can not raise the softirq with base lock held due
	 * to a possible deadlock with runqueue lock.
	 */
	raise = timer->state == HRTIMER_STATE_PENDING;

	unlock_hrtimer_base(timer, &flags);

	if (raise)
		hrtimer_raise_softirq();

	return ret;
}
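A minimal caller sketch to go with the hrtimer_start() kerneldoc above (my_timer, my_timer_fn and my_timer_arm are illustrative names, not part of the kernel examples): it initializes a per-driver hrtimer, installs a callback and arms it 100 ms in the future in relative mode.

static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* runs in hard-irq context; keep the work short */
	return HRTIMER_NORESTART;	/* one-shot: do not re-arm */
}

static void my_timer_arm(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}

Cancelling with hrtimer_cancel(&my_timer) waits for a running callback to finish, which is why the posix-timer code in Example #6 uses hrtimer_try_to_cancel() and retries instead of blocking while it holds the timer lock.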
Example #5
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, int delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}
Exemple #6
0
/* timr->it_lock is taken. */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* disable the timer */
	timr->it.real.interval.tv64 = 0;
	/*
	 * careful here.  If smp we could be in the "fire" routine which will
	 * be spinning as we hold the lock.  But this is ONLY an SMP issue.
	 */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	timr->it_requeue_pending = (timr->it_requeue_pending + 2) & 
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* switch off the timer when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

	/* Convert interval */
	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	/* SIGEV_NONE timers are not queued ! See common_timer_get */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		/* Setup correct expiry time for relative timers */
		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}
Example #7
enum hrtimer_restart hrtimer_auto_selected_func(struct hrtimer *p_timer)
{
	//printk("The function SMI hrtimer_auto_selected_func, times is %d\n", hrtimer_selected_times);

	//before Linux 2.6.3 //hrtimer_auto_trigger.expires=ktime_add_ns(hrtimer_auto_trigger.expires,(u64)(hrtimer_smi_time_interval*1000000));
	hrtimer_set_expires(&hrtimer_auto_selected,
			    ktime_add_ns(hrtimer_get_expires(&hrtimer_auto_selected),
					 (u64)(hrtimer_smi_sel_time_interval * 1000000)));

	/* Get SMI data: collect the previous run's result (skipped on the first firing) */
	if (hrtimer_selected_times > 0) {
		SMI_Manual_Trigger_Result(&hrtimer_sel_cfg[hrtimer_selected_times - 1],
					  &hrtimer_sel_cfg_ex[hrtimer_selected_times - 1],
					  &hrtimer_sel_result);
		auto_selected_result[hrtimer_selected_times - 1] = hrtimer_sel_result;
	}

	/* Set the current SMI config (skipped on the last firing) */
	if (hrtimer_selected_times < auto_selected_result_count) {
		if (hrtimer_sel_cfg_ex != NULL)
			SMI_Manual_Trigger_Init(&hrtimer_sel_cfg[hrtimer_selected_times],
						&hrtimer_sel_cfg_ex[hrtimer_selected_times]);
		else
			SMI_Manual_Trigger_Init(&hrtimer_sel_cfg[hrtimer_selected_times], 0);
	}

	/* Update counter */
	hrtimer_selected_times++;

	if (hrtimer_selected_times > auto_selected_result_count) {
		/* Complete: publish the result matrix for dump */
		if (auto_trigger_result != NULL)
			kfree(auto_trigger_result);
		auto_trigger_result = auto_selected_result;
		hrtimer_smi_time_count = auto_selected_result_count;

		return HRTIMER_NORESTART;
	}

	return HRTIMER_RESTART;
}
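hrtimer_auto_selected_func above (like hrtimer_auto_trigger_func in Example #14) re-arms itself by pushing its own expiry forward and returning HRTIMER_RESTART. A stripped-down sketch of that pattern, assuming a driver-defined period MY_PERIOD_NS (an illustrative name):

static enum hrtimer_restart my_periodic_fn(struct hrtimer *t)
{
	/* advance the expiry by one period, then ask the core to re-arm us */
	hrtimer_set_expires(t, ktime_add_ns(hrtimer_get_expires(t),
					    MY_PERIOD_NS));
	return HRTIMER_RESTART;
}

Mainline code usually calls hrtimer_forward() instead of open-coding the addition (see Examples #1 and #2), since it also catches up when one or more periods have already been missed.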
Example #8
int sched_wait_interval(int flags, const struct timespec __user *rqtp,
                        struct timespec __user *rmtp)
{
    struct hrtimer_sleeper t;
    enum hrtimer_mode mode = flags & TIMER_ABSTIME ?
                             HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
    int ret = 0;

    hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
    hrtimer_set_expires(&t.timer, timespec_to_ktime(*rqtp));
    hrtimer_init_sleeper(&t, current);

    do {
        set_current_state(TASK_INTERRUPTIBLE);
        hrtimer_start_expires(&t.timer, mode);
        if (!hrtimer_active(&t.timer))
            t.task = NULL;

        if (likely(t.task)) {
            t.task->dl.flags |= DL_NEW;
            schedule();
        }
        hrtimer_cancel(&t.timer);
        mode = HRTIMER_MODE_ABS;
    } while (t.task && !signal_pending(current));
    __set_current_state(TASK_RUNNING);

    if (t.task == NULL)
        goto out;

    if (mode == HRTIMER_MODE_ABS) {
        ret = -ERESTARTNOHAND;
        goto out;
    }

    if (rmtp) {
        ktime_t rmt;
        struct timespec rmt_ts;
        rmt = hrtimer_expires_remaining(&t.timer);
        if (rmt.tv64 <= 0) /* timer already expired: nothing left to report */
            goto out;
        rmt_ts = ktime_to_timespec(rmt);
        if (!timespec_valid(&rmt_ts))
            goto out;
        *rmtp = rmt_ts;
    }
out:
    destroy_hrtimer_on_stack(&t.timer);
    return ret;
}
Example #9
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	
	timr->it.real.interval.tv64 = 0;
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	timr->it_requeue_pending = (timr->it_requeue_pending + 2) & 
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));

	
	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		
		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}
Example #10
/**
 * alarmtimer_fired - Handles alarm hrtimer being fired.
 * @timer: pointer to hrtimer being run
 *
 * When an alarm timer fires, this runs through the timerqueue to
 * see which alarms expired, and runs those. If there are more alarm
 * timers queued for the future, we set the hrtimer to fire when
 * the next future alarm timer expires.
 */
static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
{
	struct alarm_base *base = container_of(timer, struct alarm_base, timer);
	struct timerqueue_node *next;
	unsigned long flags;
	ktime_t now;
	int ret = HRTIMER_NORESTART;

	spin_lock_irqsave(&base->lock, flags);
	now = base->gettime();
	while ((next = timerqueue_getnext(&base->timerqueue))) {
		struct alarm *alarm;
		ktime_t expired = next->expires;

		if (expired.tv64 >= now.tv64)
			break;

		alarm = container_of(next, struct alarm, node);

		timerqueue_del(&base->timerqueue, &alarm->node);
		alarm->enabled = 0;
		/* Re-add periodic timers */
		if (alarm->period.tv64) {
			alarm->node.expires = ktime_add(expired, alarm->period);
			timerqueue_add(&base->timerqueue, &alarm->node);
			alarm->enabled = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
		if (alarm->function)
			alarm->function(alarm);
		spin_lock_irqsave(&base->lock, flags);
	}

	if (next) {
		hrtimer_set_expires(&base->timer, next->expires);
		ret = HRTIMER_RESTART;
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
Example #11
int timerfd_setup(struct timerfd_ctx* ctx, const struct timespec* timer, int clockid, int mode)
{
  ktime_t texp;

  timerfd_lock_and_cancel(ctx);

  hrtimer_init(&ctx->timer, clockid, mode);
  texp = timespec_to_ktime(*timer);
  hrtimer_set_expires(&ctx->timer, texp);

  /* Install the callback and reset the state before the timer can fire */
  ctx->timer.function = timerfd_callback;
  ctx->expired = 0;
  ctx->ticks = 0;

  if (texp.tv64 != 0)
    hrtimer_start(&ctx->timer, texp, mode);

  spin_unlock_irq(&ctx->wqh.lock);

  return 0;
}
Example #12
/*
 * Entry point for SPU profiling.
 * NOTE:  SPU profiling is done system-wide, not per-CPU.
 *
 * cycles_reset is the count value specified by the user when
 * setting up OProfile to count SPU_CYCLES.
 */
int start_spu_profiling(unsigned int cycles_reset)
{
	ktime_t kt;

	pr_debug("timer resolution: %lu\n", TICK_NSEC);
	kt = ktime_set(0, profiling_interval);
	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_set_expires(&timer, kt);
	timer.function = profile_spus;

	/* Allocate arrays for collecting SPU PC samples */
	samples = kzalloc(SPUS_PER_NODE *
			  TRACE_ARRAY_SIZE * sizeof(u32), GFP_KERNEL);

	if (!samples)
		return -ENOMEM;

	spu_prof_running = 1;
	hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
	schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);

	return 0;
}
Example #13
/*
 * This is called from the guts of the broadcast code when the cpu
 * which is about to enter idle has the earliest broadcast timer event.
 */
static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
{
	/*
	 * We try to cancel the timer first. If the callback is in
	 * flight on some other cpu then we let it handle it. If we
	 * were able to cancel the timer nothing can rearm it as we
	 * own broadcast_lock.
	 *
	 * However we can also be called from the event handler of
	 * ce_broadcast_hrtimer itself when it expires. We cannot
	 * restart the timer because we are in the callback, but we
	 * can set the expiry time and let the callback return
	 * HRTIMER_RESTART.
	 */
	if (hrtimer_try_to_cancel(&bctimer) >= 0) {
		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
		/* Bind the "device" to the cpu */
		bc->bound_on = smp_processor_id();
	} else if (bc->bound_on == smp_processor_id()) {
		hrtimer_set_expires(&bctimer, expires);
	}
	return 0;
}
Example #14
enum hrtimer_restart hrtimer_auto_trigger_func(struct hrtimer *p_timer)
{
	//printk("The function SMI hrtimer_auto_trigger_func, times is %lu\n", hrtimer_trigger_times);

	//before Linux 2.6.3 //hrtimer_auto_trigger.expires=ktime_add_ns(hrtimer_auto_trigger.expires,(u64)(hrtimer_smi_time_interval*1000000));
	hrtimer_set_expires(&hrtimer_auto_trigger,
			    ktime_add_ns(hrtimer_get_expires(&hrtimer_auto_trigger),
					 (u64)(hrtimer_smi_time_interval * 1000000)));

	/* Get SMI data */
	SMI_Manual_Trigger_Result(hrtimer_cfg, hrtimer_cfg_ex, &hrtimer_result);
	auto_trigger_result[hrtimer_trigger_times] = hrtimer_result;

	/* Update counter */
	hrtimer_trigger_times++;

	if (hrtimer_trigger_times >= hrtimer_smi_time_count)
		return HRTIMER_NORESTART;

	return HRTIMER_RESTART;
}