static enum hrtimer_restart my_hrtimer_callback_inq( struct hrtimer *timer_inq )
{
	struct skypopen_dev *dev = container_of(timer_inq, struct skypopen_dev, timer_inq);
	ktime_t now;

	if(unload)
		return HRTIMER_NORESTART;

	now = ktime_get();
	hrtimer_forward(&dev->timer_inq, now, ktime_set(0, SKYPOPEN_SLEEP * 1000000));
	wake_up_interruptible(&dev->inq);

	return HRTIMER_RESTART;
}
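For context, a minimal sketch of how a repeating timer like this is typically initialized and armed. The field names (timer_inq, inq) follow the callback above; the helper name and the rest of the setup are assumptions, not the original driver code:

/* Hedged setup sketch for the callback above (not the original skypopen code).
 * SKYPOPEN_SLEEP is taken to be a period in milliseconds, as in the callback. */
static void skypopen_start_inq_timer(struct skypopen_dev *dev)
{
	init_waitqueue_head(&dev->inq);

	hrtimer_init(&dev->timer_inq, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dev->timer_inq.function = my_hrtimer_callback_inq;

	/* first expiry one period from now; the callback re-arms itself */
	hrtimer_start(&dev->timer_inq,
		      ktime_set(0, SKYPOPEN_SLEEP * 1000000),
		      HRTIMER_MODE_REL);
}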
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
Example #3
static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	if (timr->it.real.interval.tv64 == 0)
		return;

	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
					    timr->it.real.interval);

	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}
Example #4
// Sets the hardware timer to go off at a specified time
void __rk_update_hw_timer_cpu(struct rk_timer *tmr, int cpunum)
{
	cpu_tick_data_t now, val;
	ktime_t delta, nowhr;

	if (tmr == NULL) {
		printk("rk_update_hw_timer: Called with a NULL timer\n");
		return;
	}

	rk_rdtsc(&now);

#ifndef RK_GLOBAL_SCHED
	if (cpunum != smp_processor_id()) {
		printk("rk_update_hw_timer_cpu: ERROR - cpunum:%d, curcpu:%d, type:%d\n", cpunum, smp_processor_id(), tmr->tmr_type);
		return;
	}
#endif
	if (tmr->tmr_expire < now) {
		// We are setting a timer in the past, do the best that we can
		val = RK_MIN_TIMER;
	} else {
		val = (tmr->tmr_expire - now);
		if (val < RK_MIN_TIMER)
			val = RK_MIN_TIMER;
	}

	delta = ns_to_ktime((u64)(val));
	nowhr = ptr_rk_virtual_timer(cpunum)->t.base->get_time();
	ptr_rk_virtual_timer(cpunum)->t.node.expires = nowhr;
	ptr_rk_virtual_timer(cpunum)->t._softexpires = nowhr;
	
	hrtimer_forward(&(ptr_rk_virtual_timer(cpunum)->t), nowhr, delta);

	// Note: Do not check hrtimer_callback_running, and use HRTIMER_NORESTART in rk_timer_isr()
	// - When we use HRTIMER_RESTART, __run_hrtimer() checks whether enqueuing happened 
	//   while serving timer handler (rk_timer_isr). 
	//   This is why we checked if the timer handler is running, using hrtimer_callback_running().
	// - However, it turned out that hrtimer_callback_running() is not reliable 
	//   when it is called by other CPUs. This may cause breaking the hrtimer constraint.
	// - When we use HRTIMER_NORESTART, we can avoid this issue. 
	//   There's no extra overhead for calling hrtimer_start() directly, 
	//   because reprogramming will eventually happen only once when the timer handler is running. 
	//if (!hrtimer_callback_running(&(ptr_rk_virtual_timer(cpunum)->t))) {
	hrtimer_start(&(ptr_rk_virtual_timer(cpunum)->t), ptr_rk_virtual_timer(cpunum)->t.node.expires, RK_HRTIMER_MODE);
  	//}

	//printk("U: %llu\n", ptr_rk_virtual_timer(cpunum)->_expires.tv64);
}
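Per the comment above, the timer handler itself is expected to return HRTIMER_NORESTART and to re-arm through this function. A hedged sketch of that handler shape follows; the real rk_timer_isr() body is not shown in this example, and the expiry helper used here is hypothetical:

/* Sketch only: illustrates the HRTIMER_NORESTART convention described above.
 * rk_timer_expire_and_pick_next() is a hypothetical helper standing in for
 * the real expiry processing. */
static enum hrtimer_restart rk_timer_isr(struct hrtimer *timer)
{
	int cpunum = smp_processor_id();
	struct rk_timer *next;

	next = rk_timer_expire_and_pick_next(cpunum);
	if (next)
		__rk_update_hw_timer_cpu(next, cpunum);	/* re-arms via hrtimer_start() */

	return HRTIMER_NORESTART;
}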
Example #5
static enum hrtimer_restart profile_spus(struct hrtimer *timer)
{
	ktime_t kt;
	int cpu, node, k, num_samples, spu_num;

	if (!spu_prof_running)
		goto stop;

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		node = cbe_cpu_to_node(cpu);

		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
				  oprof_spu_smpl_arry_lck_flags);
		num_samples = cell_spu_pc_collection(cpu);

		if (num_samples == 0) {
			spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
					       oprof_spu_smpl_arry_lck_flags);
			continue;
		}

		for (k = 0; k < SPUS_PER_NODE; k++) {
			spu_num = k + (node * SPUS_PER_NODE);
			spu_sync_buffer(spu_num,
					samples + (k * TRACE_ARRAY_SIZE),
					num_samples);
		}

		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
				       oprof_spu_smpl_arry_lck_flags);

	}
	smp_wmb();	/* ensure spu event buffer updates are written */

	kt = ktime_set(0, profiling_interval);
	if (!spu_prof_running)
		goto stop;
	hrtimer_forward(timer, timer->base->get_time(), kt);
	return HRTIMER_RESTART;

 stop:
	printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n");
	return HRTIMER_NORESTART;
}
Example #6
enum hrtimer_restart mycallback(struct hrtimer *psthrt)
{
	ktime_t s64current, s64interval;
	/* Reset Timer */
	s64current  = ktime_get();
	s64interval = ktime_set(0, gu64int);
	hrtimer_forward(psthrt, s64current, s64interval);

	/* Wake up my kthread */
	wake_up_process(gpkthmykth[0]);
	wake_up_process(gpkthmykth[1]);
	wake_up_process(gpkthmykth[2]);
	wake_up_process(gpkthmykth[3]);

	return HRTIMER_RESTART;
}
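For reference, a hedged sketch of the kthread body such a callback usually pairs with: the threads sleep in TASK_INTERRUPTIBLE and the timer's wake_up_process() calls make them runnable each period. The thread function and its work are assumptions, not the original module:

/* Assumed worker body woken by the hrtimer callback above. */
static int mykth_fn(void *arg)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();			/* sleeps until wake_up_process() */
		__set_current_state(TASK_RUNNING);

		if (kthread_should_stop())
			break;

		do_periodic_work(arg);		/* hypothetical per-period work */
	}
	return 0;
}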
Example #7
/*
 * The timer is automagically restarted, when interval != 0
 */
enum hrtimer_restart it_real_fn(struct hrtimer *timer)
{
	struct signal_struct *sig =
	    container_of(timer, struct signal_struct, real_timer);

	MARK(kernel_timer_expired, "%d", sig->tsk->pid);

	send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);

	if (sig->it_real_incr.tv64 != 0) {
		hrtimer_forward(timer, timer->base->softirq_time,
				sig->it_real_incr);
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}
static enum hrtimer_restart hrtimer_handle(struct hrtimer *cur_timer)
{
	struct timeval tv_cur;

	if(atomic_add_return(1, &g_cur_cnt[1]) >= g_loopcnt[1]) {
		/* print cur time in sec */
		do_gettimeofday(&tv_cur);
		printk("%s: cur sec %d\n", __func__, (int)tv_cur.tv_sec);

		/* clear g_cur_cnt[1] */
		atomic_set(&g_cur_cnt[1], 0);
	}

	/* without forwarding the expiry here, this handler would fire again immediately, over and over */
	hrtimer_forward(cur_timer, cur_timer->base->get_time(), g_ktime);
	return HRTIMER_RESTART;
}
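The globals used above (g_ktime, g_cur_cnt, g_loopcnt) come from the surrounding module, which is not shown. A hedged setup sketch, with assumed names, of how such a timer is typically created and started:

/* Assumed setup for the callback above; g_hrtimer and period_ns are illustrative. */
static struct hrtimer g_hrtimer;

static void start_my_hrtimer(unsigned long period_ns)
{
	g_ktime = ktime_set(0, period_ns);	/* interval reused by hrtimer_forward() above */
	atomic_set(&g_cur_cnt[1], 0);

	hrtimer_init(&g_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	g_hrtimer.function = hrtimer_handle;
	hrtimer_start(&g_hrtimer, g_ktime, HRTIMER_MODE_REL);
}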
Example #9
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
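Note the idiom: hrtimer_forward() returns how many interval steps were needed to push the expiry past "now" (0 if it was already in the future), and the loop keeps accounting until no further overrun is reported. A minimal hedged sketch of the same overrun-accounting pattern outside the scheduler, with all names assumed:

/* Illustrative only: consume missed periods reported by hrtimer_forward(). */
static enum hrtimer_restart catch_up_cb(struct hrtimer *t)
{
	ktime_t now = hrtimer_cb_get_time(t);
	u64 overruns;

	overruns = hrtimer_forward(t, now, my_period);	/* my_period: assumed ktime_t */
	if (overruns > 1)
		account_missed_periods(overruns - 1);	/* hypothetical accounting hook */

	return HRTIMER_RESTART;
}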
Example #10
/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer WRT to what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* interval timer ? */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;

	now = timer->base->get_time();

	/*
	 * When a requeue is pending or this is a SIGEV_NONE
	 * timer move the expiry time forward by intervals, so
	 * expiry is > now.
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now,
								   iv);

	remaining = ktime_sub(hrtimer_get_expires(timer), now);
	/* Return 0 only, when the timer is expired and not pending */
	if (remaining.tv64 <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when
		 * it is expired !
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}
Example #11
enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
{
	struct snd_pcsp *chip = container_of(handle, struct snd_pcsp, timer);
	int pointer_update;
	u64 ns;

	if (!atomic_read(&chip->timer_active) || !chip->playback_substream)
		return HRTIMER_NORESTART;

	pointer_update = !chip->thalf;
	ns = pcsp_timer_update(chip);
	if (!ns) {
		printk(KERN_WARNING "PCSP: unexpected stop\n");
		return HRTIMER_NORESTART;
	}

	if (pointer_update)
		pcsp_pointer_update(chip);

	hrtimer_forward(handle, hrtimer_get_expires(handle), ns_to_ktime(ns));

	return HRTIMER_RESTART;
}
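Note that this callback forwards from hrtimer_get_expires(handle) rather than from the current time, so a late callback does not shift the period boundary and the schedule stays drift-free. A minimal hedged sketch of the same idiom with assumed names:

/* Illustrative drift-free periodic callback: advance from the previous
 * expiry, not from ktime_get(), so lateness does not accumulate. */
static enum hrtimer_restart periodic_cb(struct hrtimer *t)
{
	hrtimer_forward(t, hrtimer_get_expires(t), my_period);	/* my_period: assumed ktime_t */
	return HRTIMER_RESTART;
}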
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (regs) {
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);

		if ((rq_info.init == 1) && (tick_do_timer_cpu == cpu)) {

			update_rq_stats();

			wakeup_user();
		}
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
Example #13
/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer WRT to what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t remaining;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));
	remaining = hrtimer_get_remaining(timer);

	/* Time left ? or timer pending */
	if (remaining.tv64 > 0 || hrtimer_active(timer))
		goto calci;
	/* interval timer ? */
	if (timr->it.real.interval.tv64 == 0)
		return;
	/*
	 * When a requeue is pending or this is a SIGEV_NONE timer
	 * move the expiry time forward by intervals, so expiry is >
	 * now.
	 */
	if (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/* note: this snippet uses the older two-argument
		 * hrtimer_forward(timer, interval); later kernels also
		 * take the current time as the middle argument */
		timr->it_overrun +=
			hrtimer_forward(timer, timr->it.real.interval);
		remaining = hrtimer_get_remaining(timer);
	}
 calci:
	/* interval timer ? */
	if (timr->it.real.interval.tv64 != 0)
		cur_setting->it_interval =
			ktime_to_timespec(timr->it.real.interval);
	/* Return 0 only, when the timer is expired and not pending */
	if (remaining.tv64 <= 0)
		cur_setting->it_value.tv_nsec = 1;
	else
		cur_setting->it_value = ktime_to_timespec(remaining);
}
Example #14
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
Example #15
enum hrtimer_restart mhi_initiate_M1(struct hrtimer *timer)
{
	int ret_val = 0;
	unsigned long flags;
	ktime_t curr_time, timer_inc;
	mhi_device_ctxt *mhi_dev_ctxt = container_of(timer,
			mhi_device_ctxt,
			m1_timer);
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);

	/* We will allow M1 if no data is pending, the current
	 * state is M0 and no M3 transition is pending */
	if ((0 == atomic_read(&mhi_dev_ctxt->flags.data_pending)) &&
			(MHI_STATE_M1 == mhi_dev_ctxt->mhi_state ||
			 MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) &&
			(0 == mhi_dev_ctxt->flags.pending_M3) &&
			mhi_dev_ctxt->flags.mhi_initialized &&
			(0 == atomic_read(&mhi_dev_ctxt->counters.outbound_acks))) {
		mhi_dev_ctxt->mhi_state = MHI_STATE_M1;
		ret_val = mhi_deassert_device_wake(mhi_dev_ctxt);
		mhi_dev_ctxt->counters.m0_m1++;
		if (ret_val)
			mhi_log(MHI_MSG_ERROR | MHI_DBG_POWER,
					"Could not set DEVICE WAKE GPIO LOW\n");
	}
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_M1 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
		curr_time = ktime_get();
		timer_inc = ktime_set(0, MHI_M1_ENTRY_DELAY_MS * 1E6L);
		hrtimer_forward(timer, curr_time, timer_inc);
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}
Example #17
/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as a callback from the kernel internal timer.  The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not sent because it is ignored
		 * (SIG_IGN); we will not get a call back to restart
		 * the timer, yet it should be restarted.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want, is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non trivial
			 * change which involves sighand locking
			 * (sigh !), which we don't want to do late in
			 * the release cycle.
			 *
			 * For now we just let timers with an interval
			 * less than a jiffie expire every jiffie to
			 * avoid softirq starvation in case of SIG_IGN
			 * and a very small interval, which would put
			 * the timer right back on the softirq pending
			 * list. By moving now ahead of time we trick
			 * hrtimer_forward() to expire the timer
			 * later, while we still maintain the overrun
			 * accuracy, but have some inconsistency in
			 * the timer_gettime() case. This is at least
			 * better than a starved softirq. A more
			 * complex fix which solves also another related
			 * inconsistency is already in the pipeline.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
Example #18
/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long ticks;
	ktime_t now, delta;

	if (!ts->tick_stopped)
		return;

	/* Update jiffies first */
	now = ktime_get();

	local_irq_disable();
	tick_do_update_jiffies64(now);
	cpu_clear(cpu, nohz_cpu_mask);

	/* Account the idle time */
	delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	/*
	 * We stopped the tick in idle. Update process times would miss the
	 * time we slept as update_process_times does only a 1 tick
	 * accounting. Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX) {
		add_preempt_count(HARDIRQ_OFFSET);
		account_system_time(current, HARDIRQ_OFFSET,
				    jiffies_to_cputime(ticks));
		sub_preempt_count(HARDIRQ_OFFSET);
	}

	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped  = 0;
	hrtimer_cancel(&ts->sched_timer);
	ts->sched_timer.expires = ts->idle_tick;

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer,
				      ts->sched_timer.expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(ts->sched_timer.expires, 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
	local_irq_enable();
}
Example #19
static void resubmit_it(struct hrtimer *var, struct kt_data *data)
{
	ktime_t now = var->base->get_time();
	data->start_time = jiffies;
	hrtimer_forward(var, now, data->period);
}
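A hedged sketch of the callback such a helper typically serves; struct kt_data is assumed here to embed the hrtimer as a field named "timer", and the work function is hypothetical:

/* Assumed callback pairing with resubmit_it() above. */
static enum hrtimer_restart kt_callback(struct hrtimer *var)
{
	struct kt_data *data = container_of(var, struct kt_data, timer);

	do_periodic_work(data);		/* hypothetical work */
	resubmit_it(var, data);		/* push expiry forward by data->period */

	return HRTIMER_RESTART;
}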
Example #20
/* from kernel/sched/core.c */

void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}
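A hedged usage sketch, assuming an rt_bandwidth-style structure (as in kernel/sched) that embeds the period timer and its ktime_t period; the init helper name is illustrative:

/* Assumed caller shape; rt_period_timer and rt_period mirror struct rt_bandwidth. */
static void init_and_start_rt_period_timer(struct rt_bandwidth *rt_b, u64 period_ns)
{
	rt_b->rt_period = ns_to_ktime(period_ns);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;	/* see Example #9 */

	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
}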