/*
 * We want the task to sleep while waiting for the next job to become
 * ready, so we set the timer to the release instant and try to activate it.
 */
void start_timer_rtws(struct rq *rq, struct sched_rtws_entity *rtws_se)
{
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	/*
	 * We want the timer to fire at the given release time, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(rtws_se->job.release);
	now = hrtimer_cb_get_time(&rtws_se->timer);
	delta = ktime_to_ns(now) - rq->clock;
	act = ktime_add_ns(act, delta);

	hrtimer_set_expires(&rtws_se->timer, act);

	soft = hrtimer_get_softexpires(&rtws_se->timer);
	hard = hrtimer_get_expires(&rtws_se->timer);

	printk(KERN_INFO "expire: %lld, soft: %lld, hard: %lld\n",
	       ktime_to_ns(act), ktime_to_ns(soft), ktime_to_ns(hard));

	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&rtws_se->timer, soft,
				 range, HRTIMER_MODE_ABS, 0);
}
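For context, start_timer_rtws() assumes the entity's hrtimer has already been initialised and wired to a callback elsewhere. A minimal sketch of that setup, using a hypothetical timer_rtws() handler (the name and the exact sched_rtws_entity layout are assumptions, mirroring how SCHED_DEADLINE initialises its per-entity timer):

/*
 * Sketch only: timer_rtws() and the sched_rtws_entity fields used here
 * are assumed; the real RTWS code may differ.
 */
static void init_rtws_task_timer(struct sched_rtws_entity *rtws_se)
{
	struct hrtimer *timer = &rtws_se->timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = timer_rtws;	/* fires when the next job is released */
}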
static int dummy_hrtimer_start(struct snd_pcm_substream *substream)
{
	struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;

	dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer);
	hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL);
	atomic_set(&dpcm->running, 1);
	return 0;
}
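The start path above only records the base time and arms the timer. A matching stop path, assuming the same dummy_hrtimer_pcm layout (running flag plus embedded hrtimer), would look roughly like this sketch:

/* Sketch under the same assumptions as dummy_hrtimer_start() above. */
static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
{
	struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;

	atomic_set(&dpcm->running, 0);
	hrtimer_cancel(&dpcm->timer);
	return 0;
}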
static enum hrtimer_restart timer_callback(struct hrtimer *p_hrtimer)
{
	ktime_t now;

	pr_info("%s\n", __func__);

	/*
	 * Push the expiry forward by the interval before re-arming,
	 * otherwise the timer would fire again immediately.
	 */
	now = hrtimer_cb_get_time(p_hrtimer);
	hrtimer_forward(p_hrtimer, now,
			ns_to_ktime((u64)TIMER_INTERVAL_MS * NSEC_PER_MSEC));

	return HRTIMER_RESTART;
}
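For this callback to run, the timer must be initialised and armed somewhere, typically in module init. A minimal sketch, assuming a hypothetical my_hrtimer variable and an illustrative TIMER_INTERVAL_MS value (neither is from the original snippet):

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define TIMER_INTERVAL_MS	500	/* assumed value for illustration */

static struct hrtimer my_hrtimer;	/* hypothetical; name not from the original */

static int __init timer_example_init(void)
{
	hrtimer_init(&my_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_hrtimer.function = timer_callback;
	hrtimer_start(&my_hrtimer,
		      ns_to_ktime((u64)TIMER_INTERVAL_MS * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
	return 0;
}

static void __exit timer_example_exit(void)
{
	hrtimer_cancel(&my_hrtimer);
}

module_init(timer_example_init);
module_exit(timer_example_exit);
MODULE_LICENSE("GPL");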
static enum hrtimer_restart lights_timer_cb(struct hrtimer *timer)
{
	ktime_t kt_now;
	ktime_t kt_period;

	gpio_set_value(XMAS_OUT_0, lights_toggle);
	lights_toggle = lights_toggle ? 0 : 1;

	/* The period depends on the new toggle state: 20 us vs 10 us. */
	kt_period = ktime_set(0, 1000 * (lights_toggle ? 20 : 10));
	kt_now = hrtimer_cb_get_time(&lights_timer);
	hrtimer_forward(&lights_timer, kt_now, kt_period);

	return HRTIMER_RESTART;
}
static snd_pcm_uframes_t dummy_hrtimer_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct dummy_hrtimer_pcm *dpcm = runtime->private_data;
	u64 delta;
	u32 pos;

	delta = ktime_us_delta(hrtimer_cb_get_time(&dpcm->timer),
			       dpcm->base_time);
	delta = div_u64(delta * runtime->rate + 999999, 1000000);
	div_u64_rem(delta, runtime->buffer_size, &pos);
	return pos;
}
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
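For orientation, this callback is wired to rt_period_timer when the bandwidth structure is set up. The setup in kernels of this vintage looks roughly like the sketch below (shown for context, not taken from this collection):

/* Rough sketch of the setup path; field names follow struct rt_bandwidth. */
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}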
/*
 * This function gets called when a POSIX.1b interval timer expires. It
 * is used as a callback from the kernel internal timer. The
 * run_timer_list code ALWAYS calls it with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	if (timr->it.real.interval.tv64 != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not sent because of sig_ignor.
		 * We will not get a callback to restart it AND
		 * it should be restarted.
		 */
		if (timr->it.real.interval.tv64 != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non-trivial
			 * change which involves sighand locking
			 * (sigh !), which we don't want to do late in
			 * the release cycle.
			 *
			 * For now we just let timers with an interval
			 * less than a jiffie expire every jiffie to
			 * avoid softirq starvation in case of SIG_IGN
			 * and a very small interval, which would put
			 * the timer right back on the softirq pending
			 * list. By moving now ahead of time we trick
			 * hrtimer_forward() to expire the timer
			 * later, while we still maintain the overrun
			 * accuracy, but have some inconsistency in
			 * the timer_gettime() case. This is at least
			 * better than a starved softirq. A more
			 * complex fix which solves also another related
			 * inconsistency is already in the pipeline.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);

				if (timr->it.real.interval.tv64 < kj.tv64)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_sched.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
	unsigned long delta;
	ktime_t soft, hard, now;

	for (;;) {
		if (hrtimer_active(period_timer))
			break;

		now = hrtimer_cb_get_time(period_timer);
		hrtimer_forward(period_timer, now, period);

		soft = hrtimer_get_softexpires(period_timer);
		hard = hrtimer_get_expires(period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(period_timer, soft, delta,
					 HRTIMER_MODE_ABS_PINNED, 0);
	}
}
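A typical caller of start_bandwidth_timer() is the RT bandwidth code, which arms the period timer when RT tasks first need throttling. A rough sketch of such a caller, consistent with the rt_bandwidth fields used by sched_rt_period_timer() above (locking and the RUNTIME_INF check follow the usual pattern of this era, not text from this collection):

/* Sketch of a typical caller of start_bandwidth_timer(). */
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}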