/*
 * Remove a tasklet timer from its per-CPU timers list.
 *
 * A timer that is not linked into any list (NULL links, or self-linked
 * sentinel state) is left untouched. Removal is done under the per-CPU
 * timers spinlock with interrupts disabled; afterwards the timers
 * manager priority is re-evaluated via asgn_min_prio().
 */
RTAI_SYSCALL_MODE void rt_remove_timer(struct rt_tasklet_struct *timer)
{
	spinlock_t *lock;
	unsigned long flags;

	/* Not enqueued (unlinked or self-linked): nothing to remove. */
	if (!timer->next || !timer->prev || timer->next == timer || timer->prev == timer) {
		return;
	}
	lock = &timers_lock[TIMER_CPUID];
	flags = rt_spin_lock_irqsave(lock);
	rem_timer(timer);
	rt_spin_unlock_irqrestore(flags, lock);
	asgn_min_prio(TIMER_CPUID);
}
/*
 * Re-schedule an enqueued timer at a new absolute firing time.
 *
 * The new firing_time is stored first, then the timer is dequeued and
 * re-enqueued (to restore firing-time ordering) under the per-CPU
 * timers spinlock with interrupts disabled. A self-linked (not
 * enqueued) timer is ignored.
 */
static inline void set_timer_firing_time(struct rt_tasklet_struct *timer, RTIME firing_time)
{
	spinlock_t *lock;
	unsigned long flags;

	/* Self-linked means the timer is not in any list: skip it. */
	if (timer->next == timer || timer->prev == timer) {
		return;
	}
	timer->firing_time = firing_time;
	lock = &timers_lock[TIMER_CPUID];
	flags = rt_spin_lock_irqsave(lock);
	rem_timer(timer);
	enq_timer(timer);
	rt_spin_unlock_irqrestore(flags, lock);
}
/*
 * Per-CPU timers manager task (struct rtdm_timer_struct variant —
 * presumably built under a different #ifdef than the tasklet variant
 * elsewhere in this file; TODO confirm).
 *
 * Loop, forever:
 *  1. Sleep until the earliest firing time in this CPU's timer list
 *     (the list head's ->next is the soonest entry).
 *  2. Compute "now" as current time plus the configured tolerance, so
 *     timers due within the tolerance window are fired in this pass
 *     rather than causing another short sleep.
 *  3. Repeatedly scan the list (under the per-CPU spinlock, IRQs off)
 *     for the highest-priority timer whose firing_time <= now; if none
 *     remains, drop the manager's own priority back to TimersManagerPrio
 *     and go back to sleeping.
 *  4. Otherwise raise/lower the manager to the served timer's priority,
 *     dequeue the timer (re-enqueueing it at firing_time + period when
 *     periodic), release the lock, and only then call handler(data) —
 *     the handler runs outside the spinlock.
 *  5. After the firing pass, asgn_min_prio() re-derives the manager
 *     priority from the remaining queued timers.
 *
 * NOTE(review): the cpuid parameter is not referenced directly; the
 * LIST_CPUID macro presumably resolves the per-CPU index — confirm.
 */
static void rt_timers_manager(long cpuid) { RTIME now; RT_TASK *timer_manager; struct rtdm_timer_struct *tmr, *timer, *timerl; spinlock_t *lock; unsigned long flags, timer_tol; int priority; timer_manager = &timers_manager[LIST_CPUID]; timerl = &timers_list[LIST_CPUID]; lock = &timers_lock[LIST_CPUID]; timer_tol = tuned.timers_tol[LIST_CPUID]; while (1) { rt_sleep_until((timerl->next)->firing_time); now = rt_get_time() + timer_tol; while (1) { tmr = timer = timerl; priority = RT_SCHED_LOWEST_PRIORITY; flags = rt_spin_lock_irqsave(lock); while ((tmr = tmr->next)->firing_time <= now) { if (tmr->priority < priority) { priority = (timer = tmr)->priority; } } rt_spin_unlock_irqrestore(flags, lock); if (timer == timerl) { if (timer_manager->priority > TimersManagerPrio) { timer_manager->priority = TimersManagerPrio; } break; } timer_manager->priority = priority; flags = rt_spin_lock_irqsave(lock); rem_timer(timer); if (timer->period) { timer->firing_time += timer->period; enq_timer(timer); } rt_spin_unlock_irqrestore(flags, lock); timer->handler(timer->data); } asgn_min_prio(LIST_CPUID); } }
static void rt_timers_manager(long cpuid) { RTIME now; RT_TASK *timer_manager; struct rt_tasklet_struct *tmr, *timer, *timerl; spinlock_t *lock; unsigned long flags, timer_tol; int priority, used_fpu; timer_manager = &timers_manager[LIST_CPUID]; timerl = &timers_list[LIST_CPUID]; lock = &timers_lock[LIST_CPUID]; timer_tol = tuned.timers_tol[LIST_CPUID]; while (1) { int retval; retval = rt_sleep_until((timerl->next)->firing_time); // now = timer_manager->resume_time + timer_tol; now = rt_get_time() + timer_tol; // find all the timers to be fired, in priority order while (1) { used_fpu = 0; tmr = timer = timerl; priority = RT_SCHED_LOWEST_PRIORITY; flags = rt_spin_lock_irqsave(lock); while ((tmr = tmr->next)->firing_time <= now) { if (tmr->priority < priority) { priority = (timer = tmr)->priority; } } rt_spin_unlock_irqrestore(flags, lock); if (timer == timerl) { if (timer_manager->priority > TimersManagerPrio) { timer_manager->priority = TimersManagerPrio; } break; } timer_manager->priority = priority; #if 1 flags = rt_spin_lock_irqsave(lock); rem_timer(timer); if (timer->period) { timer->firing_time += timer->period; enq_timer(timer); } rt_spin_unlock_irqrestore(flags, lock); #else if (!timer->period) { flags = rt_spin_lock_irqsave(lock); rem_timer(timer); rt_spin_unlock_irqrestore(flags, lock); } else { set_timer_firing_time(timer, timer->firing_time + timer->period); } #endif // if (retval != RTE_TMROVRN) { tmr->overrun = 0; if (!timer->task) { if (!used_fpu && timer->uses_fpu) { used_fpu = 1; save_fpcr_and_enable_fpu(linux_cr0); save_fpenv(timer_manager->fpu_reg); } timer->handler(timer->data); } else { rt_task_resume(timer->task); } // } else { // tmr->overrun++; // } } if (used_fpu) { restore_fpenv(timer_manager->fpu_reg); restore_fpcr(linux_cr0); } // set next timers_manager priority according to the highest priority timer asgn_min_prio(LIST_CPUID); // if no more timers in timers_struct remove timers_manager from tasks list } }