/*
 * c_handler - real-time handler for the test timer IRQ.
 *
 * Acknowledges the interrupt at the HAL root domain, then temporarily
 * installs a private FPU context (my_fpu_reg) around the counter updates,
 * restoring the previously active FPU context and control register before
 * forwarding the IRQ to Linux.  The save/restore pairs must stay in exactly
 * this order.  cnt/fcnt are presumably global tick counters read elsewhere
 * — TODO confirm against the rest of the module.
 */
void c_handler (void)
{
	/* ack the IRQ at the HAL layer so it can fire again */
	hal_root_domain->irqs[IRQ].acknowledge(IRQ);
	/* save FPU control register state and enable FPU use in this context */
	save_fpcr_and_enable_fpu(cr0);
	/* stash whatever FPU context was live, swap in our private one */
	save_fpenv(saved_fpu_reg);
	restore_fpenv(my_fpu_reg);
	++cnt;
	++fcnt;
	/* persist our private context, then restore the interrupted one */
	save_fpenv(my_fpu_reg);
	restore_fpenv(saved_fpu_reg);
	restore_fpcr(cr0);
	/* let Linux see the interrupt too */
	rt_pend_linux_irq(IRQ);
}
static void rt_timers_manager(int dummy) { static unsigned long cr0; RTIME now; struct rt_tasklet_struct *tmr, *timer; unsigned long flags; int priority, used_fpu; while (1) { rt_sleep_until((timers_list.next)->firing_time); now = timers_manager.resume_time + tuned.timers_tol[0]; // find all the timers to be fired, in priority order while (1) { used_fpu = 0; tmr = timer = &timers_list; priority = RT_LOWEST_PRIORITY; flags = rt_spin_lock_irqsave(&timers_lock); while ((tmr = tmr->next)->firing_time <= now) { if (tmr->priority < priority) { priority = (timer = tmr)->priority; } } timers_manager.priority = priority; rt_spin_unlock_irqrestore(flags, &timers_lock); if (timer == &timers_list) { break; } if (!timer->period) { flags = rt_spin_lock_irqsave(&timers_lock); (timer->next)->prev = timer->prev; (timer->prev)->next = timer->next; timer->next = timer->prev = timer; rt_spin_unlock_irqrestore(flags, &timers_lock); } else { set_timer_firing_time(timer, timer->firing_time + timer->period); } if (!timer->task) { if (!used_fpu && timer->uses_fpu) { used_fpu = 1; save_cr0_and_clts(cr0); save_fpenv(timers_manager.fpu_reg); } timer->handler(timer->data); } else { rt_task_resume(timer->task); } } if (used_fpu) { restore_fpenv(timers_manager.fpu_reg); restore_cr0(cr0); } // set next timers_manager priority according to the highest priority timer asgn_min_prio(); // if no more timers in timers_struct remove timers_manager from tasks list } }
/*
 * _init_module - module initialization for the timer/IRQ test.
 *
 * Arms a standard Linux kernel timer (timer_fun every ECHO_PERIOD seconds),
 * primes the private FPU context (my_fpu_reg) by saving the current FPU
 * environment under an enabled FPU, then installs asm_handler on `vector`
 * as an interrupt gate (type 14) inside a HAL critical section.  The
 * previous gate descriptor is kept in the global `desc`, presumably for
 * restoration at module exit — TODO confirm against the cleanup path.
 *
 * Returns 0 (no failure paths are checked here).
 */
int _init_module(void)
{
	unsigned long flags;
	/* periodic Linux-side echo timer */
	init_timer(&timer);
	timer.function = timer_fun;
	mod_timer(&timer, jiffies + ECHO_PERIOD*HZ);
	printk("TIMER IRQ/VECTOR %d/%d\n", IRQ, vector);
	/* capture a baseline FPU environment into my_fpu_reg */
	save_fpcr_and_enable_fpu(cr0);
	save_fpenv(my_fpu_reg);
	restore_fpcr(cr0);
	/* swap in our vector handler atomically w.r.t. other domains */
	flags = hal_critical_enter(NULL);
	desc = rtai_set_gate_vector(vector, 14, 0, asm_handler);
	hal_critical_exit(flags);
	return 0;
}
static void rt_timers_manager(long cpuid) { RTIME now; RT_TASK *timer_manager; struct rt_tasklet_struct *tmr, *timer, *timerl; spinlock_t *lock; unsigned long flags, timer_tol; int priority, used_fpu; timer_manager = &timers_manager[LIST_CPUID]; timerl = &timers_list[LIST_CPUID]; lock = &timers_lock[LIST_CPUID]; timer_tol = tuned.timers_tol[LIST_CPUID]; while (1) { int retval; retval = rt_sleep_until((timerl->next)->firing_time); // now = timer_manager->resume_time + timer_tol; now = rt_get_time() + timer_tol; // find all the timers to be fired, in priority order while (1) { used_fpu = 0; tmr = timer = timerl; priority = RT_SCHED_LOWEST_PRIORITY; flags = rt_spin_lock_irqsave(lock); while ((tmr = tmr->next)->firing_time <= now) { if (tmr->priority < priority) { priority = (timer = tmr)->priority; } } rt_spin_unlock_irqrestore(flags, lock); if (timer == timerl) { if (timer_manager->priority > TimersManagerPrio) { timer_manager->priority = TimersManagerPrio; } break; } timer_manager->priority = priority; #if 1 flags = rt_spin_lock_irqsave(lock); rem_timer(timer); if (timer->period) { timer->firing_time += timer->period; enq_timer(timer); } rt_spin_unlock_irqrestore(flags, lock); #else if (!timer->period) { flags = rt_spin_lock_irqsave(lock); rem_timer(timer); rt_spin_unlock_irqrestore(flags, lock); } else { set_timer_firing_time(timer, timer->firing_time + timer->period); } #endif // if (retval != RTE_TMROVRN) { tmr->overrun = 0; if (!timer->task) { if (!used_fpu && timer->uses_fpu) { used_fpu = 1; save_fpcr_and_enable_fpu(linux_cr0); save_fpenv(timer_manager->fpu_reg); } timer->handler(timer->data); } else { rt_task_resume(timer->task); } // } else { // tmr->overrun++; // } } if (used_fpu) { restore_fpenv(timer_manager->fpu_reg); restore_fpcr(linux_cr0); } // set next timers_manager priority according to the highest priority timer asgn_min_prio(LIST_CPUID); // if no more timers in timers_struct remove timers_manager from tasks list } }