/**
 * main - entry point for SPU-side context restore.
 *
 * This code deviates from the documented sequence in the
 * following aspects:
 *
 *   1. The EA for LSCSA is passed from PPE in the
 *      signal notification channels.
 *   2. The register spill area is pulled by SPU
 *      into LS, rather than pushed by PPE.
 *   3. All 128 registers are restored by exit().
 *   4. The exit() function is modified at run
 *      time in order to properly restore the
 *      SPU_Status register.
 *
 * NOTE(review): the "Step N" numbers below track an external
 * restore-sequence document not visible in this file; step 6 is
 * deliberately performed late (after tag status) and steps 5/17
 * are delegated to exit() — do not reorder these calls.
 */
int main()
{
	addr64 lscsa_ea;

	/* PPE delivers the 64-bit LSCSA effective address through the
	 * two signal-notification channels: high word first, then low. */
	lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1);
	lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2);

	/* Pull the register spill area from memory into local store
	 * (deviation 2: SPU pulls, PPE does not push). */
	fetch_regs_from_mem(lscsa_ea);

	set_event_mask();		/* Step 1.  */
	set_tag_mask();			/* Step 2.  */
	build_dma_list(lscsa_ea);	/* Step 3.  */
	restore_upper_240kb(lscsa_ea);	/* Step 4.  */
					/* Step 5: done by 'exit'.  */
	enqueue_putllc(lscsa_ea);	/* Step 7.  */
	set_tag_update();		/* Step 8.  */
	read_tag_status();		/* Step 9.  */
	restore_decr();			/* moved Step 6: decrementer restored
					 * only after DMA tag status is read. */
	read_llar_status();		/* Step 10. */
	write_ppu_mb();			/* Step 11. */
	write_ppuint_mb();		/* Step 12. */
	restore_fpcr();			/* Step 13. */
	restore_srr0();			/* Step 14. */
	restore_event_mask();		/* Step 15. */
	restore_tag_mask();		/* Step 16. */
					/* Step 17: done by 'exit'. */
	restore_complete();		/* Step 18. */

	return 0;
}
/*
 * c_handler - interrupt handler body that performs FPU work from IRQ context.
 *
 * Call order is the whole contract here and must not change:
 *   1. acknowledge the IRQ at the HAL root domain;
 *   2. enable the FPU and save the interrupted context's control state (cr0);
 *   3. save the interrupted FPU environment, then install this handler's own;
 *   4. do the (trivial) work: bump the two counters;
 *   5. persist our FPU environment and restore the interrupted one;
 *   6. restore the original FPU control state;
 *   7. pend the IRQ to Linux for its own handling.
 *
 * NOTE(review): cnt/fcnt, the fpu-register buffers, and IRQ are globals
 * defined elsewhere in this file/module — presumably shared with the
 * module init/cleanup code; verify against those definitions.
 */
void c_handler (void)
{
	hal_root_domain->irqs[IRQ].acknowledge(IRQ);
	save_fpcr_and_enable_fpu(cr0);
	save_fpenv(saved_fpu_reg);	/* park the preempted FPU state */
	restore_fpenv(my_fpu_reg);	/* bring in our own FPU context */
	++cnt;
	++fcnt;
	save_fpenv(my_fpu_reg);		/* keep our context for next IRQ */
	restore_fpenv(saved_fpu_reg);	/* give the preempted state back */
	restore_fpcr(cr0);
	rt_pend_linux_irq(IRQ);		/* let Linux service the IRQ too */
}
/*
 * _init_module - module initialization.
 *
 * Arms the periodic echo timer, primes this module's private FPU
 * environment (my_fpu_reg) so c_handler can restore_fpenv() it later,
 * and installs the interrupt gate for 'vector' inside a HAL critical
 * section.  Returns 0 (never fails).
 *
 * NOTE(review): the leading-underscore name `_init_module` is in the
 * implementation-reserved identifier space at file scope; presumably
 * chosen to coexist with the kernel's module init macros — confirm.
 */
int _init_module(void)
{
	unsigned long flags;

	/* Periodic housekeeping timer: fires timer_fun every ECHO_PERIOD s. */
	init_timer(&timer);
	timer.function = timer_fun;
	mod_timer(&timer, jiffies + ECHO_PERIOD*HZ);

	printk("TIMER IRQ/VECTOR %d/%d\n", IRQ, vector);

	/* Capture a pristine FPU environment into my_fpu_reg once, so the
	 * handler has a valid context to install on its first invocation. */
	save_fpcr_and_enable_fpu(cr0);
	save_fpenv(my_fpu_reg);
	restore_fpcr(cr0);

	/* Swap in the assembly interrupt gate (type 14 = interrupt gate,
	 * DPL 0) atomically w.r.t. other CPUs/interrupts.  `desc` keeps the
	 * previous descriptor — presumably restored at module cleanup. */
	flags = hal_critical_enter(NULL);
	desc = rtai_set_gate_vector(vector, 14, 0, asm_handler);
	hal_critical_exit(flags);

	return 0;
}
static void rt_timers_manager(long cpuid) { RTIME now; RT_TASK *timer_manager; struct rt_tasklet_struct *tmr, *timer, *timerl; spinlock_t *lock; unsigned long flags, timer_tol; int priority, used_fpu; timer_manager = &timers_manager[LIST_CPUID]; timerl = &timers_list[LIST_CPUID]; lock = &timers_lock[LIST_CPUID]; timer_tol = tuned.timers_tol[LIST_CPUID]; while (1) { int retval; retval = rt_sleep_until((timerl->next)->firing_time); // now = timer_manager->resume_time + timer_tol; now = rt_get_time() + timer_tol; // find all the timers to be fired, in priority order while (1) { used_fpu = 0; tmr = timer = timerl; priority = RT_SCHED_LOWEST_PRIORITY; flags = rt_spin_lock_irqsave(lock); while ((tmr = tmr->next)->firing_time <= now) { if (tmr->priority < priority) { priority = (timer = tmr)->priority; } } rt_spin_unlock_irqrestore(flags, lock); if (timer == timerl) { if (timer_manager->priority > TimersManagerPrio) { timer_manager->priority = TimersManagerPrio; } break; } timer_manager->priority = priority; #if 1 flags = rt_spin_lock_irqsave(lock); rem_timer(timer); if (timer->period) { timer->firing_time += timer->period; enq_timer(timer); } rt_spin_unlock_irqrestore(flags, lock); #else if (!timer->period) { flags = rt_spin_lock_irqsave(lock); rem_timer(timer); rt_spin_unlock_irqrestore(flags, lock); } else { set_timer_firing_time(timer, timer->firing_time + timer->period); } #endif // if (retval != RTE_TMROVRN) { tmr->overrun = 0; if (!timer->task) { if (!used_fpu && timer->uses_fpu) { used_fpu = 1; save_fpcr_and_enable_fpu(linux_cr0); save_fpenv(timer_manager->fpu_reg); } timer->handler(timer->data); } else { rt_task_resume(timer->task); } // } else { // tmr->overrun++; // } } if (used_fpu) { restore_fpenv(timer_manager->fpu_reg); restore_fpcr(linux_cr0); } // set next timers_manager priority according to the highest priority timer asgn_min_prio(LIST_CPUID); // if no more timers in timers_struct remove timers_manager from tasks list } }