/*
 * Run preempt_schedule_irq() on behalf of the I-pipe (interrupt
 * pipeline), then flush any root-stage interrupts that were logged
 * while we were rescheduling.
 *
 * Entry contract: hard (real) IRQs must be disabled by the caller
 * (enforced by the BUG_ON below).  Virtual IRQ state is saved and
 * restored via local_irq_save()/__ipipe_restore_root_nosync().
 */
asmlinkage void __sched __ipipe_preempt_schedule_irq(void)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(!hard_irqs_disabled());

	/* Stall the root stage virtually, then open hard IRQs so the
	 * scheduler can run with real interrupts enabled. */
	local_irq_save(flags);
	hard_local_irq_enable();
	preempt_schedule_irq(); /* Ok, may reschedule now. */
	hard_local_irq_disable();
	/*
	 * Flush any pending interrupt that may have been logged after
	 * preempt_schedule_irq() stalled the root stage before
	 * returning to us, and now.
	 */
	p = ipipe_this_cpu_root_context();
	if (unlikely(__ipipe_ipending_p(p))) {
		/* Bump PREEMPT_ACTIVE so syncing the log cannot trigger
		 * a nested reschedule from here. */
		add_preempt_count(PREEMPT_ACTIVE);
		trace_hardirqs_on();
		/* Unstall the root stage, then replay the pending log. */
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
		__ipipe_sync_stage();
		sub_preempt_count(PREEMPT_ACTIVE);
	}

	/* Restore the saved virtual IRQ state without re-syncing. */
	__ipipe_restore_root_nosync(flags);
}
/*
 * Common exit path run when returning from an interrupt or syscall.
 *
 * @syscall: nonzero when returning from a syscall (forwarded to
 *           do_notify_resume() for signal/restart handling).
 * @regs:    saved register frame of the interrupted context.
 *
 * Returning to user space: loop on the thread-info work flags,
 * rescheduling and delivering notifications until no work remains.
 * Returning to kernel space (CONFIG_PREEMPT): preempt the current
 * context if a reschedule is pending and preemption is allowed.
 */
asmlinkage extern void end_of_interrupt(int syscall, struct pt_regs *regs)
{
	unsigned int flags;

	if (user_mode(regs)) {
		/* Returning to user space. */
		flags = atomic_get_thread_flags();
		while (flags & _TIF_WORK_MASK) {
			if (flags & _TIF_NEED_RESCHED) {
				schedule();
				/* schedule() may have set new work
				 * flags; re-read and loop. */
				flags = atomic_get_thread_flags();
				continue;
			}
			do_notify_resume(syscall, flags, regs);
			break;
		}
	} else {
		/* Returning to kernel space. */
#ifdef CONFIG_PREEMPT
		local_irq_disable();
		/*
		 * Kernel preemption is only legal when the preempt
		 * count is zero; preempt_schedule_irq() must not be
		 * entered from a non-preemptible section.  (The
		 * original code had this test inverted.)
		 *
		 * FIXME: if (exception path) return;
		 */
		if (!preempt_count() &&
		    (current_thread_info()->flags & _TIF_NEED_RESCHED)) {
			preempt_schedule_irq();
		}
#endif
	}
}