void __ipipe_do_sync_pipeline(struct ipipe_domain *top)
{
	struct ipipe_percpu_domain_data *p;
	struct ipipe_domain *ipd;

	/* We must enter over the root domain. */
	IPIPE_WARN_ONCE(__ipipe_current_domain != ipipe_root_domain);

	ipd = top;
next:
	p = ipipe_this_cpu_context(ipd);
	if (test_bit(IPIPE_STALL_FLAG, &p->status))
		return;

	if (__ipipe_ipending_p(p)) {
		if (ipd == ipipe_root_domain)
			__ipipe_sync_stage();
		else {
			/* Switching to head. */
			p->coflags &= ~__IPIPE_ALL_R;
			__ipipe_set_current_context(p);
			__ipipe_sync_stage();
			__ipipe_set_current_domain(ipipe_root_domain);
		}
	}

	if (ipd != ipipe_root_domain) {
		ipd = ipipe_root_domain;
		goto next;
	}
}
asmlinkage void __sched __ipipe_preempt_schedule_irq(void)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(!hard_irqs_disabled());
	local_irq_save(flags);
	hard_local_irq_enable();
	preempt_schedule_irq(); /* Ok, may reschedule now. */
	hard_local_irq_disable();

	/*
	 * Flush any pending interrupt that may have been logged
	 * between the time preempt_schedule_irq() stalled the root
	 * stage on its way back to us and now.
	 */
	p = ipipe_this_cpu_root_context();
	if (unlikely(__ipipe_ipending_p(p))) {
		add_preempt_count(PREEMPT_ACTIVE);
		trace_hardirqs_on();
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
		__ipipe_sync_stage();
		sub_preempt_count(PREEMPT_ACTIVE);
	}

	__ipipe_restore_root_nosync(flags);
}
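/*
 * A minimal sketch (not part of the pipeline code): how an arch's
 * kernel-preemption return path might hand over to the pipelined
 * variant above instead of calling preempt_schedule_irq() directly.
 * resume_kernel_example() and the CONFIG_IPIPE_EXAMPLE guard are
 * hypothetical names used for illustration only.
 */
#ifdef CONFIG_IPIPE_EXAMPLE	/* hypothetical, never enabled */
static void resume_kernel_example(void)
{
	/*
	 * The entry code keeps hardware IRQs masked at this point,
	 * matching the BUG_ON(!hard_irqs_disabled()) check above.
	 */
	hard_local_irq_disable();
	if (need_resched())
		__ipipe_preempt_schedule_irq();
}
#endif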
void ___ipipe_sync_pipeline(void)
{
	/*
	 * The root stage may ask for synchronization to be deferred
	 * (IPIPE_SYNCDEFER_FLAG); honor this and bail out early.
	 */
	if (__ipipe_root_domain_p &&
	    test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
		return;

	__ipipe_sync_stage();
}
void __ipipe_dispatch_irq_fast(unsigned int irq) /* hw interrupts off */
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_leading_context(), *old;
	struct ipipe_domain *head = p->domain;

	if (unlikely(test_bit(IPIPE_STALL_FLAG, &p->status))) {
		__ipipe_set_irq_pending(head, irq);
		return;
	}

	old = __ipipe_current_context;
	/* Switch to the head domain. */
	__ipipe_set_current_context(p);

	p->irqall[irq]++;
	__set_bit(IPIPE_STALL_FLAG, &p->status);
	barrier();

	if (likely(head != ipipe_root_domain)) {
		head->irqs[irq].handler(irq, head->irqs[irq].cookie);
		__ipipe_run_irqtail(irq);
	} else {
		if (ipipe_virtual_irq_p(irq)) {
			irq_enter();
			head->irqs[irq].handler(irq, head->irqs[irq].cookie);
			irq_exit();
		} else
			head->irqs[irq].handler(irq, head->irqs[irq].cookie);
		root_stall_after_handler();
	}

	/* Make sure hw IRQs are off again before unwinding the context. */
	hard_local_irq_disable();
	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (__ipipe_current_context == p) {
		__ipipe_set_current_context(old);
		if (old == p) {
			/* We were already running over the head context;
			   sync it locally and exit. */
			if (__ipipe_ipending_p(p))
				__ipipe_sync_stage();
			return;
		}
	}

	/*
	 * We must be running over the root domain; synchronize
	 * the pipeline for high priority IRQs.
	 */
	__ipipe_do_sync_pipeline(head);
}
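/*
 * Illustrative only: the calling convention __ipipe_dispatch_irq_fast()
 * expects. A hypothetical arch-level demux would invoke it with
 * hardware interrupts still masked from exception entry;
 * my_arch_irq_demux() is a made-up name, not part of the I-pipe API.
 */
#ifdef CONFIG_IPIPE_EXAMPLE	/* hypothetical, never enabled */
static void my_arch_irq_demux(unsigned int irq)
{
	/* hw interrupts are off on entry from the vector stub */
	__ipipe_dispatch_irq_fast(irq);
}
#endif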
void ipipe_unstall_root(void)
{
	struct ipipe_percpu_domain_data *p;

	hard_local_irq_disable();

	/* This helps catching bad usage from assembly call sites. */
	ipipe_root_only();

	p = ipipe_this_cpu_root_context();

	__clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p)))
		__ipipe_sync_stage();

	hard_local_irq_enable();
}
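/*
 * A minimal usage sketch, assuming the standard I-pipe pairing of
 * ipipe_stall_root()/ipipe_unstall_root() to bracket a section that
 * must not be preempted by root-stage (Linux) interrupt handlers.
 * example_root_section() is a hypothetical caller.
 */
#ifdef CONFIG_IPIPE_EXAMPLE	/* hypothetical, never enabled */
static void example_root_section(void)
{
	ipipe_stall_root();	/* set the root stall bit */
	/* ... critical section: root IRQs are logged, not delivered ... */
	ipipe_unstall_root();	/* clear it, syncing IRQs logged meanwhile */
}
#endif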
asmlinkage int __ipipe_syscall_root(struct pt_regs regs)
{
	ipipe_declare_cpuid;
	unsigned long flags;

	__fixup_if(&regs);

	/*
	 * This routine either returns:
	 *  0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 *       tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 *       tail work has to be performed (for handling signals etc).
	 */
	if (__ipipe_syscall_watched_p(current, regs.orig_eax) &&
	    __ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, &regs) > 0) {
		/*
		 * We might enter here over a non-root domain and exit
		 * over the root one as a result of the syscall
		 * (i.e. by recycling the register set of the current
		 * context across the migration), so we need to fixup
		 * the interrupt flag upon return too, so that
		 * __ipipe_unstall_iret_root() resets the correct
		 * stall bit on exit.
		 */
		__fixup_if(&regs);

		if (ipipe_current_domain == ipipe_root_domain) {
			/*
			 * Sync pending VIRQs before _TIF_NEED_RESCHED
			 * is tested.
			 */
			ipipe_lock_cpu(flags);
			if ((ipipe_root_domain->cpudata[cpuid].irq_pending_hi &
			     IPIPE_IRQMASK_VIRT) != 0)
				__ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
			ipipe_unlock_cpu(flags);
			return -1;
		}

		return 1;
	}

	return 0;
}
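/*
 * Illustrative only: what the three return values of
 * __ipipe_syscall_root() mean to the (assembly) syscall entry path,
 * expressed as hypothetical C pseudo-logic. handle_syscall_example(),
 * do_linux_syscall() and do_signal_work() are stand-ins for the real
 * arch-specific entry code, which is assembly and passes the register
 * frame by value on the stack (asmlinkage).
 */
#ifdef CONFIG_IPIPE_EXAMPLE	/* hypothetical, never enabled */
void do_linux_syscall(struct pt_regs *regs);	/* stand-in declaration */
void do_signal_work(struct pt_regs *regs);	/* stand-in declaration */

static void handle_syscall_example(struct pt_regs *regs)
{
	int ret = __ipipe_syscall_root(*regs);

	if (ret == 0)
		do_linux_syscall(regs);	/* propagate to Linux */
	else if (ret < 0)
		do_signal_work(regs);	/* skip Linux, run tail work */
	/* ret > 0: skip Linux and any tail work */
}
#endif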