static void __ipipe_do_work(unsigned int virq, void *cookie)
{
	struct ipipe_work_header *work;
	unsigned long flags;
	void *curr, *tail;
	int cpu;

	/*
	 * Work is dispatched in enqueuing order. This interrupt
	 * context can't migrate to another CPU.
	 */
	cpu = smp_processor_id();
	curr = per_cpu(work_buf, cpu);

	for (;;) {
		flags = hard_local_irq_save();
		tail = per_cpu(work_tail, cpu);
		if (curr == tail) {
			per_cpu(work_tail, cpu) = per_cpu(work_buf, cpu);
			hard_local_irq_restore(flags);
			return;
		}
		work = curr;
		curr += work->size;
		hard_local_irq_restore(flags);
		work->handler(work);
	}
}
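/*
 * Posting sketch (illustrative, not part of this file): callers are
 * assumed to queue requests through the ipipe_post_work_root()
 * helper declared elsewhere in the patch. A request must start with
 * the work header so the dispatcher above can walk the ring buffer
 * by header->size; the request type and handler below are
 * hypothetical:
 *
 *	struct wakeup_req {
 *		struct ipipe_work_header work;	// must come first
 *		struct task_struct *task;
 *	};
 *
 *	static void do_wakeup(struct ipipe_work_header *work)
 *	{
 *		struct wakeup_req *rq =
 *			container_of(work, struct wakeup_req, work);
 *		wake_up_process(rq->task);
 *	}
 *
 *	struct wakeup_req rq = {
 *		.work = {
 *			.size = sizeof(rq),
 *			.handler = do_wakeup,
 *		},
 *		.task = target,
 *	};
 *	ipipe_post_work_root(&rq, work);
 */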
int __ipipe_notify_trap(int exception, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p;
	struct ipipe_trap_data data;
	unsigned long flags;
	int ret = 0;

	flags = hard_local_irq_save();

	/*
	 * We only send notifications about traps raised over a
	 * registered head domain.
	 */
	if (__ipipe_root_p)
		goto out;

	p = ipipe_this_cpu_head_context();
	if (likely(p->coflags & __IPIPE_TRAP_E)) {
		p->coflags |= __IPIPE_TRAP_R;
		hard_local_irq_restore(flags);
		data.exception = exception;
		data.regs = regs;
		ret = ipipe_trap_hook(&data);
		flags = hard_local_irq_save();
		p->coflags &= ~__IPIPE_TRAP_R;
	}
out:
	hard_local_irq_restore(flags);

	return ret;
}
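/*
 * Example hook (illustrative sketch): the head domain, i.e. a
 * co-kernel, is assumed to provide ipipe_trap_hook() and to demote
 * its own threads on faults; data->exception and data->regs are
 * filled in by __ipipe_notify_trap() above. The helper name is
 * hypothetical:
 *
 *	int ipipe_trap_hook(struct ipipe_trap_data *data)
 *	{
 *		handle_head_fault(data->exception, data->regs);
 *		return 0;	// assumed: let the root kernel handle it too
 *	}
 */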
void __ipipe_spin_unlock_irqrestore(ipipe_spinlock_t *lock, unsigned long x)
{
	arch_spin_unlock(&lock->arch_lock);
	if (!arch_demangle_irq_bits(&x))
		__clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
	hard_local_irq_restore(x);
}
void __ipipe_unlock_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	__clear_bit(IPIPE_SYNCDEFER_FLAG, p);
	hard_local_irq_restore(flags);
}
void __ipipe_complete_domain_migration(void)
{
	unsigned long flags;

	ipipe_root_only();
	flags = hard_local_irq_save();
	complete_domain_migration();
	hard_local_irq_restore(flags);
}
/*
 * We could use standard atomic bitops in the following root status
 * manipulation routines, but let's prepare for SMP support in the
 * same move, preventing CPU migration as required.
 */
void __ipipe_stall_root(void)
{
	unsigned long *p, flags;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	__set_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore(flags);
}
void ipipe_critical_exit(unsigned long flags)
{
	if (num_online_cpus() == 1) {
		hard_local_irq_restore(flags);
		return;
	}

#ifdef CONFIG_SMP
	if (atomic_dec_and_test(&__ipipe_critical_count)) {
		spin_unlock(&__ipipe_cpu_barrier);
		while (!cpus_empty(__ipipe_cpu_sync_map))
			cpu_relax();
		cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map);
		clear_bit(0, &__ipipe_critical_lock);
		smp_mb__after_clear_bit();
	}
#endif	/* CONFIG_SMP */

	hard_local_irq_restore(flags);
}
unsigned long __ipipe_test_and_stall_root(void)
{
	unsigned long *p, flags;
	int x;

	flags = hard_local_irq_save();
	p = &__ipipe_root_status;
	x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
	hard_local_irq_restore(flags);

	return x;
}
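/*
 * Usage sketch (illustrative, assumed mapping): once the pipeline is
 * enabled, the root domain's local_irq_save()/local_irq_restore()
 * pair operates on the virtual stall bit handled by these routines
 * rather than on the hardware interrupt flag, roughly:
 *
 *	unsigned long flags;
 *
 *	flags = __ipipe_test_and_stall_root();	// "disable" root IRQs
 *	...critical section...
 *	if (!flags)				// restore previous state
 *		__ipipe_unstall_root();		// companion helper, assumed
 *						// defined elsewhere in this file
 *
 * Hardware interrupts keep flowing into the pipeline meanwhile; only
 * their delivery to the root domain is deferred.
 */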
int __ipipe_spin_trylock_irq(ipipe_spinlock_t *lock)
{
	unsigned long flags;

	flags = hard_local_irq_save();
	if (!arch_spin_trylock(&lock->arch_lock)) {
		hard_local_irq_restore(flags);
		return 0;
	}
	__set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);

	return 1;
}
int __ipipe_notify_kevent(int kevent, void *data)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	int ret = 0;

	ipipe_root_only();

	flags = hard_local_irq_save();

	p = ipipe_this_cpu_root_context();
	if (likely(p->coflags & __IPIPE_KEVENT_E)) {
		p->coflags |= __IPIPE_KEVENT_R;
		hard_local_irq_restore(flags);
		ret = ipipe_kevent_hook(kevent, data);
		flags = hard_local_irq_save();
		p->coflags &= ~__IPIPE_KEVENT_R;
	}

	hard_local_irq_restore(flags);

	return ret;
}
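/*
 * Example hook (illustrative sketch): a co-kernel is assumed to
 * provide ipipe_kevent_hook() to follow root-domain task events; the
 * event code used below and the meaning of @data are assumptions,
 * and the helper is hypothetical:
 *
 *	int ipipe_kevent_hook(int kevent, void *data)
 *	{
 *		if (kevent == IPIPE_KEVT_EXIT)
 *			drop_shadow_thread(data);
 *
 *		return 0;	// assumed to mean "propagate normally"
 *	}
 */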
int __ipipe_notify_syscall(struct pt_regs *regs)
{
	struct ipipe_domain *caller_domain, *this_domain, *ipd;
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	int ret = 0;

	flags = hard_local_irq_save();
	caller_domain = this_domain = __ipipe_current_domain;
	ipd = ipipe_head_domain;
next:
	p = ipipe_this_cpu_context(ipd);
	if (likely(p->coflags & __IPIPE_SYSCALL_E)) {
		__ipipe_set_current_context(p);
		p->coflags |= __IPIPE_SYSCALL_R;
		hard_local_irq_restore(flags);
		ret = ipipe_syscall_hook(caller_domain, regs);
		flags = hard_local_irq_save();
		p->coflags &= ~__IPIPE_SYSCALL_R;
		if (__ipipe_current_domain != ipd)
			/* Account for domain migration. */
			this_domain = __ipipe_current_domain;
		else
			__ipipe_set_current_domain(this_domain);
	}

	if (this_domain == ipipe_root_domain &&
	    ipd != ipipe_root_domain && ret == 0) {
		ipd = ipipe_root_domain;
		goto next;
	}

	hard_local_irq_restore(flags);

	return ret;
}
int __ipipe_spin_trylock_irqsave(ipipe_spinlock_t *lock, unsigned long *x)
{
	unsigned long flags;
	int s;

	flags = hard_local_irq_save();
	if (!arch_spin_trylock(&lock->arch_lock)) {
		hard_local_irq_restore(flags);
		return 0;
	}
	s = __test_and_set_bit(IPIPE_STALL_FLAG,
			       &__ipipe_current_context->status);
	*x = arch_mangle_irq_bits(s, flags);

	return 1;
}
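/*
 * Pairing sketch (illustrative): the mangled flags returned through
 * @x encode both the previous stall state and the hard interrupt
 * state, so they must be passed back verbatim to the matching
 * __ipipe_spin_unlock_irqrestore() above, e.g.:
 *
 *	unsigned long flags;
 *
 *	if (__ipipe_spin_trylock_irqsave(&some_lock, &flags)) {
 *		...critical section, hard IRQs off...
 *		__ipipe_spin_unlock_irqrestore(&some_lock, flags);
 *	}
 *
 * (some_lock stands for any ipipe_spinlock_t; callers normally reach
 * these helpers through the generic spinlock wrappers on an I-pipe
 * lock rather than calling them directly.)
 */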
/*
 * ipipe_trigger_irq() -- Push the interrupt at the front of the
 * pipeline, as if it had actually been received from a hardware
 * source. Also works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned int irq)
{
	unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
	if (irq >= IPIPE_NR_IRQS ||
	    (ipipe_virtual_irq_p(irq) &&
	     !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
		return -EINVAL;
#endif

	flags = hard_local_irq_save();
	__ipipe_handle_irq(irq, NULL);
	hard_local_irq_restore(flags);

	return 1;
}
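/*
 * Usage sketch (illustrative): a virtual interrupt can be allocated
 * and handled with the virq helpers provided elsewhere by the patch
 * (ipipe_alloc_virq()/ipipe_request_irq() are assumed here; the
 * registration call differs across I-pipe versions), then injected
 * with ipipe_trigger_irq() as if it came from hardware:
 *
 *	static void my_virq_handler(unsigned int irq, void *cookie)
 *	{
 *		// Runs from the pipeline like any hw interrupt handler.
 *	}
 *
 *	unsigned int virq = ipipe_alloc_virq();
 *
 *	if (virq) {
 *		ipipe_request_irq(ipipe_root_domain, virq,
 *				  my_virq_handler, NULL, NULL);
 *		ipipe_trigger_irq(virq);
 *	}
 */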
asmlinkage void __ipipe_sync_root(void)
{
	void (*irq_tail_hook)(void) =
		(void (*)(void))__ipipe_irq_tail_hook;
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(irqs_disabled());

	flags = hard_local_irq_save();

	if (irq_tail_hook)
		irq_tail_hook();

	clear_thread_flag(TIF_IRQ_SYNC);

	p = ipipe_root_cpudom_ptr();
	if (__ipipe_ipending_p(p))
		__ipipe_sync_pipeline();

	hard_local_irq_restore(flags);
}
void __ipipe_spin_unlock_irqcomplete(unsigned long x)
{
	if (!arch_demangle_irq_bits(&x))
		__clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
	hard_local_irq_restore(x);
}