void __ipipe_spin_unlock_debug(unsigned long flags) { /* * We catch a nasty issue where spin_unlock_irqrestore() on a * regular kernel spinlock is about to re-enable hw interrupts * in a section entered with hw irqs off. This is clearly the * sign of a massive breakage coming. Usual suspect is a * regular spinlock which was overlooked, used within a * section which must run with hw irqs disabled. */ WARN_ON_ONCE(!raw_irqs_disabled_flags(flags) && hard_irqs_disabled()); }
/*
 * Mirror the virtual interrupt state @x into the root stage's stall bit
 * without synchronizing pending interrupts, keeping the hardirq tracer
 * informed of the transition.
 */
void __ipipe_restore_root_nosync(unsigned long x)
{
	struct ipipe_percpu_domain_data *ctx = ipipe_this_cpu_root_context();

	if (!raw_irqs_disabled_flags(x)) {
		/* Unstalling: report irqs-on first, then clear the stall bit. */
		trace_hardirqs_on();
		__clear_bit(IPIPE_STALL_FLAG, &ctx->status);
	} else {
		/* Stalling: raise the stall bit first, then report irqs-off. */
		__set_bit(IPIPE_STALL_FLAG, &ctx->status);
		trace_hardirqs_off();
	}
}
/*
 * Decide whether an exception at @vector should be diverted away from the
 * regular Linux handler.  Returns 1 when the event was consumed (by a
 * registered trap notifier, or by KGDB over a non-root domain) and the
 * caller must not run the Linux handler; returns 0 to let Linux handle it.
 */
int __ipipe_divert_exception(struct pt_regs *regs, int vector)
{
	bool root_entry = false;
	unsigned long flags = 0;

	if (ipipe_root_domain_p) {
		root_entry = true;
		local_save_flags(flags);
		if (irqs_disabled_hw()) {
			/*
			 * Same root state handling as in
			 * __ipipe_handle_exception.
			 */
			local_irq_disable();
		}
	}
#ifdef CONFIG_KGDB
	/* catch int1 and int3 over non-root domains */
	else {
#ifdef CONFIG_X86_32
		if (vector != ex_do_device_not_available)
#endif
		{
			unsigned int condition = 0;

			/* int1 (debug): fetch DR6 so KGDB can see the cause. */
			if (vector == 1)
				get_debugreg(condition, 6);
			if (!kgdb_handle_exception(vector, SIGTRAP, condition, regs))
				return 1;
		}
	}
#endif /* CONFIG_KGDB */

	/* A trap notifier claimed the event: undo our root-state change. */
	if (unlikely(ipipe_trap_notify(vector, regs))) {
		if (root_entry)
			local_irq_restore_nosync(flags);
		return 1;
	}

	/* see __ipipe_handle_exception */
	if (likely(ipipe_root_domain_p))
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
			   raw_irqs_disabled(), regs);

	/*
	 * No need to restore root state in the 64-bit case, the Linux handler
	 * and the return code will take care of it.
	 */
	return 0;
}
/*
 * Tell whether the caller's per-CPU access is unsafe, i.e. whether a
 * preemptive CPU migration could move the current task to another CPU
 * while it dereferences a per-CPU variable.
 *
 * Returns 1 when migration is possible (caller should complain),
 * 0 when the context pins the task to the current CPU.
 *
 * notrace: runs with the function tracer excluded, since it is called
 * from the per-CPU access fast path.
 */
int notrace __ipipe_check_percpu_access(void)
{
	struct ipipe_percpu_domain_data *p;
	struct ipipe_domain *this_domain;
	unsigned long flags;
	int ret = 0;

	flags = hard_local_irq_save_notrace();

	/*
	 * Don't use __ipipe_current_domain here, this would recurse
	 * indefinitely.
	 */
	this_domain = __this_cpu_read(ipipe_percpu.curr)->domain;

	/*
	 * Only the root domain may implement preemptive CPU migration
	 * of tasks, so anything above in the pipeline should be fine.
	 */
	if (this_domain != ipipe_root_domain)
		goto out;

	/* Hw irqs were already off on entry: no preemption point. */
	if (raw_irqs_disabled_flags(flags))
		goto out;

	/*
	 * Last chance: hw interrupts were enabled on entry while
	 * running over the root domain, but the root stage might be
	 * currently stalled, in which case preemption would be
	 * disabled, and no migration could occur.
	 *
	 * (We only get here over the root domain, so no need to
	 * re-test this_domain against ipipe_root_domain - the
	 * earlier check already guarantees it.)
	 */
	p = ipipe_this_cpu_root_context();
	if (test_bit(IPIPE_STALL_FLAG, &p->status))
		goto out;

	/*
	 * Our caller may end up accessing the wrong per-cpu variable
	 * instance due to CPU migration; tell it to complain about
	 * this.
	 */
	ret = 1;
out:
	hard_local_irq_restore_notrace(flags);

	return ret;
}
/*
 * Pipeline entry for Linux exception handling.  Offers the fault at
 * @vector to the I-pipe trap notifier chain first; if no out-of-band
 * handler claims it, runs the regular Linux handler taken from
 * __ipipe_std_extable[].  Returns 1 when the event was fully consumed
 * out-of-band (by KGDB or a trap notifier), 0 after the Linux handler
 * has run.
 */
int __ipipe_handle_exception(struct pt_regs *regs, long error_code, int vector)
{
	bool root_entry = false;
	unsigned long flags = 0;
	unsigned long cr2 = 0;

	if (ipipe_root_domain_p) {
		root_entry = true;

		local_save_flags(flags);
		/*
		 * Replicate hw interrupt state into the virtual mask
		 * before calling the I-pipe event handler over the
		 * root domain. Also required later when calling the
		 * Linux exception handler.
		 */
		if (irqs_disabled_hw())
			local_irq_disable();
	}
#ifdef CONFIG_KGDB
	/* catch exception KGDB is interested in over non-root domains */
	else if (__ipipe_xlate_signo[vector] >= 0 &&
		 !kgdb_handle_exception(vector, __ipipe_xlate_signo[vector],
					error_code, regs))
		return 1;
#endif /* CONFIG_KGDB */

	/*
	 * Save the faulting address now: a trap notifier or a domain
	 * switch below may fault again and clobber CR2 before the
	 * Linux page fault handler reads it.
	 */
	if (vector == ex_do_page_fault)
		cr2 = native_read_cr2();

	/* A trap notifier claimed the event: undo our root-state change. */
	if (unlikely(ipipe_trap_notify(vector, regs))) {
		if (root_entry)
			local_irq_restore_nosync(flags);
		return 1;
	}

	if (likely(ipipe_root_domain_p)) {
		/*
		 * If root is not the topmost domain or in case we faulted in
		 * the iret path of x86-32, regs.flags does not match the root
		 * domain state. The fault handler or the low-level return
		 * code may evaluate it. So fix this up, either by the root
		 * state sampled on entry or, if we migrated to root, with the
		 * current state.
		 */
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
			   raw_irqs_disabled(), regs);
	} else {
		/* Detect unhandled faults over non-root domains. */
		struct ipipe_domain *ipd = ipipe_current_domain;

		/* Switch to root so that Linux can handle the fault cleanly. */
		__ipipe_current_domain = ipipe_root_domain;

		ipipe_trace_panic_freeze();

		/* Always warn about user land and unfixable faults. */
		if (user_mode_vm(regs) ||
		    !search_exception_tables(instruction_pointer(regs))) {
			printk(KERN_ERR "BUG: Unhandled exception over domain"
			       " %s at 0x%lx - switching to ROOT\n",
			       ipd->name, instruction_pointer(regs));
			dump_stack();
			ipipe_trace_panic_dump();
#ifdef CONFIG_IPIPE_DEBUG
		/* Also report fixable ones when debugging is enabled. */
		} else {
			printk(KERN_WARNING "WARNING: Fixable exception over "
			       "domain %s at 0x%lx - switching to ROOT\n",
			       ipd->name, instruction_pointer(regs));
			dump_stack();
			ipipe_trace_panic_dump();
#endif /* CONFIG_IPIPE_DEBUG */
		}
	}

	/* Re-arm CR2 for the Linux page fault handler (see above). */
	if (vector == ex_do_page_fault)
		write_cr2(cr2);

	__ipipe_std_extable[vector](regs, error_code);

	/*
	 * Relevant for 64-bit: Restore root domain state as the low-level
	 * return code will not align it to regs.flags.
	 */
	if (root_entry)
		local_irq_restore_nosync(flags);

	return 0;
}