/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
        u64 state_time;
        struct vcpu_runstate_info *state;

        BUG_ON(preemptible());

        state = &__get_cpu_var(runstate);

        /*
         * The runstate info is always updated by the hypervisor on
         * the current CPU, so there's no need to use anything
         * stronger than a compiler barrier when fetching it.
         */
        do {
                state_time = get64(&state->state_entry_time);
                barrier();
                *res = *state;
                barrier();
        } while (get64(&state->state_entry_time) != state_time);
}
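/*
 * Hedged usage sketch, not kernel code: get_runstate_snapshot() must run
 * with preemption disabled so the per-CPU runstate belongs to this CPU for
 * the whole seqlock-style retry loop. The function name below
 * (sample_runstate_locally) is illustrative; the RUNSTATE_* indices come
 * from the Xen vcpu interface and are assumed here.
 */
static u64 sample_runstate_locally(void)
{
        struct vcpu_runstate_info snap;
        u64 runnable_ns;

        preempt_disable();
        get_runstate_snapshot(&snap);   /* consistent copy of this CPU's runstate */
        preempt_enable();

        /* time[] is indexed by the RUNSTATE_* constants */
        runnable_ns = snap.time[RUNSTATE_runnable];
        return runnable_ns;
}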
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
asmlinkage void __sched notrace preempt_schedule_context(void)
{
        enum ctx_state prev_ctx;

        if (likely(!preemptible()))
                return;

        /*
         * Need to disable preemption in case user_exit() is traced
         * and the tracer calls preempt_enable_notrace() causing
         * an infinite recursion.
         */
        preempt_disable_notrace();
        prev_ctx = exception_enter();
        preempt_enable_no_resched_notrace();

        preempt_schedule();

        preempt_disable_notrace();
        exception_exit(prev_ctx);
        preempt_enable_notrace();
}
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
        struct mc_buffer *b = &__get_cpu_var(mc_buffer);
        struct multicall_space ret = { NULL, NULL };

        BUG_ON(preemptible());
        BUG_ON(b->argidx >= MC_ARGS);

        if (b->mcidx == 0)
                return ret;

        if (b->entries[b->mcidx - 1].op != op)
                return ret;

        if ((b->argidx + size) >= MC_ARGS)
                return ret;

        ret.mc = &b->entries[b->mcidx - 1];
        ret.args = &b->args[b->argidx];
        b->argidx += size;

        BUG_ON(b->argidx >= MC_ARGS);
        return ret;
}
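/*
 * Hedged sketch of a caller: try to append another struct mmu_update to a
 * pending __HYPERVISOR_mmu_update multicall; if that fails (different op,
 * no room, or an empty batch), fall back to a fresh entry. This is roughly
 * the shape of the Xen MMU code's xen_extend_mmu_update(); treat the
 * details (argument layout, MULTI_mmu_update wrapper) as assumptions, not
 * a verbatim copy.
 */
static void extend_mmu_update_example(const struct mmu_update *update)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
        if (mcs.mc != NULL) {
                /* args[1] of the pending multicall is the update count */
                mcs.mc->args[1]++;
        } else {
                mcs = __xen_mc_entry(sizeof(*u));
                MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
        }

        u = mcs.args;
        *u = *update;
}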
struct multicall_space __xen_mc_entry(size_t args)
{
        struct mc_buffer *b = &__get_cpu_var(mc_buffer);
        struct multicall_space ret;
        unsigned argidx = roundup(b->argidx, sizeof(u64));

        BUG_ON(preemptible());
        BUG_ON(b->argidx > MC_ARGS);

        if (b->mcidx == MC_BATCH ||
            (argidx + args) > MC_ARGS) {
                mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
                xen_mc_flush();
                argidx = roundup(b->argidx, sizeof(u64));
        }

        ret.mc = &b->entries[b->mcidx];
        b->mcidx++;
        ret.args = &b->args[argidx];
        b->argidx = argidx + args;

        BUG_ON(b->argidx > MC_ARGS);
        return ret;
}
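/*
 * Hedged usage sketch (an assumed typical caller, not a specific kernel
 * function): reserve a multicall slot, fill in the hypercall, then let the
 * issue path decide whether to flush now or defer under lazy MMU mode.
 * The companion helpers xen_mc_batch()/xen_mc_issue() and the
 * MULTI_update_va_mapping() wrapper are assumed from the same subsystem.
 */
static void queue_va_update_example(unsigned long vaddr, pte_t pte)
{
        struct multicall_space mcs;

        xen_mc_batch();                         /* save flags, disable irqs */
        mcs = __xen_mc_entry(0);                /* no extra argument bytes */
        MULTI_update_va_mapping(mcs.mc, vaddr, pte, UVMF_INVLPG);
        xen_mc_issue(PARAVIRT_LAZY_MMU);        /* flush unless batching lazily */
}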
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        unsigned long irq_stack_ptr;
        int skip;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        /*
         * Switching between stacks is valid when tracing current and in
         * non-preemptible context.
         */
        if (tsk == current && !preemptible())
                irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
        else
                irq_stack_ptr = 0;

        if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_stack_pointer;
                frame.pc = (unsigned long)dump_backtrace;
        } else {
                /*
                 * task blocked in __switch_to
                 */
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.pc = thread_saved_pc(tsk);
        }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = tsk->curr_ret_stack;
#endif

        skip = !!regs;
        printk("Call trace:\n");
        while (1) {
                unsigned long where = frame.pc;
                unsigned long stack;
                int ret;

                /* skip until the specified stack frame is reached */
                if (!skip) {
                        dump_backtrace_entry(where);
                } else if (frame.fp == regs->regs[29]) {
                        skip = 0;
                        /*
                         * Typically this is the case when this function is
                         * called from panic/abort. As the exception handler's
                         * stack frame does not contain the pc at which the
                         * exception was taken, use regs->pc instead.
                         */
                        dump_backtrace_entry(regs->pc);
                }
                ret = unwind_frame(tsk, &frame);
                if (ret < 0)
                        break;
                stack = frame.sp;
                if (in_exception_text(where)) {
                        /*
                         * If we switched to the irq_stack before calling this
                         * exception handler, then the pt_regs will be on the
                         * task stack. The easiest way to tell is if the large
                         * pt_regs would overlap with the end of the irq_stack.
                         */
                        if (stack < irq_stack_ptr &&
                            (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
                                stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);

                        dump_mem("", "Exception stack", stack,
                                 stack + sizeof(struct pt_regs));
                }
        }

        put_task_stack(tsk);
}
/**
 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
 * Must be called from non-preemptible context
 */
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
        BUG_ON(preemptible());
        return __this_cpu_read(kvm_arm_running_vcpu);
}
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
        BUG_ON(preemptible());
        __this_cpu_write(kvm_arm_running_vcpu, vcpu);
}
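/*
 * Hedged usage sketch: both helpers rely on the caller already being
 * non-preemptible, typically because they run from vcpu load/put or from a
 * section explicitly bracketed by preempt_disable(). The function name
 * below is illustrative only.
 */
static bool vcpu_is_loaded_here_example(struct kvm_vcpu *vcpu)
{
        bool loaded;

        preempt_disable();
        loaded = (kvm_arm_get_running_vcpu() == vcpu);
        preempt_enable();

        return loaded;
}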
void xen_setup_cpu_clockevents(void)
{
        BUG_ON(preemptible());

        clockevents_register_device(&__get_cpu_var(xen_clock_events));
}
void xen_mc_flush(void)
{
        struct mc_buffer *b = &__get_cpu_var(mc_buffer);
        struct multicall_entry *mc;
        int ret = 0;
        unsigned long flags;
        int i;

        BUG_ON(preemptible());

        /* Disable interrupts in case someone comes in and
           queues something in the middle */
        local_irq_save(flags);

        trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);

        switch (b->mcidx) {
        case 0:
                /* no-op */
                BUG_ON(b->argidx != 0);
                break;

        case 1:
                /* Singleton multicall - bypass multicall machinery
                   and just do the call directly. */
                mc = &b->entries[0];

                mc->result = privcmd_call(mc->op,
                                          mc->args[0], mc->args[1], mc->args[2],
                                          mc->args[3], mc->args[4]);
                ret = mc->result < 0;
                break;

        default:
#if MC_DEBUG
                memcpy(b->debug, b->entries,
                       b->mcidx * sizeof(struct multicall_entry));
#endif

                if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
                        BUG();
                for (i = 0; i < b->mcidx; i++)
                        if (b->entries[i].result < 0)
                                ret++;

#if MC_DEBUG
                if (ret) {
                        printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
                               ret, smp_processor_id());
                        dump_stack();
                        for (i = 0; i < b->mcidx; i++) {
                                printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
                                       i + 1, b->mcidx,
                                       b->debug[i].op,
                                       b->debug[i].args[0],
                                       b->entries[i].result,
                                       b->caller[i]);
                        }
                }
#endif
        }

        b->mcidx = 0;
        b->argidx = 0;

        for (i = 0; i < b->cbidx; i++) {
                struct callback *cb = &b->callbacks[i];

                (*cb->fn)(cb->data);
        }
        b->cbidx = 0;

        local_irq_restore(flags);
        WARN_ON(ret);
}
/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *      sub     sp, sp, #0x10
 *      stp     x29, x30, [sp]
 *      mov     x29, sp
 *
 * A simple function epilogue looks like this:
 *      mov     sp, x29
 *      ldp     x29, x30, [sp]
 *      add     sp, sp, #0x10
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
        unsigned long high, low;
        unsigned long fp = frame->fp;
        unsigned long irq_stack_ptr;

        /*
         * Switching between stacks is valid when tracing current and in
         * non-preemptible context.
         */
        if (tsk == current && !preemptible())
                irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
        else
                irq_stack_ptr = 0;

        low = frame->sp;
        /* irq stacks are not THREAD_SIZE aligned */
        if (on_irq_stack(frame->sp, raw_smp_processor_id()))
                high = irq_stack_ptr;
        else
                high = ALIGN(low, THREAD_SIZE) - 0x20;

        if (fp < low || fp > high || fp & 0xf)
                return -EINVAL;

        frame->sp = fp + 0x10;
        frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
        frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (tsk && tsk->ret_stack &&
            (frame->pc == (unsigned long)return_to_handler)) {
                /*
                 * This is a case where the function graph tracer has
                 * modified a return address (LR) in a stack frame
                 * to hook a function return.
                 * Replace it with the original value.
                 */
                frame->pc = tsk->ret_stack[frame->graph--].ret;
        }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

        /*
         * Check whether we are going to walk through from the interrupt
         * stack to the task stack.
         * If we reach the end of the stack and it is an interrupt stack,
         * unpack the dummy frame to find the original ELR.
         *
         * Check that the frame->fp we read from the bottom of the irq_stack
         * and the original task stack pointer are both in current->stack.
         */
        if (frame->sp == irq_stack_ptr) {
                struct pt_regs *irq_args;
                unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);

                if (object_starts_on_stack((void *)orig_sp) &&
                    object_starts_on_stack((void *)frame->fp)) {
                        frame->sp = orig_sp;

                        /* orig_sp is the saved pt_regs, find the elr */
                        irq_args = (struct pt_regs *)orig_sp;
                        frame->pc = irq_args->pc;
                } else {
                        /*
                         * This frame has a non-standard format, and we
                         * didn't fix it, because the data looked wrong.
                         * Refuse to output this frame.
                         */
                        return -EINVAL;
                }
        }

        return 0;
}
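/*
 * Hedged sketch of a caller, modelled on the walk_stackframe() pattern:
 * seed a stackframe for the current task, then call unwind_frame() until
 * it fails, invoking a callback per frame. Illustrative only; the real
 * kernel helper may differ between versions.
 */
static void walk_current_stack_example(int (*fn)(struct stackframe *, void *),
                                       void *data)
{
        struct stackframe frame;

        frame.fp = (unsigned long)__builtin_frame_address(0);
        frame.sp = current_stack_pointer;
        frame.pc = (unsigned long)walk_current_stack_example;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = current->curr_ret_stack;
#endif

        while (1) {
                if (fn(&frame, data))
                        break;          /* callback asked to stop */
                if (unwind_frame(current, &frame) < 0)
                        break;          /* hit the end or a bad frame */
        }
}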