/* Was the interrupted context running in Xen (as opposed to guest) mode? */
static int xenoprof_is_xen_mode(struct vcpu *v, struct pt_regs *regs)
{
    if ( VMX_DOMAIN(v) )
        return !vmx_user_mode(regs);
    return ring_0(regs);
}
static int nmi_callback(const struct cpu_user_regs *regs, int cpu)
{
    int xen_mode, ovf;

    ovf = model->check_ctrs(cpu, &cpu_msrs[cpu], regs);
    xen_mode = ring_0(regs);

    /* Only deliver samples taken in guest context to an active domain. */
    if ( ovf && is_active(current->domain) && !xen_mode )
        send_guest_vcpu_virq(current, VIRQ_XENOPROF);

    /* An overflow value of 2 asks for the NMI to be reflected to the guest. */
    if ( ovf == 2 )
        current->nmi_pending = 1;

    /* Report the NMI as handled. */
    return 1;
}
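/*
 * Sketch (not part of the original file): how a handler of this shape is
 * typically wired up. set_nmi_callback()/unset_nmi_callback() are assumed to
 * be the asm-x86/nmi.h helpers; the wrapper function names are hypothetical.
 */
static void nmi_profiling_start(void)
{
    set_nmi_callback(nmi_callback);   /* route perf-counter NMIs to the handler above */
}

static void nmi_profiling_stop(void)
{
    unset_nmi_callback();             /* revert to default NMI handling */
}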
unsigned int compat_iret(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct vcpu *v = current;
    u32 eflags;

    /* Trim stack pointer to 32 bits. */
    regs->rsp = (u32)regs->rsp;

    /* Restore EAX (clobbered by hypercall). */
    if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
    {
        domain_crash(v->domain);
        return 0;
    }

    /* Restore CS and EIP. */
    if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
    {
        domain_crash(v->domain);
        return 0;
    }

    /*
     * Fix up and restore EFLAGS. We fix up in a local staging area
     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
     */
    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
    {
        domain_crash(v->domain);
        return 0;
    }

    if ( VM_ASSIST(v->domain, architectural_iopl) )
        v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;

    regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;

    if ( unlikely(eflags & X86_EFLAGS_VM) )
    {
        /*
         * Cannot return to VM86 mode: inject a GP fault instead. Note that
         * the GP fault is reported on the first VM86 mode instruction, not on
         * the IRET (which is why we can simply leave the stack frame as-is
         * (except for perhaps having to copy it), which in turn seems better
         * than teaching create_bounce_frame() to needlessly deal with vm86
         * mode frames).
         */
        const struct trap_info *ti;
        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
        unsigned int i;
        int rc = 0;

        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
                 regs->esp, ksp);
        if ( ksp < regs->esp )
        {
            for ( i = 1; i < 10; ++i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        else if ( ksp > regs->esp )
        {
            for ( i = 9; i > 0; --i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        if ( rc )
        {
            domain_crash(v->domain);
            return 0;
        }
        regs->esp = ksp;
        regs->ss = v->arch.pv_vcpu.kernel_ss;

        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
        if ( TI_GET_IF(ti) )
            eflags &= ~X86_EFLAGS_IF;
        regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
                          X86_EFLAGS_NT|X86_EFLAGS_TF);
        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
        {
            domain_crash(v->domain);
            return 0;
        }
        regs->eip = ti->address;
        regs->cs  = ti->cs;
    }
    else if ( unlikely(ring_0(regs)) )
    {
        domain_crash(v->domain);
        return 0;
    }
    else if ( ring_1(regs) )
        regs->esp += 16;
    /* Return to ring 2/3: restore ESP and SS. */
    else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
              __get_user(regs->esp, (u32 *)regs->rsp + 4) )
    {
        domain_crash(v->domain);
        return 0;
    }

    /* Restore upcall mask from supplied EFLAGS.IF. */
    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);

    async_exception_cleanup(v);

    /*
     * The hypercall exit path will overwrite EAX with this return
     * value.
     */
    return regs->eax;
}
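/*
 * Illustration only (not part of the original file): the layout of the
 * 32-bit guest stack frame that compat_iret() consumes, reconstructed from
 * the __get_user() offsets above. The struct name and field names are
 * hypothetical; this is a reading aid, not a type used by Xen.
 */
struct compat_iret_frame_sketch {
    u32 eax;    /* +0:  EAX, clobbered by the hypercall, restored first */
    u32 eip;    /* +4:  return EIP */
    u32 cs;     /* +8:  return CS */
    u32 eflags; /* +12: EFLAGS; IOPL/VM bits sanitised before being restored */
    u32 esp;    /* +16: outer ESP, only read when returning to ring 2/3 */
    u32 ss;     /* +20: outer SS, only read when returning to ring 2/3 */
};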