static int
vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
        uint64_t val;
        uint32_t eax, edx;
        int error;

        val = 0;
        error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
        if (error != 0) {
                fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
                    vme->u.msr.code, *pvcpu);
                if (strictmsr) {
                        vm_inject_gp(ctx, *pvcpu);
                        return (VMEXIT_CONTINUE);
                }
        }

        eax = val;
        error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
        assert(error == 0);

        edx = val >> 32;
        error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
        assert(error == 0);

        return (VMEXIT_CONTINUE);
}
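/*
 * Illustrative sketch (assumed handler set, not bhyve's actual MSR
 * policy): a minimal backend with the same shape as the emulate_rdmsr()
 * call above.  vmexit_rdmsr() splits the 64-bit result into EDX:EAX, so
 * a backend only has to produce *val; a non-zero return feeds the
 * strictmsr #GP path.
 */
static int
example_emulate_rdmsr(struct vmctx *ctx, int vcpu, uint32_t num,
    uint64_t *val)
{
        (void)ctx; (void)vcpu;          /* unused in this sketch */

        switch (num) {
        case 0x8b:      /* IA32_BIOS_SIGN_ID: report no microcode update */
                *val = 0;
                return (0);
        default:
                return (-1);    /* unknown MSR: caller may inject #GP */
        }
}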
static int
vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
        int error;

        error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
        if (error != 0) {
                fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
                    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
                if (strictmsr) {
                        vm_inject_gp(ctx, *pvcpu);
                        return (VMEXIT_CONTINUE);
                }
        }
        return (VMEXIT_CONTINUE);
}
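/*
 * Companion sketch for the write side (again an assumption, not the
 * real emulate_wrmsr()): silently discard writes to MSRs the hypervisor
 * treats as harmless noise, and fail everything else so strictmsr can
 * turn the failure into a guest #GP.
 */
static int
example_emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t num,
    uint64_t wval)
{
        (void)ctx; (void)vcpu; (void)wval;      /* unused in this sketch */

        switch (num) {
        case 0x79:      /* IA32_BIOS_UPDT_TRIG: ignore microcode uploads */
                return (0);
        default:
                return (-1);    /* unknown MSR: caller may inject #GP */
        }
}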
/*
 * Update the vcpu registers to reflect the state of the new task.
 *
 * Returns 0 on success.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 */
static int
tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
    uint16_t ot_sel, struct tss32 *tss, struct iovec *iov)
{
        struct seg_desc seg_desc, seg_desc2;
        uint64_t *pdpte, maxphyaddr, reserved;
        uint32_t eflags;
        int error, i;
        bool nested;

        nested = false;
        if (ts->reason != TSR_IRET && ts->reason != TSR_JMP) {
                tss->tss_link = ot_sel;
                nested = true;
        }

        eflags = tss->tss_eflags;
        if (nested)
                eflags |= PSL_NT;

        /* LDTR */
        SETREG(ctx, vcpu, VM_REG_GUEST_LDTR, tss->tss_ldt);

        /* PDBR */
        if (ts->paging.paging_mode != PAGING_MODE_FLAT) {
                if (ts->paging.paging_mode == PAGING_MODE_PAE) {
                        /*
                         * XXX Assuming 36-bit MAXPHYADDR.
                         */
                        maxphyaddr = (1UL << 36) - 1;
                        pdpte = paddr_guest2host(ctx, tss->tss_cr3 & ~0x1f, 32);
                        for (i = 0; i < 4; i++) {
                                /* Check reserved bits if the PDPTE is valid */
                                if (!(pdpte[i] & 0x1))
                                        continue;
                                /*
                                 * Bits 2:1, 8:5 and bits above the processor's
                                 * maximum physical address are reserved.
                                 */
                                reserved = ~maxphyaddr | 0x1E6;
                                if (pdpte[i] & reserved) {
                                        vm_inject_gp(ctx, vcpu);
                                        return (1);
                                }
                        }
                        SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE0, pdpte[0]);
                        SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE1, pdpte[1]);
                        SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE2, pdpte[2]);
                        SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE3, pdpte[3]);
                }
                SETREG(ctx, vcpu, VM_REG_GUEST_CR3, tss->tss_cr3);
                ts->paging.cr3 = tss->tss_cr3;
        }

        /* eflags and eip */
        SETREG(ctx, vcpu, VM_REG_GUEST_RFLAGS, eflags);
        SETREG(ctx, vcpu, VM_REG_GUEST_RIP, tss->tss_eip);

        /* General purpose registers */
        SETREG(ctx, vcpu, VM_REG_GUEST_RAX, tss->tss_eax);
        SETREG(ctx, vcpu, VM_REG_GUEST_RCX, tss->tss_ecx);
        SETREG(ctx, vcpu, VM_REG_GUEST_RDX, tss->tss_edx);
        SETREG(ctx, vcpu, VM_REG_GUEST_RBX, tss->tss_ebx);
        SETREG(ctx, vcpu, VM_REG_GUEST_RSP, tss->tss_esp);
        SETREG(ctx, vcpu, VM_REG_GUEST_RBP, tss->tss_ebp);
        SETREG(ctx, vcpu, VM_REG_GUEST_RSI, tss->tss_esi);
        SETREG(ctx, vcpu, VM_REG_GUEST_RDI, tss->tss_edi);

        /* Segment selectors */
        SETREG(ctx, vcpu, VM_REG_GUEST_ES, tss->tss_es);
        SETREG(ctx, vcpu, VM_REG_GUEST_CS, tss->tss_cs);
        SETREG(ctx, vcpu, VM_REG_GUEST_SS, tss->tss_ss);
        SETREG(ctx, vcpu, VM_REG_GUEST_DS, tss->tss_ds);
        SETREG(ctx, vcpu, VM_REG_GUEST_FS, tss->tss_fs);
        SETREG(ctx, vcpu, VM_REG_GUEST_GS, tss->tss_gs);

        /*
         * If this is a nested task then write out the new TSS to update
         * the previous link field.
         */
        if (nested)
                vm_copyout(ctx, vcpu, tss, iov, sizeof(*tss));

        /* Validate segment descriptors */
        error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_LDTR, &seg_desc);
        if (error)
                return (error);
        update_seg_desc(ctx, vcpu, VM_REG_GUEST_LDTR, &seg_desc);

        /*
         * Section "Checks on Guest Segment Registers", Intel SDM, Vol 3.
         *
         * The SS and CS attribute checks on VM-entry are inter-dependent so
         * we need to make sure that both segments are valid before updating
         * either of them. This ensures that the VMCS state can pass the
         * VM-entry checks so the guest can handle any exception injected
         * during task switch emulation.
         */
        error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_CS, &seg_desc);
        if (error)
                return (error);

        error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_SS, &seg_desc2);
        if (error)
                return (error);
        update_seg_desc(ctx, vcpu, VM_REG_GUEST_CS, &seg_desc);
        update_seg_desc(ctx, vcpu, VM_REG_GUEST_SS, &seg_desc2);
        ts->paging.cpl = tss->tss_cs & SEL_RPL_MASK;

        error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_DS, &seg_desc);
        if (error)
                return (error);
        update_seg_desc(ctx, vcpu, VM_REG_GUEST_DS, &seg_desc);

        error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_ES, &seg_desc);
        if (error)
                return (error);
        update_seg_desc(ctx, vcpu, VM_REG_GUEST_ES, &seg_desc);

        error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_FS, &seg_desc);
        if (error)
                return (error);
        update_seg_desc(ctx, vcpu, VM_REG_GUEST_FS, &seg_desc);

        error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_GS, &seg_desc);
        if (error)
                return (error);
        update_seg_desc(ctx, vcpu, VM_REG_GUEST_GS, &seg_desc);

        return (0);
}
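/*
 * Worked example (illustrative only): how the PAE PDPTE reserved mask
 * used in tss32_restore() is derived.  Bits 2:1 and 8:5 of a PDPTE are
 * always reserved, giving the constant 0x1E6 (binary 1 1110 0110), and
 * every bit at or above MAXPHYADDR is reserved as well.
 * pdpte_reserved_mask() is a hypothetical helper, parameterized on
 * MAXPHYADDR instead of the 36 bits hard-coded above; like the code
 * above it assumes a 64-bit long.
 */
static uint64_t
pdpte_reserved_mask(int maxphyaddr_bits)
{
        uint64_t maxphyaddr;

        maxphyaddr = (1UL << maxphyaddr_bits) - 1;

        /* bits 2:1 -> 0x006, bits 8:5 -> 0x1E0; together 0x1E6 */
        return (~maxphyaddr | 0x1E6);
}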
int
emulate_inout(int vcpu, struct vm_exit *vmexit, int strict)
{
        int addrsize, bytes, flags, in, port, prot, rep;
        uint32_t eax, val;
        inout_func_t handler;
        void *arg;
        int error, fault, retval;
        enum vm_reg_name idxreg;
        uint64_t gla, index, iterations, count;
        struct vm_inout_str *vis;
        struct iovec iov[2];

        bytes = vmexit->u.inout.bytes;
        in = vmexit->u.inout.in;
        port = vmexit->u.inout.port;

        assert(port < MAX_IOPORTS);
        assert(bytes == 1 || bytes == 2 || bytes == 4);

        handler = inout_handlers[port].handler;
        if (strict && handler == default_inout)
                return (-1);

        flags = inout_handlers[port].flags;
        arg = inout_handlers[port].arg;

        if (in) {
                if (!(flags & IOPORT_F_IN))
                        return (-1);
        } else {
                if (!(flags & IOPORT_F_OUT))
                        return (-1);
        }

        retval = 0;

        if (vmexit->u.inout.string) {
                vis = &vmexit->u.inout_str;
                rep = vis->inout.rep;
                addrsize = vis->addrsize;
                prot = in ? XHYVE_PROT_WRITE : XHYVE_PROT_READ;
                assert(addrsize == 2 || addrsize == 4 || addrsize == 8);

                /* Index register */
                idxreg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
                index = vis->index & vie_size2mask(addrsize);

                /* Count register */
                count = vis->count & vie_size2mask(addrsize);

                /* Limit number of back-to-back in/out emulations to 16 */
                iterations = min(count, 16);
                while (iterations > 0) {
                        assert(retval == 0);
                        if (vie_calculate_gla(vis->paging.cpu_mode,
                            vis->seg_name, &vis->seg_desc, index, bytes,
                            addrsize, prot, &gla)) {
                                vm_inject_gp(vcpu);
                                break;
                        }

                        error = xh_vm_copy_setup(vcpu, &vis->paging, gla,
                            ((size_t) bytes), prot, iov, nitems(iov), &fault);
                        if (error) {
                                retval = -1;  /* Unrecoverable error */
                                break;
                        } else if (fault) {
                                retval = 0;  /* Resume guest to handle fault */
                                break;
                        }

                        if (vie_alignment_check(vis->paging.cpl, bytes,
                            vis->cr0, vis->rflags, gla)) {
                                vm_inject_ac(vcpu, 0);
                                break;
                        }

                        val = 0;
                        if (!in)
                                xh_vm_copyin(iov, &val, ((size_t) bytes));

                        retval = handler(vcpu, in, port, bytes, &val, arg);
                        if (retval != 0)
                                break;

                        if (in)
                                xh_vm_copyout(&val, iov, ((size_t) bytes));

                        /* Update index */
                        if (vis->rflags & PSL_D)
                                index -= ((uint64_t) bytes);
                        else
                                index += ((uint64_t) bytes);

                        count--;
                        iterations--;
                }

                /* Update index register */
                error = update_register(vcpu, idxreg, index, addrsize);
                assert(error == 0);

                /*
                 * Update count register only if the instruction had a repeat
                 * prefix.
                 */
                if (rep) {
                        error = update_register(vcpu, VM_REG_GUEST_RCX, count,
                            addrsize);
                        assert(error == 0);
                }

                /* Restart the instruction if more iterations remain */
                if (retval == 0 && count != 0) {
                        error = xh_vm_restart_instruction(vcpu);
                        assert(error == 0);
                }
        } else {
                eax = vmexit->u.inout.eax;
                val = eax & vie_size2mask(bytes);
                retval = handler(vcpu, in, port, bytes, &val, arg);
                if (retval == 0 && in) {
                        eax &= ~vie_size2mask(bytes);
                        eax |= val & vie_size2mask(bytes);
                        error = xh_vm_set_register(vcpu, VM_REG_GUEST_RAX,
                            eax);
                        assert(error == 0);
                }
        }
        return (retval);
}
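/*
 * Illustrative sketch (assumed names, not part of this file): a port
 * handler with the call shape emulate_inout() expects.  It models a
 * scratch latch at a hypothetical port: OUTs latch the low bytes, INs
 * read them back.  Registration into inout_handlers[] (for example via
 * a register_inout()-style helper) is left out since that glue lives
 * elsewhere.
 */
static uint32_t scratch_latch;

static int
scratch_ioport_handler(int vcpu, int in, int port, int bytes,
    uint32_t *val, void *arg)
{
        uint32_t mask;

        (void)vcpu; (void)port; (void)arg;      /* unused in this sketch */

        /* bytes is 1, 2 or 4; avoid the undefined 1u << 32 */
        mask = (bytes == 4) ? 0xffffffff : ((1u << (bytes * 8)) - 1);
        if (in)
                *val = scratch_latch & mask;    /* IN: return latched value */
        else
                scratch_latch = *val & mask;    /* OUT: latch the new value */

        return (0);     /* 0 = handled; non-zero is passed back to the caller */
}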