Example #1
/*
** Hardware exec traps are checked before
** insn execution. But hardware data, i/o
** and single-step traps are checked after.
**
** If we emulated an insn, we may lose
** a #DB condition, so take care here.
**
** We do not inject #DB; we use the VMCS
** pending debug exceptions field instead.
*/
static int vmx_db_check_pending_any()
{
   if(!__vmexit_on_insn())
   {
#ifdef CONFIG_VMX_DB_DBG
      if(vm_state.rflags.tf)
      {
         vmcs_read(vm_state.activity);
         vmcs_read(vm_state.interrupt);
         debug(VMX_DB,
               "TF is set, pending #DB: be:%d bs:%d sti:%d mss:%d activity:0x%x\n"
               ,vm_state.dbg_excp.be, vm_state.dbg_excp.bs
               ,vm_state.interrupt.sti, vm_state.interrupt.mss
               ,vm_state.activity.raw);
      }
#endif
      return VM_IGNORE;
   }

   if(vmx_db_check_pending_stp() == VM_DONE)
   {
      debug(VMX_DB, "pending #DB: set stp\n");
      vm_state.dbg_excp.bs = 1;
      vmcs_dirty(vm_state.dbg_excp);
   }

   /* XXX: missing data/io */

   return VM_DONE;
}
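/*
** Illustrative sketch (not in the original) of how an emulation path
** could use the check above. emulate_insn() is a hypothetical
** emulator entry point; only vmx_db_check_pending_any() and VM_DONE
** come from this listing.
*/
static int emulate_insn_track_db()
{
   int rc = emulate_insn(); /* hypothetical */

   if(rc != VM_DONE)
      return rc;

   /* hardware checked exec traps before the insn; since we emulated
   ** it, re-check for a single-step #DB we would otherwise lose
   */
   return vmx_db_check_pending_any();
}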
Example #2
/* Vol 3C Sections 26.6.3, 32.2, 25.5.2, 26.5.2
**
** a pending #DB is discarded after vm entry if:
** - vm entry is vectoring an external int, nmi, hardware excp
**   or privileged software excp
** - no MOV SS blocking and vectoring a software int or excp
** - vm entry is not vectoring and activity == shutdown or wait-sipi
*/
static int vmx_db_pending_discarded()
{
   if(!vm_entry_ctrls.int_info.v)
   {
      vmcs_read(vm_state.activity);
      if(vm_state.activity.raw == VMX_VMCS_GUEST_ACTIVITY_STATE_SHUTDOWN ||
         vm_state.activity.raw == VMX_VMCS_GUEST_ACTIVITY_STATE_SIPI)
         return 1;
   }
   else
   {
      switch(vm_entry_ctrls.int_info.type)
      {
      case VMCS_EVT_INFO_TYPE_NMI:
      case VMCS_EVT_INFO_TYPE_HW_INT:
      case VMCS_EVT_INFO_TYPE_HW_EXCP:
      case VMCS_EVT_INFO_TYPE_PS_EXCP:
         return 1;

      case VMCS_EVT_INFO_TYPE_SW_INT:
      case VMCS_EVT_INFO_TYPE_SW_EXCP:
         vmcs_read(vm_state.interrupt);
         if(!vm_state.interrupt.mss)
            return 1;
      }
   }

   return 0;
}
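/*
** Sketch (assumed layout) of the VM-entry interruption-information
** field read as vm_entry_ctrls.int_info above, per Vol 3C 24.8.3;
** the field names mirror this listing's conventions and the exact
** type is an assumption.
*/
#include <stdint.h>

typedef union
{
   struct
   {
      uint32_t vector:8;  /* vector of the injected event       */
      uint32_t type:3;    /* one of VMCS_EVT_INFO_TYPE_*        */
      uint32_t dec:1;     /* deliver error code                 */
      uint32_t rsvd:19;   /* reserved                           */
      uint32_t v:1;       /* valid bit tested by the code above */
   };
   uint32_t raw;
} entry_int_info_sketch_t;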
Example #3
int vmx_vmexit_resolve_ept_viol()
{
   vmcs_read(vm_exit_info.qualification);
   info->vm.cpu.fault.npf.err.raw = vm_exit_info.qualification.raw;

   vmcs_read(vm_exit_info.guest_physical);
   info->vm.cpu.fault.npf.paddr = vm_exit_info.guest_physical.raw;

   if(info->vm.cpu.fault.npf.err.gl)
   {
      vmcs_read(vm_exit_info.guest_linear);
      info->vm.cpu.fault.npf.vaddr = vm_exit_info.guest_linear.raw;
      debug(VMX_EPT,
	    "#NPF gv 0x%X gp 0x%X err 0x%X "
	    "(r:%d w:%d x:%d gr:%d gw:%d gx:%d gl:%d final:%d nmi:%d)\n"
	    ,info->vm.cpu.fault.npf.vaddr, info->vm.cpu.fault.npf.paddr
	    ,info->vm.cpu.fault.npf.err.raw
	    ,info->vm.cpu.fault.npf.err.r
	    ,info->vm.cpu.fault.npf.err.w
	    ,info->vm.cpu.fault.npf.err.x
	    ,info->vm.cpu.fault.npf.err.gr
	    ,info->vm.cpu.fault.npf.err.gw
	    ,info->vm.cpu.fault.npf.err.gx
	    ,info->vm.cpu.fault.npf.err.gl
	    ,info->vm.cpu.fault.npf.err.final
	    ,info->vm.cpu.fault.npf.err.nmi);
   }

   /* the original listing is truncated at this point; a minimal,
   ** assumed completion so the handler stays well-formed (the real
   ** code would go on to resolve the nested page fault)
   */
   return VM_DONE;
}
Example #4
void ept_hw_get_pdtprs(GUEST_CPU_HANDLE gcpu, UINT64 pdptr[])
{
    VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu);

    CHECK_EXECUTION_ON_LOCAL_HOST_CPU(gcpu);
    pdptr[0] = vmcs_read(vmcs, VMCS_GUEST_PDPTR0);
    pdptr[1] = vmcs_read(vmcs, VMCS_GUEST_PDPTR1);
    pdptr[2] = vmcs_read(vmcs, VMCS_GUEST_PDPTR2);
    pdptr[3] = vmcs_read(vmcs, VMCS_GUEST_PDPTR3);
}
Example #5
static int __vmx_vmexit_sgdt(dt_reg_t *dt_reg)
{
   vmcs_read(vm_state.gdtr.base);
   vmcs_read(vm_state.gdtr.limit);

   dt_reg->base.raw = vm_state.gdtr.base.raw;
   dt_reg->limit = vm_state.gdtr.limit.wlow;

   debug(VMX_DT, "sgdt\n");
   return VM_DONE;
}

VMEXIT_HANDLING_STATUS vmexit_gdtr_idtr_access(GUEST_CPU_HANDLE gcpu)
{
    VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);
    REPORT_DTR_ACCESS_DATA gdtr_idtr_access_data;

    gdtr_idtr_access_data.qualification = vmcs_read(vmcs, VMCS_EXIT_INFO_QUALIFICATION);
    gdtr_idtr_access_data.instruction_info = (UINT32) vmcs_read(vmcs, VMCS_EXIT_INFO_INSTRUCTION_INFO);

    if (!report_uvmm_event(UVMM_EVENT_GDTR_IDTR_ACCESS, (VMM_IDENTIFICATION_DATA)gcpu, (const GUEST_VCPU*)guest_vcpu(gcpu), (void *)&gdtr_idtr_access_data)) {
        VMM_LOG(mask_anonymous, level_trace, "report_gdtr_idtr_access failed\n");
    }
    return VMEXIT_HANDLED;
}
Example #7
void vmx_db_show_pending()
{
#ifdef CONFIG_VMX_DB_DBG
   vmcs_read(vm_state.activity);
   vmcs_read(vm_state.interrupt);
   vmcs_read(vm_state.ia32_dbgctl);
   vmcs_read(vm_state.dbg_excp);

   debug(VMX_DB,
         "pending #DB: be:%d bs:%d raw:0x%X sti:%d mss:%d activity:0x%x btf:%d\n"
         ,vm_state.dbg_excp.be, vm_state.dbg_excp.bs
         ,vm_state.dbg_excp.raw
         ,vm_state.interrupt.sti, vm_state.interrupt.mss
         ,vm_state.activity.raw
         ,vm_state.ia32_dbgctl.btf);
#endif
}
Example #8
BOOLEAN ept_hw_is_ept_enabled(GUEST_CPU_HANDLE gcpu)
{
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS2 proc_ctrls2;

    CHECK_EXECUTION_ON_LOCAL_HOST_CPU(gcpu);
    proc_ctrls2.Uint32 = (UINT32) vmcs_read(gcpu_get_vmcs(gcpu), VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS);
    return proc_ctrls2.Bits.EnableEPT;
}
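/*
** Hedged write-side counterpart to the read above: a sketch that
** toggles EnableEPT in the secondary processor-based controls. The
** function is hypothetical; it assumes vmcs_write() (seen in
** Example #25) and skips the consistency checks a real enable path
** would perform.
*/
void ept_hw_set_ept_enabled(GUEST_CPU_HANDLE gcpu, BOOLEAN enable)
{
    VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu);
    PROCESSOR_BASED_VM_EXECUTION_CONTROLS2 proc_ctrls2;

    CHECK_EXECUTION_ON_LOCAL_HOST_CPU(gcpu);
    proc_ctrls2.Uint32 = (UINT32) vmcs_read(vmcs, VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS);
    proc_ctrls2.Bits.EnableEPT = enable ? 1 : 0;
    vmcs_write(vmcs, VMCS_CONTROL2_VECTOR_PROCESSOR_EVENTS, (UINT64) proc_ctrls2.Uint32);
}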
Example #9
int
vmcs_getdesc(int vcpuid, int seg, struct seg_desc *desc)
{
    int error;
    uint32_t base, limit, access;

    error = vmcs_seg_desc_encoding(seg, &base, &limit, &access);
    if (error != 0)
        xhyve_abort("vmcs_getdesc: invalid segment register %d\n", seg);

    desc->base = vmcs_read(vcpuid, base);
    desc->limit = (uint32_t) vmcs_read(vcpuid, limit);
    if (access != VMCS_INVALID_ENCODING) {
        desc->access = (uint32_t) vmcs_read(vcpuid, access);
    }

    return (0);
}
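/*
** Usage sketch for vmcs_getdesc(); dump_guest_cs() is hypothetical
** and VM_REG_GUEST_CS is assumed to be a valid 'seg' identifier in
** this (xhyve-style) codebase.
*/
#include <stdio.h>

static void dump_guest_cs(int vcpuid)
{
    struct seg_desc desc;

    vmcs_getdesc(vcpuid, VM_REG_GUEST_CS, &desc);
    printf("cs: base=0x%llx limit=0x%x access=0x%x\n",
           (unsigned long long) desc.base, desc.limit, desc.access);
}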
Example #10
void vmx_db_check_pending()
{
   vmcs_read(vm_state.dbg_excp);

   vmx_db_check_pending_any();

   if((vm_state.dbg_excp.be || vm_state.dbg_excp.bs) && vmx_db_pending_discarded())
      debug(VMX_DB, "pending #DB: lost one\n");
}
Example #11
int vmx_vmexit_resolve_dr_access()
{
   vmcs_exit_info_dr_t *access;
   uint8_t             gpr;

   vmcs_read(vm_exit_info.qualification);
   access = &vm_exit_info.qualification.dr;
   gpr = GPR64_RAX - (access->gpr & GPR64_RAX);

   if(__resolve_dr(!access->dir, access->nr, gpr) == DR_SUCCESS)
   {
      info->vm.cpu.emu_done = 1;
      vmcs_read(vm_exit_info.insn_len);
      vm_update_rip(vm_exit_info.insn_len.raw);
      return 1;
   }

   return 0;
}
Example #12
int vmx_vmexit_resolve_io()
{
   int rc = dev_access();

   if(rc != VM_DONE)
      return rc;

   vmcs_read(vm_exit_info.insn_len);
   return emulate_done(VM_DONE, vm_exit_info.insn_len.raw);
}
Example #13
static int __vmx_vmexit_sidt(dt_reg_t *dt_reg)
{
   vmcs_read(vm_state.idtr.base);

   dt_reg->base.raw = vm_state.idtr.base.raw;
   dt_reg->limit = info->vm.idt_limit;

   debug(VMX_DT, "sidt\n");
   return VM_DONE;
}

VMEXIT_HANDLING_STATUS vmexit_dr_access(GUEST_CPU_HANDLE gcpu)
{
    VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);
    REPORT_CR_DR_LOAD_ACCESS_DATA dr_load_access_data;

    dr_load_access_data.qualification = vmcs_read(vmcs, VMCS_EXIT_INFO_QUALIFICATION);

    if (!report_uvmm_event(UVMM_EVENT_DR_LOAD_ACCESS, (VMM_IDENTIFICATION_DATA)gcpu, (const GUEST_VCPU*)guest_vcpu(gcpu), (void *)&dr_load_access_data)) {
        VMM_LOG(mask_anonymous, level_trace, "report_dr_load_access failed\n");
    }
    return VMEXIT_HANDLED;
}
Example #15
/*
** Vol. 3C-32.2 (Virtualization of System Resources)
*/
void vmx_check_dbgctl()
{
   vmcs_read(vm_state.activity);
   vmcs_read(vm_state.interrupt);

#ifdef CONFIG_VMX_DB_DBG
   vmcs_read(vm_state.dbg_excp);

   if(vm_state.dbg_excp.be || vm_state.dbg_excp.bs)
      debug(VMX_DB, "pending #DB: be:%d bs:%d 0x%X\n"
            ,vm_state.dbg_excp.be, vm_state.dbg_excp.bs
            ,vm_state.dbg_excp.raw);
#endif

   if(!vm_state.interrupt.sti && !vm_state.interrupt.mss &&
      vm_state.activity.raw != VMX_VMCS_GUEST_ACTIVITY_STATE_HALT)
      return;

#ifdef CONFIG_VMX_DB_DBG
   debug(VMX_DB, "pending #DB: sti:%d mss:%d activity:0x%x\n"
         ,vm_state.interrupt.sti, vm_state.interrupt.mss
         ,vm_state.activity.raw);
#endif

   vmcs_read(vm_state.ia32_dbgctl);
   vmcs_read(vm_state.dbg_excp);

   if(vm_state.rflags.tf && !vm_state.ia32_dbgctl.btf)
   {
      debug(VMX_DB, "pending #DB (sti/mss/hlt): set sstep\n");
      vm_state.dbg_excp.bs = 1;
   }
   else if(vm_state.dbg_excp.bs)
   {
      debug(VMX_DB, "pending #DB (sti/mss/hlt): clr sstep\n");
      vm_state.dbg_excp.bs = 0;
   }

   vmcs_dirty(vm_state.dbg_excp);
}
Example #16
UINT64 ept_hw_get_eptp(GUEST_CPU_HANDLE gcpu)
{
    VMCS_OBJECT* vmcs = gcpu_get_vmcs(gcpu);
    UINT64 eptp = 0;

    VMM_ASSERT(gcpu);
    CHECK_EXECUTION_ON_LOCAL_HOST_CPU(gcpu);
    if(! ept_hw_is_ept_supported()) {
        return eptp;
    }
    eptp = vmcs_read( vmcs, VMCS_EPTP_ADDRESS);
    return eptp;
}
Example #17
/*
** Vol. 3C-32.2 (Virtualization of System Resources)
*/
void vmx_check_dbgctl()
{
   vmcs_read(vm_state.activity);
   vmcs_read(vm_state.interrupt);

   if(!vm_state.interrupt.sti && !vm_state.interrupt.mss &&
      vm_state.activity.raw != VMX_VMCS_GUEST_ACTIVITY_STATE_HALT)
      return;

   vmcs_read(vm_state.ia32_dbgctl);
   vmcs_read(vm_state.dbg_excp);

   if(vm_state.rflags.tf && !vm_state.ia32_dbgctl.btf)
   {
      debug(VMX_DB, "setting pending #DB for sstep\n");
      vm_state.dbg_excp.bs = 1;
   }

   if(!vm_state.rflags.tf || vm_state.ia32_dbgctl.btf)
      vm_state.dbg_excp.bs = 0;

   vmcs_dirty(vm_state.dbg_excp);
}
Example #18
int vmx_vmexit_resolve_excp()
{
   vmcs_read(vm_exit_info.int_info);
   vmcs_read(vm_exit_info.int_err_code);
   vmcs_read(vm_exit_info.qualification);

   switch(vm_exit_info.int_info.vector)
   {
   case GP_EXCP:
      if(__rmode())
         return vmx_vmexit_resolve_rmode_gp();
      break;
   case DB_EXCP:
      vm_state.dr6.wlow = vm_exit_info.qualification.wlow;
      vmcs_set_read(vm_state.dr6);
      break;
   case MC_EXCP:
      vmm_excp_mce();
      break;
   }

   return resolve_exception();
}
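/*
** Note on the DB_EXCP arm above: for debug exceptions, the exit
** qualification mirrors the DR6 bits the processor would have set
** (B0-B3 in bits 3:0, BD in bit 13, BS in bit 14; Vol 3C 27.2.1),
** which is why its low word can be merged straight into DR6.
*/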
Example #19
int vmx_vmexit_idt_deliver()
{
   vmcs_read(vm_exit_info.idt_info);

   if(!vm_exit_info.idt_info.v)
   {
      vmcs_read(vm_exit_info.int_info);

      if(vm_exit_info.int_info.nmi && vm_exit_info.int_info.vector != DF_EXCP)
      {
	 vmcs_read(vm_state.interrupt);
	 vm_state.interrupt.nmi = 1;
	 vmcs_dirty(vm_state.interrupt);
      }

      return VM_DONE;
   }

   if(__rmode())
      return __vmx_vmexit_idt_deliver_rmode();

   return __vmx_vmexit_idt_deliver_pmode();
}
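/*
** Sketch (assumed layout) of the guest interruptibility state behind
** vm_state.interrupt above, per Vol 3C 24.4.2; field names follow
** this listing and the exact type is an assumption.
*/
#include <stdint.h>

typedef union
{
   struct
   {
      uint32_t sti:1;   /* blocking by STI            */
      uint32_t mss:1;   /* blocking by MOV SS / POP SS */
      uint32_t smi:1;   /* blocking by SMI            */
      uint32_t nmi:1;   /* blocking by NMI; re-armed above when an
                        ** exception other than #DF interrupts NMI
                        ** delivery */
      uint32_t rsvd:28;
   };
   uint32_t raw;
} int_state_sketch_t;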
Example #20
static void vmx_vmcs_collect_entry()
{
   vmcs_read(vm_entry_ctrls.msr_load_addr);
   vmcs_read(vm_entry_ctrls.entry);
   vmcs_read(vm_entry_ctrls.msr_load_count);
   vmcs_read(vm_entry_ctrls.int_info);
   vmcs_read(vm_entry_ctrls.err_code);
   vmcs_read(vm_entry_ctrls.insn_len);
}
Example #21
static int __vmx_vmexit_resolve_msr_sysenter_cs(uint8_t wr)
{
   if(wr)
   {
      vm_state.ia32_sysenter_cs.raw = info->vm.cpu.gpr->rax.low;
      vmcs_dirty(vm_state.ia32_sysenter_cs);
   }
   else
   {
      vmcs_read(vm_state.ia32_sysenter_cs);
      info->vm.cpu.gpr->rax.low = vm_state.ia32_sysenter_cs.raw;
   }

   return VM_DONE;
}
Example #22
static int __vmx_vmexit_resolve_msr_perf(uint8_t wr)
{
   if(wr)
   {
      vm_state.ia32_perf.low  = info->vm.cpu.gpr->rax.low;
      vm_state.ia32_perf.high = info->vm.cpu.gpr->rdx.low;
      vmcs_dirty(vm_state.ia32_perf);
   }
   else
   {
      vmcs_read(vm_state.ia32_perf);
      info->vm.cpu.gpr->rax.low = vm_state.ia32_perf.low;
      info->vm.cpu.gpr->rdx.low = vm_state.ia32_perf.high;
   }

   return VM_DONE;
}
Example #23
static void vmx_vmcs_collect_exit()
{
   vmcs_read(vm_exit_ctrls.msr_store_addr);
   vmcs_read(vm_exit_ctrls.msr_load_addr);

   vmcs_read(vm_exit_ctrls.exit);

   if(vm_exit_ctrls.exit.save_ia32_pat)
      vmcs_read(vm_state.ia32_pat);

   if(vm_exit_ctrls.exit.save_ia32_efer)
      vmcs_read(vm_state.ia32_efer);

   if(vm_exit_ctrls.exit.save_dbgctl)
      vmcs_read(vm_state.ia32_dbgctl);

   vmcs_read(vm_exit_ctrls.msr_store_count);
   vmcs_read(vm_exit_ctrls.msr_load_count);
}
Example #24
static int __vmx_vmexit_resolve_msr_dbgctl(uint8_t wr)
{
   if(wr)
   {
      vm_state.ia32_dbgctl.low  = info->vm.cpu.gpr->rax.low;
      vm_state.ia32_dbgctl.high = info->vm.cpu.gpr->rdx.low;
      /* check in vmx_check_dbgctl() */
      vmcs_dirty(vm_state.ia32_dbgctl);
   }
   else
   {
      vmcs_read(vm_state.ia32_dbgctl);
      info->vm.cpu.gpr->rax.low = vm_state.ia32_dbgctl.low;
      info->vm.cpu.gpr->rdx.low = vm_state.ia32_dbgctl.high;
   }

   return VM_DONE;
}
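/*
** Illustrative sketch (not in the original) of how rd/wrmsr handlers
** like the three above are usually dispatched on ECX. The function
** is hypothetical; only VM_FAIL, VM_DONE and the
** __vmx_vmexit_resolve_msr_* handlers come from this listing, and
** the IA32_PERF_GLOBAL_CTRL mapping for ia32_perf is assumed.
*/
static int vmx_vmexit_resolve_msr_sketch(uint8_t wr)
{
   switch(info->vm.cpu.gpr->rcx.low)
   {
   case 0x174: /* IA32_SYSENTER_CS */
      return __vmx_vmexit_resolve_msr_sysenter_cs(wr);
   case 0x1d9: /* IA32_DEBUGCTL */
      return __vmx_vmexit_resolve_msr_dbgctl(wr);
   case 0x38f: /* IA32_PERF_GLOBAL_CTRL */
      return __vmx_vmexit_resolve_msr_perf(wr);
   }

   return VM_FAIL;
}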
Example #25
// Function : vmdb_settings_apply_to_hw
// Purpose  : Update GCPU DRs from its guest's VMDB context
// Arguments: GUEST_CPU_HANDLE gcpu
// Returns  : void
void vmdb_settings_apply_to_hw ( GUEST_CPU_HANDLE gcpu)
    {
    VMDB_THREAD_CONTEXT *vmdb = gcpu_get_vmdb(gcpu);

    if (NULL != vmdb) {
        UINT64      rflags;
        VMCS_OBJECT *vmcs = gcpu_get_vmcs(gcpu);

        gcpu_set_debug_reg(gcpu, IA32_REG_DR7, vmdb->dr7);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR0, vmdb->dr[0]);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR1, vmdb->dr[1]);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR2, vmdb->dr[2]);
        gcpu_set_debug_reg(gcpu, IA32_REG_DR3, vmdb->dr[3]);

        rflags = vmcs_read(vmcs, VMCS_GUEST_RFLAGS);
        if (vmdb->sstep)
            BIT_SET64(rflags, RFLAGS_TF_BIT);
        else
            BIT_CLR64(rflags, RFLAGS_TF_BIT);
        vmcs_write(vmcs, VMCS_GUEST_RFLAGS, rflags);
        }
    }
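/*
** Illustrative only: how a debugger front-end might arm single-step
** through the context above. vmdb_single_step_enable() is
** hypothetical; only vmdb->sstep and vmdb_settings_apply_to_hw()
** appear in this listing.
*/
void vmdb_single_step_enable(GUEST_CPU_HANDLE gcpu, BOOLEAN enable)
    {
    VMDB_THREAD_CONTEXT *vmdb = gcpu_get_vmdb(gcpu);

    if (NULL != vmdb) {
        vmdb->sstep = enable;
        vmdb_settings_apply_to_hw(gcpu);  /* sets/clears RFLAGS.TF */
        }
    }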
Example #26
static int __vmx_vmexit_resolve_msr_efer(uint8_t wr)
{
   if(wr)
   {
      ia32_efer_msr_t update;

      update.low = info->vm.efer.low ^ info->vm.cpu.gpr->rax.low;

      info->vm.efer.low  = info->vm.cpu.gpr->rax.low;
      info->vm.efer.high = info->vm.cpu.gpr->rdx.low;

      vm_state.ia32_efer.low  = info->vm.efer.low;
      vm_state.ia32_efer.high = info->vm.efer.high;

      vmcs_read(vm_entry_ctrls.entry);
      vm_state.ia32_efer.lme = vm_state.ia32_efer.lma = vm_entry_ctrls.entry.ia32e;
      vmcs_dirty(vm_state.ia32_efer);

      if(info->vm.efer.lma && !info->vm.efer.lme)
	 info->vm.efer.lma = 0;

      if(update.lme && __cr0.pg)
      {
	 debug(VMX_MSR, "modifying LME while paging-on #GP\n");
	 __inject_exception(GP_EXCP, 0, 0);
	 return VM_FAULT;
      }
   }
   else
   {
      info->vm.cpu.gpr->rax.low = info->vm.efer.low;
      info->vm.cpu.gpr->rdx.low = info->vm.efer.high;
   }

   return VM_DONE;
}
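/*
** Note on the XOR above: 'update' holds the bits the wrmsr toggles,
** so update.lme is set iff the write flips EFER.LME; modifying LME
** while CR0.PG=1 is illegal and must raise #GP, which the check at
** the end of the write path enforces.
*/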
Example #27
VMEXIT_HANDLING_STATUS vmdb_dr_access_vmexit_handler(GUEST_CPU_HANDLE gcpu)
    {
    VMCS_OBJECT                 *vmcs = gcpu_get_vmcs(gcpu);
    IA32_VMX_EXIT_QUALIFICATION qualification;
    int                         dbreg_id;
    VMM_IA32_GP_REGISTERS       gpreg_id;

    qualification.Uint64 = vmcs_read(vmcs, VMCS_EXIT_INFO_QUALIFICATION);
    gpreg_id = lkup_operand[qualification.DrAccess.MoveGpr];
    dbreg_id = (int) qualification.DrAccess.Number;
    if (6 == dbreg_id) dbreg_id = IA32_REG_DR6;
    if (7 == dbreg_id) dbreg_id = IA32_REG_DR7;

    if  (0 == qualification.DrAccess.Direction) {
        // do nothing
        }
    else {
        UINT64 reg_value = gcpu_get_debug_reg(gcpu, (VMM_IA32_DEBUG_REGISTERS)dbreg_id);
        gcpu_set_native_gp_reg(gcpu, gpreg_id, reg_value);
        }

    gcpu_skip_guest_instruction(gcpu);
    return VMEXIT_HANDLED;
    }
Example #28
int
vmcs_getreg(int vcpuid, int ident, uint64_t *retval)
{
    uint32_t encoding;

    /*
     * If we need to get at vmx-specific state in the VMCS we can bypass
     * the translation of 'ident' to 'encoding' by simply setting the
     * sign bit. As it so happens the upper 16 bits are reserved (i.e
     * set to 0) in the encodings for the VMCS so we are free to use the
     * sign bit.
     */
    if (ident < 0)
        encoding = ident & 0x7fffffff;
    else
        encoding = vmcs_field_encoding(ident);

    if (encoding == (uint32_t)-1)
        return (EINVAL);

    *retval = vmcs_read(vcpuid, encoding);

    return (0);
}
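/*
** The sign-bit bypass in practice: a sketch that reads a raw VMCS
** field through vmcs_getreg(). The macro and helper are illustrative
** (the bhyve lineage defines a similar VMCS_IDENT()).
*/
#define VMCS_IDENT_SKETCH(encoding) ((int)((encoding) | 0x80000000u))

static uint64_t get_guest_rip(int vcpuid)
{
    uint64_t rip;

    /* 0x681e is the VMCS field encoding of GUEST_RIP */
    vmcs_getreg(vcpuid, VMCS_IDENT_SKETCH(0x681e), &rip);
    return rip;
}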
Example #29
BOOLEAN vmcs_read_nmi_window_bit(struct _VMCS_OBJECT *vmcs)
{
    UINT64 value = vmcs_read(vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);
    return (0 != BIT_GET64(value, NMI_WINDOW_BIT));
}
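/*
** Hedged write-side counterpart to the read above, reusing
** BIT_SET64/BIT_CLR64 and vmcs_write() as they appear elsewhere in
** this listing; a sketch, not the project's actual helper.
*/
void vmcs_write_nmi_window_bit(struct _VMCS_OBJECT *vmcs, BOOLEAN value)
{
    UINT64 ctrls = vmcs_read(vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS);

    if (value)
        BIT_SET64(ctrls, NMI_WINDOW_BIT);
    else
        BIT_CLR64(ctrls, NMI_WINDOW_BIT);

    vmcs_write(vmcs, VMCS_CONTROL_VECTOR_PROCESSOR_EVENTS, ctrls);
}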
Example #30
int __vmx_io_init(io_insn_t *io)
{
   vmcs_exit_info_io_t *vmx_io;

   vmcs_read(vm_exit_info.qualification);
   vmx_io = &vm_exit_info.qualification.io;

   io->in   = vmx_io->d;
   io->s    = vmx_io->s;
   io->sz   = vmx_io->sz+1;
   io->rep  = vmx_io->rep;
   io->port = vmx_io->port;

   /*
   ** XXX: to be removed, vmx does not provide bad info
   ** (maybe vmware ?)
   */
   if(io->sz != 1 && io->sz != 2 && io->sz != 4)
   {
      debug(VMX_IO, "invalid io size (%d)\n", io->sz);
      return VM_FAIL;
   }

   if(!io->s)
   {
      io->cnt = 1;
      return VM_DONE;
   }

   vmcs_read(vm_exit_info.guest_linear);

#ifdef CONFIG_VMX_FEAT_EXIT_EXT_IO
   vmcs_exit_info_insn_io_t *vmx_io_s;

   vmcs_read(vm_exit_info.insn_info);
   vmx_io_s = &vm_exit_info.insn_info.io;

   io->seg  = vmx_io_s->seg;
   io->addr = 1<<vmx_io_s->addr;
#else
   ud_t *insn = &info->vm.cpu.disasm;
   int   rc   = disassemble(insn);

   if(rc != VM_DONE)
      return rc;

   if(insn->dis_mode == 64)
   {
      if(insn->pfx_adr)
         io->addr = 2;
      else
         io->addr = 4;
   }
   else if(insn->dis_mode == 32)
   {
      if(insn->pfx_adr)
         io->addr = 1;
      else
         io->addr = 2;
   }
   else
   {
      if(insn->pfx_adr)
         io->addr = 2;
      else
         io->addr = 1;
   }

   if(insn->pfx_seg)
      io->seg = insn->pfx_seg - UD_R_ES;
   else if(io->in)
      io->seg = IO_S_PFX_ES;
   else
      io->seg = IO_S_PFX_DS;
#endif

   if(io->seg > 5)
   {
      debug(VMX_IO, "invalid io seg pfx %d\n", io->seg);
      return VM_FAIL;
   }

   io->back = vm_state.rflags.df;
   io->msk  = (-1ULL)>>(64 - 16*io->addr);
   io->cnt  = io->rep ? (info->vm.cpu.gpr->rcx.raw & io->msk) : 1;

   return VM_DONE;
}
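/*
** Worked illustration (not in the original) of the mask/count math
** above: io->addr counts 16-bit units of the insn's address size, so
** the REP count taken from rCX is truncated to that width:
**
**   io->addr == 1 (16-bit)  ->  msk = 0x000000000000ffff
**   io->addr == 2 (32-bit)  ->  msk = 0x00000000ffffffff
**   io->addr == 4 (64-bit)  ->  msk = 0xffffffffffffffff
*/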