Example 1
  void
bx_cpu_c::RETfar32(BxInstruction_t *i)
{
  Bit32u eip, ecs_raw;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_ret;
#endif

  invalidate_prefetch_q();

#if BX_CPU_LEVEL >= 2
  if ( protected_mode() ) {
    bx_cpu. return_protected(i, 0);
    goto done;
    }
#endif


    pop_32(&eip);
    pop_32(&ecs_raw); /* 32bit pop, MSW discarded */
    bx_cpu. eip = eip;
    load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], (Bit16u) ecs_raw);

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_RET,
                      bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
Example 2
File: vm8086.cpp Project: iver6/BA
void BX_CPU_C::iret32_stack_return_from_v86(bxInstruction_c *i)
{
  if (BX_CPU_THIS_PTR get_IOPL() < 3) {
    // trap to virtual 8086 monitor
    BX_DEBUG(("IRET in vm86 with IOPL != 3, VME = 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit32u eip, cs_raw, flags32;
  // Build a mask of the following bits:
  // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
  Bit32u change_mask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsIFMask
                         | EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;

#if BX_CPU_LEVEL >= 4
  change_mask |= (EFlagsIDMask | EFlagsACMask);  // ID/AC
#endif

  eip     = pop_32();
  cs_raw  = pop_32();
  flags32 = pop_32();

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], (Bit16u) cs_raw);
  EIP = eip;
  // VIF, VIP, VM, IOPL unchanged
  writeEFlags(flags32, change_mask);
}
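
The change_mask built above controls exactly which bits of the popped dword reach EFLAGS; VIF, VIP, VM and IOPL are deliberately left out on this path. A minimal sketch of that masked merge, assuming writeEFlags behaves this way (the helper name below is illustrative, not the Bochs implementation):

#include <cstdint>

// Masked EFLAGS update: bits set in change_mask are taken from the popped
// value, all other bits (VIF, VIP, VM, IOPL here) keep their current value.
static inline uint32_t merge_eflags(uint32_t current, uint32_t popped,
                                    uint32_t change_mask)
{
  return (current & ~change_mask) | (popped & change_mask);
}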
Example 3
  void
bx_cpu_c::RETnear32_Iw(BxInstruction_t *i)
{
  Bit16u imm16;
  Bit32u temp_ESP;
  Bit32u return_EIP;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_ret;
#endif

  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
    temp_ESP = ESP;
  else
    temp_ESP = SP;

  imm16 = i->Iw;

  invalidate_prefetch_q();


    if (protected_mode()) {
      if ( !can_pop(4) ) {
        BX_PANIC(("retnear_iw: can't pop EIP"));
        /* ??? #SS(0) -or #GP(0) */
        }

      access_linear(bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_ESP + 0,
        4, CPL==3, BX_READ, &return_EIP);

      if (protected_mode() &&
          (return_EIP > bx_cpu. sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) ) {
        BX_DEBUG(("retnear_iw: EIP > limit"));
        exception(BX_GP_EXCEPTION, 0, 0);
        }

      /* Pentium book says imm16 is number of words ??? */
      if ( !can_pop(4 + imm16) ) {
        BX_PANIC(("retnear_iw: can't release bytes from stack"));
        /* #GP(0) -or #SS(0) ??? */
        }

      bx_cpu. eip = return_EIP;
      if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
        ESP += 4 + imm16; /* ??? should it be 2*imm16 ? */
      else
        SP  += 4 + imm16;
      }
    else {
      pop_32(&return_EIP);
      bx_cpu. eip = return_EIP;
      if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
        ESP += imm16; /* ??? should it be 2*imm16 ? */
      else
        SP  += imm16;
      }

  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_RET, bx_cpu. eip);
}
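
The "2*imm16" question in the comments above resolves to bytes: per the Intel manuals, RET imm16 releases imm16 bytes of stack after popping the return address, so the "+ imm16" adjustments are correct. A small worked sketch under that reading (names are illustrative):

#include <cstdint>

// Callee-cleanup example: a function taking two dword arguments returns with
// "ret 8", which releases 8 bytes (two dwords), not 8 words.
static inline uint32_t esp_after_ret_imm(uint32_t esp, uint16_t imm16)
{
  esp += 4;       // pop the 4-byte return EIP (32-bit operand size)
  esp += imm16;   // then release imm16 *bytes* of caller-pushed arguments
  return esp;     // for "ret 8", ESP ends up 12 bytes above its starting value
}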
Example 4
void BX_CPP_AttrRegparmN(1) BX_CPU_C::POP32_DS(bxInstruction_c *i)
{
  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  Bit32u ds = pop_32();
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS], (Bit16u) ds);

  BX_CPU_THIS_PTR speculative_rsp = 0;
}
Example 5
  void
bx_cpu_c::IRET32(BxInstruction_t *i)
{
  Bit32u eip, ecs_raw, eflags;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_iret;
  bx_cpu. show_eip = bx_cpu. eip;
#endif

  invalidate_prefetch_q();

  if (v8086_mode()) {
    // IOPL check in stack_return_from_v86()
    stack_return_from_v86(i);
    goto done;
    }

#if BX_CPU_LEVEL >= 2
  if (bx_cpu. cr0.pe) {
    iret_protected(i);
    goto done;
    }
#endif

  BX_ERROR(("IRET32 called when you're not in vm8086 mode or protected mode."));
  BX_ERROR(("IRET32 may not be implemented right, since it doesn't check anything."));
  BX_PANIC(("Please report that you have found a test case for bx_cpu_c::IRET32."));

    pop_32(&eip);
    pop_32(&ecs_raw);
    pop_32(&eflags);

    load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], (Bit16u) ecs_raw);
    bx_cpu. eip = eip;
    //FIXME: this should do (eflags & 0x257FD5) | (EFLAGS & 0x1A0000)
    write_eflags(eflags, /* change IOPL? */ 1, /* change IF? */ 1, 0, 1);

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_IRET,
                      bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
Example 6
  void
bx_cpu_c::RETfar32_Iw(BxInstruction_t *i)
{
  Bit32u eip, ecs_raw;
  Bit16s imm16;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_ret;
#endif
  /* ??? is imm16, number of bytes/words depending on operandsize ? */

  imm16 = i->Iw;

  invalidate_prefetch_q();

#if BX_CPU_LEVEL >= 2
  if (protected_mode()) {
    bx_cpu. return_protected(i, imm16);
    goto done;
    }
#endif


    pop_32(&eip);
    pop_32(&ecs_raw);
    bx_cpu. eip = eip;
    load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], (Bit16u) ecs_raw);
    if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += imm16;
    else
      SP  += imm16;

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_RET,
                      bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
Example 7
void BX_CPP_AttrRegparmN(1) BX_CPU_C::POP_EdM(bxInstruction_c *i)
{
  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  Bit32u val32 = pop_32();

  // Note: there is one little weirdism here.  It is possible to use
  // ESP in the modrm addressing. If used, the value of ESP after the
  // pop is used to calculate the address.
  BX_CPU_CALL_METHODR (i->ResolveModrm, (i));

  write_virtual_dword(i->seg(), RMAddr(i), val32);

  BX_CPU_THIS_PTR speculative_rsp = 0;
}
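
The "weirdism" noted above is an operand-ordering rule: for POP m32 the stack pointer is incremented by the pop before the destination's effective address is computed, so an ESP-relative destination sees the updated ESP. A self-contained sketch of that ordering (all names are illustrative, not Bochs APIs):

#include <cstdint>

// Tiny model of "POP [ESP+disp]": the pop advances ESP first, and only then
// is the destination address computed from the new ESP.
struct PopModel {
  uint32_t esp = 0;
  uint32_t stack[64] = {};
  uint32_t mem[64] = {};

  uint32_t pop32() {
    uint32_t v = stack[(esp / 4) % 64];
    esp += 4;                     // stack pointer updated before addressing
    return v;
  }

  void pop_to_esp_relative(uint32_t disp) {
    uint32_t v = pop32();         // 1) value popped, ESP already incremented
    uint32_t addr = esp + disp;   // 2) [ESP+disp] uses the post-pop ESP
    mem[(addr / 4) % 64] = v;
  }
};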
Example 8
void BX_CPP_AttrRegparmN(1) BX_CPU_C::POP32_SS(bxInstruction_c *i)
{
  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  Bit32u ss = pop_32();
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], (Bit16u) ss);

  BX_CPU_THIS_PTR speculative_rsp = 0;

  // POP SS inhibits interrupts, debug exceptions and single-step
  // trap exceptions until the execution boundary following the
  // next instruction is reached.
  // Same code as MOV_SwEw()
  BX_CPU_THIS_PTR inhibit_mask |=
    BX_INHIBIT_INTERRUPTS | BX_INHIBIT_DEBUG;
  BX_CPU_THIS_PTR async_event = 1;
}
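
The inhibit mask set here is what lets a guest sequence such as POP SS immediately followed by POP ESP run without an interrupt landing between the two pops. A simplified sketch of how an execution loop can honor a one-instruction shadow (a model only, not Bochs' actual main loop):

// One-instruction interrupt shadow as created by POP SS / MOV SS.
// Names and structure are illustrative.
struct ShadowModel {
  bool inhibit_interrupts = false;   // set by the POP SS handler
  bool pending_interrupt  = false;

  void execute_one_instruction() { /* decode and run one guest instruction */ }

  void step() {
    // Interrupts are sampled at the instruction boundary; a shadow set by the
    // previous instruction suppresses delivery exactly once.
    if (pending_interrupt && !inhibit_interrupts) {
      pending_interrupt = false;     // deliver the interrupt here
    }
    inhibit_interrupts = false;      // the shadow expires after one boundary
    execute_one_instruction();       // POP SS / MOV SS may set it again
  }
};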
Example 9
  void
bx_cpu_c::RETnear32(BxInstruction_t *i)
{
  Bit32u temp_ESP;
  Bit32u return_EIP;

#if BX_DEBUGGER
  bx_cpu. show_flag |= Flag_ret;
#endif

  invalidate_prefetch_q();

  if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
    temp_ESP = ESP;
  else
    temp_ESP = SP;


    if (protected_mode()) {
      if ( !can_pop(4) ) {
        BX_PANIC(("retnear: can't pop EIP"));
        /* ??? #SS(0) -or #GP(0) */
        }

      access_linear(bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_ESP + 0,
        4, CPL==3, BX_READ, &return_EIP);

      if ( return_EIP > bx_cpu. sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
        BX_PANIC(("retnear: EIP > limit"));
        //exception(BX_GP_EXCEPTION, 0, 0);
        }
      bx_cpu. eip = return_EIP;
      if (bx_cpu. sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
        ESP += 4;
      else
        SP  += 4;
      }
    else {
      pop_32(&return_EIP);
      bx_cpu. eip = return_EIP;
      }

  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_RET, bx_cpu. eip);
}
Example 10
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LEAVE(bxInstruction_c *i)
{
  BX_CPU_THIS_PTR speculative_rsp = 1;
  BX_CPU_THIS_PTR prev_rsp = RSP;

  // delete frame
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    ESP = EBP;
  else
     SP = BP;

  // restore frame pointer
  if (i->os32L())
    EBP = pop_32();
  else
    BP = pop_16();

  BX_CPU_THIS_PTR speculative_rsp = 0;
}
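
LEAVE above is the two-step frame teardown equivalent to MOV (E)SP, (E)BP followed by POP (E)BP. A minimal model of the 32-bit case (illustrative names, not Bochs code):

#include <cstdint>

// 32-bit LEAVE: discard the current frame, then restore the caller's EBP.
struct FrameModel {
  uint32_t esp = 0, ebp = 0;
  uint32_t stack[64] = {};

  uint32_t pop32() {
    uint32_t v = stack[(esp / 4) % 64];
    esp += 4;
    return v;
  }

  void leave() {
    esp = ebp;       // MOV ESP, EBP: drop everything pushed after the prologue
    ebp = pop32();   // POP EBP: restore the caller's saved frame pointer
  }
};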
Example 11
void BX_CPP_AttrRegparmN(1) BX_CPU_C::POP_ERX(bxInstruction_c *i)
{
  BX_WRITE_32BIT_REGZ(i->opcodeReg(), pop_32());
}