Example #1
  void
BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset <= seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_RW);
      pl = (CPL==3);
#if BX_SupportGuest2HostTLB
      Bit8u *hostAddr = v2h_write_byte(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        *data = *hostAddr;
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        return;
      }
#endif
      // The accelerated attempt missed; fall through to the long path
      // and do it the old-fashioned way...
      access_linear(laddr, 1, pl, BX_RW, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
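
This function is the read half of a read-modify-write pair: it probes the page with write intent and caches the translated host pointer in address_xlation.pages so that the matching write_RMW_virtual_byte() can commit the modified value without translating again. Below is a minimal sketch of that pattern, not Bochs code: the names (RmwContext, rmw_read_byte, rmw_commit_byte) are hypothetical and a flat array stands in for translated guest memory.

#include <cstdint>

struct RmwContext {
  uint8_t *host_ptr = nullptr;  // host address cached by the read probe
};

static uint8_t guest_ram[0x10000]; // stand-in for translated guest pages

// Read half: validate write access, return the current value, and
// remember where the later commit must go.
uint8_t rmw_read_byte(RmwContext &ctx, uint32_t laddr) {
  ctx.host_ptr = &guest_ram[laddr & 0xFFFFu]; // placeholder "translation"
  return *ctx.host_ptr;
}

// Write half: commit through the pointer cached by the read half.
void rmw_commit_byte(RmwContext &ctx, uint8_t val) {
  *ctx.host_ptr = val;
}

int main() {
  RmwContext ctx;
  uint8_t v = rmw_read_byte(ctx, 0x1234); // e.g. emulating INC BYTE PTR [mem]
  rmw_commit_byte(ctx, v + 1);            // commit goes to the same host byte
  return 0;
}
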
Example #2
  void
BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessROK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset <= seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_READ);
      pl = (CPL==3);
#if BX_SupportGuest2HostTLB
      Bit8u *hostAddr = v2h_read_byte(laddr, pl);
      if (hostAddr) {
        *data = *hostAddr;
        return;
      }
#endif
      access_linear(laddr, 1, pl, BX_READ, (void *) data);
      return;
    }
  }
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
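
Both fast paths above rely on the guest-to-host TLB (the BX_SupportGuest2HostTLB blocks): if the guest linear page has already been translated and grants the needed access, a cached host pointer is used directly and the full access_linear() walk is skipped. The following is a conceptual sketch of such a lookup; the structures are illustrative, not the real Bochs internals.

#include <cstdint>

constexpr unsigned kPageShift  = 12;    // 4 KiB guest pages
constexpr unsigned kTlbEntries = 1024;  // direct-mapped, power of two

struct TlbEntry {
  uint64_t guest_page = ~0ull;   // guest linear page number (invalid tag)
  uint8_t *host_page  = nullptr; // host address of that page when mapped
};

static TlbEntry tlb[kTlbEntries];

// Return a host pointer for a one-byte read, or nullptr when the access
// must take the slow path (miss, protection mismatch, MMIO region, ...).
uint8_t *v2h_read_byte_sketch(uint64_t laddr) {
  uint64_t page = laddr >> kPageShift;
  TlbEntry &e = tlb[page & (kTlbEntries - 1)];
  if (e.guest_page != page)
    return nullptr;  // miss: caller falls back to access_linear()
  return e.host_page + (laddr & ((1u << kPageShift) - 1));
}

int main() {
  // With an empty TLB every lookup misses, modeling the fall-through
  // to the slow path in the handlers above.
  return v2h_read_byte_sketch(0x1234) == nullptr ? 0 : 1;
}
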
Example #3
  void
bx_cpu_c::RETnear32_Iw(BxInstruction_t *i)
{
  Bit16u imm16;
  Bit32u temp_ESP;
  Bit32u return_EIP;

#if BX_DEBUGGER
  bx_cpu.show_flag |= Flag_ret;
#endif

  if (bx_cpu.sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
    temp_ESP = ESP;
  else
    temp_ESP = SP;

  imm16 = i->Iw;

  invalidate_prefetch_q();


  if (protected_mode()) {
    if ( !can_pop(4) ) {
      BX_PANIC(("retnear_iw: can't pop EIP"));
      /* ??? #SS(0) -or #GP(0) */
    }

    access_linear(bx_cpu.sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_ESP + 0,
      4, CPL==3, BX_READ, &return_EIP);

    if (return_EIP > bx_cpu.sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_DEBUG(("retnear_iw: EIP > limit"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }

    /* imm16 is a byte count: RET imm16 releases imm16 bytes, not words */
    if ( !can_pop(4 + imm16) ) {
      BX_PANIC(("retnear_iw: can't release bytes from stack"));
      /* #GP(0) -or #SS(0) ??? */
    }

    bx_cpu.eip = return_EIP;
    if (bx_cpu.sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
      ESP += 4 + imm16;
    else
      SP  += 4 + imm16;
  }
  else {
    pop_32(&return_EIP);
    bx_cpu.eip = return_EIP;
    if (bx_cpu.sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
      ESP += imm16;
    else
      SP  += imm16;
  }

  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_RET, bx_cpu.eip);
}
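
A note on the imm16 adjustment above: RET imm16 releases imm16 bytes (not words) after popping the return address, so adjusting by 4 + imm16 in the protected-mode branch is correct as written. For example, a routine with two dword arguments returns via RET 8: popping EIP moves ESP by 4 and the immediate releases the 8 bytes of arguments, a net ESP += 12.
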
Example #4
  void
bx_cpu_c::RETnear32(BxInstruction_t *i)
{
  Bit32u temp_ESP;
  Bit32u return_EIP;

#if BX_DEBUGGER
  bx_cpu.show_flag |= Flag_ret;
#endif

  invalidate_prefetch_q();

  if (bx_cpu.sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
    temp_ESP = ESP;
  else
    temp_ESP = SP;


  if (protected_mode()) {
    if ( !can_pop(4) ) {
      BX_PANIC(("retnear: can't pop EIP"));
      /* ??? #SS(0) -or #GP(0) */
    }

    access_linear(bx_cpu.sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_ESP + 0,
      4, CPL==3, BX_READ, &return_EIP);

    if ( return_EIP > bx_cpu.sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
      BX_PANIC(("retnear: EIP > limit"));
      //exception(BX_GP_EXCEPTION, 0, 0);
    }
    bx_cpu.eip = return_EIP;
    if (bx_cpu.sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
      ESP += 4;
    else
      SP  += 4;
  }
  else {
    pop_32(&return_EIP);
    bx_cpu.eip = return_EIP;
  }

  BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_RET, bx_cpu.eip);
}
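
RETnear32 is the no-immediate form of the previous instruction: it pops only the 4-byte return address, so the protected-mode branch adjusts the stack pointer by exactly 4, and in the real-mode branch pop_32() performs the whole adjustment itself.
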
Example #5
  void
BX_CPU_C::read_RMW_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset <= (seg->cache.u.segment.limit_scaled-7))) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_RW);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 7) {
          BX_ERROR(("read_RMW_virtual_qword(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit64u *hostAddr = v2h_write_qword(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        ReadHostQWordFromLittleEndian(hostAddr, *data);
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        return;
      }
#endif
      access_linear(laddr, 8, pl, BX_RW, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 8);
  goto accessOK;
}
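
The #AC test here (and in the examples that follow) always has the same shape: when alignment checking is active for a CPL 3 access, an N-byte access faults if the linear address is not a multiple of N, which for the power-of-two sizes involved reduces to masking with N-1. A tiny self-contained illustration (plain C++, not emulator code):

#include <cstdint>
#include <cstdio>

// An n-byte access is misaligned when laddr is not a multiple of n.
// n must be a power of two (2, 4, 8, ...), as in the handlers above.
bool misaligned(uint64_t laddr, unsigned n) {
  return (laddr & (n - 1)) != 0;
}

int main() {
  printf("%d\n", misaligned(0x1001, 2)); // 1: word at an odd address -> #AC
  printf("%d\n", misaligned(0x1004, 8)); // 1: qword not 8-aligned    -> #AC
  printf("%d\n", misaligned(0x1008, 8)); // 0: qword on an 8-byte boundary
  return 0;
}
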
Example #6
  void
BX_CPU_C::write_virtual_dword(unsigned s, bx_address offset, Bit32u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset < (seg->cache.u.segment.limit_scaled-2))) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_WRITE);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 3) {
          BX_ERROR(("write_virtual_dword(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit32u *hostAddr = v2h_write_dword(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        WriteHostDWordToLittleEndian(hostAddr, *data);
        return;
      }
#endif
      access_linear(laddr, 4, pl, BX_WRITE, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
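
The limit test above is byte-exact: a dword at offset needs offset + 3 <= limit_scaled, i.e. offset <= limit_scaled - 3, which the code expresses as offset < limit_scaled - 2 (presuming limit_scaled >= 3; for smaller limits the unsigned subtraction wraps). The qword handler in Example #5 applies the same reasoning with offset <= limit_scaled - 7, and the word reader in Example #7 with offset < limit_scaled.
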
Example #7
  void
BX_CPU_C::read_virtual_word(unsigned s, bx_address offset, Bit16u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessROK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset < seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_READ);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 1) {
          BX_ERROR(("read_virtual_word(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit16u *hostAddr = v2h_read_word(laddr, pl);
      if (hostAddr) {
        ReadHostWordFromLittleEndian(hostAddr, *data);
        return;
      }
#endif
      access_linear(laddr, 2, pl, BX_READ, (void *) data);
      return;
    }
  }
  read_virtual_checks(seg, offset, 2);
  goto accessOK;
}