Example #1
Bit64u* BX_CPU_C::v2h_write_qword(bx_address laddr, unsigned pl)
{
  Bit32u pageOffset = laddr & 0xfff;
  if (pageOffset <= 0xff8) { // Make sure access does not span 2 pages.
    Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == BX_TLB_LPF_VALUE(lpf))
    {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      Bit32u accessBits = tlbEntry->accessBits;
      if (accessBits & (0x04 << pl)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        return hostAddr;
      }
    }
  }

  return 0;
}
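The v2h_* helpers in these examples share one contract: on a TLB hit whose accessBits permit the access at the given privilege level, they return a pointer directly into the host page backing the guest linear address; otherwise they return 0 and the caller must fall back to the full translation path. The sketch below shows how such a caller might look. It is hypothetical (not taken from the Bochs sources) and assumes v2h_write_qword and access_write_linear are reachable from the call site:

// Hypothetical caller (illustration only, not from the Bochs sources):
// try the accelerated guest-to-host path first, and fall back to the
// full linear-address translation on a TLB miss, permission failure,
// or page-spanning access (v2h_write_qword rejects all three).
void BX_CPU_C::write_qword_accelerated(bx_address laddr, unsigned pl, Bit64u data)
{
  Bit64u *hostAddr = v2h_write_qword(laddr, pl);
  if (hostAddr) {
    WriteHostQWordToLittleEndian(hostAddr, data); // direct host store
  }
  else {
    access_write_linear(laddr, 8, pl, (void *) &data); // slow path
  }
}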
Example #2
Bit32u BX_CPU_C::read_RMW_virtual_dword(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
    bx_address lpf = AlignedAccessLPFOf(laddr, 3);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        ReadHostDWordFromLittleEndian(hostAddr, data);
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_dword(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    if (BX_CPU_THIS_PTR alignment_check()) {
      if (laddr & 3) {
        BX_ERROR(("read_RMW_virtual_dword(): #AC misaligned access"));
        exception(BX_AC_EXCEPTION, 0, 0);
      }
    }
#endif
    access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset < (seg->cache.u.segment.limit_scaled-2)))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
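Note how the accelerated RMW read stashes the host pointer in address_xlation.pages before returning: the write half of the read-modify-write pair can then commit straight to host memory without repeating the translation. The sketch below illustrates that commit step. It is a hypothetical simplification (the real counterpart is write_RMW_virtual_dword in the Bochs sources), and the encoding of the pages field as "values above 2 are a cached host pointer" is an assumption here:

// Hypothetical sketch of the RMW commit half (simplified illustration,
// not a verbatim copy of write_RMW_virtual_dword). Assumption: values
// of address_xlation.pages above 2 are the host pointer cached by the
// read half above.
void BX_CPU_C::commit_RMW_dword(Bit32u val32)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Direct host commit through the pointer cached by the read half.
    Bit32u *hostAddr = (Bit32u*) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostDWordToLittleEndian(hostAddr, val32);
  }
  else {
    // Otherwise write via the physical page(s) recorded during the
    // read (one page, or two when the access was split).
    // ... physical write path elided ...
  }
}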
Example #3
Bit8u BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        data = *hostAddr;
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
    // The accelerated attempt fell through to the long path; do it
    // the old-fashioned way...
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Example #4
// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, bx_address offset, unsigned curr_pl, Bit32u data)
{
  Bit32u laddr;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = (Bit32u)(seg->cache.u.segment.base + offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
    bx_address lpf = AlignedAccessLPFOf(laddr, 3);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        WriteHostDWordToLittleEndian(hostAddr, data);
        return;
      }
    }
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    if (BX_CPU_THIS_PTR alignment_check() && curr_pl == 3) {
      if (laddr & 3) {
        BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
        exception(BX_AC_EXCEPTION, 0, 0);
      }
    }
#endif
    access_write_linear(laddr, 4, curr_pl, (void *) &data);
    return;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
Example #5
Bit64u BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_RMW_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
  return data;
}
Example #6
Bit8u BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  if (seg->cache.valid & SegAccessROK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us read access
      // from this CPL.
      if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
        data = *hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessROK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Example #7
Bit8u* BX_CPU_C::v2h_read_byte(bx_address laddr, unsigned curr_pl)
{
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    if (tlbEntry->accessBits & (1<<curr_pl)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      return hostAddr;
    }
  }

  return 0;
}
Example #8
Bit8u* BX_CPU_C::v2h_read_byte(bx_address laddr, unsigned pl)
{
  Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == BX_TLB_LPF_VALUE(lpf)) {
    // See if the TLB entry privilege level allows us read access
    // from this CPL.
    Bit32u accessBits = tlbEntry->accessBits;
    if (accessBits & (1<<pl)) { // Read this pl OK.
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = laddr & 0xfff;
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      return hostAddr;
    }
  }

  return 0;
}
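Like the write-side helper in Example #1, v2h_read_byte returns either a host pointer or 0. A read-side caller sketch, mirroring the earlier write-side one, is shown below; it is hypothetical (not from the Bochs sources) and assumes the members are reachable from the call site:

// Hypothetical caller (illustration only): fetch one guest byte through
// the accelerated path when possible, otherwise via full translation.
Bit8u BX_CPU_C::read_byte_accelerated(bx_address laddr, unsigned pl)
{
  Bit8u data;
  Bit8u *hostAddr = v2h_read_byte(laddr, pl);
  if (hostAddr) {
    data = *hostAddr;  // direct host load; no byte swap needed for a byte
  }
  else {
    access_read_linear(laddr, 1, pl, BX_READ, (void *) &data); // slow path
  }
  return data;
}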
Example #9
void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u data)
{
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_new_stack_qword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && curr_pl == 3) {
    if (laddr & 7) {
      BX_ERROR(("write_new_stack_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 8, curr_pl, (void *) &data);
}
Example #10
Bit8u* BX_CPU_C::v2h_write_byte(bx_address laddr, unsigned curr_pl)
{
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf)
  {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << curr_pl)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      return hostAddr;
    }
  }

  return 0;
}
Example #11
void BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      *hostAddr = data;
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_virtual_byte_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

  access_write_linear(laddr, 1, CPL, (void *) &data);
}