Example #1
File: avx.cpp Project: iver6/BA
/* Opcode: VEX.66.0F.38 2F (VEX.W=0) */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMASKMOVPD_MpdHpdVpd(bxInstruction_c *i)
{
  BxPackedAvxRegister mask = BX_READ_AVX_REG(i->vvv()), op = BX_READ_AVX_REG(i->nnn());
  unsigned len = i->getVL();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
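    // Pre-scan all enabled elements: a canonical-address violation must fault
    // before any element of the masked store touches memory.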
    for (unsigned n=0; n < (2*len); n++) {
       if (mask.avx32u(n*2+1) & 0x80000000) {
          if (! IsCanonical(get_laddr64(i->seg(), eaddr + 8*n)))
             exception(int_number(i->seg()), 0);
       }
    }
  }
#endif

  // see if you can successfully write all the elements first
  for (int n=2*len-1; n >= 0; n--) {
    if (mask.avx32u(2*n+1) & 0x80000000)
       read_RMW_virtual_qword(i->seg(), (eaddr + 8*n) & i->asize_mask());
  }

  for (unsigned n=0; n < (2*len); n++) {
    if (mask.avx32u(2*n+1) & 0x80000000)
       write_virtual_qword(i->seg(), (eaddr + 8*n) & i->asize_mask(), op.avx64u(n));
  }

  BX_NEXT_INSTR(i);
}
Example #2
BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset <= seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_RW);
      pl = (CPL==3);
#if BX_SupportGuest2HostTLB
      Bit8u *hostAddr = v2h_write_byte(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        *data = *hostAddr;
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
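        // Stash the host pointer so the matching write_RMW_virtual_byte() can
        // complete the read-modify-write directly, without translating again.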
        return;
      }
#endif
      // Accelerated attempt falls through to long path.  Do it the
      // old fashioned way...
      access_linear(laddr, 1, pl, BX_RW, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Example #3
BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessROK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset <= seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_READ);
      pl = (CPL==3);
#if BX_SupportGuest2HostTLB
      Bit8u *hostAddr = v2h_read_byte(laddr, pl);
      if (hostAddr) {
        *data = *hostAddr;
        return;
      }
#endif
      access_linear(laddr, 1, pl, BX_READ, (void *) data);
      return;
    }
  }
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Example #4
File: avx.cpp Project: iver6/BA
/* Opcode: VEX.66.0F.38 2D (VEX.W=0) */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMASKMOVPD_VpdHpdMpd(bxInstruction_c *i)
{
  BxPackedAvxRegister mask = BX_READ_AVX_REG(i->vvv()), result;
  unsigned len = i->getVL();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
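    // Pre-scan all enabled elements: a canonical-address violation must fault
    // before any element of the masked load is read.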
    for (unsigned n=0; n < (2*len); n++) {
       if (mask.avx32u(n*2+1) & 0x80000000) {
          if (! IsCanonical(get_laddr64(i->seg(), eaddr + 8*n)))
             exception(int_number(i->seg()), 0);
       }
    }
  }
#endif

  for (int n=2*len-1; n >= 0; n--) {
    if (mask.avx32u(n*2+1) & 0x80000000)
       result.avx64u(n) = read_virtual_qword(i->seg(), (eaddr + 8*n) & i->asize_mask());
    else
       result.avx64u(n) = 0;
  }

  BX_WRITE_AVX_REGZ(i->nnn(), result, len);

  BX_NEXT_INSTR(i);
}
Example #5
void BX_CPU_C::branch_far64(bx_selector_t *selector, 
           bx_descriptor_t *descriptor, bx_address rip, Bit8u cpl)
{
#if BX_SUPPORT_X86_64
  if (descriptor->u.segment.l)
  {
    if (! IsCanonical(rip)) {
      BX_ERROR(("branch_far: canonical RIP violation"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
  }
  else
#endif
  {
    /* instruction pointer must be in code segment limit else #GP(0) */
    if (rip > descriptor->u.segment.limit_scaled) {
      BX_ERROR(("branch_far: RIP > limit"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
  }

  /* Load CS:IP from destination pointer */
  /* Load CS-cache with new segment descriptor */
  load_cs(selector, descriptor, cpl);

  /* Change the RIP value */
  RIP = rip;
}
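Note: the IsCanonical() test used by branch_far64 and throughout the memory accessors above checks that a linear address is a proper sign extension of the implemented address width. A minimal standalone sketch, assuming 48-bit linear addresses (the helper name and <cstdint> types are illustrative, not the emulator's own definitions):

#include <cstdint>

// An address is canonical when bits 63:47 are all zeros or all ones,
// i.e. the upper 17 bits are a sign extension of bit 47.
static inline bool is_canonical_48(std::uint64_t laddr)
{
  std::uint64_t top = laddr >> 47;
  return top == 0 || top == 0x1FFFF;
}

A non-canonical branch target raises #GP(0), while non-canonical data addresses raise the exception of the segment involved (#SS for stack references, #GP otherwise), as the surrounding examples show.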
Example #6
BX_CPU_C::read_RMW_virtual_dword(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
    bx_address lpf = AlignedAccessLPFOf(laddr, 3);
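    // Folding the low address bits into the compared page frame sends any
    // dword that is not 4-byte aligned to the slow path below, where the
    // #AC check and page-split handling live.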
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        ReadHostDWordFromLittleEndian(hostAddr, data);
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_dword(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    if (BX_CPU_THIS_PTR alignment_check()) {
      if (laddr & 3) {
        BX_ERROR(("read_RMW_virtual_dword(): #AC misaligned access"));
        exception(BX_AC_EXCEPTION, 0, 0);
      }
    }
#endif
    access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset < (seg->cache.u.segment.limit_scaled-2)))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
Example #7
BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        data = *hostAddr;
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
    // Accelerated attempt falls through to long path.  Do it the
    // old fashioned way...
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Example #8
BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_RMW_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
  return data;
}
Example #9
BX_CPU_C::branch_near64(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

  if (! i->os32L()) {
    new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
  }
  else {
    if (! IsCanonical(new_RIP)) {
      BX_ERROR(("branch_near64: canonical RIP violation"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
  }

  RIP = new_RIP;
}
Example #10
BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  if (seg->cache.valid & SegAccessROK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us read access
      // from this CPL.
      if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
        data = *hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessROK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Example #11
void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u data)
{
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_new_stack_qword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && curr_pl == 3) {
    if (laddr & 7) {
      BX_ERROR(("write_new_stack_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_write_linear(laddr, 8, curr_pl, (void *) &data);
}
Example #12
BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      *hostAddr = data;
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_virtual_byte_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

  access_write_linear(laddr, 1, CPL, (void *) &data);
}
Example #13
BX_CPU_C::read_RMW_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset <= (seg->cache.u.segment.limit_scaled-7))) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_RW);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 7) {
          BX_ERROR(("read_RMW_virtual_qword(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit64u *hostAddr = v2h_write_qword(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        ReadHostQWordFromLittleEndian(hostAddr, *data);
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        return;
      }
#endif
      access_linear(laddr, 8, pl, BX_RW, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 8);
  goto accessOK;
}
Example #14
BX_CPU_C::write_virtual_dword(unsigned s, bx_address offset, Bit32u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset < (seg->cache.u.segment.limit_scaled-2))) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_WRITE);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 3) {
          BX_ERROR(("write_virtual_dword(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit32u *hostAddr = v2h_write_dword(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        WriteHostDWordToLittleEndian(hostAddr, *data);
        return;
      }
#endif
      access_linear(laddr, 4, pl, BX_WRITE, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
Example #15
BX_CPU_C::read_virtual_word(unsigned s, bx_address offset, Bit16u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessROK) {
    if ((Is64BitMode() && IsCanonical(offset))
     || (offset < seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_READ);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 1) {
          BX_ERROR(("read_virtual_word(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit16u *hostAddr = v2h_read_word(laddr, pl);
      if (hostAddr) {
        ReadHostWordFromLittleEndian(hostAddr, *data);
        return;
      }
#endif
      access_linear(laddr, 2, pl, BX_READ, (void *) data);
      return;
    }
  }
  read_virtual_checks(seg, offset, 2);
  goto accessOK;
}
Example #16
BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
                               unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // do canonical checks
    if (!IsCanonical(offset)) {
      BX_ERROR(("write_virtual_checks(): canonical Failure 0x%08x:%08x", GET32H(offset), GET32L(offset)));
      exception(int_number(seg), 0, 0);
    }
    seg->cache.valid |= SegAccessWOK;
    return;
  }
#endif
  if (protected_mode()) {
    if (seg->cache.valid==0) {
      BX_DEBUG(("write_virtual_checks(): segment descriptor not valid"));
      exception(int_number(seg), 0, 0);
    }

    if (seg->cache.p == 0) { /* not present */
      BX_ERROR(("write_virtual_checks(): segment not present"));
      exception(int_number(seg), 0, 0);
    }

    switch (seg->cache.type) {
      case 0: case 1:   // read only
      case 4: case 5:   // read only, expand down
      case 8: case 9:   // execute only
      case 10: case 11: // execute/read
      case 12: case 13: // execute only, conforming
      case 14: case 15: // execute/read-only, conforming
        BX_ERROR(("write_virtual_checks(): no write access to seg"));
        exception(int_number(seg), 0, 0);

      case 2: case 3: /* read/write */
        if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
            || (length-1 > seg->cache.u.segment.limit_scaled))
        {
          BX_ERROR(("write_virtual_checks(): write beyond limit, r/w"));
          exception(int_number(seg), 0, 0);
        }
        if (seg->cache.u.segment.limit_scaled >= 7) {
          // Mark the cache as write-OK for succeeding writes.  The limit
          // check still needs to be done, but in its simpler form.  We
          // could probably also optimize that out with a flag for the case
          // when the limit is the maximum 32-bit value.  The limit should
          // accommodate at least a dword, since we subtract from it in the
          // simple limit check in other functions and don't want the value
          // to wrap.  Only normal (not expand-down) segments are handled
          // this way.
          seg->cache.valid |= SegAccessWOK;
        }
        break;

      case 6: case 7: /* read/write, expand down */
        if (seg->cache.u.segment.d_b)
          upper_limit = 0xffffffff;
        else
          upper_limit = 0x0000ffff;
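        // Expand-down: valid offsets lie strictly above the limit and the
        // access must fit below upper_limit, i.e.
        // limit_scaled < offset && offset + length - 1 <= upper_limit.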
        if ((offset <= seg->cache.u.segment.limit_scaled) ||
             (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
        {
          BX_ERROR(("write_virtual_checks(): write beyond limit, r/w ED"));
          exception(int_number(seg), 0, 0);
        }
        break;
    }

    return;
  }
  else { /* real mode */
    if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
    {
      BX_DEBUG(("write_virtual_checks(): write beyond limit (real mode)"));
      exception(int_number(seg), 0, 0);
    }
    if (seg->cache.u.segment.limit_scaled >= 7) {
      // Mark the cache as write-OK for succeeding writes.  See notes above.
      seg->cache.valid |= SegAccessWOK;
    }
  }
}
Example #17
BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
                              unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // do canonical checks
    if (!IsCanonical(offset)) {
      BX_ERROR(("read_virtual_checks(): canonical Failure 0x%08x:%08x", GET32H(offset), GET32L(offset)));
      exception(int_number(seg), 0, 0);
    }
    seg->cache.valid |= SegAccessROK;
    return;
  }
#endif
  if (protected_mode()) {
    if (seg->cache.valid==0) {
      BX_DEBUG(("read_virtual_checks(): segment descriptor not valid"));
      exception(int_number(seg), 0, 0);
    }

    if (seg->cache.p == 0) { /* not present */
      BX_ERROR(("read_virtual_checks(): segment not present"));
      exception(int_number(seg), 0, 0);
    }

    switch (seg->cache.type) {
      case 0: case 1: /* read only */
      case 2: case 3: /* read/write */
      case 10: case 11: /* execute/read */
      case 14: case 15: /* execute/read-only, conforming */
        if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
            || (length-1 > seg->cache.u.segment.limit_scaled))
        {
          BX_ERROR(("read_virtual_checks(): read beyond limit"));
          exception(int_number(seg), 0, 0);
        }
        if (seg->cache.u.segment.limit_scaled >= 7) {
          // Mark the cache as read-OK for succeeding reads.  See the notes in
          // the write checks; the code is similar.
          seg->cache.valid |= SegAccessROK;
        }
        break;

      case 4: case 5: /* read only, expand down */
      case 6: case 7: /* read/write, expand down */
        if (seg->cache.u.segment.d_b)
          upper_limit = 0xffffffff;
        else
          upper_limit = 0x0000ffff;
        if ((offset <= seg->cache.u.segment.limit_scaled) ||
             (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
        {
          BX_ERROR(("read_virtual_checks(): read beyond limit"));
          exception(int_number(seg), 0, 0);
        }
        break;

      case 8: case 9: /* execute only */
      case 12: case 13: /* execute only, conforming */
        /* can't read or write an execute-only segment */
        BX_ERROR(("read_virtual_checks(): execute only"));
        exception(int_number(seg), 0, 0);
    }
    return;
  }
  else { /* real mode */
    if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
        || (length-1 > seg->cache.u.segment.limit_scaled))
    {
      BX_DEBUG(("read_virtual_checks(): read beyond limit (real mode)"));
      exception(int_number(seg), 0, 0);
    }
    if (seg->cache.u.segment.limit_scaled >= 7) {
      // Mark the cache as read-OK for succeeding reads.  See the notes in
      // the write checks; the code is similar.
      seg->cache.valid |= SegAccessROK;
    }
  }
}
Example #18
void BX_CPU_C::long_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code, Bit16u error_code)
{
  // long mode interrupt
  Bit64u tmp1, tmp2;

  bx_descriptor_t gate_descriptor, cs_descriptor;
  bx_selector_t cs_selector;

  // interrupt vector must be within IDT table limits,
  // else #GP(vector number*16 + 2 + EXT)
  if ((vector*16 + 15) > BX_CPU_THIS_PTR idtr.limit) {
    BX_ERROR(("interrupt(long mode): vector must be within IDT table limits, IDT.limit = 0x%x", BX_CPU_THIS_PTR idtr.limit));
    exception(BX_GP_EXCEPTION, vector*16 + 2, 0);
  }

  access_read_linear(BX_CPU_THIS_PTR idtr.base + vector*16,     8, 0, BX_READ, &tmp1);
  access_read_linear(BX_CPU_THIS_PTR idtr.base + vector*16 + 8, 8, 0, BX_READ, &tmp2);

  if (tmp2 & BX_CONST64(0x00001F0000000000)) {
    BX_ERROR(("interrupt(long mode): IDT entry extended attributes DWORD4 TYPE != 0"));
    exception(BX_GP_EXCEPTION, vector*16 + 2, 0);
  }

  Bit32u dword1 = GET32L(tmp1);
  Bit32u dword2 = GET32H(tmp1);
  Bit32u dword3 = GET32L(tmp2);

  parse_descriptor(dword1, dword2, &gate_descriptor);

  if ((gate_descriptor.valid==0) || gate_descriptor.segment)
  {
    BX_ERROR(("interrupt(long mode): gate descriptor is not valid sys seg"));
    exception(BX_GP_EXCEPTION, vector*16 + 2, 0);
  }

  // descriptor AR byte must indicate interrupt gate, trap gate,
  // or task gate, else #GP(vector*16 + 2 + EXT)
  if (gate_descriptor.type != BX_386_INTERRUPT_GATE &&
      gate_descriptor.type != BX_386_TRAP_GATE)
  {
    BX_ERROR(("interrupt(long mode): unsupported gate type %u",
        (unsigned) gate_descriptor.type));
    exception(BX_GP_EXCEPTION, vector*16 + 2, 0);
  }

  // if software interrupt, then gate descriptor DPL must be >= CPL,
  // else #GP(vector * 16 + 2 + EXT)
  if (is_INT && (gate_descriptor.dpl < CPL))
  {
    BX_ERROR(("interrupt(long mode): is_INT && (dpl < CPL)"));
    exception(BX_GP_EXCEPTION, vector*16 + 2, 0);
  }

  // Gate must be present, else #NP(vector * 16 + 2 + EXT)
  if (! IS_PRESENT(gate_descriptor)) {
    BX_ERROR(("interrupt(long mode): p == 0"));
    exception(BX_NP_EXCEPTION, vector*16 + 2, 0);
  }

  Bit16u gate_dest_selector = gate_descriptor.u.gate.dest_selector;
  Bit64u gate_dest_offset   = ((Bit64u)dword3 << 32) |
                       gate_descriptor.u.gate.dest_offset;

  unsigned ist = gate_descriptor.u.gate.param_count & 0x7;

  // examine CS selector and descriptor given in gate descriptor
  // selector must be non-null else #GP(EXT)
  if ((gate_dest_selector & 0xfffc) == 0) {
    BX_ERROR(("int_trap_gate(long mode): selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  parse_selector(gate_dest_selector, &cs_selector);

  // selector must be within its descriptor table limits
  // else #GP(selector+EXT)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // descriptor AR byte must indicate code seg
  // and code segment descriptor DPL<=CPL, else #GP(selector+EXT)
  if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
      IS_DATA_SEGMENT(cs_descriptor.type) ||
      cs_descriptor.dpl>CPL)
  {
    BX_ERROR(("interrupt(long mode): not accessable or not code segment"));
    exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc, 0);
  }

  // check that it's a 64 bit segment
  if (! IS_LONG64_SEGMENT(cs_descriptor) || cs_descriptor.u.segment.d_b)
  {
    BX_ERROR(("interrupt(long mode): must be 64 bit segment"));
    exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc, 0);
  }

  // segment must be present, else #NP(selector + EXT)
  if (! IS_PRESENT(cs_descriptor)) {
    BX_ERROR(("interrupt(long mode): segment not present"));
    exception(BX_NP_EXCEPTION, cs_selector.value & 0xfffc, 0);
  }

  // if code segment is non-conforming and DPL < CPL then
  // INTERRUPT TO INNER PRIVILEGE:
  if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && cs_descriptor.dpl<CPL)
  {
    Bit64u RSP_for_cpl_x;

    BX_DEBUG(("interrupt(long mode): INTERRUPT TO INNER PRIVILEGE"));

    // check selector and descriptor for new stack in current TSS
    if (ist != 0) {
      BX_DEBUG(("interrupt(long mode): trap to IST, vector = %d", ist));
      get_RSP_from_TSS(ist+3, &RSP_for_cpl_x);
    }
    else {
      get_RSP_from_TSS(cs_descriptor.dpl, &RSP_for_cpl_x);
    }

    RSP_for_cpl_x &= BX_CONST64(0xfffffffffffffff0);

    Bit64u old_CS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
    Bit64u old_RIP = RIP;
    Bit64u old_SS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
    Bit64u old_RSP = RSP;

    if (! IsCanonical(RSP_for_cpl_x)) {
      // #SS(selector) when changing privilege level
      BX_ERROR(("interrupt(long mode): canonical address failure %08x%08x",
         GET32H(RSP_for_cpl_x), GET32L(RSP_for_cpl_x)));
      exception(BX_SS_EXCEPTION, old_SS & 0xfffc, 0);
    }

    // push old stack long pointer onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x -  8, cs_descriptor.dpl, old_SS);
    write_new_stack_qword_64(RSP_for_cpl_x - 16, cs_descriptor.dpl, old_RSP);
    write_new_stack_qword_64(RSP_for_cpl_x - 24, cs_descriptor.dpl, read_eflags());
    // push long pointer to return address onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x - 32, cs_descriptor.dpl, old_CS);
    write_new_stack_qword_64(RSP_for_cpl_x - 40, cs_descriptor.dpl, old_RIP);
    RSP_for_cpl_x -= 40;

    if (is_error_code) {
      RSP_for_cpl_x -= 8;
      write_new_stack_qword_64(RSP_for_cpl_x, cs_descriptor.dpl, error_code);
    }

    bx_selector_t ss_selector;
    bx_descriptor_t ss_descriptor;

    // set up a null descriptor
    parse_selector(0, &ss_selector);
    parse_descriptor(0, 0, &ss_descriptor);

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far64(&cs_selector, &cs_descriptor, gate_dest_offset, cs_descriptor.dpl);

    // set up null SS descriptor
    load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);
    RSP = RSP_for_cpl_x;

    // if INTERRUPT GATE set IF to 0
    if (!(gate_descriptor.type & 1)) // even type => interrupt gate
      BX_CPU_THIS_PTR clear_IF();
    BX_CPU_THIS_PTR clear_TF();
    BX_CPU_THIS_PTR clear_VM();
    BX_CPU_THIS_PTR clear_RF();
    BX_CPU_THIS_PTR clear_NT();
    return;
  }

  // if code segment is conforming OR code segment DPL = CPL then
  // INTERRUPT TO SAME PRIVILEGE LEVEL:
  if (IS_CODE_SEGMENT_CONFORMING(cs_descriptor.type) || cs_descriptor.dpl==CPL)
  {
    BX_DEBUG(("interrupt(long mode): INTERRUPT TO SAME PRIVILEGE"));

    Bit64u old_RSP = RSP;

    // check selector and descriptor for new stack in current TSS
    if (ist > 0) {
      BX_DEBUG(("interrupt(long mode): trap to IST, vector = %d", ist));
      get_RSP_from_TSS(ist+3, &RSP);
    }

    // align stack
    RSP &= BX_CONST64(0xfffffffffffffff0);

    // push flags onto stack
    // push current CS selector onto stack
    // push return offset onto stack
    write_new_stack_qword_64(RSP - 8,  cs_descriptor.dpl,
         BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value);
    write_new_stack_qword_64(RSP - 16, cs_descriptor.dpl, old_RSP);
    write_new_stack_qword_64(RSP - 24, cs_descriptor.dpl, read_eflags());
    write_new_stack_qword_64(RSP - 32,  cs_descriptor.dpl,
         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
    write_new_stack_qword_64(RSP - 40, cs_descriptor.dpl, RIP);
    RSP -= 40;

    if (is_error_code) {
      RSP -= 8;
      write_new_stack_qword_64(RSP, cs_descriptor.dpl, error_code);
    }

    // set the RPL field of CS to CPL
    branch_far64(&cs_selector, &cs_descriptor, gate_dest_offset, CPL);

    // if interrupt gate then set IF to 0
    if (!(gate_descriptor.type & 1)) // even type => interrupt gate
      BX_CPU_THIS_PTR clear_IF();
    BX_CPU_THIS_PTR clear_TF();
    BX_CPU_THIS_PTR clear_VM();
    BX_CPU_THIS_PTR clear_RF();
    BX_CPU_THIS_PTR clear_NT();
    return;
  }

  // else #GP(CS selector + ext)
  BX_ERROR(("interrupt(long mode): bad descriptor type=%u, descriptor.dpl=%u, CPL=%u",
    (unsigned) cs_descriptor.type, (unsigned) cs_descriptor.dpl, (unsigned) CPL));
  BX_ERROR(("cs.segment = %u", (unsigned) cs_descriptor.segment));
  exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc, 0);
}
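For orientation, both privilege branches above build the same qword frame on the destination stack (offsets relative to the stack pointer value before the pushes; the last slot only when is_error_code is set):

  -8   old SS selector
  -16  old RSP
  -24  RFLAGS
  -32  old CS selector
  -40  return RIP
  -48  error code (optional)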
Example #19
BX_CPU_C::call_gate64(bx_selector_t *gate_selector)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2, dword3;
  bx_descriptor_t cs_descriptor;
  bx_descriptor_t gate_descriptor;

  // examine code segment selector in call gate descriptor
  BX_DEBUG(("call_gate64: CALL 64bit call gate"));

  fetch_raw_descriptor_64(gate_selector, &dword1, &dword2, &dword3, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &gate_descriptor);

  Bit16u dest_selector = gate_descriptor.u.gate.dest_selector;
  // selector must not be null else #GP(0)
  if ((dest_selector & 0xfffc) == 0) {
    BX_ERROR(("call_gate64: selector in gate null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  parse_selector(dest_selector, &cs_selector);
  // selector must be within its descriptor table limits,
  //   else #GP(code segment selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // find the RIP in the gate_descriptor
  Bit64u new_RIP = gate_descriptor.u.gate.dest_offset;
  new_RIP |= ((Bit64u)dword3 << 32);

  // AR byte of selected descriptor must indicate code segment,
  //   else #GP(code segment selector)
  // DPL of selected descriptor must be <= CPL,
  // else #GP(code segment selector)
  if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
      IS_DATA_SEGMENT(cs_descriptor.type) ||
      cs_descriptor.dpl > CPL)
  {
    BX_ERROR(("call_gate64: selected descriptor is not code"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc, 0);
  }

  // In long mode, only 64-bit call gates are allowed, and they must point
  // to 64-bit code segments, else #GP(selector)
  if (! IS_LONG64_SEGMENT(cs_descriptor) || cs_descriptor.u.segment.d_b)
  {
    BX_ERROR(("call_gate64: not 64-bit code segment in call gate 64"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc, 0);
  }

  // code segment must be present else #NP(selector)
  if (! IS_PRESENT(cs_descriptor)) {
    BX_ERROR(("call_gate64: code segment not present !"));
    exception(BX_NP_EXCEPTION, dest_selector & 0xfffc, 0);
  }

  Bit64u old_CS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  Bit64u old_RIP = RIP;

  // CALL GATE TO MORE PRIVILEGE
  // if non-conforming code segment and DPL < CPL then
  if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && (cs_descriptor.dpl < CPL))
  {
    Bit64u RSP_for_cpl_x;

    BX_DEBUG(("CALL GATE TO MORE PRIVILEGE LEVEL"));

    // get new RSP for new privilege level from TSS
    get_RSP_from_TSS(cs_descriptor.dpl, &RSP_for_cpl_x);

    Bit64u old_SS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
    Bit64u old_RSP = RSP;

    if (! IsCanonical(RSP_for_cpl_x)) {
      // #SS(selector) when changing privilege level
      BX_ERROR(("call_gate64: canonical address failure %08x%08x",
         GET32H(RSP_for_cpl_x), GET32L(RSP_for_cpl_x)));
      exception(BX_SS_EXCEPTION, old_SS & 0xfffc, 0);
    }

    // push old stack long pointer onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x -  8, cs_descriptor.dpl, old_SS);
    write_new_stack_qword_64(RSP_for_cpl_x - 16, cs_descriptor.dpl, old_RSP);
    // push long pointer to return address onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x - 24, cs_descriptor.dpl, old_CS);
    write_new_stack_qword_64(RSP_for_cpl_x - 32, cs_descriptor.dpl, old_RIP);
    RSP_for_cpl_x -= 32;

    // prepare new stack null SS selector
    bx_selector_t ss_selector;
    bx_descriptor_t ss_descriptor;

    // set up a null descriptor
    parse_selector(0, &ss_selector);
    parse_descriptor(0, 0, &ss_descriptor);

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far64(&cs_selector, &cs_descriptor, new_RIP, cs_descriptor.dpl);

    // set up null SS descriptor
    load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);
    RSP = RSP_for_cpl_x;
  }
  else
  {
    BX_DEBUG(("CALL GATE TO SAME PRIVILEGE"));

    // push to 64-bit stack, switch to long64 guaranteed
    write_new_stack_qword_64(RSP -  8, CPL, old_CS);
    write_new_stack_qword_64(RSP - 16, CPL, old_RIP);
    RSP -= 16;

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far64(&cs_selector, &cs_descriptor, new_RIP, CPL);
  }
}
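For comparison, call_gate64 above builds a smaller frame: the more-privilege path pushes old SS, old RSP, old CS and old RIP (at -8 through -32 from the new stack pointer), while the same-privilege path pushes only old CS and old RIP (at -8 and -16).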