Example #1
File: avx.cpp Project: iver6/BA
/* Opcode: VEX.66.0F.38 2F (VEX.W=0) */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMASKMOVPD_MpdHpdVpd(bxInstruction_c *i)
{
  BxPackedAvxRegister mask = BX_READ_AVX_REG(i->vvv()), op = BX_READ_AVX_REG(i->nnn());
  unsigned len = i->getVL();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
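    // fault up front on any non-canonical element address so the masked store never partially completes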
    for (unsigned n=0; n < (2*len); n++) {
       if (mask.avx32u(n*2+1) & 0x80000000) {
          if (! IsCanonical(get_laddr64(i->seg(), eaddr + 8*n)))
             exception(int_number(i->seg()), 0);
       }
    }
  }
#endif

  // see if you can successfully write all the elements first
  for (int n=2*len-1; n >= 0; n--) {
    if (mask.avx32u(2*n+1) & 0x80000000)
       read_RMW_virtual_qword(i->seg(), (eaddr + 8*n) & i->asize_mask());
  }

  for (unsigned n=0; n < (2*len); n++) {
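    // all selected elements were verified writable above; now perform the stores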
    if (mask.avx32u(2*n+1) & 0x80000000)
       write_virtual_qword(i->seg(), (eaddr + 8*n) & i->asize_mask(), op.avx64u(n));
  }

  BX_NEXT_INSTR(i);
}
Example #2
File: avx.cpp Project: iver6/BA
/* Opcode: VEX.66.0F.38 2D (VEX.W=0) */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMASKMOVPD_VpdHpdMpd(bxInstruction_c *i)
{
  BxPackedAvxRegister mask = BX_READ_AVX_REG(i->vvv()), result;
  unsigned len = i->getVL();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    for (unsigned n=0; n < (2*len); n++) {
       if (mask.avx32u(n*2+1) & 0x80000000) {
          if (! IsCanonical(get_laddr64(i->seg(), eaddr + 8*n)))
             exception(int_number(i->seg()), 0);
       }
    }
  }
#endif

  for (int n=2*len-1; n >= 0; n--) {
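    // masked load: read quadwords whose mask sign bit is set, zero the rest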
    if (mask.avx32u(n*2+1) & 0x80000000)
       result.avx64u(n) = read_virtual_qword(i->seg(), (eaddr + 8*n) & i->asize_mask());
    else
       result.avx64u(n) = 0;
  }

  BX_WRITE_AVX_REGZ(i->nnn(), result, len);

  BX_NEXT_INSTR(i);
}
Example #3
Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_dword(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
    bx_address lpf = AlignedAccessLPFOf(laddr, 3);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        ReadHostDWordFromLittleEndian(hostAddr, data);
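        // remember the translated host address so the matching RMW write can commit the modified value to the same location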
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_dword(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    if (BX_CPU_THIS_PTR alignment_check()) {
      if (laddr & 3) {
        BX_ERROR(("read_RMW_virtual_dword(): #AC misaligned access"));
        exception(BX_AC_EXCEPTION, 0, 0);
      }
    }
#endif
    access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset < (seg->cache.u.segment.limit_scaled-2)))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
Example #4
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        data = *hostAddr;
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
    // Accelerated attempt falls through to the long path.  Do it the
    // old-fashioned way...
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Example #5
Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostQWordFromLittleEndian(hostAddr, data);
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_RMW_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
  return data;
}
Example #6
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  if (seg->cache.valid & SegAccessROK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us read access
      // from this CPL.
      if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
        data = *hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessROK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Example #7
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
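      // notify the instruction cache that this physical page is being written (self-modifying code detection)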
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      *hostAddr = data;
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_virtual_byte_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

  access_write_linear(laddr, 1, CPL, (void *) &data);
}
Example #8
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
                               unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // do canonical checks
    if (!IsCanonical(offset)) {
      BX_ERROR(("write_virtual_checks(): canonical Failure 0x%08x:%08x", GET32H(offset), GET32L(offset)));
      exception(int_number(seg), 0, 0);
    }
    seg->cache.valid |= SegAccessWOK;
    return;
  }
#endif
  if (protected_mode()) {
    if (seg->cache.valid==0) {
      BX_DEBUG(("write_virtual_checks(): segment descriptor not valid"));
      exception(int_number(seg), 0, 0);
    }

    if (seg->cache.p == 0) { /* not present */
      BX_ERROR(("write_virtual_checks(): segment not present"));
      exception(int_number(seg), 0, 0);
    }

    switch (seg->cache.type) {
      case 0: case 1:   // read only
      case 4: case 5:   // read only, expand down
      case 8: case 9:   // execute only
      case 10: case 11: // execute/read
      case 12: case 13: // execute only, conforming
      case 14: case 15: // execute/read-only, conforming
        BX_ERROR(("write_virtual_checks(): no write access to seg"));
        exception(int_number(seg), 0, 0);

      case 2: case 3: /* read/write */
        if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
            || (length-1 > seg->cache.u.segment.limit_scaled))
        {
          BX_ERROR(("write_virtual_checks(): write beyond limit, r/w"));
          exception(int_number(seg), 0, 0);
        }
        if (seg->cache.u.segment.limit_scaled >= 7) {
          // Mark the cache as write-OK for succeeding writes.  The limit
          // check still needs to be done, but it becomes simpler.  We
          // could probably also optimize it out with a flag for the case
          // when the limit is the maximum 32-bit value.  The limit should
          // accommodate at least a dword, since we subtract from it in the
          // simple limit check in other functions, and we don't want the
          // value to roll over.  Only normal (not expand-down) segments
          // are handled this way.
          seg->cache.valid |= SegAccessWOK;
        }
        break;

      case 6: case 7: /* read/write, expand down */
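        // expand-down segment: valid offsets lie above the limit and the access must fit at or below the upper bound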
        if (seg->cache.u.segment.d_b)
          upper_limit = 0xffffffff;
        else
          upper_limit = 0x0000ffff;
        if ((offset <= seg->cache.u.segment.limit_scaled) ||
             (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
        {
          BX_ERROR(("write_virtual_checks(): write beyond limit, r/w ED"));
          exception(int_number(seg), 0, 0);
        }
        break;
    }

    return;
  }
  else { /* real mode */
    if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
    {
      BX_DEBUG(("write_virtual_checks(): write beyond limit (real mode)"));
      exception(int_number(seg), 0, 0);
    }
    if (seg->cache.u.segment.limit_scaled >= 7) {
      // Mark cache as being OK type for succeeding writes. See notes above.
      seg->cache.valid |= SegAccessWOK;
    }
  }
}
Example #9
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
                              unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // do canonical checks
    if (!IsCanonical(offset)) {
      BX_ERROR(("read_virtual_checks(): canonical Failure 0x%08x:%08x", GET32H(offset), GET32L(offset)));
      exception(int_number(seg), 0, 0);
    }
    seg->cache.valid |= SegAccessROK;
    return;
  }
#endif
  if (protected_mode()) {
    if (seg->cache.valid==0) {
      BX_DEBUG(("read_virtual_checks(): segment descriptor not valid"));
      exception(int_number(seg), 0, 0);
    }

    if (seg->cache.p == 0) { /* not present */
      BX_ERROR(("read_virtual_checks(): segment not present"));
      exception(int_number(seg), 0, 0);
    }

    switch (seg->cache.type) {
      case 0: case 1: /* read only */
      case 2: case 3: /* read/write */
      case 10: case 11: /* execute/read */
      case 14: case 15: /* execute/read-only, conforming */
        if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
            || (length-1 > seg->cache.u.segment.limit_scaled))
        {
          BX_ERROR(("read_virtual_checks(): read beyond limit"));
          exception(int_number(seg), 0, 0);
        }
        if (seg->cache.u.segment.limit_scaled >= 7) {
          // Mark cache as being OK type for succeeding reads. See notes for
          // write checks; similar code.
          seg->cache.valid |= SegAccessROK;
        }
        break;

      case 4: case 5: /* read only, expand down */
      case 6: case 7: /* read/write, expand down */
        if (seg->cache.u.segment.d_b)
          upper_limit = 0xffffffff;
        else
          upper_limit = 0x0000ffff;
        if ((offset <= seg->cache.u.segment.limit_scaled) ||
             (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
        {
          BX_ERROR(("read_virtual_checks(): read beyond limit"));
          exception(int_number(seg), 0, 0);
        }
        break;

      case 8: case 9: /* execute only */
      case 12: case 13: /* execute only, conforming */
        /* can't read or write an execute-only segment */
        BX_ERROR(("read_virtual_checks(): execute only"));
        exception(int_number(seg), 0, 0);
    }
    return;
  }
  else { /* real mode */
    if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
        || (length-1 > seg->cache.u.segment.limit_scaled))
    {
      BX_DEBUG(("read_virtual_checks(): read beyond limit (real mode)"));
      exception(int_number(seg), 0, 0);
    }
    if (seg->cache.u.segment.limit_scaled >= 7) {
      // Mark cache as being OK type for succeeding reads. See notes for
      // write checks; similar code.
      seg->cache.valid |= SegAccessROK;
    }
  }
}
Example #10
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // Mark cache as being OK type for succeeding reads/writes
    seg->cache.valid |= SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    return;
  }
#endif

  if (seg->cache.valid==0) {
    BX_DEBUG(("execute_virtual_checks(): segment descriptor not valid"));
    exception(int_number(seg), 0, 0);
  }

  if (seg->cache.p == 0) { /* not present */
    BX_ERROR(("execute_virtual_checks(): segment not present"));
    exception(int_number(seg), 0, 0);
  }

  switch (seg->cache.type) {
    case 0: case 1: /* read only */
    case 2: case 3: /* read/write */
    case 10: case 11: /* execute/read */
    case 14: case 15: /* execute/read-only, conforming */
      if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit"));
        exception(int_number(seg), 0, 0);
      }
      if (seg->cache.u.segment.limit_scaled >= 7) {
        // Mark cache as being OK type for succeeding reads. See notes for
        // write checks; similar code.
        seg->cache.valid |= SegAccessROK;
        if (seg->cache.u.segment.limit_scaled == 0xffffffff)
          seg->cache.valid |= SegAccessROK4G;
      }
      break;

    case 8: case 9: /* execute only */
    case 12: case 13: /* execute only, conforming */
      if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit execute only"));
        exception(int_number(seg), 0, 0);
      }
      break;
 
    case 4: case 5: /* read only, expand down */
    case 6: case 7: /* read/write, expand down */
      if (seg->cache.u.segment.d_b)
        upper_limit = 0xffffffff;
      else
        upper_limit = 0x0000ffff;
      if ((offset <= seg->cache.u.segment.limit_scaled) ||
           (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit ED"));
        exception(int_number(seg), 0, 0);
      }
      break;

    default:
      BX_PANIC(("execute_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
  }
}