Example #1
File: avx.cpp Project: iver6/BA
/* Opcode: VEX.66.0F.38 2F (VEX.W=0) */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMASKMOVPD_MpdHpdVpd(bxInstruction_c *i)
{
  BxPackedAvxRegister mask = BX_READ_AVX_REG(i->vvv()), op = BX_READ_AVX_REG(i->nnn());
  unsigned len = i->getVL();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
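    // Pre-check every active element's address for canonicality so that a
    // fault is raised before any memory has been modified.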
    for (unsigned n=0; n < (2*len); n++) {
       if (mask.avx32u(n*2+1) & 0x80000000) {
          if (! IsCanonical(get_laddr64(i->seg(), eaddr + 8*n)))
             exception(int_number(i->seg()), 0);
       }
    }
  }
#endif

  // see if you can successfully write all the elements first, so that a
  // fault cannot leave the destination partially updated
  for (int n=2*len-1; n >= 0; n--) {
    if (mask.avx32u(2*n+1) & 0x80000000)
       read_RMW_virtual_qword(i->seg(), (eaddr + 8*n) & i->asize_mask());
  }

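  // Second pass: every probe succeeded, so the masked stores can no longer
  // fault part-way through; commit them now.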
  for (unsigned n=0; n < (2*len); n++) {
    if (mask.avx32u(2*n+1) & 0x80000000)
       write_virtual_qword(i->seg(), (eaddr + 8*n) & i->asize_mask(), op.avx64u(n));
  }

  BX_NEXT_INSTR(i);
}
Example #2
File: avx.cpp Project: iver6/BA
/* Opcode: VEX.66.0F.38 2D (VEX.W=0) */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMASKMOVPD_VpdHpdMpd(bxInstruction_c *i)
{
  BxPackedAvxRegister mask = BX_READ_AVX_REG(i->vvv()), result;
  unsigned len = i->getVL();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
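    // Same canonical pre-check as the store form above: fault on any
    // non-canonical masked element address before performing the load.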
    for (unsigned n=0; n < (2*len); n++) {
       if (mask.avx32u(n*2+1) & 0x80000000) {
          if (! IsCanonical(get_laddr64(i->seg(), eaddr + 8*n)))
             exception(int_number(i->seg()), 0);
       }
    }
  }
#endif

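  // Read the masked elements; elements whose mask bit is clear are zeroed,
  // as VMASKMOVPD requires.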
  for (int n=2*len-1; n >= 0; n--) {
    if (mask.avx32u(n*2+1) & 0x80000000)
       result.avx64u(n) = read_virtual_qword(i->seg(), (eaddr + 8*n) & i->asize_mask());
    else
       result.avx64u(n) = 0;
  }

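  // Write the result and clear the destination bits above the current
  // vector length (VEX zero-upper semantics).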
  BX_WRITE_AVX_REGZ(i->nnn(), result, len);

  BX_NEXT_INSTR(i);
}
Example #3
Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;
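  // Instrumentation hook: report an 8-byte read-modify-write data access.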
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
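  // Fast path: if the linear page is mapped in the guest-to-host TLB,
  // read directly from host memory and skip the page walk.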
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
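      // Mark the physical page as written so cached translations of code
      // on it are invalidated (self-modifying code detection).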
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostQWordFromLittleEndian(hostAddr, data);
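      // Remember the host address so the write half of the RMW pair can
      // commit its store without re-translating the address.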
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
          tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

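  // Slow path: canonical and alignment checks, then the full translation
  // through access_read_linear().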
  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_RMW_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
  return data;
}
Example #4
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
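  // A byte access can never be misaligned or split across pages, so the
  // plain LPF is used for the lookup (no alignment masking needed).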
  Bit64u lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
            tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      *hostAddr = data;
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_virtual_byte_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

  access_write_linear(laddr, 1, CPL, (void *) &data);
}