void BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessROK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset <= seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_READ);
      pl = (CPL==3);
#if BX_SupportGuest2HostTLB
      Bit8u *hostAddr = v2h_read_byte(laddr, pl);
      if (hostAddr) {
        *data = *hostAddr;
        return;
      }
#endif
      access_linear(laddr, 1, pl, BX_READ, (void *) data);
      return;
    }
  }
  // Quick limit/canonical test failed; run the full segment checks, then retry.
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
void BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset <= seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_RW);
      pl = (CPL==3);
#if BX_SupportGuest2HostTLB
      Bit8u *hostAddr = v2h_write_byte(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        *data = *hostAddr;
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        return;
      }
#endif
      // Accelerated attempt falls through to long path. Do it the
      // old fashioned way...
      access_linear(laddr, 1, pl, BX_RW, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
Bit32u BX_CPU_C::read_RMW_virtual_dword(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
    bx_address lpf = AlignedAccessLPFOf(laddr, 3);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        ReadHostDWordFromLittleEndian(hostAddr, data);
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset,
            4, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_dword(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    if (BX_CPU_THIS_PTR alignment_check()) {
      if (laddr & 3) {
        BX_ERROR(("read_RMW_virtual_dword(): #AC misaligned access"));
        exception(BX_AC_EXCEPTION, 0, 0);
      }
    }
#endif
    access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset < (seg->cache.u.segment.limit_scaled-2)))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
Bit8u BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        data = *hostAddr;
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset,
            1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
    // Accelerated attempt falls through to long path. Do it the
    // old fashioned way...
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
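// Hedged sketch (not Bochs source): the general shape of the direct-mapped,
// software-TLB lookup performed above under the usual 4 KB-page assumption.
// All names (SketchTlbEntry, sketch_lookup, SKETCH_TLB_SIZE) and the table
// size are illustrative only; the real BX_TLB_INDEX_OF/LPFOf/PAGE_OFFSET
// macros may differ in detail.

#include <cstdint>
#include <cstddef>

struct SketchTlbEntry {
  uint64_t  lpf;          // linear page frame this entry maps (laddr & ~0xfff)
  uintptr_t hostPageAddr; // host address of the start of that guest page
};

static const size_t SKETCH_TLB_SIZE = 1024;  // assumed power of two

static uint8_t *sketch_lookup(SketchTlbEntry *tlb, uint64_t laddr)
{
  uint64_t lpf  = laddr & ~(uint64_t)0xfff;               // like LPFOf(laddr)
  size_t  index = (size_t)(laddr >> 12) & (SKETCH_TLB_SIZE - 1);
  SketchTlbEntry *e = &tlb[index];
  if (e->lpf != lpf)
    return 0;  // miss: caller takes the slow access_read_linear() path
  // hit: translate directly to a host pointer, page base plus page offset
  return (uint8_t *)(e->hostPageAddr | (uintptr_t)(laddr & 0xfff));
}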
Bit8u BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  if (seg->cache.valid & SegAccessROK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us read access
      // from this CPL.
      if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
        data = *hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset,
            1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessROK) {
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
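// Hedged sketch (not Bochs source): the accessBits encoding that the two
// tests above appear to rely on -- reads use (1 << CPL), writes use
// (0x10 << CPL), which corresponds to "read allowed" flags in bits 0..3 and
// "write allowed" flags in bits 4..7, one bit per CPL. The helper names and
// the sketch_access_bits_t type are made up for the illustration.

#include <cstdint>

typedef uint32_t sketch_access_bits_t;

static inline bool sketch_can_read(sketch_access_bits_t accessBits, unsigned cpl)
{
  return (accessBits & (1u << cpl)) != 0;     // bits 0..3: read OK at CPL 0..3
}

static inline bool sketch_can_write(sketch_access_bits_t accessBits, unsigned cpl)
{
  return (accessBits & (0x10u << cpl)) != 0;  // bits 4..7: write OK at CPL 0..3
}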
void BX_CPU_C::read_RMW_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset <= (seg->cache.u.segment.limit_scaled-7))) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_RW);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 7) {
          BX_ERROR(("read_RMW_virtual_qword(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit64u *hostAddr = v2h_write_qword(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        ReadHostQWordFromLittleEndian(hostAddr, *data);
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        return;
      }
#endif
      access_linear(laddr, 8, pl, BX_RW, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 8);
  goto accessOK;
}
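// Hedged sketch (not Bochs source): the misalignment test that the #AC checks
// above reduce to. For a naturally aligned access of len bytes (len a power of
// two), the address is misaligned exactly when its low log2(len) bits are
// nonzero; sketch_misaligned is a name invented for this illustration.

#include <cstdint>

static inline bool sketch_misaligned(uint64_t laddr, unsigned len)
{
  // len == 2 -> laddr & 1,  len == 4 -> laddr & 3,  len == 8 -> laddr & 7
  return (laddr & (uint64_t)(len - 1)) != 0;
}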
void BX_CPU_C::write_virtual_dword(unsigned s, bx_address offset, Bit32u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset < (seg->cache.u.segment.limit_scaled-2))) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_WRITE);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 3) {
          BX_ERROR(("write_virtual_dword(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit32u *hostAddr = v2h_write_dword(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        WriteHostDWordToLittleEndian(hostAddr, *data);
        return;
      }
#endif
      access_linear(laddr, 4, pl, BX_WRITE, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
bx_bool BX_CPU_C::fetchInstruction(bxInstruction_c *iStorage, const Bit8u *fetchPtr, unsigned remainingInPage)
{
  unsigned ret;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
    ret = fetchDecode64(fetchPtr, iStorage, remainingInPage);
  else
#endif
    ret = fetchDecode32(fetchPtr, iStorage, remainingInPage);

  if (ret==0) {
    // handle instrumentation callback inside boundaryFetch
    boundaryFetch(fetchPtr, remainingInPage, iStorage);
    return 0;
  }

#if BX_INSTRUMENTATION
  BX_INSTR_OPCODE(BX_CPU_ID, fetchPtr, iStorage->ilen(),
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
#endif

  return 1;
}
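// Hedged sketch (not Bochs source): how a caller could compute the
// remainingInPage argument for a 4 KB code page -- the number of bytes from
// the fetch address up to the end of its page. fetchInstruction() above falls
// back to boundaryFetch() when the decoder reports that the instruction does
// not fit within that many bytes. The helper name is invented for this sketch.

#include <cstdint>

static inline unsigned sketch_remaining_in_page(uint64_t fetch_laddr)
{
  return (unsigned)(0x1000 - (fetch_laddr & 0xfff));  // 1..4096 bytes left in this page
}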
void BX_CPU_C::read_virtual_word(unsigned s, bx_address offset, Bit16u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessROK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset < seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_READ);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 1) {
          BX_ERROR(("read_virtual_word(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit16u *hostAddr = v2h_read_word(laddr, pl);
      if (hostAddr) {
        ReadHostWordFromLittleEndian(hostAddr, *data);
        return;
      }
#endif
      access_linear(laddr, 2, pl, BX_READ, (void *) data);
      return;
    }
  }
  read_virtual_checks(seg, offset, 2);
  goto accessOK;
}
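// Hedged sketch (not Bochs source): the arithmetic behind the different limit
// comparisons in the routines above. An expand-up access of len bytes at
// 'offset' fits the segment when its last byte, offset + len - 1, does not
// exceed limit_scaled; the helper below rewrites that to avoid overflow of
// offset + len. sketch_fits_limit is a name invented for this illustration.

#include <cstdint>

static inline bool sketch_fits_limit(uint64_t offset, unsigned len, uint32_t limit_scaled)
{
  // byte  (len 1): offset <= limit
  // word  (len 2): offset <= limit - 1   (== offset <  limit)
  // dword (len 4): offset <= limit - 3   (== offset <  limit - 2)
  // qword (len 8): offset <= limit - 7
  if ((uint64_t)limit_scaled < (uint64_t)(len - 1))
    return false;  // segment too small to hold an access of this size at all
  return offset <= (uint64_t)limit_scaled - (len - 1);
}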