void BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset <= seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_RW);
      pl = (CPL==3);
#if BX_SupportGuest2HostTLB
      Bit8u *hostAddr = v2h_write_byte(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        *data = *hostAddr;
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        return;
      }
#endif
      // Accelerated attempt falls through to long path. Do it the
      // old fashioned way...
      access_linear(laddr, 1, pl, BX_RW, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
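/*
 * A minimal, self-contained sketch (not Bochs code) of the fast-path/slow-path
 * shape the accessors in this file take when BX_SupportGuest2HostTLB is
 * enabled: first try to resolve the guest linear address to a direct host
 * pointer and read through it; on a miss, fall back to the slow path that does
 * full translation.  host_page[], lookup_host_ptr() and slow_read_byte() are
 * hypothetical stand-ins for the v2h_*()/access_linear() machinery above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t host_page[4096];             /* pretend this backs guest 0x1000..0x1fff */

static uint8_t *lookup_host_ptr(uint64_t laddr)
{
    if (laddr >= 0x1000 && laddr < 0x2000)  /* "TLB hit": page already host-mapped */
        return &host_page[laddr - 0x1000];
    return NULL;                            /* "TLB miss" */
}

static void slow_read_byte(uint64_t laddr, uint8_t *data)
{
    (void) laddr;
    *data = 0;                              /* stand-in for the full access_linear() path */
}

static void read_byte_sketch(uint64_t laddr, uint8_t *data)
{
    uint8_t *host = lookup_host_ptr(laddr);
    if (host) {
        *data = *host;                      /* fast path: plain host load */
        return;
    }
    slow_read_byte(laddr, data);            /* slow path: translate, then access */
}

int main(void)
{
    uint8_t v;
    memset(host_page, 0xab, sizeof(host_page));
    read_byte_sketch(0x1004, &v);
    printf("0x%02x\n", v);                  /* prints 0xab via the fast path */
    return 0;
}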
void BX_CPU_C::debug_disasm_instruction(bx_address offset)
{
#if BX_DEBUGGER
  bx_dbg_disassemble_current(BX_CPU_ID, 1); // only one cpu, print time stamp
#else
  bx_phy_address phy_addr;
  Bit8u  instr_buf[16];
  char   char_buf[512];
  size_t i=0;

  static char letters[] = "0123456789ABCDEF";
  static disassembler bx_disassemble;
  unsigned remainsInPage = 0x1000 - PAGE_OFFSET(offset);

  bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, offset), &phy_addr);
  if (valid) {
    BX_MEM(0)->dbg_fetch_mem(BX_CPU_THIS, phy_addr, 16, instr_buf);
    unsigned isize = bx_disassemble.disasm(
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b,
        BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64,
        BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS), offset,
        instr_buf, char_buf+i);
    if (isize <= remainsInPage) {
      i=strlen(char_buf);
      char_buf[i++] = ' ';
      char_buf[i++] = ':';
      char_buf[i++] = ' ';
      for (unsigned j=0; j<isize; j++) {
        char_buf[i++] = letters[(instr_buf[j] >> 4) & 0xf];
        char_buf[i++] = letters[(instr_buf[j] >> 0) & 0xf];
      }
      char_buf[i] = 0;
      BX_INFO((">> %s", char_buf));
    }
    else {
void BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessROK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset <= seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_READ);
      pl = (CPL==3);
#if BX_SupportGuest2HostTLB
      Bit8u *hostAddr = v2h_read_byte(laddr, pl);
      if (hostAddr) {
        *data = *hostAddr;
        return;
      }
#endif
      access_linear(laddr, 1, pl, BX_READ, (void *) data);
      return;
    }
  }
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
void BX_CPU_C::write_virtual_dqword_aligned(unsigned s, bx_address offset, Bit8u *data)
{
  // If double quadword access is unaligned, #GP(0).
  bx_address laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
  if (laddr & 0xf) {
    BX_DEBUG(("write_virtual_dqword_aligned: access not aligned to 16-byte"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }
  write_virtual_dqword(s, offset, data);
}
void
tls_thread_init(os_local_state_t *os_tls, byte *segment)
{
    ASSERT((byte *)(os_tls->self) == segment);
    LOG(GLOBAL, LOG_THREADS, 2,
        "tls_thread_init: cur priv lib tls base is "PFX"\n",
        os_tls->os_seg_info.priv_lib_tls_base);
    write_thread_register(os_tls->os_seg_info.priv_lib_tls_base);
    ASSERT(get_segment_base(TLS_REG_LIB) == os_tls->os_seg_info.priv_lib_tls_base);
    ASSERT(*get_dr_tls_base_addr() == NULL);
    *get_dr_tls_base_addr() = segment;
    os_tls->tls_type = TLS_TYPE_SLOT;
}
void BX_CPU_C::read_RMW_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset <= (seg->cache.u.segment.limit_scaled-7))) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_RW);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 7) {
          BX_ERROR(("read_RMW_virtual_qword(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit64u *hostAddr = v2h_write_qword(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        ReadHostQWordFromLittleEndian(hostAddr, *data);
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        return;
      }
#endif
      access_linear(laddr, 8, pl, BX_RW, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 8);
  goto accessOK;
}
void BX_CPU_C::write_virtual_dword(unsigned s, bx_address offset, Bit32u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessWOK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset < (seg->cache.u.segment.limit_scaled-2))) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_WRITE);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 3) {
          BX_ERROR(("write_virtual_dword(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit32u *hostAddr = v2h_write_dword(laddr, pl);
      if (hostAddr) {
        // Current write access has privilege.
        WriteHostDWordToLittleEndian(hostAddr, *data);
        return;
      }
#endif
      access_linear(laddr, 4, pl, BX_WRITE, (void *) data);
      return;
    }
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
void BX_CPU_C::read_virtual_word(unsigned s, bx_address offset, Bit16u *data)
{
  bx_address laddr;
  bx_segment_reg_t *seg;

  seg = &BX_CPU_THIS_PTR sregs[s];
  if (seg->cache.valid & SegAccessROK) {
    if ((Is64BitMode() && IsCanonical(offset))
        || (offset < seg->cache.u.segment.limit_scaled)) {
      unsigned pl;
accessOK:
      laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
      BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_READ);
      pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (pl && BX_CPU_THIS_PTR alignment_check) {
        if (laddr & 1) {
          BX_ERROR(("read_virtual_word(): misaligned access"));
          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
#endif
#if BX_SupportGuest2HostTLB
      Bit16u *hostAddr = v2h_read_word(laddr, pl);
      if (hostAddr) {
        ReadHostWordFromLittleEndian(hostAddr, *data);
        return;
      }
#endif
      access_linear(laddr, 2, pl, BX_READ, (void *) data);
      return;
    }
  }
  read_virtual_checks(seg, offset, 2);
  goto accessOK;
}
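/*
 * Sketch only (not Bochs code): the per-size alignment tests above
 * (laddr & 1 for words, laddr & 3 for dwords, laddr & 7 for qwords) are
 * instances of one pattern for power-of-two sizes: an access of `size`
 * bytes is misaligned iff the low log2(size) bits of the linear address
 * are nonzero.  is_misaligned() below is a hypothetical helper written
 * for illustration.
 */
#include <stdint.h>
#include <assert.h>

static int is_misaligned(uint64_t laddr, unsigned size)
{
    /* size must be a power of two (1, 2, 4, 8, 16, ...) */
    return (laddr & (uint64_t)(size - 1)) != 0;
}

int main(void)
{
    assert(!is_misaligned(0x1000, 8));   /* qword on an 8-byte boundary: OK */
    assert( is_misaligned(0x1001, 2));   /* word at an odd address: #AC candidate */
    assert( is_misaligned(0x1004, 16));  /* dqword off a 16-byte boundary: the #GP
                                            case in write_virtual_dqword_aligned() */
    return 0;
}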
void
privload_tls_exit(void *dr_tp)
{
    byte *alloc;
    if (dr_tp == NULL || dr_tp == init_thread.tls)
        return;
    alloc = (byte *)dr_tp - offsetof(android_pthread_internal_t, tls);
    heap_munmap(alloc, ALIGN_FORWARD(sizeof(android_pthread_internal_t), PAGE_SIZE));
}

/* For standalone lib usage (i#1862: the Android loader passes
 * *nothing* to lib init routines).  This will only succeed prior to
 * Bionic's initializer, which clears the tls slot.
 */
bool
get_kernel_args(int *argc OUT, char ***argv OUT, char ***envp OUT)
{
    android_kernel_args_t *kargs;
    void **tls = (void **) get_segment_base(TLS_REG_LIB);
    if (tls != NULL) {
        kargs = (android_kernel_args_t *) tls[ANDROID_TLS_SLOT_BIONIC_PREINIT];
        if (kargs != NULL) {
            *argc = kargs->argc;
            *argv = kargs->argv;
            *envp = kargs->envp;
            return true;
        }
    }
    return false;
}
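/*
 * Hypothetical caller sketch, not DynamoRIO code: how a standalone library
 * init path might use get_kernel_args() above.  It must run before Bionic's
 * initializer clears the preinit TLS slot; otherwise the call returns false
 * and the caller needs some other way to discover argc/argv/envp.
 */
static bool
standalone_init_sketch(void)
{
    int argc;
    char **argv, **envp;
    if (!get_kernel_args(&argc, &argv, &envp)) {
        /* Slot already cleared, or no TLS segment set up yet: fall back to
         * another discovery mechanism. */
        return false;
    }
    /* Kernel args recovered from the Bionic preinit TLS slot. */
    return argc > 0 && argv != NULL && envp != NULL;
}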