uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (!IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
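/*
 * Usage sketch (not part of the template file): when this template is
 * instantiated with an empty SUFFIX and "AddressSpace *as" as ARG1_DECL,
 * the resulting address_space_ldub() entry point can be called as below.
 * The helper name and the error handling are illustrative assumptions.
 */
static uint8_t example_read_byte(AddressSpace *as, hwaddr addr)
{
    MemTxResult res;
    uint8_t val = address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        /* the transaction failed; report or propagate the error as needed */
    }
    return val;
}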
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (l < 2 || !IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
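/*
 * Sketch of the endian-specific entry points that wrap the internal helper
 * above, following the glue()/SUFFIX pattern of this file. Shown here for
 * illustration of how the "endian" parameter is selected; the exact wrappers
 * live elsewhere in the template.
 */
uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}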
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = entry->addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, size,
                                    iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}
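/*
 * Hedged sketch (hypothetical helper, not part of the original code): the
 * softmmu load path hands off to io_readx() when a TLB entry marks the page
 * as I/O. The caller shown here, its name, and its argument plumbing are
 * assumptions for illustration of the io_readx() calling convention only.
 */
static uint32_t example_ldl_io(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                               int mmu_idx, target_ulong addr,
                               uintptr_t retaddr, target_ulong tlb_addr)
{
    /* 4-byte data load; recheck when the entry covers less than a page */
    return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
                    tlb_addr & TLB_RECHECK, MMU_DATA_LOAD, 4);
}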