inline void ReadFromHardware(T &_var, const u32 em_address, const u32 effective_address, Memory::XCheckTLBFlag flag)
{
	// Reads a value of type T from emulated memory at em_address into _var,
	// dispatching on the address to the EFB, MMIO, main RAM, EXRAM, the L1
	// cache window, fake VMEM, or (as a last resort) MMU page-table
	// translation. Raw memory reads are byte-swapped from guest order via
	// bswap().
	// NOTE(review): effective_address is never referenced in this function —
	// confirm against callers whether it can be dropped.
	//
	// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
	if ((em_address & 0xC8000000) == 0xC8000000)
	{
		// 0xC8xxxxxx and up: EFB below 0xcc000000, MMIO at and above it.
		if (em_address < 0xcc000000)
			_var = EFB_Read(em_address);
		else
			mmio_mapping->Read(em_address, &_var);
	}
	else if (((em_address & 0xF0000000) == 0x80000000) || ((em_address & 0xF0000000) == 0xC0000000) || ((em_address & 0xF0000000) == 0x00000000))
	{
		// Segments 0x8, 0xC and 0x0: main RAM (mirrored through RAM_MASK).
		_var = bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
	}
	else if (m_pEXRAM && (((em_address & 0xF0000000) == 0x90000000) || ((em_address & 0xF0000000) == 0xD0000000) || ((em_address & 0xF0000000) == 0x10000000)))
	{
		// Segments 0x9, 0xD and 0x1: EXRAM, only when it is allocated.
		_var = bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
	}
	else if ((em_address >= 0xE0000000) && (em_address < (0xE0000000+L1_CACHE_SIZE)))
	{
		// Locked L1 cache window starting at 0xE0000000.
		_var = bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
	}
	else if ((bFakeVMEM && ((em_address &0xF0000000) == 0x70000000)) || (bFakeVMEM && ((em_address &0xF0000000) == 0x40000000)))
	{
		// fake VMEM
		_var = bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
	}
	else
	{
		// MMU
		u32 tlb_addr = TranslateAddress(em_address, flag);
		if (tlb_addr == 0)
		{
			// Translation failed: raise a DSI only for actual reads. _var is
			// deliberately left unmodified in this case.
			if (flag == FLAG_READ)
			{
				GenerateDSIException(em_address, false);
			}
		}
		else
		{
			// NOTE(review): this path assumes every translated address lands in
			// main RAM (masked with RAM_MASK); page-boundary-crossing reads are
			// not handled in this variant — confirm that callers guarantee this.
			_var = bswap((*(const T*)&m_pRAM[tlb_addr & RAM_MASK]));
		}
	}
}
void Interpreter::ecowx(UGeckoInstruction inst)
{
	// External Control Out Word Indexed: store a word from RS to the
	// effective address, gated by the External Access Register (EAR).
	const u32 address = Helper_Get_EA_X(inst);

	// The external-access-enable bit (bit 0) of EAR must be set; otherwise
	// the instruction raises a DSI.
	const bool external_access_enabled = (PowerPC::ppcState.spr[SPR_EAR] & 0x80000000) != 0;
	if (!external_access_enabled)
	{
		GenerateDSIException(address);
		return;
	}

	// ecowx requires word alignment; a misaligned EA raises an alignment
	// exception instead of performing the store.
	if ((address & 3) != 0)
	{
		GenerateAlignmentException(address);
		return;
	}

	PowerPC::Write_U32(rGPR[inst.RS], address);
}
__forceinline T ReadFromHardware(const u32 em_address)
{
	// Reads a value of type T from emulated memory at em_address, returning it
	// byte-swapped from guest order. Fast paths cover EFB/MMIO, main RAM,
	// EXRAM, the L1 cache window and fake VMEM; everything else goes through
	// MMU page-table translation, including loads that straddle a page
	// boundary. Returns 0 when translation fails (after raising a DSI for
	// FLAG_READ accesses).
	int segment = em_address >> 28;
	// Quick check for an address that can't meet any of the following conditions,
	// to speed up the MMU path. (Bitmask 0xCFC marks the segments that are NOT
	// direct-mapped fast paths.)
	if (!BitSet32(0xCFC)[segment])
	{
		// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
		if ((em_address & 0xC8000000) == 0xC8000000)
		{
			// EFB below 0xcc000000, MMIO at and above it.
			if (em_address < 0xcc000000)
				return EFB_Read(em_address);
			else
				return (T)mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address);
		}
		else if (segment == 0x8 || segment == 0xC || segment == 0x0)
		{
			// Main RAM, mirrored through RAM_MASK.
			return bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
		}
		else if (m_pEXRAM && (segment == 0x9 || segment == 0xD || segment == 0x1))
		{
			return bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
		}
		else if (segment == 0xE && (em_address < (0xE0000000 + L1_CACHE_SIZE)))
		{
			return bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
		}
	}
	if (bFakeVMEM && (segment == 0x7 || segment == 0x4))
	{
		// fake VMEM
		return bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
	}

	// MMU: Do page table translation
	u32 tlb_addr = TranslateAddress<flag>(em_address);
	if (tlb_addr == 0)
	{
		if (flag == FLAG_READ)
			GenerateDSIException(em_address, false);
		return 0;
	}

	// Handle loads that cross page boundaries (ewwww)
	// The alignment check isn't strictly necessary, but since this is a rare slow path, it provides a faster
	// (1 instruction on x86) bailout.
	if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
	{
		// This could be unaligned down to the byte level... hopefully this is rare, so doing it this
		// way isn't too terrible.
		// TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
		// Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
		u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
		u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
		// FIX: the old code also re-tested `tlb_addr == 0` here, but that is a
		// dead condition — we already returned above when tlb_addr was 0. Only
		// the second page's translation can still fail (this also matches the
		// write path, which checks only tlb_addr_next_page).
		if (tlb_addr_next_page == 0)
		{
			if (flag == FLAG_READ)
				GenerateDSIException(em_address_next_page, false);
			return 0;
		}
		// Assemble the value byte by byte, switching to the second page's
		// physical address once we cross the boundary.
		T var = 0;
		for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
		{
			if (addr == em_address_next_page)
				tlb_addr = tlb_addr_next_page;
			var = (var << 8) | Memory::base[tlb_addr];
		}
		return var;
	}

	// The easy case!
	return bswap(*(const T*)&Memory::base[tlb_addr]);
}
inline void ReadFromHardware(T &_var, const u32 em_address, const u32 effective_address, Memory::XCheckTLBFlag flag)
{
	// Reads a value of type T from emulated memory at em_address into _var,
	// dispatching on the address to the EFB, MMIO, main RAM, EXRAM, the L1
	// cache window, fake VMEM, or MMU page-table translation. Unlike the
	// simpler variant, the MMU path here also handles loads that cross a
	// hardware page boundary.
	// NOTE(review): effective_address is never referenced in this function —
	// confirm against callers whether it can be dropped.
	//
	// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
	if ((em_address & 0xC8000000) == 0xC8000000)
	{
		// EFB below 0xcc000000, MMIO at and above it.
		if (em_address < 0xcc000000)
			_var = EFB_Read(em_address);
		else
			_var = mmio_mapping->Read<T>(em_address);
	}
	else if (((em_address & 0xF0000000) == 0x80000000) || ((em_address & 0xF0000000) == 0xC0000000) || ((em_address & 0xF0000000) == 0x00000000))
	{
		// Segments 0x8, 0xC and 0x0: main RAM (mirrored through RAM_MASK).
		_var = bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
	}
	else if (m_pEXRAM && (((em_address & 0xF0000000) == 0x90000000) || ((em_address & 0xF0000000) == 0xD0000000) || ((em_address & 0xF0000000) == 0x10000000)))
	{
		// Segments 0x9, 0xD and 0x1: EXRAM, only when it is allocated.
		_var = bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
	}
	else if ((em_address >= 0xE0000000) && (em_address < (0xE0000000+L1_CACHE_SIZE)))
	{
		// Locked L1 cache window starting at 0xE0000000.
		_var = bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
	}
	else if ((bFakeVMEM && ((em_address &0xF0000000) == 0x70000000)) || (bFakeVMEM && ((em_address &0xF0000000) == 0x40000000)))
	{
		// fake VMEM
		_var = bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
	}
	else
	{
		// MMU
		// Handle loads that cross page boundaries (ewwww)
		if (sizeof(T) > 1 && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
		{
			_var = 0;
			// This could be unaligned down to the byte level... hopefully this is rare, so doing it this
			// way isn't too terrible.
			// TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
			// Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
			u32 tlb_addr = TranslateAddress(em_address, flag);
			for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
			{
				// Start of the new page... translate the address again!
				if (!(addr & (HW_PAGE_SIZE-1)))
					tlb_addr = TranslateAddress(addr, flag);
				// Important: we need to generate the DSI on the first byte of the
				// load that caused the fault, NOT the address of the start of the
				// load. (The original comment said "store" here, but this is the
				// read path.)
				if (tlb_addr == 0)
				{
					if (flag == FLAG_READ)
					{
						GenerateDSIException(addr, false);
						break;
					}
					// NOTE(review): when flag != FLAG_READ and translation fails,
					// the loop continues and keeps incrementing the zero tlb_addr —
					// confirm this is the intended behavior for no-exception probes.
				}
				else
				{
					// Accumulate big-endian bytes into _var.
					_var <<= 8;
					_var |= m_pRAM[tlb_addr & RAM_MASK];
				}
			}
		}
		else
		{
			// Load fits entirely within one page: one translation suffices.
			u32 tlb_addr = TranslateAddress(em_address, flag);
			if (tlb_addr == 0)
			{
				// Translation failed: raise a DSI only for actual reads; _var is
				// left unmodified.
				if (flag == FLAG_READ)
				{
					GenerateDSIException(em_address, false);
				}
			}
			else
			{
				_var = bswap((*(const T*)&m_pRAM[tlb_addr & RAM_MASK]));
			}
		}
	}
}
__forceinline static void WriteToHardware(u32 em_address, const T data)
{
	// Writes a value of type T to emulated memory at em_address, dispatching
	// on the address (and on whether data translation is enabled via MSR.DR)
	// to the GP FIFO, EFB, MMIO, main RAM, EXRAM, the L1 cache window, fake
	// VMEM, or MMU page-table translation. Values are byte-swapped into guest
	// order before raw memory stores.
	int segment = em_address >> 28;
	// Quick check for an address that can't meet any of the following conditions,
	// to speed up the MMU path.
	bool performTranslation = UReg_MSR(MSR).DR;
	if (!BitSet32(0xCFC)[segment] && performTranslation)
	{
		// First, let's check for FIFO writes, since they are probably the most common
		// reason we end up in this function.
		// Note that we must mask the address to correctly emulate certain games;
		// Pac-Man World 3 in particular is affected by this.
		if (flag == FLAG_WRITE && (em_address & 0xFFFFF000) == 0xCC008000)
		{
			switch (sizeof(T))
			{
			case 1: GPFifo::Write8((u8)data); return;
			case 2: GPFifo::Write16((u16)data); return;
			case 4: GPFifo::Write32((u32)data); return;
			case 8: GPFifo::Write64((u64)data); return;
			}
		}
		if (flag == FLAG_WRITE && (em_address & 0xF8000000) == 0xC8000000)
		{
			// EFB below 0xcc000000, MMIO at and above it.
			if (em_address < 0xcc000000)
			{
				// TODO: This only works correctly for 32-bit writes.
				EFB_Write((u32)data, em_address);
				return;
			}
			else
			{
				Memory::mmio_mapping->Write(em_address & 0x0FFFFFFF, data);
				return;
			}
		}
		if (segment == 0x0 || segment == 0x8 || segment == 0xC)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			*(T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK] = bswap(data);
			return;
		}
		if (Memory::m_pEXRAM && (segment == 0x9 || segment == 0xD) && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			// Handle EXRAM.
			// TODO: Is this supposed to be mirrored like main RAM?
			*(T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF] = bswap(data);
			return;
		}
		if (segment == 0xE && (em_address < (0xE0000000 + Memory::L1_CACHE_SIZE)))
		{
			// Locked L1 cache window starting at 0xE0000000.
			*(T*)&Memory::m_pL1Cache[em_address & 0x0FFFFFFF] = bswap(data);
			return;
		}
	}
	if (Memory::bFakeVMEM && performTranslation && (segment == 0x7 || segment == 0x4))
	{
		// fake VMEM
		*(T*)&Memory::m_pFakeVMEM[em_address & Memory::FAKEVMEM_MASK] = bswap(data);
		return;
	}
	if (!performTranslation)
	{
		// Data translation disabled (MSR.DR == 0): accesses use physical-style
		// addresses, so the FIFO/EFB/MMIO windows appear in segment 0x0.
		if (flag == FLAG_WRITE && (em_address & 0xFFFFF000) == 0x0C008000)
		{
			switch (sizeof(T))
			{
			case 1: GPFifo::Write8((u8)data); return;
			case 2: GPFifo::Write16((u16)data); return;
			case 4: GPFifo::Write32((u32)data); return;
			case 8: GPFifo::Write64((u64)data); return;
			}
		}
		if (flag == FLAG_WRITE && (em_address & 0xF8000000) == 0x08000000)
		{
			if (em_address < 0x0c000000)
			{
				// TODO: This only works correctly for 32-bit writes.
				EFB_Write((u32)data, em_address);
				return;
			}
			else
			{
				Memory::mmio_mapping->Write(em_address, data);
				return;
			}
		}
		if (segment == 0x0)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			*(T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK] = bswap(data);
			return;
		}
		if (Memory::m_pEXRAM && segment == 0x1 && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			*(T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF] = bswap(data);
			return;
		}
		PanicAlert("Unable to resolve write address %x PC %x", em_address, PC);
		return;
	}
	// MMU: Do page table translation
	u32 tlb_addr = TranslateAddress<flag>(em_address);
	if (tlb_addr == 0)
	{
		// Translation failed: raise a DSI (store = true) only for real writes.
		if (flag == FLAG_WRITE)
			GenerateDSIException(em_address, true);
		return;
	}
	// Handle stores that cross page boundaries (ewwww)
	if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
	{
		T val = bswap(data);
		// We need to check both addresses before writing in case there's a DSI.
		u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
		u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
		if (tlb_addr_next_page == 0)
		{
			if (flag == FLAG_WRITE)
				GenerateDSIException(em_address_next_page, true);
			return;
		}
		// Emit the store byte by byte, switching to the second page's physical
		// address once we cross the boundary.
		for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++, val >>= 8)
		{
			if (addr == em_address_next_page)
				tlb_addr = tlb_addr_next_page;
			Memory::physical_base[tlb_addr] = (u8)val;
		}
		return;
	}
	// NOTE(review): this chunk ends here, mid-function — the non-crossing
	// ("easy case") store path and the function's closing brace are not
	// visible in this view; confirm against the full file.
__forceinline static T ReadFromHardware(const u32 em_address)
{
	// Reads a value of type T from emulated memory at em_address, returning it
	// byte-swapped from guest order. Dispatch depends on the address and on
	// whether data translation is enabled (MSR.DR): fast paths cover EFB/MMIO,
	// main RAM, EXRAM, the L1 cache window and fake VMEM; otherwise the access
	// goes through MMU page-table translation, including loads that straddle a
	// hardware page boundary. Returns 0 when translation fails (after raising
	// a DSI for FLAG_READ accesses).
	int segment = em_address >> 28;
	bool performTranslation = UReg_MSR(MSR).DR;
	// Quick check for an address that can't meet any of the following conditions,
	// to speed up the MMU path.
	if (!BitSet32(0xCFC)[segment] && performTranslation)
	{
		// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
		if (flag == FLAG_READ && (em_address & 0xF8000000) == 0xC8000000)
		{
			// EFB below 0xcc000000, MMIO at and above it.
			if (em_address < 0xcc000000)
				return EFB_Read(em_address);
			else
				return (T)Memory::mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address & 0x0FFFFFFF);
		}
		if (segment == 0x0 || segment == 0x8 || segment == 0xC)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			return bswap((*(const T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK]));
		}
		if (Memory::m_pEXRAM && (segment == 0x9 || segment == 0xD) && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			// Handle EXRAM.
			// TODO: Is this supposed to be mirrored like main RAM?
			return bswap((*(const T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF]));
		}
		if (segment == 0xE && (em_address < (0xE0000000 + Memory::L1_CACHE_SIZE)))
		{
			return bswap((*(const T*)&Memory::m_pL1Cache[em_address & 0x0FFFFFFF]));
		}
	}
	if (Memory::bFakeVMEM && performTranslation && (segment == 0x7 || segment == 0x4))
	{
		// fake VMEM
		return bswap((*(const T*)&Memory::m_pFakeVMEM[em_address & Memory::FAKEVMEM_MASK]));
	}
	if (!performTranslation)
	{
		// Data translation disabled (MSR.DR == 0): physical-style addresses,
		// so the EFB/MMIO window appears in segment 0x0.
		if (flag == FLAG_READ && (em_address & 0xF8000000) == 0x08000000)
		{
			if (em_address < 0x0c000000)
				return EFB_Read(em_address);
			else
				return (T)Memory::mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address);
		}
		if (segment == 0x0)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			return bswap((*(const T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK]));
		}
		if (Memory::m_pEXRAM && segment == 0x1 && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			return bswap((*(const T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF]));
		}
		PanicAlert("Unable to resolve read address %x PC %x", em_address, PC);
		return 0;
	}
	// MMU: Do page table translation
	u32 tlb_addr = TranslateAddress<flag>(em_address);
	if (tlb_addr == 0)
	{
		if (flag == FLAG_READ)
			GenerateDSIException(em_address, false);
		return 0;
	}
	// Handle loads that cross page boundaries (ewwww)
	// The alignment check isn't strictly necessary, but since this is a rare slow path, it provides a faster
	// (1 instruction on x86) bailout.
	if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
	{
		// This could be unaligned down to the byte level... hopefully this is rare, so doing it this
		// way isn't too terrible.
		// TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
		// Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
		u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
		u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
		// FIX: the old code also re-tested `tlb_addr == 0` here, but that is a
		// dead condition — we already returned above when tlb_addr was 0. Only
		// the second page's translation can still fail (matching the check in
		// WriteToHardware's crossing path).
		if (tlb_addr_next_page == 0)
		{
			if (flag == FLAG_READ)
				GenerateDSIException(em_address_next_page, false);
			return 0;
		}
		// Assemble the value byte by byte, switching to the second page's
		// physical address once we cross the boundary.
		T var = 0;
		for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
		{
			if (addr == em_address_next_page)
				tlb_addr = tlb_addr_next_page;
			var = (var << 8) | Memory::physical_base[tlb_addr];
		}
		return var;
	}
	// The easy case!
	return bswap(*(const T*)&Memory::physical_base[tlb_addr]);
}