namespace Memory {

// The base pointer to the auto-mirrored arena.
u8* base = NULL;

#ifdef __SYMBIAN32__
RChunk* memmap;
#else
// The MemArena class
MemArena g_arena;
#endif
// ==============

// 64-bit: Pointers to low-mem (sub-0x10000000) mirror
// 32-bit: Same as the corresponding physical/virtual pointers.
u8 *m_pRAM;
u8 *m_pRAM2;
u8 *m_pRAM3;
u8 *m_pScratchPad;
u8 *m_pVRAM;

u8 *m_pPhysicalScratchPad;
u8 *m_pUncachedScratchPad;

// 64-bit: Pointers to high-mem mirrors
// 32-bit: Same as above
u8 *m_pPhysicalRAM;
u8 *m_pUncachedRAM;
u8 *m_pKernelRAM;  // RAM mirrored up to "kernel space". Fully accessible at all times currently.
u8 *m_pPhysicalRAM2;
u8 *m_pUncachedRAM2;
u8 *m_pKernelRAM2;
u8 *m_pPhysicalRAM3;
u8 *m_pUncachedRAM3;
u8 *m_pKernelRAM3;

// VRAM is mirrored 4 times. The second and fourth mirrors are swizzled.
// In practice, a game accessing the mirrors most likely is deswizzling the depth buffer.
u8 *m_pPhysicalVRAM1;
u8 *m_pPhysicalVRAM2;
u8 *m_pPhysicalVRAM3;
u8 *m_pPhysicalVRAM4;
u8 *m_pUncachedVRAM1;
u8 *m_pUncachedVRAM2;
u8 *m_pUncachedVRAM3;
u8 *m_pUncachedVRAM4;

// Holds the ending address of the PSP's user space.
// Required for HD Remasters to work properly.
// This replaces RAM_NORMAL_SIZE at runtime.
u32 g_MemorySize;
// Used to store the PSP model on game startup.
u32 g_PSPModel;

recursive_mutex g_shutdownLock;

// We don't declare the IO region in here since it's handled by other means.
static MemoryView views[] = {
    {&m_pScratchPad, &m_pPhysicalScratchPad, 0x00010000, SCRATCHPAD_SIZE, 0},
    {NULL, &m_pUncachedScratchPad, 0x40010000, SCRATCHPAD_SIZE, MV_MIRROR_PREVIOUS},
    {&m_pVRAM, &m_pPhysicalVRAM1, 0x04000000, 0x00200000, 0},
    {NULL, &m_pPhysicalVRAM2, 0x04200000, 0x00200000, MV_MIRROR_PREVIOUS},
    {NULL, &m_pPhysicalVRAM3, 0x04400000, 0x00200000, MV_MIRROR_PREVIOUS},
    {NULL, &m_pPhysicalVRAM4, 0x04600000, 0x00200000, MV_MIRROR_PREVIOUS},
    {NULL, &m_pUncachedVRAM1, 0x44000000, 0x00200000, MV_MIRROR_PREVIOUS},
    {NULL, &m_pUncachedVRAM2, 0x44200000, 0x00200000, MV_MIRROR_PREVIOUS},
    {NULL, &m_pUncachedVRAM3, 0x44400000, 0x00200000, MV_MIRROR_PREVIOUS},
    {NULL, &m_pUncachedVRAM4, 0x44600000, 0x00200000, MV_MIRROR_PREVIOUS},
    {&m_pRAM, &m_pPhysicalRAM, 0x08000000, g_MemorySize, MV_IS_PRIMARY_RAM},  // only from 0x08800000 is it usable (last 24 megs)
    {NULL, &m_pUncachedRAM, 0x48000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},
    {NULL, &m_pKernelRAM, 0x88000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},
    // Starts at memory + 31 MB.
    {&m_pRAM2, &m_pPhysicalRAM2, 0x09F00000, g_MemorySize, MV_IS_EXTRA1_RAM},
    {NULL, &m_pUncachedRAM2, 0x49F00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA1_RAM},
    {NULL, &m_pKernelRAM2, 0x89F00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA1_RAM},
    // Starts at memory + 31 * 2 MB.
    {&m_pRAM3, &m_pPhysicalRAM3, 0x0BE00000, g_MemorySize, MV_IS_EXTRA2_RAM},
    {NULL, &m_pUncachedRAM3, 0x4BE00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA2_RAM},
    {NULL, &m_pKernelRAM3, 0x8BE00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA2_RAM},

    // TODO: There are a few swizzled mirrors of VRAM, not sure about the best way to
    // implement those.
};

static const int num_views = sizeof(views) / sizeof(MemoryView);

inline static bool CanIgnoreView(const MemoryView &view) {
#if defined(_M_IX86) || defined(_M_ARM32) || defined(_XBOX)
    // Basically, 32-bit platforms can ignore views that are masked out anyway.
    return (view.flags & MV_MIRROR_PREVIOUS) && (view.virtual_address & ~MEMVIEW32_MASK) != 0;
#else
    return false;
#endif
}

// yeah, this could also be done in like two bitwise ops...
#define SKIP(a_flags, b_flags)
//    if (!(a_flags & MV_WII_ONLY) && (b_flags & MV_WII_ONLY))
//        continue;
//    if (!(a_flags & MV_FAKE_VMEM) && (b_flags & MV_FAKE_VMEM))
//        continue;

static bool Memory_TryBase(u32 flags) {
    // OK, we know where to find free space. Now grab it!
    // We just mimic the popular BAT setup.
#if defined(_XBOX)
    void *ptr;
#elif !defined(__SYMBIAN32__)
    size_t position = 0;
    size_t last_position = 0;
#endif

    // Zero all the pointers to be sure.
    for (int i = 0; i < num_views; i++) {
        if (views[i].out_ptr_low)
            *views[i].out_ptr_low = 0;
        if (views[i].out_ptr)
            *views[i].out_ptr = 0;
    }

    int i;
    for (i = 0; i < num_views; i++) {
        const MemoryView &view = views[i];
        if (view.size == 0)
            continue;
        SKIP(flags, view.flags);

#ifdef __SYMBIAN32__
        if (!CanIgnoreView(view)) {
            memmap->Commit(view.virtual_address & MEMVIEW32_MASK, view.size);
        }
        *(view.out_ptr) = (u8*)base + (view.virtual_address & MEMVIEW32_MASK);
#elif defined(_XBOX)
        if (!CanIgnoreView(view)) {
            *(view.out_ptr_low) = (u8*)(base + view.virtual_address);
            ptr = VirtualAlloc(base + (view.virtual_address & MEMVIEW32_MASK), view.size, MEM_COMMIT, PAGE_READWRITE);
        }
        *(view.out_ptr) = (u8*)base + (view.virtual_address & MEMVIEW32_MASK);
#else
        if (view.flags & MV_MIRROR_PREVIOUS) {
            position = last_position;
        } else {
            *(view.out_ptr_low) = (u8*)g_arena.CreateView(position, view.size);
            if (!*view.out_ptr_low)
                goto bail;
        }
#ifdef _M_X64
        *view.out_ptr = (u8*)g_arena.CreateView(position, view.size, base + view.virtual_address);
#else
        if (CanIgnoreView(view)) {
            // No need to create multiple identical views.
            *view.out_ptr = *views[i - 1].out_ptr;
        } else {
            *view.out_ptr = (u8*)g_arena.CreateView(position, view.size, base + (view.virtual_address & MEMVIEW32_MASK));
            if (!*view.out_ptr)
                goto bail;
        }
#endif
        last_position = position;
        position += g_arena.roundup(view.size);
#endif
    }

    return true;

#if !defined(_XBOX) && !defined(__SYMBIAN32__)
bail:
    // Argh! ERROR! Free what we grabbed so far so we can try again.
    for (int j = 0; j <= i; j++) {
        if (views[j].size == 0)
            continue;
        SKIP(flags, views[j].flags);
        if (views[j].out_ptr_low && *views[j].out_ptr_low) {
            g_arena.ReleaseView(*views[j].out_ptr_low, views[j].size);
            *views[j].out_ptr_low = NULL;
        }
        if (*views[j].out_ptr) {
            if (!CanIgnoreView(views[j])) {
                g_arena.ReleaseView(*views[j].out_ptr, views[j].size);
            }
            *views[j].out_ptr = NULL;
        }
    }
    return false;
#endif
}

void MemoryMap_Setup(u32 flags) {
    // Find a base to reserve 256MB
#if defined(_XBOX)
    base = (u8*)VirtualAlloc(0, 0x10000000, MEM_RESERVE | MEM_LARGE_PAGES, PAGE_READWRITE);
#elif defined(__SYMBIAN32__)
    memmap = new RChunk();
    memmap->CreateDisconnectedLocal(0, 0, 0x10000000);
    base = memmap->Base();
#else
    size_t total_mem = 0;

    for (int i = 0; i < num_views; i++) {
        if (views[i].size == 0)
            continue;
        SKIP(flags, views[i].flags);
        if (!CanIgnoreView(views[i]))
            total_mem += g_arena.roundup(views[i].size);
    }

    // Grab some pagefile backed memory out of the void ...
    g_arena.GrabLowMemSpace(total_mem);

    // 32-bit Windows retrieves base a different way
#if defined(_M_X64) || !defined(_WIN32)
    // This really shouldn't fail - in 64-bit, there will always be enough address space.
    // Linux32 is fine with the x64 method, although limited to 32-bit with no automirrors.
    base = MemArena::Find4GBBase();
#endif
#endif

    // Now, create views in high memory where there's plenty of space.
#if defined(_WIN32) && !defined(_M_X64) && !defined(_XBOX)
    // Try a whole range of possible bases. Return once we got a valid one.
    int base_attempts = 0;
    u32 max_base_addr = 0x7FFF0000 - 0x10000000;

    for (u32 base_addr = 0x01000000; base_addr < max_base_addr; base_addr += 0x400000) {
        base_attempts++;
        base = (u8 *)base_addr;
        if (Memory_TryBase(flags)) {
            INFO_LOG(MEMMAP, "Found valid memory base at %p after %i tries.", base, base_attempts);
            base_attempts = 0;
            break;
        }
    }

    if (base_attempts)
        PanicAlert("No possible memory base pointer found!");
#else
    // Try base we retrieved earlier
    if (!Memory_TryBase(flags)) {
        ERROR_LOG(MEMMAP, "MemoryMap_Setup: Failed finding a memory base.");
        PanicAlert("MemoryMap_Setup: Failed finding a memory base.");
    }
#endif
    return;
}

void MemoryMap_Shutdown(u32 flags) {
#ifdef __SYMBIAN32__
    memmap->Decommit(0, memmap->MaxSize());
    memmap->Close();
    delete memmap;
#else
    for (int i = 0; i < num_views; i++) {
        if (views[i].size == 0)
            continue;
        SKIP(flags, views[i].flags);
        if (views[i].out_ptr_low && *views[i].out_ptr_low)
            g_arena.ReleaseView(*views[i].out_ptr_low, views[i].size);
        if (*views[i].out_ptr && (!views[i].out_ptr_low || *views[i].out_ptr != *views[i].out_ptr_low))
            g_arena.ReleaseView(*views[i].out_ptr, views[i].size);
        *views[i].out_ptr = NULL;
        if (views[i].out_ptr_low)
            *views[i].out_ptr_low = NULL;
    }
    g_arena.ReleaseSpace();
#endif
}

void Init() {
    int flags = 0;
    // On some 32 bit platforms, you can only map < 32 megs at a time.
    const static int MAX_MMAP_SIZE = 31 * 1024 * 1024;
    _dbg_assert_msg_(MEMMAP, g_MemorySize < MAX_MMAP_SIZE * 3, "ACK - too much memory for three mmap views.");
    for (size_t i = 0; i < ARRAY_SIZE(views); i++) {
        if (views[i].flags & MV_IS_PRIMARY_RAM)
            views[i].size = std::min((int)g_MemorySize, MAX_MMAP_SIZE);
        if (views[i].flags & MV_IS_EXTRA1_RAM)
            views[i].size = std::min(std::max((int)g_MemorySize - MAX_MMAP_SIZE, 0), MAX_MMAP_SIZE);
        if (views[i].flags & MV_IS_EXTRA2_RAM)
            views[i].size = std::min(std::max((int)g_MemorySize - MAX_MMAP_SIZE * 2, 0), MAX_MMAP_SIZE);
    }
    MemoryMap_Setup(flags);

    INFO_LOG(MEMMAP, "Memory system initialized. RAM at %p (mirror at 0 @ %p, uncached @ %p)",
        m_pRAM, m_pPhysicalRAM, m_pUncachedRAM);
}

void DoState(PointerWrap &p) {
    auto s = p.Section("Memory", 1, 2);
    if (!s)
        return;

    if (s < 2) {
        if (!g_RemasterMode)
            g_MemorySize = RAM_NORMAL_SIZE;
        g_PSPModel = PSP_MODEL_FAT;
    } else {
        u32 oldMemorySize = g_MemorySize;
        p.Do(g_PSPModel);
        p.DoMarker("PSPModel");
        if (!g_RemasterMode) {
            g_MemorySize = g_PSPModel == PSP_MODEL_FAT ? RAM_NORMAL_SIZE : RAM_DOUBLE_SIZE;
            if (oldMemorySize < g_MemorySize) {
                Shutdown();
                Init();
            }
        }
    }

    p.DoArray(GetPointer(PSP_GetKernelMemoryBase()), g_MemorySize);
    p.DoMarker("RAM");

    p.DoArray(m_pVRAM, VRAM_SIZE);
    p.DoMarker("VRAM");
    p.DoArray(m_pScratchPad, SCRATCHPAD_SIZE);
    p.DoMarker("ScratchPad");
}

void Shutdown() {
    lock_guard guard(g_shutdownLock);
    u32 flags = 0;
    MemoryMap_Shutdown(flags);
    base = NULL;
    DEBUG_LOG(MEMMAP, "Memory system shut down.");
}

void Clear() {
    if (m_pRAM)
        memset(GetPointerUnchecked(PSP_GetKernelMemoryBase()), 0, g_MemorySize);
    if (m_pScratchPad)
        memset(m_pScratchPad, 0, SCRATCHPAD_SIZE);
    if (m_pVRAM)
        memset(m_pVRAM, 0, VRAM_SIZE);
}

// Wanting to avoid include pollution, MemMap.h is included a lot.
MemoryInitedLock::MemoryInitedLock() {
    g_shutdownLock.lock();
}

MemoryInitedLock::~MemoryInitedLock() {
    g_shutdownLock.unlock();
}

MemoryInitedLock Lock() {
    return MemoryInitedLock();
}

static Opcode Read_Instruction(u32 address, bool resolveReplacements, Opcode inst) {
    if (!MIPS_IS_EMUHACK(inst.encoding)) {
        return inst;
    }

    if (MIPS_IS_RUNBLOCK(inst.encoding) && MIPSComp::jit) {
        JitBlockCache *bc = MIPSComp::jit->GetBlockCache();
        int block_num = bc->GetBlockNumberFromEmuHackOp(inst, true);
        if (block_num >= 0) {
            inst = bc->GetOriginalFirstOp(block_num);
            if (resolveReplacements && MIPS_IS_REPLACEMENT(inst)) {
                u32 op;
                if (GetReplacedOpAt(address, &op)) {
                    if (MIPS_IS_EMUHACK(op)) {
                        ERROR_LOG(HLE, "WTF 1");
                        return Opcode(op);
                    } else {
                        return Opcode(op);
                    }
                } else {
                    ERROR_LOG(HLE, "Replacement, but no replacement op? %08x", inst.encoding);
                }
            }
            return inst;
        } else {
            return inst;
        }
    } else if (resolveReplacements && MIPS_IS_REPLACEMENT(inst.encoding)) {
        u32 op;
        if (GetReplacedOpAt(address, &op)) {
            if (MIPS_IS_EMUHACK(op)) {
                ERROR_LOG(HLE, "WTF 2");
                return Opcode(op);
            } else {
                return Opcode(op);
            }
        } else {
            return inst;
        }
    } else {
        return inst;
    }
}

Opcode Read_Instruction(u32 address, bool resolveReplacements) {
    Opcode inst = Opcode(Read_U32(address));
    return Read_Instruction(address, resolveReplacements, inst);
}

Opcode ReadUnchecked_Instruction(u32 address, bool resolveReplacements) {
    Opcode inst = Opcode(ReadUnchecked_U32(address));
    return Read_Instruction(address, resolveReplacements, inst);
}

Opcode Read_Opcode_JIT(u32 address) {
    Opcode inst = Opcode(Read_U32(address));
    if (MIPS_IS_RUNBLOCK(inst.encoding) && MIPSComp::jit) {
        JitBlockCache *bc = MIPSComp::jit->GetBlockCache();
        int block_num = bc->GetBlockNumberFromEmuHackOp(inst, true);
        if (block_num >= 0) {
            return bc->GetOriginalFirstOp(block_num);
        } else {
            return inst;
        }
    } else {
        return inst;
    }
}

// WARNING! No checks!
// We assume that _Address is cached
void Write_Opcode_JIT(const u32 _Address, const Opcode _Value) {
    Memory::WriteUnchecked_U32(_Value.encoding, _Address);
}

void Memset(const u32 _Address, const u8 _iValue, const u32 _iLength) {
    u8 *ptr = GetPointer(_Address);
    if (ptr != NULL) {
        memset(ptr, _iValue, _iLength);
    } else {
        for (size_t i = 0; i < _iLength; i++)
            Write_U8(_iValue, (u32)(_Address + i));
    }
#ifndef MOBILE_DEVICE
    CBreakPoints::ExecMemCheck(_Address, true, _iLength, currentMIPS->pc);
#endif
}

const char *GetAddressName(u32 address) {
    // TODO, follow GetPointer
    return "[mem]";
}

} // namespace
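// ---------------------------------------------------------------------------
// Editor's sketch, not part of the original source: a minimal, self-contained
// illustration of the aliasing the views[] table above sets up. One backing
// buffer stands in for the arena, and the cached (0x08......), uncached
// (0x48......) and kernel (0x88......) mirrors of user RAM collapse onto it
// once the top address bits are stripped. The mask value is assumed to match
// MEMVIEW32_MASK (taken here as 0x3FFFFFFF); the names below are hypothetical.
#include <cassert>
#include <cstdint>
#include <vector>

namespace MirrorSketch {

constexpr uint32_t kAddressMask = 0x3FFFFFFF;  // assumed MEMVIEW32_MASK value
constexpr uint32_t kRamBase = 0x08000000;      // physical base of PSP user RAM

void Demo() {
    std::vector<uint8_t> ram(0x02000000);  // 32 MB stand-in for the RAM view
    // Resolve a PSP-style virtual address by dropping the mirror bits. The
    // real code maps separate OS-level views instead of masking every access,
    // but the observable aliasing is the same.
    auto ptr = [&](uint32_t vaddr) {
        return ram.data() + ((vaddr & kAddressMask) - kRamBase);
    };
    *ptr(0x88800000) = 0x42;           // write through the kernel mirror
    assert(*ptr(0x08800000) == 0x42);  // visible through the cached mirror
    assert(*ptr(0x48800000) == 0x42);  // and through the uncached mirror
}

}  // namespace MirrorSketch
// ---------------------------------------------------------------------------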
namespace Memory {

// The base pointer to the auto-mirrored arena.
u8* base = NULL;

// The MemArena class
MemArena g_arena;
// ==============

// 64-bit: Pointers to low-mem (sub-0x10000000) mirror
// 32-bit: Same as the corresponding physical/virtual pointers.
u8 *m_pRAM;
u8 *m_pScratchPad;
u8 *m_pVRAM;

u8 *m_pPhysicalScratchPad;
u8 *m_pUncachedScratchPad;

// 64-bit: Pointers to high-mem mirrors
// 32-bit: Same as above
u8 *m_pPhysicalRAM;
u8 *m_pUncachedRAM;
u8 *m_pKernelRAM;  // RAM mirrored up to "kernel space". Fully accessible at all times currently.

u8 *m_pPhysicalVRAM;
u8 *m_pUncachedVRAM;

// Holds the ending address of the PSP's user space.
// Required for HD Remasters to work properly.
u32 g_MemoryMask;
u32 g_MemorySize;

// We don't declare the IO region in here since it's handled by other means.
static MemoryView views[] = {
    {&m_pScratchPad, &m_pPhysicalScratchPad, 0x00010000, SCRATCHPAD_SIZE, 0},
    {NULL, &m_pUncachedScratchPad, 0x40010000, SCRATCHPAD_SIZE, MV_MIRROR_PREVIOUS},
    {&m_pVRAM, &m_pPhysicalVRAM, 0x04000000, 0x00800000, 0},
    {NULL, &m_pUncachedVRAM, 0x44000000, 0x00800000, MV_MIRROR_PREVIOUS},
    {&m_pRAM, &m_pPhysicalRAM, 0x08000000, g_MemorySize, 0},  // only from 0x08800000 is it usable (last 24 megs)
    {NULL, &m_pUncachedRAM, 0x48000000, g_MemorySize, MV_MIRROR_PREVIOUS},
    {NULL, &m_pKernelRAM, 0x88000000, g_MemorySize, MV_MIRROR_PREVIOUS},

    // TODO: There are a few swizzled mirrors of VRAM, not sure about the best way to
    // implement those.
};

static const int num_views = sizeof(views) / sizeof(MemoryView);

void Init() {
    int flags = 0;
    Memory::g_MemoryMask = Memory::g_MemorySize - 1;

    for (int i = 0; i < ARRAY_SIZE(views); i++) {
        if (views[i].size == 0)
            views[i].size = g_MemorySize;
    }

    base = MemoryMap_Setup(views, num_views, flags, &g_arena);

    INFO_LOG(MEMMAP, "Memory system initialized. RAM at %p (mirror at 0 @ %p, uncached @ %p)",
        m_pRAM, m_pPhysicalRAM, m_pUncachedRAM);
}

void DoState(PointerWrap &p) {
    p.DoArray(m_pRAM, g_MemorySize);
    p.DoMarker("RAM");
    p.DoArray(m_pVRAM, VRAM_SIZE);
    p.DoMarker("VRAM");
    p.DoArray(m_pScratchPad, SCRATCHPAD_SIZE);
    p.DoMarker("ScratchPad");
    p.Do(g_RemasterMode);  // TODO: Need to test more if this and MemoryMask need to be saved in the state
    p.DoMarker("RemasterMode");
    p.Do(g_MemoryMask);
    p.DoMarker("MemoryMask");
    p.Do(g_DoubleTextureCoordinates);  // TODO: Is there a more appropriate place for this?
    p.DoMarker("DoubleTextureCoordinates");
}

void Shutdown() {
    u32 flags = 0;
    MemoryMap_Shutdown(views, num_views, flags, &g_arena);
    g_arena.ReleaseSpace();
    base = NULL;
    INFO_LOG(MEMMAP, "Memory system shut down.");
}

void Clear() {
    if (m_pRAM)
        memset(m_pRAM, 0, g_MemorySize);
    if (m_pScratchPad)
        memset(m_pScratchPad, 0, SCRATCHPAD_SIZE);
    if (m_pVRAM)
        memset(m_pVRAM, 0, VRAM_SIZE);
}

u32 Read_Instruction(u32 address) {
    u32 inst = Read_U32(address);
    if (MIPS_IS_EMUHACK(inst) && MIPSComp::jit) {
        JitBlockCache *bc = MIPSComp::jit->GetBlockCache();
        int block_num = bc->GetBlockNumberFromEmuHackOp(inst);
        if (block_num >= 0) {
            return bc->GetOriginalFirstOp(block_num);
        } else {
            return inst;
        }
    } else {
        return inst;
    }
}

u32 Read_Opcode_JIT(u32 address) {
    return Read_Instruction(address);
}

// WARNING! No checks!
// We assume that _Address is cached
void Write_Opcode_JIT(const u32 _Address, const u32 _Value) {
    Memory::WriteUnchecked_U32(_Value, _Address);
}

void Memset(const u32 _Address, const u8 _iValue, const u32 _iLength) {
    u8 *ptr = GetPointer(_Address);
    if (ptr != NULL) {
        memset(ptr, _iValue, _iLength);
    } else {
        for (size_t i = 0; i < _iLength; i++)
            Write_U8(_iValue, (u32)(_Address + i));
    }
}

void Memcpy(const u32 to_address, const void *from_data, const u32 len) {
    memcpy(GetPointer(to_address), from_data, len);
}

void Memcpy(void *to_data, const u32 from_address, const u32 len) {
    memcpy(to_data, GetPointer(from_address), len);
}

void GetString(std::string &_string, const u32 em_address) {
    char stringBuffer[2048];
    char *string = stringBuffer;
    char c;
    u32 addr = em_address;
    // Bounded so an unterminated guest string can't overrun the local buffer.
    while ((c = Read_U8(addr)) && string < stringBuffer + sizeof(stringBuffer) - 1) {
        *string++ = c;
        addr++;
    }
    *string++ = '\0';
    _string = stringBuffer;
}

const char *GetAddressName(u32 address) {
    // TODO, follow GetPointer
    return "[mem]";
}

} // namespace
namespace Memory {

// The base pointer to the auto-mirrored arena.
u8* base = NULL;

// The MemArena class
MemArena g_arena;
// ==============

// 64-bit: Pointers to low-mem (sub-0x10000000) mirror
// 32-bit: Same as the corresponding physical/virtual pointers.
u8 *m_pRAM;
u8 *m_pRAM2;
u8 *m_pRAM3;
u8 *m_pScratchPad;
u8 *m_pVRAM;

u8 *m_pPhysicalScratchPad;
u8 *m_pUncachedScratchPad;

// 64-bit: Pointers to high-mem mirrors
// 32-bit: Same as above
u8 *m_pPhysicalRAM;
u8 *m_pUncachedRAM;
u8 *m_pKernelRAM;  // RAM mirrored up to "kernel space". Fully accessible at all times currently.
u8 *m_pPhysicalRAM2;
u8 *m_pUncachedRAM2;
u8 *m_pKernelRAM2;
u8 *m_pPhysicalRAM3;
u8 *m_pUncachedRAM3;
u8 *m_pKernelRAM3;

u8 *m_pPhysicalVRAM;
u8 *m_pUncachedVRAM;

// Holds the ending address of the PSP's user space.
// Required for HD Remasters to work properly.
// These replace RAM_NORMAL_SIZE and RAM_NORMAL_MASK, respectively.
u32 g_MemorySize;
u32 g_MemoryMask;
// Used to store the PSP model on game startup.
u32 g_PSPModel;

// We don't declare the IO region in here since it's handled by other means.
static MemoryView views[] = {
    {&m_pScratchPad, &m_pPhysicalScratchPad, 0x00010000, SCRATCHPAD_SIZE, 0},
    {NULL, &m_pUncachedScratchPad, 0x40010000, SCRATCHPAD_SIZE, MV_MIRROR_PREVIOUS},
    {&m_pVRAM, &m_pPhysicalVRAM, 0x04000000, 0x00800000, 0},
    {NULL, &m_pUncachedVRAM, 0x44000000, 0x00800000, MV_MIRROR_PREVIOUS},
    {&m_pRAM, &m_pPhysicalRAM, 0x08000000, g_MemorySize, MV_IS_PRIMARY_RAM},  // only from 0x08800000 is it usable (last 24 megs)
    {NULL, &m_pUncachedRAM, 0x48000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},
    {NULL, &m_pKernelRAM, 0x88000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},
    // Starts at memory + 31 MB.
    {&m_pRAM2, &m_pPhysicalRAM2, 0x09F00000, g_MemorySize, MV_IS_EXTRA1_RAM},
    {NULL, &m_pUncachedRAM2, 0x49F00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA1_RAM},
    {NULL, &m_pKernelRAM2, 0x89F00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA1_RAM},
    // Starts at memory + 31 * 2 MB.
    {&m_pRAM3, &m_pPhysicalRAM3, 0x0BE00000, g_MemorySize, MV_IS_EXTRA2_RAM},
    {NULL, &m_pUncachedRAM3, 0x4BE00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA2_RAM},
    {NULL, &m_pKernelRAM3, 0x8BE00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA2_RAM},

    // TODO: There are a few swizzled mirrors of VRAM, not sure about the best way to
    // implement those.
};

static const int num_views = sizeof(views) / sizeof(MemoryView);

void Init() {
    int flags = 0;
    // This mask is used ONLY after validating the address is in the correct range.
    // So let's just use a fixed mask to remove the uncached/user memory bits.
    // Using (Memory::g_MemorySize - 1) won't work for e.g. 0x04C00000.
    Memory::g_MemoryMask = 0x07FFFFFF;

    // On some 32 bit platforms, you can only map < 32 megs at a time.
    const static int MAX_MMAP_SIZE = 31 * 1024 * 1024;
    _dbg_assert_msg_(MEMMAP, g_MemorySize < MAX_MMAP_SIZE * 3, "ACK - too much memory for three mmap views.");
    for (size_t i = 0; i < ARRAY_SIZE(views); i++) {
        if (views[i].flags & MV_IS_PRIMARY_RAM)
            views[i].size = std::min((int)g_MemorySize, MAX_MMAP_SIZE);
        if (views[i].flags & MV_IS_EXTRA1_RAM)
            views[i].size = std::min(std::max((int)g_MemorySize - MAX_MMAP_SIZE, 0), MAX_MMAP_SIZE);
        if (views[i].flags & MV_IS_EXTRA2_RAM)
            views[i].size = std::min(std::max((int)g_MemorySize - MAX_MMAP_SIZE * 2, 0), MAX_MMAP_SIZE);
    }
    base = MemoryMap_Setup(views, num_views, flags, &g_arena);

    INFO_LOG(MEMMAP, "Memory system initialized. RAM at %p (mirror at 0 @ %p, uncached @ %p)",
        m_pRAM, m_pPhysicalRAM, m_pUncachedRAM);
}

void DoState(PointerWrap &p) {
    auto s = p.Section("Memory", 1, 2);
    if (!s)
        return;

    if (s < 2) {
        if (!g_RemasterMode)
            g_MemorySize = RAM_NORMAL_SIZE;
        g_PSPModel = PSP_MODEL_FAT;
    } else {
        p.Do(g_PSPModel);
        p.DoMarker("PSPModel");
        if (!g_RemasterMode)
            g_MemorySize = g_PSPModel == PSP_MODEL_FAT ? RAM_NORMAL_SIZE : RAM_DOUBLE_SIZE;
    }

    p.DoArray(GetPointer(PSP_GetKernelMemoryBase()), g_MemorySize);
    p.DoMarker("RAM");
    p.DoArray(m_pVRAM, VRAM_SIZE);
    p.DoMarker("VRAM");
    p.DoArray(m_pScratchPad, SCRATCHPAD_SIZE);
    p.DoMarker("ScratchPad");
}

void Shutdown() {
    u32 flags = 0;
    MemoryMap_Shutdown(views, num_views, flags, &g_arena);
    g_arena.ReleaseSpace();
    base = NULL;
    DEBUG_LOG(MEMMAP, "Memory system shut down.");
}

void Clear() {
    if (m_pRAM)
        memset(GetPointerUnchecked(PSP_GetKernelMemoryBase()), 0, g_MemorySize);
    if (m_pScratchPad)
        memset(m_pScratchPad, 0, SCRATCHPAD_SIZE);
    if (m_pVRAM)
        memset(m_pVRAM, 0, VRAM_SIZE);
}

Opcode Read_Instruction(u32 address) {
    Opcode inst = Opcode(Read_U32(address));
    if (MIPS_IS_EMUHACK(inst) && MIPSComp::jit) {
        JitBlockCache *bc = MIPSComp::jit->GetBlockCache();
        int block_num = bc->GetBlockNumberFromEmuHackOp(inst, true);
        if (block_num >= 0) {
            return bc->GetOriginalFirstOp(block_num);
        } else {
            return inst;
        }
    } else {
        return inst;
    }
}

Opcode Read_Opcode_JIT(u32 address) {
    return Read_Instruction(address);
}

// WARNING! No checks!
// We assume that _Address is cached
void Write_Opcode_JIT(const u32 _Address, const Opcode _Value) {
    Memory::WriteUnchecked_U32(_Value.encoding, _Address);
}

void Memset(const u32 _Address, const u8 _iValue, const u32 _iLength) {
    u8 *ptr = GetPointer(_Address);
    if (ptr != NULL) {
        memset(ptr, _iValue, _iLength);
    } else {
        for (size_t i = 0; i < _iLength; i++)
            Write_U8(_iValue, (u32)(_Address + i));
    }
}

void GetString(std::string &_string, const u32 em_address) {
    char stringBuffer[2048];
    char *string = stringBuffer;
    char c;
    u32 addr = em_address;
    // Bounded so an unterminated guest string can't overrun the local buffer.
    while ((c = Read_U8(addr)) && string < stringBuffer + sizeof(stringBuffer) - 1) {
        *string++ = c;
        addr++;
    }
    *string++ = '\0';
    _string = stringBuffer;
}

const char *GetAddressName(u32 address) {
    // TODO, follow GetPointer
    return "[mem]";
}

} // namespace
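// ---------------------------------------------------------------------------
// Editor's note, not part of the original source: a worked example of the
// fixed-mask comment in Init() above. With a size-derived mask
// (g_MemorySize - 1 == 0x01FFFFFF for a 32 MB size) a VRAM mirror address
// such as 0x04C00000 would lose its 0x04000000 base bit, while the fixed
// 0x07FFFFFF mask preserves it.
static_assert((0x04C00000u & 0x07FFFFFFu) == 0x04C00000u,
    "fixed mask keeps the VRAM base bit");
static_assert((0x04C00000u & 0x01FFFFFFu) == 0x00C00000u,
    "a size-derived mask would drop the VRAM base bit");
// ---------------------------------------------------------------------------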
namespace Memory {

u8* g_base;                      ///< The base pointer to the auto-mirrored arena.

static MemArena arena;           ///< The MemArena class

u8* g_exefs_code;                ///< ExeFS:/.code is loaded here
u8* g_system_mem;                ///< System memory
u8* g_heap;                      ///< Application heap (main memory)
u8* g_heap_linear;               ///< Linear heap
u8* g_vram;                      ///< Video memory (VRAM) pointer
u8* g_shared_mem;                ///< Shared memory
u8* g_dsp_mem;                   ///< DSP memory
u8* g_kernel_mem;                ///< Kernel memory

static u8* physical_bootrom;     ///< Bootrom physical memory
static u8* uncached_bootrom;

static u8* physical_exefs_code;  ///< Physical ExeFS:/.code is loaded here
static u8* physical_system_mem;  ///< System physical memory
static u8* physical_fcram;       ///< Main physical memory (FCRAM)
static u8* physical_heap_gsp;    ///< GSP heap physical memory
static u8* physical_vram;        ///< Video physical memory (VRAM)
static u8* physical_shared_mem;  ///< Physical shared memory
static u8* physical_dsp_mem;     ///< Physical DSP memory
static u8* physical_kernel_mem;  ///< Kernel memory

// We don't declare the IO region in here since it's handled by other means.
static MemoryView g_views[] = {
    {&g_exefs_code, &physical_exefs_code, EXEFS_CODE_VADDR, EXEFS_CODE_SIZE, 0},
    {&g_vram, &physical_vram, VRAM_VADDR, VRAM_SIZE, 0},
    {&g_heap, &physical_fcram, HEAP_VADDR, HEAP_SIZE, MV_IS_PRIMARY_RAM},
    {&g_shared_mem, &physical_shared_mem, SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE, 0},
    {&g_system_mem, &physical_system_mem, SYSTEM_MEMORY_VADDR, SYSTEM_MEMORY_SIZE, 0},
    {&g_dsp_mem, &physical_dsp_mem, DSP_MEMORY_VADDR, DSP_MEMORY_SIZE, 0},
    {&g_kernel_mem, &physical_kernel_mem, KERNEL_MEMORY_VADDR, KERNEL_MEMORY_SIZE, 0},
    {&g_heap_linear, &physical_heap_gsp, HEAP_LINEAR_VADDR, HEAP_LINEAR_SIZE, 0},
};

/*static MemoryView views[] = {
    {&m_pScratchPad, &m_pPhysicalScratchPad, 0x00010000, SCRATCHPAD_SIZE, 0},
    {NULL, &m_pUncachedScratchPad, 0x40010000, SCRATCHPAD_SIZE, MV_MIRROR_PREVIOUS},
    {&m_pVRAM, &m_pPhysicalVRAM, 0x04000000, 0x00800000, 0},
    {NULL, &m_pUncachedVRAM, 0x44000000, 0x00800000, MV_MIRROR_PREVIOUS},
    {&m_pRAM, &m_pPhysicalRAM, 0x08000000, g_MemorySize, MV_IS_PRIMARY_RAM},  // only from 0x08800000 is it usable (last 24 megs)
    {NULL, &m_pUncachedRAM, 0x48000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},
    {NULL, &m_pKernelRAM, 0x88000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},

    // TODO: There are a few swizzled mirrors of VRAM, not sure about the best way to
    // implement those.
};*/

static const int kNumMemViews = sizeof(g_views) / sizeof(MemoryView); ///< Number of mem views

void Init() {
    int flags = 0;

    for (size_t i = 0; i < ARRAY_SIZE(g_views); i++) {
        if (g_views[i].flags & MV_IS_PRIMARY_RAM)
            g_views[i].size = FCRAM_SIZE;
    }

    g_base = MemoryMap_Setup(g_views, kNumMemViews, flags, &arena);

    MemBlock_Init();

    LOG_DEBUG(HW_Memory, "initialized OK, RAM at %p (mirror at 0 @ %p)", g_heap, physical_fcram);
}

void Shutdown() {
    u32 flags = 0;
    MemoryMap_Shutdown(g_views, kNumMemViews, flags, &arena);
    arena.ReleaseSpace();

    MemBlock_Shutdown();

    g_base = nullptr;
    g_exefs_code = nullptr;
    g_system_mem = nullptr;
    g_heap = nullptr;
    g_heap_linear = nullptr;
    g_vram = nullptr;
    g_shared_mem = nullptr;
    g_dsp_mem = nullptr;
    g_kernel_mem = nullptr;

    physical_bootrom = nullptr;
    uncached_bootrom = nullptr;
    physical_exefs_code = nullptr;
    physical_system_mem = nullptr;
    physical_fcram = nullptr;
    physical_heap_gsp = nullptr;
    physical_vram = nullptr;
    physical_shared_mem = nullptr;
    physical_dsp_mem = nullptr;
    physical_kernel_mem = nullptr;

    LOG_DEBUG(HW_Memory, "shutdown OK");
}

} // namespace
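// ---------------------------------------------------------------------------
// Editor's sketch, not part of the original source: how a table like g_views
// can drive a simple virtual-to-host translation once MemoryMap_Setup has
// filled in the pointers. The struct fields mirror the usage above; the
// Translate helper itself is hypothetical (the emulators map the views so
// that plain pointer arithmetic or masking suffices in the hot path).
#include <cstddef>
#include <cstdint>

namespace TranslateSketch {

struct View {
    uint8_t** out_ptr;         // filled in by the setup code in the real thing
    uint32_t virtual_address;  // start of the emulated range
    uint32_t size;             // length of the range in bytes
};

// Scan a views-style table and turn an emulated address into a host pointer.
uint8_t* Translate(const View* views, size_t count, uint32_t vaddr) {
    for (size_t i = 0; i < count; ++i) {
        const View& v = views[i];
        if (v.out_ptr && *v.out_ptr &&
            vaddr >= v.virtual_address && vaddr - v.virtual_address < v.size) {
            return *v.out_ptr + (vaddr - v.virtual_address);
        }
    }
    return nullptr;  // unmapped; the IO region is handled by other means
}

}  // namespace TranslateSketch
// ---------------------------------------------------------------------------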
namespace Memory {

// The base pointer to the auto-mirrored arena.
u8* base = NULL;

// The MemArena class
MemArena g_arena;
// ==============

u8 *m_pPhysicalScratchPad;
u8 *m_pUncachedScratchPad;

// 64-bit: Pointers to high-mem mirrors
// 32-bit: Same as above
u8 *m_pPhysicalRAM;
u8 *m_pUncachedRAM;
u8 *m_pKernelRAM;  // RAM mirrored up to "kernel space". Fully accessible at all times currently.
u8 *m_pPhysicalRAM2;
u8 *m_pUncachedRAM2;
u8 *m_pKernelRAM2;
u8 *m_pPhysicalRAM3;
u8 *m_pUncachedRAM3;
u8 *m_pKernelRAM3;

// VRAM is mirrored 4 times. The second and fourth mirrors are swizzled.
// In practice, a game accessing the mirrors most likely is deswizzling the depth buffer.
u8 *m_pPhysicalVRAM1;
u8 *m_pPhysicalVRAM2;
u8 *m_pPhysicalVRAM3;
u8 *m_pPhysicalVRAM4;
u8 *m_pUncachedVRAM1;
u8 *m_pUncachedVRAM2;
u8 *m_pUncachedVRAM3;
u8 *m_pUncachedVRAM4;

// Holds the ending address of the PSP's user space.
// Required for HD Remasters to work properly.
// This replaces RAM_NORMAL_SIZE at runtime.
u32 g_MemorySize;
// Used to store the PSP model on game startup.
u32 g_PSPModel;

std::recursive_mutex g_shutdownLock;

// We don't declare the IO region in here since it's handled by other means.
static MemoryView views[] = {
    {&m_pPhysicalScratchPad, 0x00010000, SCRATCHPAD_SIZE, 0},
    {&m_pUncachedScratchPad, 0x40010000, SCRATCHPAD_SIZE, MV_MIRROR_PREVIOUS},
    {&m_pPhysicalVRAM1, 0x04000000, 0x00200000, 0},
    {&m_pPhysicalVRAM2, 0x04200000, 0x00200000, MV_MIRROR_PREVIOUS},
    {&m_pPhysicalVRAM3, 0x04400000, 0x00200000, MV_MIRROR_PREVIOUS},
    {&m_pPhysicalVRAM4, 0x04600000, 0x00200000, MV_MIRROR_PREVIOUS},
    {&m_pUncachedVRAM1, 0x44000000, 0x00200000, MV_MIRROR_PREVIOUS},
    {&m_pUncachedVRAM2, 0x44200000, 0x00200000, MV_MIRROR_PREVIOUS},
    {&m_pUncachedVRAM3, 0x44400000, 0x00200000, MV_MIRROR_PREVIOUS},
    {&m_pUncachedVRAM4, 0x44600000, 0x00200000, MV_MIRROR_PREVIOUS},
    {&m_pPhysicalRAM, 0x08000000, g_MemorySize, MV_IS_PRIMARY_RAM},  // only from 0x08800000 is it usable (last 24 megs)
    {&m_pUncachedRAM, 0x48000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},
    {&m_pKernelRAM, 0x88000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM | MV_KERNEL},
    // Starts at memory + 31 MB.
    {&m_pPhysicalRAM2, 0x09F00000, g_MemorySize, MV_IS_EXTRA1_RAM},
    {&m_pUncachedRAM2, 0x49F00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA1_RAM},
    {&m_pKernelRAM2, 0x89F00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA1_RAM | MV_KERNEL},
    // Starts at memory + 31 * 2 MB.
    {&m_pPhysicalRAM3, 0x0BE00000, g_MemorySize, MV_IS_EXTRA2_RAM},
    {&m_pUncachedRAM3, 0x4BE00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA2_RAM},
    {&m_pKernelRAM3, 0x8BE00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA2_RAM | MV_KERNEL},

    // TODO: There are a few swizzled mirrors of VRAM, not sure about the best way to
    // implement those.
};

static const int num_views = sizeof(views) / sizeof(MemoryView);

inline static bool CanIgnoreView(const MemoryView &view) {
#if PPSSPP_ARCH(32BIT)
    // Basically, 32-bit platforms can ignore views that are masked out anyway.
    return (view.flags & MV_MIRROR_PREVIOUS) && (view.virtual_address & ~MEMVIEW32_MASK) != 0;
#else
    return false;
#endif
}

#if defined(IOS) && PPSSPP_ARCH(64BIT)
#define SKIP(a_flags, b_flags) \
    if ((b_flags) & MV_KERNEL) \
        continue;
#else
#define SKIP(a_flags, b_flags) \
    ;
#endif

static bool Memory_TryBase(u32 flags) {
    // OK, we know where to find free space. Now grab it!
    // We just mimic the popular BAT setup.
    size_t position = 0;
    size_t last_position = 0;

    // Zero all the pointers to be sure.
    for (int i = 0; i < num_views; i++) {
        if (views[i].out_ptr)
            *views[i].out_ptr = 0;
    }

    int i;
    for (i = 0; i < num_views; i++) {
        const MemoryView &view = views[i];
        if (view.size == 0)
            continue;
        SKIP(flags, view.flags);

        if (view.flags & MV_MIRROR_PREVIOUS) {
            position = last_position;
        }
#ifndef MASKED_PSP_MEMORY
        *view.out_ptr = (u8*)g_arena.CreateView(position, view.size, base + view.virtual_address);
        if (!*view.out_ptr) {
            DEBUG_LOG(MEMMAP, "Failed at view %d", i);
            goto bail;
        }
#else
        if (CanIgnoreView(view)) {
            // This is handled by address masking in 32-bit, no view needs to be created.
            *view.out_ptr = *views[i - 1].out_ptr;
        } else {
            *view.out_ptr = (u8*)g_arena.CreateView(position, view.size, base + (view.virtual_address & MEMVIEW32_MASK));
            if (!*view.out_ptr) {
                DEBUG_LOG(MEMMAP, "Failed at view %d", i);
                goto bail;
            }
        }
#endif
        last_position = position;
        position += g_arena.roundup(view.size);
    }

    return true;

bail:
    // Argh! ERROR! Free what we grabbed so far so we can try again.
    for (int j = 0; j <= i; j++) {
        if (views[j].size == 0)
            continue;
        SKIP(flags, views[j].flags);
        if (*views[j].out_ptr) {
            if (!CanIgnoreView(views[j])) {
                g_arena.ReleaseView(*views[j].out_ptr, views[j].size);
            }
            *views[j].out_ptr = NULL;
        }
    }
    return false;
}

bool MemoryMap_Setup(u32 flags) {
#if PPSSPP_PLATFORM(UWP)
    // We reserve the memory, then simply commit in TryBase.
    base = (u8*)VirtualAllocFromApp(0, 0x10000000, MEM_RESERVE, PAGE_READWRITE);
#else
    // Figure out how much memory we need to allocate in total.
    size_t total_mem = 0;
    for (int i = 0; i < num_views; i++) {
        if (views[i].size == 0)
            continue;
        SKIP(flags, views[i].flags);
        if (!CanIgnoreView(views[i]))
            total_mem += g_arena.roundup(views[i].size);
    }

    // Grab some pagefile backed memory out of the void ...
    g_arena.GrabLowMemSpace(total_mem);
#endif

#if !PPSSPP_PLATFORM(ANDROID)
    if (g_arena.NeedsProbing()) {
        int base_attempts = 0;
#if defined(_WIN32) && PPSSPP_ARCH(32BIT)
        // Try a whole range of possible bases. Return once we got a valid one.
        uintptr_t max_base_addr = 0x7FFF0000 - 0x10000000;
        uintptr_t min_base_addr = 0x01000000;
        uintptr_t stride = 0x400000;
#else
        // iOS
        uintptr_t max_base_addr = 0x1FFFF0000ULL - 0x80000000ULL;
        uintptr_t min_base_addr = 0x100000000ULL;
        uintptr_t stride = 0x800000;
#endif
        for (uintptr_t base_addr = min_base_addr; base_addr < max_base_addr; base_addr += stride) {
            base_attempts++;
            base = (u8 *)base_addr;
            if (Memory_TryBase(flags)) {
                INFO_LOG(MEMMAP, "Found valid memory base at %p after %i tries.", base, base_attempts);
                return true;
            }
        }
        ERROR_LOG(MEMMAP, "MemoryMap_Setup: Failed finding a memory base.");
        PanicAlert("MemoryMap_Setup: Failed finding a memory base.");
        return false;
    } else
#endif
    {
#if !PPSSPP_PLATFORM(UWP)
        base = g_arena.Find4GBBase();
#endif
    }

    // Should return true...
    return Memory_TryBase(flags);
}

void MemoryMap_Shutdown(u32 flags) {
    for (int i = 0; i < num_views; i++) {
        if (views[i].size == 0)
            continue;
        SKIP(flags, views[i].flags);
        if (*views[i].out_ptr)
            g_arena.ReleaseView(*views[i].out_ptr, views[i].size);
        *views[i].out_ptr = nullptr;
    }
    g_arena.ReleaseSpace();

#if PPSSPP_PLATFORM(UWP)
    VirtualFree(base, 0, MEM_RELEASE);
#endif
}

void Init() {
    // On some 32 bit platforms, you can only map < 32 megs at a time.
    // TODO: Wait, wtf? What platforms are those? This seems bad.
    const static int MAX_MMAP_SIZE = 31 * 1024 * 1024;
    _dbg_assert_msg_(MEMMAP, g_MemorySize < MAX_MMAP_SIZE * 3, "ACK - too much memory for three mmap views.");
    for (size_t i = 0; i < ARRAY_SIZE(views); i++) {
        if (views[i].flags & MV_IS_PRIMARY_RAM)
            views[i].size = std::min((int)g_MemorySize, MAX_MMAP_SIZE);
        if (views[i].flags & MV_IS_EXTRA1_RAM)
            views[i].size = std::min(std::max((int)g_MemorySize - MAX_MMAP_SIZE, 0), MAX_MMAP_SIZE);
        if (views[i].flags & MV_IS_EXTRA2_RAM)
            views[i].size = std::min(std::max((int)g_MemorySize - MAX_MMAP_SIZE * 2, 0), MAX_MMAP_SIZE);
    }

    int flags = 0;
    MemoryMap_Setup(flags);

    INFO_LOG(MEMMAP, "Memory system initialized. Base at %p (RAM at @ %p, uncached @ %p)",
        base, m_pPhysicalRAM, m_pUncachedRAM);
}

void DoState(PointerWrap &p) {
    auto s = p.Section("Memory", 1, 3);
    if (!s)
        return;

    if (s < 2) {
        if (!g_RemasterMode)
            g_MemorySize = RAM_NORMAL_SIZE;
        g_PSPModel = PSP_MODEL_FAT;
    } else if (s == 2) {
        // In version 2, we determine memory size based on PSP model.
        u32 oldMemorySize = g_MemorySize;
        p.Do(g_PSPModel);
        p.DoMarker("PSPModel");
        if (!g_RemasterMode) {
            g_MemorySize = g_PSPModel == PSP_MODEL_FAT ? RAM_NORMAL_SIZE : RAM_DOUBLE_SIZE;
            if (oldMemorySize < g_MemorySize) {
                Shutdown();
                Init();
            }
        }
    } else {
        // In version 3, we started just saving the memory size directly.
        // It's no longer based strictly on the PSP model.
        u32 oldMemorySize = g_MemorySize;
        p.Do(g_PSPModel);
        p.DoMarker("PSPModel");
        p.Do(g_MemorySize);
        if (oldMemorySize != g_MemorySize) {
            Shutdown();
            Init();
        }
    }

    p.DoArray(GetPointer(PSP_GetKernelMemoryBase()), g_MemorySize);
    p.DoMarker("RAM");

    p.DoArray(m_pPhysicalVRAM1, VRAM_SIZE);
    p.DoMarker("VRAM");
    p.DoArray(m_pPhysicalScratchPad, SCRATCHPAD_SIZE);
    p.DoMarker("ScratchPad");
}

void Shutdown() {
    std::lock_guard<std::recursive_mutex> guard(g_shutdownLock);
    u32 flags = 0;
    MemoryMap_Shutdown(flags);
    base = nullptr;
    DEBUG_LOG(MEMMAP, "Memory system shut down.");
}

void Clear() {
    if (m_pPhysicalRAM)
        memset(GetPointerUnchecked(PSP_GetKernelMemoryBase()), 0, g_MemorySize);
    if (m_pPhysicalScratchPad)
        memset(m_pPhysicalScratchPad, 0, SCRATCHPAD_SIZE);
    if (m_pPhysicalVRAM1)
        memset(m_pPhysicalVRAM1, 0, VRAM_SIZE);
}

bool IsActive() {
    return base != nullptr;
}

// Wanting to avoid include pollution, MemMap.h is included a lot.
MemoryInitedLock::MemoryInitedLock() {
    g_shutdownLock.lock();
}

MemoryInitedLock::~MemoryInitedLock() {
    g_shutdownLock.unlock();
}

MemoryInitedLock Lock() {
    return MemoryInitedLock();
}

__forceinline static Opcode Read_Instruction(u32 address, bool resolveReplacements, Opcode inst) {
    if (!MIPS_IS_EMUHACK(inst.encoding)) {
        return inst;
    }

    if (MIPS_IS_RUNBLOCK(inst.encoding) && MIPSComp::jit) {
        inst = MIPSComp::jit->GetOriginalOp(inst);
        if (resolveReplacements && MIPS_IS_REPLACEMENT(inst)) {
            u32 op;
            if (GetReplacedOpAt(address, &op)) {
                if (MIPS_IS_EMUHACK(op)) {
                    ERROR_LOG(MEMMAP, "WTF 1");
                    return Opcode(op);
                } else {
                    return Opcode(op);
                }
            } else {
                ERROR_LOG(MEMMAP, "Replacement, but no replacement op? %08x", inst.encoding);
            }
        }
        return inst;
    } else if (resolveReplacements && MIPS_IS_REPLACEMENT(inst.encoding)) {
        u32 op;
        if (GetReplacedOpAt(address, &op)) {
            if (MIPS_IS_EMUHACK(op)) {
                ERROR_LOG(MEMMAP, "WTF 2");
                return Opcode(op);
            } else {
                return Opcode(op);
            }
        } else {
            return inst;
        }
    } else {
        return inst;
    }
}

Opcode Read_Instruction(u32 address, bool resolveReplacements) {
    Opcode inst = Opcode(Read_U32(address));
    return Read_Instruction(address, resolveReplacements, inst);
}

Opcode ReadUnchecked_Instruction(u32 address, bool resolveReplacements) {
    Opcode inst = Opcode(ReadUnchecked_U32(address));
    return Read_Instruction(address, resolveReplacements, inst);
}

Opcode Read_Opcode_JIT(u32 address) {
    Opcode inst = Opcode(Read_U32(address));
    if (MIPS_IS_RUNBLOCK(inst.encoding) && MIPSComp::jit) {
        return MIPSComp::jit->GetOriginalOp(inst);
    } else {
        return inst;
    }
}

// WARNING! No checks!
// We assume that _Address is cached
void Write_Opcode_JIT(const u32 _Address, const Opcode& _Value) {
    Memory::WriteUnchecked_U32(_Value.encoding, _Address);
}

void Memset(const u32 _Address, const u8 _iValue, const u32 _iLength) {
    u8 *ptr = GetPointer(_Address);
    if (ptr != NULL) {
        memset(ptr, _iValue, _iLength);
    } else {
        for (size_t i = 0; i < _iLength; i++)
            Write_U8(_iValue, (u32)(_Address + i));
    }
#ifndef MOBILE_DEVICE
    CBreakPoints::ExecMemCheck(_Address, true, _iLength, currentMIPS->pc);
#endif
}

} // namespace
namespace Memory {

// The base pointer to the auto-mirrored arena.
u8* base = NULL;

// The MemArena class
MemArena g_arena;
// ==============

// 64-bit: Pointers to low-mem (sub-0x10000000) mirror
// 32-bit: Same as the corresponding physical/virtual pointers.
u8 *m_pRAM;
u8 *m_pScratchPad;
u8 *m_pVRAM;

u8 *m_pPhysicalScratchPad;
u8 *m_pUncachedScratchPad;

// 64-bit: Pointers to high-mem mirrors
// 32-bit: Same as above
u8 *m_pPhysicalRAM;
u8 *m_pUncachedRAM;
u8 *m_pKernelRAM;  // RAM mirrored up to "kernel space". Fully accessible at all times currently.

u8 *m_pPhysicalVRAM;
u8 *m_pUncachedVRAM;

// We don't declare the IO region in here since it's handled by other means.
static const MemoryView views[] = {
    {&m_pScratchPad, &m_pPhysicalScratchPad, 0x00010000, SCRATCHPAD_SIZE, 0},
    {NULL, &m_pUncachedScratchPad, 0x40010000, SCRATCHPAD_SIZE, MV_MIRROR_PREVIOUS},
    {&m_pVRAM, &m_pPhysicalVRAM, 0x04000000, 0x00800000, 0},
    {NULL, &m_pUncachedVRAM, 0x44000000, 0x00800000, MV_MIRROR_PREVIOUS},
    {&m_pRAM, &m_pPhysicalRAM, 0x08000000, RAM_SIZE, 0},  // only from 0x08800000 is it usable (last 24 megs)
    {NULL, &m_pUncachedRAM, 0x48000000, RAM_SIZE, MV_MIRROR_PREVIOUS},
    {NULL, &m_pKernelRAM, 0x88000000, RAM_SIZE, MV_MIRROR_PREVIOUS},

    // TODO: There are a few swizzled mirrors of VRAM, not sure about the best way to
    // implement those.
};

static const int num_views = sizeof(views) / sizeof(MemoryView);

void Init() {
    int flags = 0;
    base = MemoryMap_Setup(views, num_views, flags, &g_arena);
    INFO_LOG(MEMMAP, "Memory system initialized. RAM at %p (mirror at 0 @ %p, uncached @ %p)",
        m_pRAM, m_pPhysicalRAM, m_pUncachedRAM);
}

void DoState(PointerWrap &p) {
    p.DoArray(m_pRAM, RAM_SIZE);
    p.DoMarker("RAM");
    p.DoArray(m_pVRAM, VRAM_SIZE);
    p.DoMarker("VRAM");
    p.DoArray(m_pScratchPad, SCRATCHPAD_SIZE);
    p.DoMarker("ScratchPad");
}

void Shutdown() {
    u32 flags = 0;
    MemoryMap_Shutdown(views, num_views, flags, &g_arena);
    g_arena.ReleaseSpace();
    base = NULL;
    INFO_LOG(MEMMAP, "Memory system shut down.");
}

void Clear() {
    if (m_pRAM)
        memset(m_pRAM, 0, RAM_SIZE);
    if (m_pScratchPad)
        memset(m_pScratchPad, 0, SCRATCHPAD_SIZE);
    if (m_pVRAM)
        memset(m_pVRAM, 0, VRAM_SIZE);
}

bool AreMemoryBreakpointsActivated() {
#ifndef ENABLE_MEM_CHECK
    return false;
#else
    return true;
#endif
}

u32 Read_Instruction(u32 address) {
    u32 inst = Read_U32(address);
    if (MIPS_IS_EMUHACK(inst) && MIPSComp::jit)
        return MIPSComp::jit->GetBlockCache()->GetOriginalFirstOp(inst & MIPS_EMUHACK_VALUE_MASK);
    else
        return inst;
}

u32 Read_Opcode_JIT(u32 address) {
    return Read_Instruction(address);
}

// WARNING! No checks!
// We assume that _Address is cached
void Write_Opcode_JIT(const u32 _Address, const u32 _Value) {
    Memory::WriteUnchecked_U32(_Value, _Address);
}

void Memset(const u32 _Address, const u8 _iValue, const u32 _iLength) {
    u8 *ptr = GetPointer(_Address);
    if (ptr != NULL) {
        memset(ptr, _iValue, _iLength);
    } else {
        for (size_t i = 0; i < _iLength; i++)
            Write_U8(_iValue, (u32)(_Address + i));
    }
}

void Memcpy(const u32 to_address, const void *from_data, const u32 len) {
    memcpy(GetPointer(to_address), from_data, len);
}

void Memcpy(void *to_data, const u32 from_address, const u32 len) {
    memcpy(to_data, GetPointer(from_address), len);
}

void GetString(std::string &_string, const u32 em_address) {
    char stringBuffer[2048];
    char *string = stringBuffer;
    char c;
    u32 addr = em_address;
    // Bounded so an unterminated guest string can't overrun the local buffer.
    while ((c = Read_U8(addr)) && string < stringBuffer + sizeof(stringBuffer) - 1) {
        *string++ = c;
        addr++;
    }
    *string++ = '\0';
    _string = stringBuffer;
}

const char *GetAddressName(u32 address) {
    // TODO, follow GetPointer
    return "[mem]";
}

} // namespace
namespace Memory {

u8* g_base = NULL;                ///< The base pointer to the auto-mirrored arena.

MemArena g_arena;                 ///< The MemArena class

u8* g_heap_gsp = NULL;            ///< GSP heap (main memory)
u8* g_heap = NULL;                ///< Application heap (main memory)
u8* g_vram = NULL;                ///< Video memory (VRAM) pointer

u8* g_physical_bootrom = NULL;    ///< Bootrom physical memory
u8* g_uncached_bootrom = NULL;

u8* g_physical_fcram = NULL;      ///< Main physical memory (FCRAM)
u8* g_physical_heap_gsp = NULL;
u8* g_physical_vram = NULL;       ///< Video physical memory (VRAM)
u8* g_physical_scratchpad = NULL; ///< Scratchpad memory used for main thread stack

// We don't declare the IO region in here since it's handled by other means.
static MemoryView g_views[] = {
    {&g_vram, &g_physical_vram, VRAM_VADDR, VRAM_SIZE, 0},
    {&g_heap_gsp, &g_physical_heap_gsp, HEAP_GSP_VADDR, HEAP_GSP_SIZE, 0},
    {&g_heap, &g_physical_fcram, HEAP_VADDR, HEAP_SIZE, MV_IS_PRIMARY_RAM},
};

/*static MemoryView views[] = {
    {&m_pScratchPad, &m_pPhysicalScratchPad, 0x00010000, SCRATCHPAD_SIZE, 0},
    {NULL, &m_pUncachedScratchPad, 0x40010000, SCRATCHPAD_SIZE, MV_MIRROR_PREVIOUS},
    {&m_pVRAM, &m_pPhysicalVRAM, 0x04000000, 0x00800000, 0},
    {NULL, &m_pUncachedVRAM, 0x44000000, 0x00800000, MV_MIRROR_PREVIOUS},
    {&m_pRAM, &m_pPhysicalRAM, 0x08000000, g_MemorySize, MV_IS_PRIMARY_RAM},  // only from 0x08800000 is it usable (last 24 megs)
    {NULL, &m_pUncachedRAM, 0x48000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},
    {NULL, &m_pKernelRAM, 0x88000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},

    // TODO: There are a few swizzled mirrors of VRAM, not sure about the best way to
    // implement those.
};*/

static const int kNumMemViews = sizeof(g_views) / sizeof(MemoryView); ///< Number of mem views

void Init() {
    int flags = 0;

    for (size_t i = 0; i < ARRAY_SIZE(g_views); i++) {
        if (g_views[i].flags & MV_IS_PRIMARY_RAM)
            g_views[i].size = FCRAM_SIZE;
    }

    g_base = MemoryMap_Setup(g_views, kNumMemViews, flags, &g_arena);

    NOTICE_LOG(MEMMAP, "initialized OK, RAM at %p (mirror at 0 @ %p)", g_heap, g_physical_fcram);
}

void Shutdown() {
    u32 flags = 0;
    MemoryMap_Shutdown(g_views, kNumMemViews, flags, &g_arena);
    g_arena.ReleaseSpace();

    g_base = NULL;
    NOTICE_LOG(MEMMAP, "shutdown OK");
}

} // namespace