Example #1
// Requires MSR.DR, MSR.IR
// There's no perfect way to do this, it's just a heuristic.
// We require at least 2 stack frames; if the stack is shallower than that, it won't work.
static bool IsStackSane()
{
  _dbg_assert_(ACTIONREPLAY, UReg_MSR(MSR).DR && UReg_MSR(MSR).IR);

  // Check the stack pointer
  u32 SP = GPR(1);
  if (!PowerPC::HostIsRAMAddress(SP))
    return false;

  // Read the frame pointer from the stack (find 2nd frame from top), assert that it makes sense
  u32 next_SP = PowerPC::HostRead_U32(SP);
  if (next_SP <= SP || !PowerPC::HostIsRAMAddress(next_SP) ||
      !PowerPC::HostIsRAMAddress(next_SP + 4))
    return false;

  // Check that the link register makes sense (that it points to a valid IBAT address)
  auto insn = PowerPC::TryReadInstruction(PowerPC::HostRead_U32(next_SP + 4));
  if (!insn.valid || !insn.hex)
    return false;

  return true;
}
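// A minimal usage sketch, not Dolphin code: a caller that only does work which
// borrows the guest stack when translation is on and the heuristic above
// accepts the current stack layout. InjectStackDependentPatch() is a
// hypothetical helper used purely for illustration.
static void MaybeRunStackDependentPatch()
{
  if (!UReg_MSR(MSR).DR || !UReg_MSR(MSR).IR)
    return;  // IsStackSane() asserts these, so check them first
  if (!IsStackSane())
    return;
  InjectStackDependentPatch();  // hypothetical
}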
void JitILBase::psq_st(UGeckoInstruction inst)
{
  INSTRUCTION_START
  JITDISABLE(bJITLoadStorePairedOff);
  FALLBACK_IF(jo.memcheck || inst.W);

  // For performance, the AsmCommon routines assume address translation is on.
  FALLBACK_IF(!UReg_MSR(MSR).DR);

  IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_12);
  IREmitter::InstLoc val;

  if (inst.RA)
    addr = ibuild.EmitAdd(addr, ibuild.EmitLoadGReg(inst.RA));

  // psq_stu (OPCD 61) is the update form: the effective address is written back to RA.
  if (inst.OPCD == 61)
    ibuild.EmitStoreGReg(addr, inst.RA);

  val = ibuild.EmitLoadFReg(inst.RS);
  val = ibuild.EmitCompactMRegToPacked(val);
  ibuild.EmitStorePaired(val, addr, inst.I);
}
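// Standalone sketch (not Dolphin code) of the effective-address computation the
// IR above encodes: EA = (RA ? GPR[RA] : 0) + sign-extended SIMM_12, with the
// update forms writing the EA back into RA.
#include <cstdint>

static uint32_t ComputePsqEffectiveAddress(uint32_t* gpr, unsigned ra,
                                           int32_t simm12, bool update)
{
  uint32_t ea = static_cast<uint32_t>(simm12);
  if (ra != 0)
    ea += gpr[ra];
  if (update)  // psq_lu / psq_stu
    gpr[ra] = ea;
  return ea;
}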
void JitILBase::psq_l(UGeckoInstruction inst)
{
  INSTRUCTION_START
  JITDISABLE(bJITLoadStorePairedOff);
  FALLBACK_IF(jo.memcheck || inst.W);

  // For performance, the AsmCommon routines assume address translation is on.
  FALLBACK_IF(!UReg_MSR(MSR).DR);

  IREmitter::InstLoc addr = ibuild.EmitIntConst(inst.SIMM_12);
  IREmitter::InstLoc val;

  if (inst.RA)
    addr = ibuild.EmitAdd(addr, ibuild.EmitLoadGReg(inst.RA));

  // psq_lu (OPCD 57) is the update form: the effective address is written back to RA.
  if (inst.OPCD == 57)
    ibuild.EmitStoreGReg(addr, inst.RA);

  val = ibuild.EmitLoadPaired(
      addr,
      inst.I | (inst.W << 3));  // The lower 3 bits are the GQR index; the next bit is inst.W.
  val = ibuild.EmitExpandPackedToMReg(val);
  ibuild.EmitStoreFReg(val, inst.RD);
}
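// Sketch of the operand packing passed to EmitLoadPaired above: the low three
// bits select one of the eight graphics quantization registers (GQR0-GQR7),
// and bit 3 carries the instruction's W flag (single vs. paired access).
#include <cstdint>

static uint32_t PackQuantizerOperand(uint32_t gqr_index, bool w)
{
  return (gqr_index & 0x7u) | (static_cast<uint32_t>(w) << 3);
}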
Example #4
template <XCheckTLBFlag flag, typename T>
__forceinline static void WriteToHardware(u32 em_address, const T data)
{
	int segment = em_address >> 28;
	// Quick check for an address that can't meet any of the following conditions,
	// to speed up the MMU path.
	bool performTranslation = UReg_MSR(MSR).DR;

	if (!BitSet32(0xCFC)[segment] && performTranslation)
	{
		// First, let's check for FIFO writes, since they are probably the most common
		// reason we end up in this function.
		// Note that we must mask the address to correctly emulate certain games;
		// Pac-Man World 3 in particular is affected by this.
		if (flag == FLAG_WRITE && (em_address & 0xFFFFF000) == 0xCC008000)
		{
			switch (sizeof(T))
			{
			case 1: GPFifo::Write8((u8)data); return;
			case 2: GPFifo::Write16((u16)data); return;
			case 4: GPFifo::Write32((u32)data); return;
			case 8: GPFifo::Write64((u64)data); return;
			}
		}
		if (flag == FLAG_WRITE && (em_address & 0xF8000000) == 0xC8000000)
		{
			if (em_address < 0xcc000000)
			{
				// TODO: This only works correctly for 32-bit writes.
				EFB_Write((u32)data, em_address);
				return;
			}
			else
			{
				Memory::mmio_mapping->Write(em_address & 0x0FFFFFFF, data);
				return;
			}
		}
		if (segment == 0x0 || segment == 0x8 || segment == 0xC)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			*(T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK] = bswap(data);
			return;
		}
		if (Memory::m_pEXRAM && (segment == 0x9 || segment == 0xD) && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			// Handle EXRAM.
			// TODO: Is this supposed to be mirrored like main RAM?
			*(T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF] = bswap(data);
			return;
		}
		if (segment == 0xE && (em_address < (0xE0000000 + Memory::L1_CACHE_SIZE)))
		{
			*(T*)&Memory::m_pL1Cache[em_address & 0x0FFFFFFF] = bswap(data);
			return;
		}
	}

	if (Memory::bFakeVMEM && performTranslation && (segment == 0x7 || segment == 0x4))
	{
		// fake VMEM
		*(T*)&Memory::m_pFakeVMEM[em_address & Memory::FAKEVMEM_MASK] = bswap(data);
		return;
	}

	if (!performTranslation)
	{
		if (flag == FLAG_WRITE && (em_address & 0xFFFFF000) == 0x0C008000)
		{
			switch (sizeof(T))
			{
			case 1: GPFifo::Write8((u8)data); return;
			case 2: GPFifo::Write16((u16)data); return;
			case 4: GPFifo::Write32((u32)data); return;
			case 8: GPFifo::Write64((u64)data); return;
			}
		}
		if (flag == FLAG_WRITE && (em_address & 0xF8000000) == 0x08000000)
		{
			if (em_address < 0x0c000000)
			{
				// TODO: This only works correctly for 32-bit writes.
				EFB_Write((u32)data, em_address);
				return;
			}
			else
			{
				Memory::mmio_mapping->Write(em_address, data);
				return;
			}
		}
		if (segment == 0x0)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			*(T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK] = bswap(data);
			return;
		}
		if (Memory::m_pEXRAM && segment == 0x1 && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			*(T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF] = bswap(data);
			return;
		}
		PanicAlert("Unable to resolve write address %x PC %x", em_address, PC);
		return;
	}

	// MMU: Do page table translation
	u32 tlb_addr = TranslateAddress<flag>(em_address);
	if (tlb_addr == 0)
	{
		if (flag == FLAG_WRITE)
			GenerateDSIException(em_address, true);
		return;
	}

	// Handle stores that cross page boundaries (ewwww)
	if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
	{
		T val = bswap(data);

		// We need to check both addresses before writing in case there's a DSI.
		u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
		u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
		if (tlb_addr_next_page == 0)
		{
			if (flag == FLAG_WRITE)
				GenerateDSIException(em_address_next_page, true);
			return;
		}
		for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++, val >>= 8)
		{
			if (addr == em_address_next_page)
				tlb_addr = tlb_addr_next_page;
			Memory::physical_base[tlb_addr] = (u8)val;
		}
		return;
	}

	// The easy case!
	*(T*)&Memory::physical_base[tlb_addr] = bswap(data);
}
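// Standalone sketch (not Dolphin code) of the page-crossing store loop above:
// the value is written big-endian, one byte at a time, and the destination
// switches to the second page's translation once the guest address crosses
// the page boundary.
#include <cstdint>

static void WriteU32AcrossPages(uint8_t* physical, uint32_t em_address,
                                uint32_t tlb_addr, uint32_t em_address_next_page,
                                uint32_t tlb_addr_next_page, uint32_t data)
{
	for (uint32_t i = 0; i < sizeof(data); i++, tlb_addr++)
	{
		if (em_address + i == em_address_next_page)
			tlb_addr = tlb_addr_next_page;
		// Most-significant byte first, matching the (u8)val / val >>= 8 pattern
		// applied to the byte-swapped value in the original loop.
		physical[tlb_addr] = static_cast<uint8_t>(data >> (8 * (sizeof(data) - 1 - i)));
	}
}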
Example #5
template <XCheckTLBFlag flag, typename T>
__forceinline static T ReadFromHardware(const u32 em_address)
{
	int segment = em_address >> 28;
	bool performTranslation = UReg_MSR(MSR).DR;

	// Quick check for an address that can't meet any of the following conditions,
	// to speed up the MMU path.
	if (!BitSet32(0xCFC)[segment] && performTranslation)
	{
		// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
		if (flag == FLAG_READ && (em_address & 0xF8000000) == 0xC8000000)
		{
			if (em_address < 0xcc000000)
				return EFB_Read(em_address);
			else
				return (T)Memory::mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address & 0x0FFFFFFF);
		}
		if (segment == 0x0 || segment == 0x8 || segment == 0xC)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			return bswap((*(const T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK]));
		}
		if (Memory::m_pEXRAM && (segment == 0x9 || segment == 0xD) && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			// Handle EXRAM.
			// TODO: Is this supposed to be mirrored like main RAM?
			return bswap((*(const T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF]));
		}
		if (segment == 0xE && (em_address < (0xE0000000 + Memory::L1_CACHE_SIZE)))
		{
			return bswap((*(const T*)&Memory::m_pL1Cache[em_address & 0x0FFFFFFF]));
		}
	}

	if (Memory::bFakeVMEM && performTranslation && (segment == 0x7 || segment == 0x4))
	{
		// fake VMEM
		return bswap((*(const T*)&Memory::m_pFakeVMEM[em_address & Memory::FAKEVMEM_MASK]));
	}

	if (!performTranslation)
	{
		if (flag == FLAG_READ && (em_address & 0xF8000000) == 0x08000000)
		{
			if (em_address < 0x0c000000)
				return EFB_Read(em_address);
			else
				return (T)Memory::mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address);
		}
		if (segment == 0x0)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			return bswap((*(const T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK]));
		}
		if (Memory::m_pEXRAM && segment == 0x1 && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			return bswap((*(const T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF]));
		}
		PanicAlert("Unable to resolve read address %x PC %x", em_address, PC);
		return 0;
	}

	// MMU: Do page table translation
	u32 tlb_addr = TranslateAddress<flag>(em_address);
	if (tlb_addr == 0)
	{
		if (flag == FLAG_READ)
			GenerateDSIException(em_address, false);
		return 0;
	}

	// Handle loads that cross page boundaries (ewwww)
	// The alignment check isn't strictly necessary, but since this is a rare slow path, it provides a faster
	// (1 instruction on x86) bailout.
	if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
	{
		// This could be unaligned down to the byte level... hopefully this is rare, so doing it this
		// way isn't too terrible.
		// TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
		// Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
		u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
		u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
		if (tlb_addr == 0 || tlb_addr_next_page == 0)
		{
			if (flag == FLAG_READ)
				GenerateDSIException(em_address_next_page, false);
			return 0;
		}
		T var = 0;
		for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
		{
			if (addr == em_address_next_page)
				tlb_addr = tlb_addr_next_page;
			var = (var << 8) | Memory::physical_base[tlb_addr];
		}
		return var;
	}

	// The easy case!
	return bswap(*(const T*)&Memory::physical_base[tlb_addr]);
}
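// The reads and writes above go through a width-matched bswap(); a minimal
// sketch of the 32-bit case, assuming a little-endian host and the console's
// big-endian memory:
#include <cstdint>

static inline uint32_t bswap32(uint32_t value)
{
	return (value >> 24) | ((value >> 8) & 0x0000FF00u) |
	       ((value << 8) & 0x00FF0000u) | (value << 24);
}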