Example No. 1
template <typename T>
inline void ReadFromHardware(T &_var, const u32 em_address, const u32 effective_address, Memory::XCheckTLBFlag flag)
{
	// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
	if ((em_address & 0xC8000000) == 0xC8000000)
	{
		if (em_address < 0xcc000000)
			_var = EFB_Read(em_address);
		else
			mmio_mapping->Read(em_address, &_var);
	}
	else if (((em_address & 0xF0000000) == 0x80000000) ||
		((em_address & 0xF0000000) == 0xC0000000) ||
		((em_address & 0xF0000000) == 0x00000000))
	{
		_var = bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
	}
	else if (m_pEXRAM && (((em_address & 0xF0000000) == 0x90000000) ||
		((em_address & 0xF0000000) == 0xD0000000) ||
		((em_address & 0xF0000000) == 0x10000000)))
	{
		_var = bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
	}
	else if ((em_address >= 0xE0000000) && (em_address < (0xE0000000+L1_CACHE_SIZE)))
	{
		_var = bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
	}
	else if (bFakeVMEM && (((em_address & 0xF0000000) == 0x70000000) ||
		((em_address & 0xF0000000) == 0x40000000)))
	{
		// fake VMEM
		_var = bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
	}
	else
	{
		// MMU
		u32 tlb_addr = TranslateAddress(em_address, flag);
		if (tlb_addr == 0)
		{
			if (flag == FLAG_READ)
			{
				GenerateDSIException(em_address, false);
			}
		}
		else
		{
			_var = bswap((*(const T*)&m_pRAM[tlb_addr & RAM_MASK]));
		}
	}
}
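
All four examples assume a bswap helper that converts the guest's big-endian values into host byte order before they are handed back to the emulated CPU core. A minimal sketch of what such a helper could look like, assuming a little-endian host and the overload set implied by the casts above (these bodies are an assumption for illustration, not the emulator's actual swap implementation):

#include <cstdint>

// Hypothetical byte-swap overloads; a real implementation would likely map
// these onto compiler intrinsics, but plain shifts and masks behave the same.
inline uint8_t bswap(uint8_t val)
{
	return val; // single bytes have no byte order
}
inline uint16_t bswap(uint16_t val)
{
	return (uint16_t)((val << 8) | (val >> 8));
}
inline uint32_t bswap(uint32_t val)
{
	return (val << 24) | ((val << 8) & 0x00FF0000) |
	       ((val >> 8) & 0x0000FF00) | (val >> 24);
}
inline uint64_t bswap(uint64_t val)
{
	return ((uint64_t)bswap((uint32_t)(val & 0xFFFFFFFF)) << 32) |
	       bswap((uint32_t)(val >> 32));
}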
Example No. 2
template <XCheckTLBFlag flag, typename T>
__forceinline T ReadFromHardware(const u32 em_address)
{
	int segment = em_address >> 28;
	// Quick check for an address that can't meet any of the following conditions,
	// to speed up the MMU path.
	if (!BitSet32(0xCFC)[segment])
	{
		// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
		if ((em_address & 0xC8000000) == 0xC8000000)
		{
			if (em_address < 0xcc000000)
				return EFB_Read(em_address);
			else
				return (T)mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address);
		}
		else if (segment == 0x8 || segment == 0xC || segment == 0x0)
		{
			return bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
		}
		else if (m_pEXRAM && (segment == 0x9 || segment == 0xD || segment == 0x1))
		{
			return bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
		}
		else if (segment == 0xE && (em_address < (0xE0000000 + L1_CACHE_SIZE)))
		{
			return bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
		}
	}

	if (bFakeVMEM && (segment == 0x7 || segment == 0x4))
	{
		// fake VMEM
		return bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
	}

	// MMU: Do page table translation
	u32 tlb_addr = TranslateAddress<flag>(em_address);
	if (tlb_addr == 0)
	{
		if (flag == FLAG_READ)
			GenerateDSIException(em_address, false);
		return 0;
	}

	// Handle loads that cross page boundaries (ewwww)
	// The alignment check isn't strictly necessary, but since this is a rare slow path, it provides a faster
	// (1 instruction on x86) bailout.
	if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
	{
		// This could be unaligned down to the byte level... hopefully this is rare, so doing it this
		// way isn't too terrible.
		// TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
		// Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
		u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
		u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
		if (tlb_addr == 0 || tlb_addr_next_page == 0)
		{
			if (flag == FLAG_READ)
				GenerateDSIException(em_address_next_page, false);
			return 0;
		}
		T var = 0;
		for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
		{
			if (addr == em_address_next_page)
				tlb_addr = tlb_addr_next_page;
			var = (var << 8) | Memory::base[tlb_addr];
		}
		return var;
	}

	// The easy case!
	return bswap(*(const T*)&Memory::base[tlb_addr]);
}
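
The !BitSet32(0xCFC)[segment] guard in Example No. 2 is a bitmask lookup on the top four address bits: segments 0x2-0x7 and 0xA-0xB can never match any of the fast-path conditions that follow, so marking them in the mask lets those addresses skip straight to the fake-VMEM/MMU handling. A small standalone check of that mask, assuming BitSet32 simply indexes individual bits of a 32-bit word:

#include <cstdint>
#include <cstdio>

int main()
{
	const uint32_t slow_segments = 0xCFC; // bits 2-7 and 10-11 set
	for (uint32_t segment = 0; segment < 16; segment++)
	{
		bool fast_path = !((slow_segments >> segment) & 1);
		std::printf("segment 0x%X -> %s\n", segment,
		            fast_path ? "may hit a fast-path condition"
		                      : "skips to fake VMEM / MMU translation");
	}
	return 0;
}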
Example No. 3
template <typename T>
inline void ReadFromHardware(T &_var, const u32 em_address, const u32 effective_address, Memory::XCheckTLBFlag flag)
{
	// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
	if ((em_address & 0xC8000000) == 0xC8000000)
	{
		if (em_address < 0xcc000000)
			_var = EFB_Read(em_address);
		else
			_var = mmio_mapping->Read<T>(em_address);
	}
	else if (((em_address & 0xF0000000) == 0x80000000) ||
		((em_address & 0xF0000000) == 0xC0000000) ||
		((em_address & 0xF0000000) == 0x00000000))
	{
		_var = bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
	}
	else if (m_pEXRAM && (((em_address & 0xF0000000) == 0x90000000) ||
		((em_address & 0xF0000000) == 0xD0000000) ||
		((em_address & 0xF0000000) == 0x10000000)))
	{
		_var = bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
	}
	else if ((em_address >= 0xE0000000) && (em_address < (0xE0000000+L1_CACHE_SIZE)))
	{
		_var = bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
	}
	else if (bFakeVMEM && (((em_address & 0xF0000000) == 0x70000000) ||
		((em_address & 0xF0000000) == 0x40000000)))
	{
		// fake VMEM
		_var = bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
	}
	else
	{
		// MMU
		// Handle loads that cross page boundaries (ewwww)
		if (sizeof(T) > 1 && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
		{
			_var = 0;
			// This could be unaligned down to the byte level... hopefully this is rare, so doing it this
			// way isn't too terrible.
			// TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
			// Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
			u32 tlb_addr = TranslateAddress(em_address, flag);
			for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
			{
				// Start of the new page... translate the address again!
				if (!(addr & (HW_PAGE_SIZE-1)))
					tlb_addr = TranslateAddress(addr, flag);
				// Important: we need to generate the DSI on the first access that faulted,
				// NOT on the address of the start of the load.
				if (tlb_addr == 0)
				{
					if (flag == FLAG_READ)
					{
						GenerateDSIException(addr, false);
						break;
					}
				}
				else
				{
					_var <<= 8;
					_var |= m_pRAM[tlb_addr & RAM_MASK];
				}
			}
		}
		else
		{
			u32 tlb_addr = TranslateAddress(em_address, flag);
			if (tlb_addr == 0)
			{
				if (flag == FLAG_READ)
				{
					GenerateDSIException(em_address, false);
				}
			}
			else
			{
				_var = bswap((*(const T*)&m_pRAM[tlb_addr & RAM_MASK]));
			}
		}
	}
}
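
The page-crossing branch above assembles the value one byte at a time with _var = (_var << 8) | byte. Because the bytes are fetched in guest (big-endian) order, shifting the accumulator left before OR-ing in each byte yields the same result as the bswap used on the aligned path, independent of host endianness. A minimal sketch of that accumulation in isolation (the sample bytes are made up for illustration):

#include <cstdint>
#include <cstdio>

int main()
{
	// Four consecutive guest bytes representing the big-endian value 0x12345678.
	const uint8_t guest_bytes[4] = {0x12, 0x34, 0x56, 0x78};
	uint32_t var = 0;
	for (int i = 0; i < 4; i++)
		var = (var << 8) | guest_bytes[i];
	std::printf("0x%08X\n", var); // prints 0x12345678 on any host
	return 0;
}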
Example No. 4
template <XCheckTLBFlag flag, typename T>
__forceinline static T ReadFromHardware(const u32 em_address)
{
	int segment = em_address >> 28;
	bool performTranslation = UReg_MSR(MSR).DR;

	// Quick check for an address that can't meet any of the following conditions,
	// to speed up the MMU path.
	if (!BitSet32(0xCFC)[segment] && performTranslation)
	{
		// TODO: Figure out the fastest order of tests for both read and write (they are probably different).
		if (flag == FLAG_READ && (em_address & 0xF8000000) == 0xC8000000)
		{
			if (em_address < 0xcc000000)
				return EFB_Read(em_address);
			else
				return (T)Memory::mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address & 0x0FFFFFFF);
		}
		if (segment == 0x0 || segment == 0x8 || segment == 0xC)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			return bswap((*(const T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK]));
		}
		if (Memory::m_pEXRAM && (segment == 0x9 || segment == 0xD) && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			// Handle EXRAM.
			// TODO: Is this supposed to be mirrored like main RAM?
			return bswap((*(const T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF]));
		}
		if (segment == 0xE && (em_address < (0xE0000000 + Memory::L1_CACHE_SIZE)))
		{
			return bswap((*(const T*)&Memory::m_pL1Cache[em_address & 0x0FFFFFFF]));
		}
	}

	if (Memory::bFakeVMEM && performTranslation && (segment == 0x7 || segment == 0x4))
	{
		// fake VMEM
		return bswap((*(const T*)&Memory::m_pFakeVMEM[em_address & Memory::FAKEVMEM_MASK]));
	}

	if (!performTranslation)
	{
		if (flag == FLAG_READ && (em_address & 0xF8000000) == 0x08000000)
		{
			if (em_address < 0x0c000000)
				return EFB_Read(em_address);
			else
				return (T)Memory::mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address);
		}
		if (segment == 0x0)
		{
			// Handle RAM; the masking intentionally discards bits (essentially creating
			// mirrors of memory).
			// TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
			return bswap((*(const T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK]));
		}
		if (Memory::m_pEXRAM && segment == 0x1 && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
		{
			return bswap((*(const T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF]));
		}
		PanicAlert("Unable to resolve read address %x PC %x", em_address, PC);
		return 0;
	}

	// MMU: Do page table translation
	u32 tlb_addr = TranslateAddress<flag>(em_address);
	if (tlb_addr == 0)
	{
		if (flag == FLAG_READ)
			GenerateDSIException(em_address, false);
		return 0;
	}

	// Handle loads that cross page boundaries (ewwww)
	// The alignment check isn't strictly necessary, but since this is a rare slow path, it provides a faster
	// (1 instruction on x86) bailout.
	if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
	{
		// This could be unaligned down to the byte level... hopefully this is rare, so doing it this
		// way isn't too terrible.
		// TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
		// Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
		u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
		u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
		if (tlb_addr == 0 || tlb_addr_next_page == 0)
		{
			if (flag == FLAG_READ)
				GenerateDSIException(em_address_next_page, false);
			return 0;
		}
		T var = 0;
		for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
		{
			if (addr == em_address_next_page)
				tlb_addr = tlb_addr_next_page;
			var = (var << 8) | Memory::physical_base[tlb_addr];
		}
		return var;
	}

	// The easy case!
	return bswap(*(const T*)&Memory::physical_base[tlb_addr]);
}
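
Examples No. 2 and No. 4 guard the slow path with sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T): only a misaligned access whose last byte would land past the end of the current page needs a second translation. A small sketch that exercises the predicate, assuming the usual 4 KiB (0x1000) hardware page size:

#include <cstdint>
#include <cstdio>

template <typename T>
bool CrossesPage(uint32_t em_address)
{
	const uint32_t HW_PAGE_SIZE = 0x1000; // assumed 4 KiB pages
	return sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) &&
	       (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T);
}

int main()
{
	std::printf("%d\n", CrossesPage<uint32_t>(0x80000FFE)); // 1: 4-byte load straddles the page boundary
	std::printf("%d\n", CrossesPage<uint32_t>(0x80000FFC)); // 0: aligned, fits within the page
	std::printf("%d\n", CrossesPage<uint8_t>(0x80000FFF));  // 0: single bytes never cross
	return 0;
}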