/**
 * Clear every PTE covering [mali_address, mali_address + size) in the
 * given second-level page table.
 *
 * Writes are relaxed; no write barrier is issued here, so the caller is
 * responsible for ordering if the MMU must observe the cleared entries.
 */
MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
{
	const int start = MALI_MMU_PTE_ENTRY(mali_address);
	const int end = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
	int entry;

	for (entry = start; entry <= end; ++entry) {
		_mali_osk_mem_iowrite32_relaxed(page_table, entry * sizeof(u32), 0);
	}
}
/**
 * Debug helper: dump the page-directory and page-table entries that map
 * @fault_addr, to aid diagnosis of MMU page faults.
 *
 * Compiled to a no-op unless DEBUG is defined.
 */
void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
{
#if defined(DEBUG)
	const u32 dir_idx = MALI_MMU_PDE_ENTRY(fault_addr);
	const u32 dir_entry = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
						     dir_idx * sizeof(u32));

	if (!(dir_entry & MALI_MMU_FLAGS_PRESENT)) {
		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
				     fault_addr, dir_entry));
	} else {
		/* Directory entry is valid: look up the second-level entry too. */
		const u32 tbl_idx = MALI_MMU_PTE_ENTRY(fault_addr);
		const u32 table_phys = MALI_MMU_ENTRY_ADDRESS(dir_entry);
		const u32 tbl_entry = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[dir_idx],
							     tbl_idx * sizeof(u32));

		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
				     "\t\tPTE: %08x, page %08x is %s\n",
				     fault_addr, table_phys, tbl_entry,
				     MALI_MMU_ENTRY_ADDRESS(tbl_entry),
				     tbl_entry & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
	}
#else
	MALI_IGNORE(pagedir);
	MALI_IGNORE(fault_addr);
#endif
}
/**
 * Map [mali_address, mali_address + size) to @phys_address in the MMU
 * page tables, with permission/cache bits selected by @cache_settings.
 *
 * MALI_CACHE_GP_READ_ALLOCATE selects the forced GP read-allocate mode;
 * any other value is mapped with standard read/write/present bits, and
 * an error is logged if it was not MALI_CACHE_STANDARD. The second-level
 * page tables for the range must already be mapped (asserted in debug
 * builds). Finishes with a write barrier to flush the relaxed writes.
 */
void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, mali_memory_cache_settings cache_settings)
{
	const u32 last = mali_address + size;
	u32 flags;

	switch (cache_settings) {
	case MALI_CACHE_GP_READ_ALLOCATE:
		MALI_DEBUG_PRINT(5, ("Map L2 GP_Read_allocate\n"));
		flags = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
		break;

	case MALI_CACHE_STANDARD:
		MALI_DEBUG_PRINT(5, ("Map L2 Standard\n"));
		/* fall through */
	default:
		if (MALI_CACHE_STANDARD != cache_settings) {
			MALI_PRINT_ERROR(("Wrong cache settings\n"));
		}
		flags = MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT;
	}

	/* Write one PTE per MMU page in the range. */
	while (mali_address < last) {
		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
		_mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
						MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
						phys_address | flags);
		mali_address += MALI_MMU_PAGE_SIZE;
		phys_address += MALI_MMU_PAGE_SIZE;
	}

	/* Make the relaxed PTE writes visible before returning. */
	_mali_osk_write_mem_barrier();
}
/**
 * Map [mali_address, mali_address + size) to @phys_address in the MMU
 * page tables, tagging each entry with the caller-supplied
 * @permission_bits.
 *
 * The second-level page tables covering the range must already be
 * allocated and mapped (asserted in debug builds).
 */
void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 permission_bits)
{
	u32 end_address = mali_address + size;

	/* Map physical pages into MMU page tables */
	for ( ; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
		_mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
		                                MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
		                                phys_address | permission_bits);
	}

	/* Fix: the sibling pagedir_update variants flush their relaxed PTE
	 * writes with a write barrier before returning; without it the MMU
	 * may observe stale entries. */
	_mali_osk_write_mem_barrier();
}
/**
 * Map [mali_address, mali_address + size) to @phys_address as present,
 * readable and writable pages in the MMU page tables.
 *
 * The second-level page tables covering the range must already be mapped
 * (asserted in debug builds). Ends with a write barrier so the relaxed
 * PTE writes are flushed before the function returns.
 */
void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size)
{
	const u32 rw_present = MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT;
	const u32 end_address = mali_address + size;
	u32 va;
	u32 pa;

	/* One PTE per MMU page in the requested range. */
	for (va = mali_address, pa = phys_address; va < end_address; va += MALI_MMU_PAGE_SIZE, pa += MALI_MMU_PAGE_SIZE) {
		mali_io_address table = pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(va)];

		MALI_DEBUG_ASSERT_POINTER(table);
		_mali_osk_mem_iowrite32_relaxed(table, MALI_MMU_PTE_ENTRY(va) * sizeof(u32), pa | rw_present);
	}

	/* Flush the relaxed writes so the MMU sees the new entries. */
	_mali_osk_write_mem_barrier();
}