void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
{
#if defined(DEBUG)
	u32 pde_index, pte_index;
	u32 pde, pte;

	pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
	pte_index = MALI_MMU_PTE_ENTRY(fault_addr);

	pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
				     pde_index * sizeof(u32));

	if (pde & MALI_MMU_FLAGS_PRESENT) {
		u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);

		pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
					     pte_index * sizeof(u32));

		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
				     "\t\tPTE: %08x, page %08x is %s\n",
				     fault_addr, pte_addr, pte,
				     MALI_MMU_ENTRY_ADDRESS(pte),
				     pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
	} else {
		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
				     fault_addr, pde));
	}
#else
	MALI_IGNORE(pagedir);
	MALI_IGNORE(fault_addr);
#endif
}
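/*
 * For reference, a sketch of how the index macros used above decompose a
 * 32-bit Mali virtual address, assuming the usual layout of a 1024-entry
 * page directory, 1024-entry page tables and 4 KiB pages (each PDE thus
 * covers 4 MiB). The authoritative definitions live in the MMU headers;
 * treat these as illustrative assumptions.
 */
#if 0 /* illustrative sketch only */
#define MALI_MMU_PDE_ENTRY(address)  (((address) >> 22) & 0x03FF) /* bits 31..22 */
#define MALI_MMU_PTE_ENTRY(address)  (((address) >> 12) & 0x03FF) /* bits 21..12 */
#define MALI_MMU_ENTRY_ADDRESS(x)    ((x) & 0xFFFFFC00)           /* strip flag bits */
#endif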
static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
{
	MALI_DEBUG_ASSERT_POINTER(pagedir);
	MALI_DEBUG_ASSERT_POINTER(info);

	if (NULL != pagedir->page_directory_mapped) {
		int i;

		MALI_CHECK_NO_ERROR(
			mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
		);

		/* A Mali page directory holds 1024 PDEs (one 4 KiB page of u32 entries). */
		for (i = 0; i < 1024; i++) {
			if (NULL != pagedir->page_entries_mapped[i]) {
				MALI_CHECK_NO_ERROR(
					mali_mmu_dump_page(pagedir->page_entries_mapped[i],
							   _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
									   i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
				);
			}
		}
	}

	MALI_SUCCESS;
}
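/*
 * A minimal sketch of the helper used above, assuming struct dump_info
 * carries a cursor into a caller-supplied buffer and that MALI_MMU_PAGE_SIZE
 * is the 4 KiB table-page size. The shipped driver defines the real struct
 * and helper elsewhere; the field names here are assumptions.
 */
#if 0 /* illustrative sketch only */
struct dump_info {
	u32 buffer_left; /* bytes remaining in the output buffer */
	u32 *buffer;     /* current write position */
};

static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
{
	u32 i;

	if (info->buffer_left < MALI_MMU_PAGE_SIZE + sizeof(u32)) {
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	*info->buffer++ = phys_addr; /* tag the dump with the page's physical address */
	for (i = 0; i < MALI_MMU_PAGE_SIZE; i += sizeof(u32)) {
		*info->buffer++ = _mali_osk_mem_ioread32(page, i);
	}
	info->buffer_left -= MALI_MMU_PAGE_SIZE + sizeof(u32);

	MALI_SUCCESS;
}
#endif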
void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
{
	const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
	int i;

	/* Free referenced page tables and zero PDEs. */
	for (i = 0; i < num_page_table_entries; i++) {
		if (pagedir->page_directory_mapped &&
		    (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
			u32 phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
			mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
		}
	}
	_mali_osk_write_mem_barrier();

	/* Free the page directory page. */
	mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);

	_mali_osk_free(pagedir);
}
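/*
 * For symmetry, a sketch of the allocation path that the free routine above
 * undoes, assuming mali_mmu_get_table_page() hands back a zeroed table page.
 * The shipped driver has its own mali_mmu_pagedir_alloc(); this is only an
 * illustrative reconstruction.
 */
#if 0 /* illustrative sketch only */
struct mali_page_directory *mali_mmu_pagedir_alloc(void)
{
	struct mali_page_directory *pagedir;

	pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
	if (NULL == pagedir) {
		return NULL;
	}

	if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory,
							&pagedir->page_directory_mapped)) {
		_mali_osk_free(pagedir);
		return NULL;
	}

	return pagedir;
}
#endif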
/* Poll the clock-control register until its busy bit (bit 15) clears.
 * Note: the msec argument is currently unused; see the commented-out delay.
 */
static void mali_platform_wating(u32 msec)
{
	unsigned int read_val;

	while (1) {
		read_val = _mali_osk_mem_ioread32(clk_register_map, 0x00);
		if ((read_val & 0x8000) == 0x0000)
			break;

		_mali_osk_time_ubusydelay(100); /* 1000 -> 100 : 20101218 */
	}
	/* _mali_osk_time_ubusydelay(msec*1000); */
}
static void mali_platform_wating(u32 msec)
{
	/* Sample waiting loop;
	 * replace this in the future with a proper check routine.
	 */
	unsigned int read_val;

	while (1) {
		read_val = _mali_osk_mem_ioread32(clk_register_map, 0x00);
		if ((read_val & 0x8000) == 0x0000)
			break;
		_mali_osk_time_ubusydelay(100); /* 1000 -> 100 : 20101218 */
	}
	/* _mali_osk_time_ubusydelay(msec*1000); */
}
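/*
 * Both variants above spin forever if the busy bit never clears. A bounded
 * variant, sketched under the assumption that bit 15 of the register at
 * offset 0x00 is a busy flag, could honour the otherwise-unused msec
 * argument as a timeout (the function name below is hypothetical, not part
 * of the driver):
 */
#if 0 /* illustrative sketch only */
static void mali_platform_waiting_bounded(u32 msec)
{
	u32 waited_us = 0;

	while (_mali_osk_mem_ioread32(clk_register_map, 0x00) & 0x8000) {
		if (waited_us >= msec * 1000) {
			MALI_DEBUG_PRINT(1, ("Mali: clock busy bit still set after %u msec, giving up\n", msec));
			break;
		}
		_mali_osk_time_ubusydelay(100);
		waited_us += 100;
	}
}
#endif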
/** @brief Internal PMU function to read a PMU register
 *
 * @param pmu handle that identifies the PMU hardware
 * @param relative_address relative PMU hardware address to read from
 * @return 32-bit value that was read from the address
 */
static u32 pmu_reg_read(platform_pmu_t *pmu, u32 relative_address)
{
	u32 read_val;

	MALI_DEBUG_ASSERT_POINTER(pmu);
	MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
	MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);

	read_val = _mali_osk_mem_ioread32(pmu->reg_mapped, relative_address);

	MALI_DEBUG_PRINT(5, ("PMU: reg_read: %s Addr:0x%04X Val:0x%08x\n",
			     pmu->name, relative_address, read_val));

	return read_val;
}
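/*
 * The natural write-side counterpart, sketched by mirroring the checks
 * above. The shipped driver provides its own pmu_reg_write(); this version
 * only assumes the _mali_osk_mem_iowrite32() OSK call.
 */
#if 0 /* illustrative sketch only */
static void pmu_reg_write(platform_pmu_t *pmu, u32 relative_address, u32 new_val)
{
	MALI_DEBUG_ASSERT_POINTER(pmu);
	MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
	MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);

	MALI_DEBUG_PRINT(5, ("PMU: reg_write: %s Addr:0x%04X Val:0x%08x\n",
			     pmu->name, relative_address, new_val));

	_mali_osk_mem_iowrite32(pmu->reg_mapped, relative_address, new_val);
}
#endif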
_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	_mali_osk_errcode_t err;
	mali_io_address pde_mapping;
	u32 pde_phys;
	int i;

	for (i = first_pde; i <= last_pde; i++)
	{
		if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT))
		{
			/* Page table not present */
			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
			MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);

			err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
			if (_MALI_OSK_ERR_OK != err)
			{
				MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
				return err;
			}
			pagedir->page_entries_mapped[i] = pde_mapping;

			/* Update PDE, mark as present */
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
							pde_phys | MALI_MMU_FLAGS_PRESENT);

			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
			pagedir->page_entries_usage_count[i] = 1;
		}
		else
		{
			pagedir->page_entries_usage_count[i]++;
		}
	}
	_mali_osk_write_mem_barrier();

	MALI_SUCCESS;
}
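/*
 * Usage sketch: map and unmap must be called with matching address/size
 * pairs so the per-PDE usage counts balance. Note that on allocation
 * failure the map path above returns without unwinding usage counts taken
 * for earlier PDEs, so callers should treat a failed map as fatal for the
 * pagedir. The addresses below are hypothetical.
 */
#if 0 /* illustrative sketch only */
static void pagedir_map_example(struct mali_page_directory *pagedir)
{
	const u32 addr = 0x02000000;      /* 4 MiB aligned Mali virtual address */
	const u32 size = 8 * 1024 * 1024; /* spans two PDEs */

	if (_MALI_OSK_ERR_OK == mali_mmu_pagedir_map(pagedir, addr, size)) {
		/* ... fill in PTEs, run jobs ... */
		mali_mmu_pagedir_unmap(pagedir, addr, size);
	}
}
#endif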
_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	u32 left = size;
	int i;
	mali_bool pd_changed = MALI_FALSE;
	u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
	u32 num_pages_inv = 0;
	mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
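	/*
	 * Why three slots suffice (assuming reliable usage counts): an unmap
	 * range leaves at most two partially-used page tables, the first and
	 * the last PDE it touches, since interior PDEs are fully covered and
	 * expected to drop to zero and be freed, plus at most one dirtied
	 * page-directory page. invalidate_all covers the case where stale
	 * usage counts break that assumption.
	 */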

	/* For all page directory entries in range. */
	for (i = first_pde; i <= last_pde; i++) {
		u32 size_in_pde, offset;

		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);

		/* Offset into page table, 0 if mali_address is 4MiB aligned */
		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
			size_in_pde = left;
		} else {
			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
		}

		pagedir->page_entries_usage_count[i]--;

		/* If entire page table is unused, free it */
		if (0 == pagedir->page_entries_usage_count[i]) {
			u32 page_phys;
			void *page_virt;
			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
			/* last reference removed, no need to zero out each PTE  */

			page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
			page_virt = pagedir->page_entries_mapped[i];
			pagedir->page_entries_mapped[i] = NULL;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);

			mali_mmu_release_table_page(page_phys, page_virt);
			pd_changed = MALI_TRUE;
		} else {
			MALI_DEBUG_ASSERT(num_pages_inv < 2);
			if (num_pages_inv < 2) {
				pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
				num_pages_inv++;
			} else {
				invalidate_all = MALI_TRUE;
			}

			/* If part of the page table is still in use, zero the relevant PTEs */
			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
		}

		left -= size_in_pde;
		mali_address += size_in_pde;
	}
	_mali_osk_write_mem_barrier();

	/* L2 pages invalidation */
	if (MALI_TRUE == pd_changed) {
		MALI_DEBUG_ASSERT(num_pages_inv < 3);
		if (num_pages_inv < 3) {
			pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
			num_pages_inv++;
		} else {
			invalidate_all = MALI_TRUE;
		}
	}

	if (invalidate_all) {
		mali_l2_cache_invalidate_all();
	} else {
		mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
	}

	MALI_SUCCESS;
}
static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
{
	return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
				       index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
}
_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	u32 left = size;
	int i;
#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
	mali_bool pd_changed = MALI_FALSE;
	u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
	u32 num_pages_inv = 0;
#endif
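	/*
	 * When MALI_UNMAP_FLUSH_ALL_MALI_L2 is defined, the per-page L2
	 * invalidation below is compiled out; the assumption is that such
	 * builds flush the whole Mali L2 cache on unmap elsewhere instead.
	 */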

	/* For all page directory entries in range. */
	for (i = first_pde; i <= last_pde; i++)
	{
		u32 size_in_pde, offset;

		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);

		/* Offset into page table, 0 if mali_address is 4MiB aligned */
		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset)
		{
			size_in_pde = left;
		}
		else
		{
			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
		}

		pagedir->page_entries_usage_count[i]--;

		/* If entire page table is unused, free it */
		if (0 == pagedir->page_entries_usage_count[i])
		{
			u32 page_address;
			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
			/* last reference removed, no need to zero out each PTE  */

			page_address = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
			pagedir->page_entries_mapped[i] = NULL;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);

			mali_mmu_release_table_page(page_address);
#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
			pd_changed = MALI_TRUE;
#endif
		}
		else
		{
#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
			/* At most two partially-used page tables per unmap range;
			 * bounds-check before the store to avoid writing past the array. */
			MALI_DEBUG_ASSERT(num_pages_inv < 2);
			pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
			num_pages_inv++;
#endif

			/* If part of the page table is still in use, zero the relevant PTEs */
			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
		}

		left -= size_in_pde;
		mali_address += size_in_pde;
	}
	_mali_osk_write_mem_barrier();

#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
	/* L2 pages invalidation */
	if (MALI_TRUE == pd_changed)
	{
		/* Bounds-check before the store; the list holds at most three pages. */
		MALI_DEBUG_ASSERT(num_pages_inv < 3);
		pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
		num_pages_inv++;
	}

	if (_MALI_PRODUCT_ID_MALI200 != mali_kernel_core_get_product_id())
	{
		mali_cluster_invalidate_pages(pages_to_invalidate, num_pages_inv);
	}
#endif

	MALI_SUCCESS;
}