_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	u32 left = size;
	int i;
	mali_bool pd_changed = MALI_FALSE;
	u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
	u32 num_pages_inv = 0;
	mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
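	/*
	 * Unmapping strategy: walk every PDE covered by [mali_address, mali_address + size).
	 * Each PDE drops one reference; a page table with no references left is released
	 * and its PDE cleared, while a partially used page table only has the PTEs inside
	 * the range zeroed. The touched page table / page directory lines are then
	 * invalidated from the Mali L2 caches (or everything, as a fallback).
	 */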

	/* For all page directory entries in range. */
	for (i = first_pde; i <= last_pde; i++) {
		u32 size_in_pde, offset;

		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);

		/* Offset into page table, 0 if mali_address is 4MiB aligned */
		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
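		/* size_in_pde: bytes of the range that fall inside the current PDE's 4MiB window */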
		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
			size_in_pde = left;
		} else {
			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
		}

		pagedir->page_entries_usage_count[i]--;

		/* If entire page table is unused, free it */
		if (0 == pagedir->page_entries_usage_count[i]) {
			u32 page_phys;
			void *page_virt;
			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
			/* last reference removed, no need to zero out each PTE  */
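			/* Read back the PDE to get the page table's physical address, then clear
			   both the CPU-side mapping pointer and the hardware PDE before releasing it */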

			page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
			page_virt = pagedir->page_entries_mapped[i];
			pagedir->page_entries_mapped[i] = NULL;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);

			mali_mmu_release_table_page(page_phys, page_virt);
			pd_changed = MALI_TRUE;
		} else {
			MALI_DEBUG_ASSERT(num_pages_inv < 2);
			if (num_pages_inv < 2) {
				pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
				num_pages_inv++;
			} else {
				invalidate_all = MALI_TRUE;
			}

			/* If part of the page table is still in use, zero the relevant PTEs */
			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
		}

		left -= size_in_pde;
		mali_address += size_in_pde;
	}
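	/* Commit the relaxed PDE writes above before the Mali L2 caches are invalidated */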
	_mali_osk_write_mem_barrier();

	/* L2 pages invalidation */
	if (MALI_TRUE == pd_changed) {
		MALI_DEBUG_ASSERT(num_pages_inv < 3);
		if (num_pages_inv < 3) {
			pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
			num_pages_inv++;
		} else {
			invalidate_all = MALI_TRUE;
		}
	}

	if (invalidate_all) {
		mali_l2_cache_invalidate_all();
	} else {
		mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
	}

	MALI_SUCCESS;
}