void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
			     u32 phys_address, u32 size, mali_memory_cache_settings cache_settings)
{
	u32 end_address = mali_address + size;
	u32 permission_bits;

	switch (cache_settings) {
	case MALI_CACHE_GP_READ_ALLOCATE:
		MALI_DEBUG_PRINT(5, ("Map L2 GP_Read_allocate\n"));
		permission_bits = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
		break;
	case MALI_CACHE_STANDARD:
		MALI_DEBUG_PRINT(5, ("Map L2 Standard\n"));
		/* fall through */
	default:
		if (MALI_CACHE_STANDARD != cache_settings) {
			MALI_PRINT_ERROR(("Wrong cache settings\n"));
		}
		permission_bits = MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT;
	}

	/* Map physical pages into MMU page tables */
	for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
		_mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
						MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
						phys_address | permission_bits);
	}
	_mali_osk_write_mem_barrier();
}
void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
			     u32 phys_address, u32 size, u32 permission_bits)
{
	u32 end_address = mali_address + size;

	/* Map physical pages into MMU page tables */
	for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
		_mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
						MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
						phys_address | permission_bits);
	}
}
void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
			     u32 phys_address, u32 size)
{
	u32 end_address = mali_address + size;

	/* Map physical pages into MMU page tables */
	for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
		_mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
						MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
						phys_address | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
	}
	_mali_osk_write_mem_barrier();
}
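/*
 * For reference, a minimal sketch of the two-level address decomposition that
 * MALI_MMU_PDE_ENTRY/MALI_MMU_PTE_ENTRY above rely on, assuming the usual
 * 32-bit Mali layout (1024-entry page directory, 1024-entry page tables,
 * 4 KiB pages, so each PDE spans a 4 MiB virtual page). The SKETCH_ names are
 * illustrative stand-ins, not the driver's own definitions.
 */
#define SKETCH_MMU_PAGE_SIZE         0x1000u                     /* 4 KiB page */
#define SKETCH_MMU_VIRTUAL_PAGE_SIZE 0x400000u                   /* 4 MiB covered per PDE */
#define SKETCH_MMU_PDE_ENTRY(addr)   (((addr) >> 22) & 0x03FFu)  /* bits 31..22: page directory index */
#define SKETCH_MMU_PTE_ENTRY(addr)   (((addr) >> 12) & 0x03FFu)  /* bits 21..12: page table index */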
void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
{
#if defined(DEBUG)
	u32 pde_index, pte_index;
	u32 pde, pte;

	pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
	pte_index = MALI_MMU_PTE_ENTRY(fault_addr);

	pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, pde_index * sizeof(u32));

	if (pde & MALI_MMU_FLAGS_PRESENT) {
		u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);

		pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index], pte_index * sizeof(u32));

		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
				     "\t\tPTE: %08x, page %08x is %s\n",
				     fault_addr, pte_addr, pte,
				     MALI_MMU_ENTRY_ADDRESS(pte),
				     pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
	} else {
		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n", fault_addr, pde));
	}
#else
	MALI_IGNORE(pagedir);
	MALI_IGNORE(fault_addr);
#endif
}
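/*
 * Hypothetical fault-path usage of the diagnostic above: read the faulting
 * Mali virtual address from the MMU core and decode it against the page
 * tables. MALI_MMU_REGISTER_PAGE_FAULT_ADDR follows the standard Mali MMU
 * register map; treat its availability in this tree as an assumption, and
 * sketch_dump_fault as an illustrative helper, not driver API.
 */
static void sketch_dump_fault(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
{
	u32 fault_addr = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);

	mali_mmu_pagedir_diag(pagedir, fault_addr);
}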
_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	_mali_osk_errcode_t err;
	mali_io_address pde_mapping;
	u32 pde_phys;
	int i;

	for (i = first_pde; i <= last_pde; i++) {
		if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
			/* Page table not present */
			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
			MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);

			err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
			if (_MALI_OSK_ERR_OK != err) {
				MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
				return err;
			}
			pagedir->page_entries_mapped[i] = pde_mapping;

			/* Update PDE, mark as present */
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
							pde_phys | MALI_MMU_FLAGS_PRESENT);

			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
			pagedir->page_entries_usage_count[i] = 1;
		} else {
			pagedir->page_entries_usage_count[i]++;
		}
	}
	_mali_osk_write_mem_barrier();

	MALI_SUCCESS;
}
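/*
 * Hypothetical usage sketch (sketch_map_and_fill is not driver API): a range
 * is first reserved with mali_mmu_pagedir_map(), which allocates and
 * refcounts the needed page tables, and only then populated with
 * mali_mmu_pagedir_update(). The reverse order would trip the
 * MALI_DEBUG_ASSERT_POINTER in the update loop, because the PDE-level
 * entries would not be mapped yet.
 */
static _mali_osk_errcode_t sketch_map_and_fill(struct mali_page_directory *pagedir,
					       u32 mali_addr, u32 phys_addr, u32 size)
{
	_mali_osk_errcode_t err = mali_mmu_pagedir_map(pagedir, mali_addr, size);
	if (_MALI_OSK_ERR_OK != err) {
		return err; /* page table allocation failed; nothing has been mapped */
	}

	/* Matches the variant taking explicit permission bits (second version above). */
	mali_mmu_pagedir_update(pagedir, mali_addr, phys_addr, size,
				MALI_MMU_FLAGS_WRITE_PERMISSION |
				MALI_MMU_FLAGS_READ_PERMISSION |
				MALI_MMU_FLAGS_PRESENT);
	_mali_osk_write_mem_barrier(); /* publish the relaxed PTE writes */

	return _MALI_OSK_ERR_OK;
}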
_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	u32 left = size;
	int i;
	mali_bool pd_changed = MALI_FALSE;
	u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
	u32 num_pages_inv = 0;
	mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */

	/* For all page directory entries in range. */
	for (i = first_pde; i <= last_pde; i++) {
		u32 size_in_pde, offset;

		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);

		/* Offset into page table, 0 if mali_address is 4MiB aligned */
		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
			size_in_pde = left;
		} else {
			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
		}

		pagedir->page_entries_usage_count[i]--;

		/* If entire page table is unused, free it */
		if (0 == pagedir->page_entries_usage_count[i]) {
			u32 page_phys;
			void *page_virt;
			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
			/* last reference removed, no need to zero out each PTE */

			page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
			page_virt = pagedir->page_entries_mapped[i];
			pagedir->page_entries_mapped[i] = NULL;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);

			mali_mmu_release_table_page(page_phys, page_virt);
			pd_changed = MALI_TRUE;
		} else {
			MALI_DEBUG_ASSERT(num_pages_inv < 2);
			if (num_pages_inv < 2) {
				pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
				num_pages_inv++;
			} else {
				invalidate_all = MALI_TRUE;
			}

			/* If part of the page table is still in use, zero the relevant PTEs */
			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
		}

		left -= size_in_pde;
		mali_address += size_in_pde;
	}
	_mali_osk_write_mem_barrier();

	/* L2 pages invalidation */
	if (MALI_TRUE == pd_changed) {
		MALI_DEBUG_ASSERT(num_pages_inv < 3);
		if (num_pages_inv < 3) {
			pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
			num_pages_inv++;
		} else {
			invalidate_all = MALI_TRUE;
		}
	}

	if (invalidate_all) {
		mali_l2_cache_invalidate_all();
	} else {
		mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
	}

	MALI_SUCCESS;
}
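/*
 * Why pages_to_invalidate[] above is sized to 3: within one unmap range, only
 * the first and last PDE can be partially covered (every PDE strictly between
 * them spans a whole 4 MiB region and is freed outright), so at most two
 * page-table pages need selective invalidation, plus at most one page
 * directory page if any PDE was cleared. A sketch of that upper bound
 * (sketch_max_pages_to_invalidate is a hypothetical helper, not driver code):
 */
static u32 sketch_max_pages_to_invalidate(u32 mali_address, u32 size)
{
	const u32 first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const u32 last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);

	/* At most one partial table if the range sits inside a single PDE,
	 * otherwise at most two (first and last), plus one possible PD page. */
	const u32 max_partial_tables = (first_pde == last_pde) ? 1 : 2;

	return max_partial_tables + 1;
}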
void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
{
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
}
void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
{
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
}
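/*
 * The two variants above differ only in the MALI_ASSERT_GROUP_LOCKED check:
 * the first documents that the caller must hold the owning group's lock while
 * zapping a TLB line. A hypothetical locked caller might look like the sketch
 * below; the mali_group_lock/mali_group_unlock helper names are an assumption
 * inferred from the assert, not verified against this tree.
 */
static void sketch_zap_line_locked(struct mali_group *group, struct mali_mmu_core *mmu, u32 mali_address)
{
	mali_group_lock(group);                      /* serialize with other users of the MMU registers */
	mali_mmu_invalidate_page(mmu, mali_address); /* ZAP_ONE_LINE takes the PDE index of the address */
	mali_group_unlock(group);
}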
_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	u32 left = size;
	int i;
#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
	mali_bool pd_changed = MALI_FALSE;
	u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
	u32 num_pages_inv = 0;
#endif

	/* For all page directory entries in range. */
	for (i = first_pde; i <= last_pde; i++) {
		u32 size_in_pde, offset;

		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);

		/* Offset into page table, 0 if mali_address is 4MiB aligned */
		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
			size_in_pde = left;
		} else {
			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
		}

		pagedir->page_entries_usage_count[i]--;

		/* If entire page table is unused, free it */
		if (0 == pagedir->page_entries_usage_count[i]) {
			u32 page_address;
			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
			/* last reference removed, no need to zero out each PTE */

			page_address = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
			pagedir->page_entries_mapped[i] = NULL;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);

			mali_mmu_release_table_page(page_address);
#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
			pd_changed = MALI_TRUE;
#endif
		} else {
#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
			pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
			num_pages_inv++;
			MALI_DEBUG_ASSERT(num_pages_inv < 3);
#endif
			/* If part of the page table is still in use, zero the relevant PTEs */
			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
		}

		left -= size_in_pde;
		mali_address += size_in_pde;
	}
	_mali_osk_write_mem_barrier();

#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
	/* L2 pages invalidation */
	if (MALI_TRUE == pd_changed) {
		pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
		num_pages_inv++;
		MALI_DEBUG_ASSERT(num_pages_inv < 3);
	}

	if (_MALI_PRODUCT_ID_MALI200 != mali_kernel_core_get_product_id()) {
		mali_cluster_invalidate_pages(pages_to_invalidate, num_pages_inv);
	}
#endif

	MALI_SUCCESS;
}