void page_fault_worker(struct work_struct *data)
{
	u64 fault_pfn;
	u32 fault_status;
	size_t new_pages;
	size_t fault_rel_pfn;
	struct kbase_as *faulting_as;
	int as_no;
	struct kbase_context *kctx;
	struct kbase_device *kbdev;
	struct kbase_va_region *region;
	int err;
	bool grown = false;

	faulting_as = container_of(data, struct kbase_as, work_pagefault);
	fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
	as_no = faulting_as->number;

	kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);

	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
	 * Therefore, it cannot be scheduled out of this AS until we explicitly
	 * release it.
	 *
	 * NOTE: NULL can be returned here if we're gracefully handling a
	 * spurious interrupt */
	kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
	if (kctx == NULL) {
		/* Only handle this if not already suspended */
		if (!kbase_pm_context_active_handle_suspend(kbdev,
				KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
			/* Address space has no context, terminate the work */

			/* AS transaction begin */
			mutex_lock(&faulting_as->transaction_mutex);
			kbase_mmu_disable_as(kbdev, as_no);
			mutex_unlock(&faulting_as->transaction_mutex);
			/* AS transaction end */

			kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
					KBASE_MMU_FAULT_TYPE_PAGE);
			kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
					KBASE_MMU_FAULT_TYPE_PAGE);
			kbase_pm_context_idle(kbdev);
		}
		atomic_dec(&kbdev->faults_pending);
		return;
	}

	KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);

	fault_status = faulting_as->fault_status;
	switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {

	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT:
		/* need to check against the region to handle this one */
		break;

	case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT:
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Permission failure");
		goto fault_done;

	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Translation table bus fault");
		goto fault_done;

	case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
		/* nothing to do, but we don't expect this fault currently */
		dev_warn(kbdev->dev, "Access flag unexpectedly set");
		goto fault_done;

	default:
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Unknown fault code");
		goto fault_done;
	}

	/* so we have a translation fault, let's see if it is for growable
	 * memory */
	kbase_gpu_vm_lock(kctx);

	region = kbase_region_tracker_find_region_enclosing_address(kctx,
			faulting_as->fault_addr);
	if (!region || region->flags & KBASE_REG_FREE) {
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Memory is not mapped on the GPU");
		goto fault_done;
	}

	if ((region->flags & GROWABLE_FLAGS_REQUIRED) != GROWABLE_FLAGS_REQUIRED) {
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Memory is not growable");
		goto fault_done;
	}

	/* find the size we need to grow it by */
	/* we know the result fits in a size_t because
	 * kbase_region_tracker_find_region_enclosing_address validates that
	 * the fault_address is within a size_t of start_pfn */
	fault_rel_pfn = fault_pfn - region->start_pfn;

	if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
		dev_dbg(kbdev->dev,
				"Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
				faulting_as->fault_addr, region->start_pfn,
				region->start_pfn +
					kbase_reg_current_backed_size(region));

		kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		/* [1] in case another page fault occurred while we were
		 * handling the (duplicate) page fault we need to ensure we
		 * don't lose the other page fault as a result of us clearing
		 * the MMU IRQ. Therefore, after we clear the MMU IRQ we send
		 * an UNLOCK command that will retry any stalled memory
		 * transaction (which should cause the other page fault to be
		 * raised again). */
		kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
				AS_COMMAND_UNLOCK, 1);
		kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	new_pages = make_multiple(fault_rel_pfn -
			kbase_reg_current_backed_size(region) + 1,
			region->extent);

	/* cap to max vsize */
	if (new_pages + kbase_reg_current_backed_size(region) >
			region->nr_pages)
		new_pages = region->nr_pages -
				kbase_reg_current_backed_size(region);

	if (0 == new_pages) {
		/* Duplicate of a fault we've already handled, nothing to do */
		kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		/* See comment [1] about UNLOCK usage */
		kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
				AS_COMMAND_UNLOCK, 1);
		kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_gpu_vm_unlock(kctx);
		goto fault_done;
	}

	if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) {
		if (region->gpu_alloc != region->cpu_alloc) {
			if (kbase_alloc_phy_pages_helper(
					region->cpu_alloc, new_pages) == 0) {
				grown = true;
			} else {
				kbase_free_phy_pages_helper(region->gpu_alloc,
						new_pages);
			}
		} else {
			grown = true;
		}
	}

	if (grown) {
		u32 op;

		/* alloc success */
		KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <=
				region->nr_pages);

		/* AS transaction begin */
		mutex_lock(&faulting_as->transaction_mutex);

		/* set up the new pages */
		err = kbase_mmu_insert_pages(kctx,
				region->start_pfn +
					kbase_reg_current_backed_size(region) -
					new_pages,
				&kbase_get_gpu_phy_pages(region)[kbase_reg_current_backed_size(region) - new_pages],
				new_pages, region->flags);
		if (err) {
			/* failed to insert pages, handle as a normal PF */
			mutex_unlock(&faulting_as->transaction_mutex);
			kbase_free_phy_pages_helper(region->gpu_alloc,
					new_pages);
			if (region->gpu_alloc != region->cpu_alloc)
				kbase_free_phy_pages_helper(region->cpu_alloc,
						new_pages);
			kbase_gpu_vm_unlock(kctx);
			/* The locked VA region will be unlocked and the cache
			 * invalidated in here */
			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
					"Page table update failure");
			goto fault_done;
		}
#if defined(CONFIG_MALI_GATOR_SUPPORT)
		kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
#endif
#if defined(CONFIG_MALI_MIPE_ENABLED)
		kbase_tlstream_aux_pagefault(
				as_no,
				atomic_read(&kctx->used_pages));
#endif

		/* flush L2 and unlock the VA (resumes the MMU) */
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
			op = AS_COMMAND_FLUSH;
		else
			op = AS_COMMAND_FLUSH_PT;

		/* clear MMU interrupt - this needs to be done after updating
		 * the page tables but before issuing a FLUSH command. The
		 * FLUSH cmd has a side effect that it restarts stalled memory
		 * transactions in other address spaces which may cause
		 * another fault to occur. If we didn't clear the interrupt at
		 * this stage a new IRQ might not be raised when the GPU finds
		 * an MMU IRQ is already pending.
		 */
		kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
				faulting_as->fault_addr >> PAGE_SHIFT,
				new_pages, op, 1);

		mutex_unlock(&faulting_as->transaction_mutex);
		/* AS transaction end */

		/* reenable this in the mask */
		kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
				KBASE_MMU_FAULT_TYPE_PAGE);
		kbase_gpu_vm_unlock(kctx);
	} else {
		/* failed to grow the backing, handle as a normal PF */
		kbase_gpu_vm_unlock(kctx);
		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
				"Page allocation failure");
	}

fault_done:
	/* By this point the fault has been handled in some way, so release
	 * the ctx refcount taken in kbase_mmu_interrupt(). */
	kbasep_js_runpool_release_ctx(kbdev, kctx);

	atomic_dec(&kbdev->faults_pending);
}
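/*
 * make_multiple() is referenced in the growth calculation above but is not
 * included in this excerpt. The following is a minimal sketch of the rounding
 * helper it is assumed to implement (round "minimum" up to the next multiple
 * of "multiple", so the backing grows in units of the region's extent); the
 * driver's actual definition may differ.
 */
static size_t make_multiple(size_t minimum, size_t multiple)
{
	size_t remainder = minimum % multiple;

	if (remainder == 0)
		return minimum;

	/* round up to the next whole multiple */
	return minimum + multiple - remainder;
}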
void kbase_pm_context_active(struct kbase_device *kbdev)
{
	(void)kbase_pm_context_active_handle_suspend(kbdev,
			KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
}
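/*
 * Illustrative usage (not part of this excerpt; example_gpu_access() is a
 * hypothetical caller): code that needs the GPU powered takes a PM reference
 * before touching it and drops the reference when done. page_fault_worker()
 * above uses the _handle_suspend variant with DONT_REACTIVATE so it can back
 * off instead of re-activating the GPU while a suspend is in progress.
 */
static void example_gpu_access(struct kbase_device *kbdev)
{
	/* hold a PM active reference while the GPU is accessed */
	kbase_pm_context_active(kbdev);

	/* ... access GPU registers / issue MMU commands ... */

	/* drop the reference taken above */
	kbase_pm_context_idle(kbdev);
}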