/** * kbase_destroy_context - Destroy a kernel base context. * @kctx: Context to destroy * * Calls kbase_destroy_os_context() to free OS specific structures. * Will release all outstanding regions. */ void kbase_destroy_context(struct kbase_context *kctx) { struct kbase_device *kbdev; int pages; unsigned long pending_regions_to_clean; /* MALI_SEC_INTEGRATION */ int profile_count; /* MALI_SEC_INTEGRATION */ if (!kctx) { printk("An uninitialized or destroyed context is tried to be destroyed. kctx is null\n"); return ; } else if (kctx->ctx_status != CTX_INITIALIZED) { printk("An uninitialized or destroyed context is tried to be destroyed\n"); printk("kctx: 0x%p, kctx->tgid: %d, kctx->ctx_status: 0x%x\n", kctx, kctx->tgid, kctx->ctx_status); return ; } KBASE_DEBUG_ASSERT(NULL != kctx); kbdev = kctx->kbdev; KBASE_DEBUG_ASSERT(NULL != kbdev); /* MALI_SEC_INTEGRATION */ for (profile_count = 0; profile_count < 3; profile_count++) { if (wait_event_timeout(kctx->mem_profile_wait, atomic_read(&kctx->mem_profile_showing_state) == 0, (unsigned int) msecs_to_jiffies(1000))) break; else printk("[G3D] waiting for memory profile\n"); } /* MALI_SEC_INTEGRATION */ while (wait_event_timeout(kbdev->pm.suspending_wait, kbdev->pm.suspending == false, (unsigned int) msecs_to_jiffies(1000)) == 0) printk("[G3D] Waiting for resuming the device\n"); KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u); /* Ensure the core is powered up for the destroy process */ /* A suspend won't happen here, because we're in a syscall from a userspace * thread. 
*/ kbase_pm_context_active(kbdev); kbase_jd_zap_context(kctx); kbase_event_cleanup(kctx); kbase_gpu_vm_lock(kctx); /* MMU is disabled as part of scheduling out the context */ kbase_mmu_free_pgd(kctx); /* drop the aliasing sink page now that it can't be mapped anymore */ kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false); /* free pending region setups */ pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK; while (pending_regions_to_clean) { unsigned int cookie = __ffs(pending_regions_to_clean); BUG_ON(!kctx->pending_regions[cookie]); kbase_reg_pending_dtor(kctx->pending_regions[cookie]); kctx->pending_regions[cookie] = NULL; pending_regions_to_clean &= ~(1UL << cookie); } kbase_region_tracker_term(kctx); kbase_gpu_vm_unlock(kctx); /* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */ kbasep_js_kctx_term(kctx); kbase_jd_exit(kctx); kbase_pm_context_idle(kbdev); kbase_mmu_term(kctx); pages = atomic_read(&kctx->used_pages); if (pages != 0) dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages); kbase_mem_pool_term(&kctx->mem_pool); WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0); /* MALI_SEC_INTEGRATION */ if(kbdev->vendor_callbacks->destroy_context) kbdev->vendor_callbacks->destroy_context(kctx); if (kctx->ctx_need_qos) { kctx->ctx_need_qos = false; } vfree(kctx); /* MALI_SEC_INTEGRATION */ kctx = NULL; }
/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Calls kbase_destroy_os_context() to free OS specific structures.
 * Will release all outstanding regions.
 */
/* NOTE(review): a second (vendor MALI_SEC_INTEGRATION) definition of this
 * function appears earlier in this file — presumably the two variants are
 * selected by a build-time conditional not visible in this chunk; confirm
 * against the full source tree.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process */
	/* A suspend won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	/* Zap outstanding jobs and drain pending events before touching the
	 * address space.
	 */
	kbase_jd_zap_context(kctx);
	kbase_event_cleanup(kctx);

	kbase_gpu_vm_lock(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		/* lowest set bit == index of the next pending cookie */
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);
		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);
		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call this one even when didn't initialize (assuming kctx was sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	/* Done with the GPU: drop the PM active reference taken above. */
	kbase_pm_context_idle(kbdev);

	kbase_mmu_term(kctx);

	/* Warn (but continue teardown) if any pages are still accounted to
	 * this context.
	 */
	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	/* kctx was allocated with vmalloc-family allocation; free it last. */
	vfree(kctx);
}