/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Calls kbase_destroy_os_context() to free OS specific structures.
 * Will release all outstanding regions.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;
	/* MALI_SEC_INTEGRATION */
	int profile_count;

	/* MALI_SEC_INTEGRATION */
	if (!kctx) {
		printk("Attempted to destroy an uninitialized or already destroyed context: kctx is NULL\n");
		return;
	} else if (kctx->ctx_status != CTX_INITIALIZED) {
		printk("Attempted to destroy an uninitialized or already destroyed context\n");
		printk("kctx: 0x%p, kctx->tgid: %d, kctx->ctx_status: 0x%x\n",
				kctx, kctx->tgid, kctx->ctx_status);
		return;
	}

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	/* MALI_SEC_INTEGRATION */
	for (profile_count = 0; profile_count < 3; profile_count++) {
		if (wait_event_timeout(kctx->mem_profile_wait,
				atomic_read(&kctx->mem_profile_showing_state) == 0,
				(unsigned int) msecs_to_jiffies(1000)))
			break;
		else
			printk("[G3D] waiting for memory profile\n");
	}

	/* MALI_SEC_INTEGRATION */
	while (wait_event_timeout(kbdev->pm.suspending_wait,
			kbdev->pm.suspending == false,
			(unsigned int) msecs_to_jiffies(1000)) == 0)
		printk("[G3D] Waiting for the device to resume\n");

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process */
	/* A suspend won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);
	kbase_event_cleanup(kctx);

	kbase_gpu_vm_lock(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);

		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);

		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call this one even when didn't initialize (assuming kctx was
	 * sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	kbase_pm_context_idle(kbdev);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->destroy_context)
		kbdev->vendor_callbacks->destroy_context(kctx);

	if (kctx->ctx_need_qos)
		kctx->ctx_need_qos = false;

	vfree(kctx);
	/* MALI_SEC_INTEGRATION */
	kctx = NULL;
}
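/*
 * Illustrative sketch (not part of the driver): the pending-region cleanup in
 * kbase_destroy_context() walks a cookie bitmask by taking the lowest set bit
 * with __ffs() and clearing it once handled. The standalone user-space
 * analogue below uses __builtin_ctzl() in place of the kernel's __ffs() to
 * show the same walk over set bits; the mask value is made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x29UL;	/* bits 0, 3 and 5 set (example value) */

	while (mask) {
		unsigned int bit = __builtin_ctzl(mask);	/* index of lowest set bit */

		printf("handling cookie %u\n", bit);
		mask &= ~(1UL << bit);	/* clear it so the loop terminates */
	}
	return 0;
}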
void kbase_pm_resume(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	/* MUST happen before any pm_context_active calls occur */
	mutex_lock(&kbdev->pm.lock);
	kbdev->pm.suspending = MALI_FALSE;
	mutex_unlock(&kbdev->pm.lock);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Restore the keep_gpu_powered calls */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
	     nr_keep_gpu_powered_ctxs > 0;
	     --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_active(kbdev);
	}

	/* Re-enable instrumentation, if it was previously disabled */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
void kbase_pm_resume(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	/* MUST happen before any pm_context_active calls occur */
	mutex_lock(&kbdev->pm.lock);
	kbdev->pm.suspending = MALI_FALSE;
	kbase_pm_do_poweron(kbdev, MALI_TRUE);
	mutex_unlock(&kbdev->pm.lock);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Restore the keep_gpu_powered calls */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
	     nr_keep_gpu_powered_ctxs > 0;
	     --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_active(kbdev);
	}

#if SLSI_INTEGRATION
	if (kbdev->hwcnt.prev_mm) {
		mutex_lock(&kbdev->hwcnt.mlock);

		if ((kbdev->hwcnt.enable_for_gpr == FALSE) &&
		    (kbdev->hwcnt.s_enable_for_utilization))
			kbdev->hwcnt.enable_for_utilization = TRUE;
		else
			kbdev->hwcnt.enable_for_utilization = FALSE;

		kbase_pm_policy_change(kbdev, 2);

		mutex_unlock(&kbdev->hwcnt.mlock);
	} else
#endif
	/* Re-enable instrumentation, if it was previously disabled */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
void kbase_wait_write_flush(struct kbase_context *kctx)
{
	u32 base_count = 0;

	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread */
	kbase_pm_context_active(kctx->kbdev);
	kbase_pm_request_gpu_cycle_counter(kctx->kbdev);

	while (true) {
		u32 new_count;

		new_count = kbase_reg_read(kctx->kbdev,
				GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
		/* First time around, just store the count. */
		if (base_count == 0) {
			base_count = new_count;
			continue;
		}

		/* No need to handle wrapping, unsigned maths works for this. */
		if ((new_count - base_count) > 1000)
			break;
	}

	kbase_pm_release_gpu_cycle_counter(kctx->kbdev);
	kbase_pm_context_idle(kctx->kbdev);
}
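/*
 * Illustrative sketch (not part of the driver): the "unsigned maths works"
 * comment in kbase_wait_write_flush() relies on u32 subtraction being
 * performed modulo 2^32, so (new_count - base_count) still gives the number
 * of cycles elapsed even when the counter wraps past 0xFFFFFFFF between the
 * two reads. The values below are made up to demonstrate that.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base_count = 0xFFFFFFF0u;	/* sampled just before the wrap */
	uint32_t new_count  = 0x00000014u;	/* sampled just after the wrap */

	/* Modular subtraction: 0x14 - 0xFFFFFFF0 == 0x24 == 36 cycles elapsed. */
	printf("elapsed = %u cycles\n", new_count - base_count);
	return 0;
}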
int kbase_instr_hwcnt_enable(struct kbase_context *kctx,
		struct kbase_uk_hwcnt_setup *setup)
{
	struct kbase_device *kbdev;
	int err;

	kbdev = kctx->kbdev;

	/* Mark the context as active so the GPU is kept turned on */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread. */
	kbase_pm_context_active(kbdev);

	/* Schedule the context in */
	kbasep_js_schedule_privileged_ctx(kbdev, kctx);

	err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, setup);
	if (err) {
		/* Release the context. This had its own Power Manager Active
		 * reference */
		kbasep_js_release_privileged_ctx(kbdev, kctx);

		/* Also release our Power Manager Active reference */
		kbase_pm_context_idle(kbdev);
	}

	return err;
}
int kbase_instr_hwcnt_enable(struct kbase_context *kctx,
		struct kbase_uk_hwcnt_setup *setup)
{
	struct kbase_device *kbdev;
	bool access_allowed;
	int err;

	kbdev = kctx->kbdev;

	/* Determine if the calling task has access to this capability */
	access_allowed = kbase_security_has_capability(kctx,
			KBASE_SEC_INSTR_HW_COUNTERS_COLLECT,
			KBASE_SEC_FLAG_NOAUDIT);
	if (!access_allowed)
		return -EINVAL;

	/* Mark the context as active so the GPU is kept turned on */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread. */
	kbase_pm_context_active(kbdev);

	/* Schedule the context in */
	kbasep_js_schedule_privileged_ctx(kbdev, kctx);

	err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, setup);
	if (err) {
		/* Release the context. This had its own Power Manager Active
		 * reference */
		kbasep_js_release_privileged_ctx(kbdev, kctx);

		/* Also release our Power Manager Active reference */
		kbase_pm_context_idle(kbdev);
	}

	return err;
}
void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
		const struct kbase_pm_ca_policy *new_policy)
{
	const struct kbase_pm_ca_policy *old_policy;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u, new_policy->id);

	/* During a policy change we pretend the GPU is active */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread */
	kbase_pm_context_active(kbdev);

	mutex_lock(&kbdev->pm.lock);

	/* Remove the policy to prevent IRQ handlers from working on it */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	old_policy = kbdev->pm.backend.ca_current_policy;
	kbdev->pm.backend.ca_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	if (old_policy->term)
		old_policy->term(kbdev);

	if (new_policy->init)
		new_policy->init(kbdev);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	kbdev->pm.backend.ca_current_policy = new_policy;

	/* If any core power state changes were previously attempted, but
	 * couldn't be made because the policy was changing (current_policy was
	 * NULL), then re-try them here. */
	kbase_pm_update_cores_state_nolock(kbdev);

	kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
			kbdev->shader_ready_bitmap,
			kbdev->shader_transitioning_bitmap);

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	mutex_unlock(&kbdev->pm.lock);

	/* Now the policy change is finished, we release our fake context active
	 * reference */
	kbase_pm_context_idle(kbdev);
}
void kbase_pm_set_policy(kbase_device *kbdev, const kbase_pm_policy *new_policy)
{
	OSK_ASSERT(kbdev != NULL);
	OSK_ASSERT(new_policy != NULL);

	if (kbdev->pm.new_policy) {
		/* A policy change is already outstanding */
		KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, -1);
		return;
	}

	KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id);

	/* During a policy change we pretend the GPU is active */
	kbase_pm_context_active(kbdev);

	kbdev->pm.new_policy = new_policy;
	kbase_pm_send_event(kbdev, KBASE_PM_EVENT_POLICY_CHANGE);
}
void kbase_pm_resume(struct kbase_device *kbdev)
{
	/* MUST happen before any pm_context_active calls occur */
	kbase_hwaccess_pm_resume(kbdev);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Re-enable instrumentation, if it was previously disabled */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
static base_jd_event_code kbase_dump_cpu_gpu_time(kbase_jd_atom *katom)
{
	kbase_va_region *reg;
	osk_phy_addr addr;
	u64 pfn;
	u32 offset;
	char *page;
	struct timespec ts;
	base_dump_cpu_gpu_counters data;
	u64 system_time;
	u64 cycle_counter;
	mali_addr64 jc = katom->jc;
	kbase_context *kctx = katom->kctx;
	u32 hi1, hi2;

	memset(&data, 0, sizeof(data));

	kbase_pm_context_active(kctx->kbdev);

	/* Read hi, lo, hi to ensure that overflow from lo to hi is handled correctly */
	do {
		hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
		cycle_counter = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
		hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
		cycle_counter |= (((u64)hi1) << 32);
	} while (hi1 != hi2);

	/* Read hi, lo, hi to ensure that overflow from lo to hi is handled correctly */
	do {
		hi1 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
		system_time = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_LO), NULL);
		hi2 = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(TIMESTAMP_HI), NULL);
		system_time |= (((u64)hi1) << 32);
	} while (hi1 != hi2);

	/* Record the CPU's idea of current time */
	getnstimeofday(&ts);

	kbase_pm_context_idle(kctx->kbdev);

	data.sec = ts.tv_sec;
	data.usec = ts.tv_nsec / 1000;
	data.system_time = system_time;
	data.cycle_counter = cycle_counter;

	pfn = jc >> 12;
	offset = jc & 0xFFF;

	if (offset > 0x1000 - sizeof(data)) {
		/* Wouldn't fit in the page */
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

	reg = kbase_region_tracker_find_region_enclosing_address(kctx, jc);
	if (!reg)
		return BASE_JD_EVENT_JOB_CANCELLED;

	if (!(reg->flags & KBASE_REG_GPU_WR)) {
		/* Region is not writable by GPU so we won't write to it either */
		return BASE_JD_EVENT_JOB_CANCELLED;
	}

	if (!reg->phy_pages)
		return BASE_JD_EVENT_JOB_CANCELLED;

	addr = reg->phy_pages[pfn - reg->start_pfn];
	if (!addr)
		return BASE_JD_EVENT_JOB_CANCELLED;

	page = osk_kmap(addr);
	if (!page)
		return BASE_JD_EVENT_JOB_CANCELLED;

	memcpy(page + offset, &data, sizeof(data));
	osk_sync_to_cpu(addr + offset, page + offset, sizeof(data));
	osk_kunmap(addr, page);

	return BASE_JD_EVENT_DONE;
}
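/*
 * Illustrative sketch (not part of the driver): the hi/lo/hi read sequence in
 * kbase_dump_cpu_gpu_time() guards against the low word carrying into the
 * high word between the two 32-bit register reads. The standalone simulation
 * below stands in for the hardware with a plain 64-bit variable (read_hi(),
 * read_lo() and the counter value are made up); if the high half changed
 * while the low half was read, the loop retries, exactly as the driver does.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t hw_counter = 0xFFFFFFFFULL;	/* pretend device counter */

static uint32_t read_hi(void) { return (uint32_t)(hw_counter >> 32); }
static uint32_t read_lo(void)
{
	uint32_t lo = (uint32_t)hw_counter;

	hw_counter++;	/* counter ticks between reads, forcing a carry */
	return lo;
}

int main(void)
{
	uint32_t hi1, hi2;
	uint64_t value;

	do {
		hi1 = read_hi();
		value = read_lo();
		hi2 = read_hi();
		value |= ((uint64_t)hi1) << 32;
	} while (hi1 != hi2);	/* retry if a carry moved lo into hi */

	printf("consistent 64-bit sample: 0x%llx\n", (unsigned long long)value);
	return 0;
}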
STATIC mali_error kbase_instr_hwcnt_enable_internal(kbase_device *kbdev,
		kbase_context *kctx, kbase_uk_hwcnt_setup *setup)
{
	unsigned long flags, pm_flags;
	mali_error err = MALI_ERROR_FUNCTION_FAILED;
	kbasep_js_device_data *js_devdata;
	u32 irq_mask;
	int ret;
	u64 shader_cores_needed;

	KBASE_DEBUG_ASSERT(NULL != kctx);
	KBASE_DEBUG_ASSERT(NULL != kbdev);
	KBASE_DEBUG_ASSERT(NULL != setup);
	KBASE_DEBUG_ASSERT(NULL == kbdev->hwcnt.suspended_kctx);

	shader_cores_needed = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);

	js_devdata = &kbdev->js_data;

	/* alignment failure */
	if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
		goto out_err;

	/* Override core availability policy to ensure all cores are available */
	kbase_pm_ca_instr_enable(kbdev);

	/* Mark the context as active so the GPU is kept turned on */
	/* A suspend won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	/* Request the cores early on synchronously - we'll release them on any
	 * errors (e.g. instrumentation already active) */
	kbase_pm_request_cores_sync(kbdev, MALI_TRUE, shader_cores_needed);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
		/* GPU is being reset */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
		wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	}

	if (kbdev->hwcnt.state != KBASE_INSTR_STATE_DISABLED) {
		/* Instrumentation is already enabled */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
		goto out_unrequest_cores;
	}

	/* Enable interrupt */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, pm_flags);
	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
			irq_mask | PRFCNT_SAMPLE_COMPLETED, NULL);
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, pm_flags);

	/* In use, this context is the owner */
	kbdev->hwcnt.kctx = kctx;
	/* Remember the dump address so we can reprogram it later */
	kbdev->hwcnt.addr = setup->dump_buffer;
	/* Remember all the settings for suspend/resume */
	if (&kbdev->hwcnt.suspended_state != setup)
		memcpy(&kbdev->hwcnt.suspended_state, setup,
				sizeof(kbdev->hwcnt.suspended_state));

	/* Request the clean */
	kbdev->hwcnt.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
	kbdev->hwcnt.triggered = 0;
	/* Clean&invalidate the caches so we're sure the mmu tables for the dump
	 * buffer are valid */
	ret = queue_work(kbdev->hwcnt.cache_clean_wq, &kbdev->hwcnt.cache_clean_work);
	KBASE_DEBUG_ASSERT(ret);

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	/* Wait for cacheclean to complete */
	wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);

	KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);

	/* Schedule the context in */
	kbasep_js_schedule_privileged_ctx(kbdev, kctx);

	/* Configure */
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
			(kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_OFF, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
			setup->dump_buffer & 0xFFFFFFFF, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
			setup->dump_buffer >> 32, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), setup->jm_bm, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN), setup->shader_bm, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_L3_CACHE_EN), setup->l3_cache_bm, kctx);
	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN), setup->mmu_l2_bm, kctx);

	/* Due to PRLAM-8186 we need to disable the Tiler before we enable the
	 * HW counter dump. */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0, kctx);
	else
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), setup->tiler_bm, kctx);

	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
			(kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_MANUAL, kctx);

	/* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), setup->tiler_bm, kctx);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
		/* GPU is being reset */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
		wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	}

	kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
	kbdev->hwcnt.triggered = 1;
	wake_up(&kbdev->hwcnt.wait);

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);

	err = MALI_ERROR_NONE;

	dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
	return err;

out_unrequest_cores:
	kbase_pm_unrequest_cores(kbdev, MALI_TRUE, shader_cores_needed);
	kbase_pm_context_idle(kbdev);
out_err:
	return err;
}
mali_bool kbasep_js_add_job(kbase_context *kctx, kbase_jd_atom *atom)
{
	kbasep_js_kctx_info *js_kctx_info;
	kbase_device *kbdev;
	kbasep_js_device_data *js_devdata;
	kbasep_js_policy *js_policy;
	mali_bool policy_queue_updated = MALI_FALSE;

	OSK_ASSERT(kctx != NULL);
	OSK_ASSERT(atom != NULL);

	kbdev = kctx->kbdev;
	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;
	js_kctx_info = &kctx->jctx.sched_info;

	osk_mutex_lock(&js_devdata->runpool_mutex);
	OSK_PRINT_INFO(OSK_BASE_JM, "JS: job enqueue %p", (void *)atom);

	/* Refcount ctx.nr_jobs */
	OSK_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
	++(js_kctx_info->ctx.nr_jobs);

	/* Setup any scheduling information */
	kbasep_js_clear_job_retry_submit(atom);

	/*
	 * Begin Runpool_irq transaction
	 */
	osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);
	{
		/* NSS refcounting */
		kbasep_js_check_and_ref_nss_job(js_devdata, kctx, atom);

		/* Enqueue the job in the policy, causing it to be scheduled if the
		 * parent context gets scheduled */
		kbasep_js_policy_enqueue_job(js_policy, atom);
	}
	osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
	/* End runpool_irq transaction */

	if (js_kctx_info->ctx.is_scheduled != MALI_FALSE) {
		/* Handle an already running context - try to run the new job, in case
		 * it matches requirements that aren't matched by any other job in the
		 * Run Pool */
		kbasep_js_try_run_next_job(kbdev);
	}

	osk_mutex_unlock(&js_devdata->runpool_mutex);

	if (js_kctx_info->ctx.is_scheduled == MALI_FALSE && js_kctx_info->ctx.nr_jobs == 1) {
		/* Handle Refcount going from 0 to 1: schedule the context on the
		 * Policy Queue */
		OSK_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);

		OSK_PRINT_INFO(OSK_BASE_JM, "JS: Enqueue Context %p", kctx);

		osk_mutex_lock(&js_devdata->queue_mutex);
		kbasep_js_policy_enqueue_ctx(js_policy, kctx);
		osk_mutex_unlock(&js_devdata->queue_mutex);

		/* If the runpool is full and this job has a higher priority than the
		 * non-running job in the runpool - evict it so this higher priority
		 * job starts faster */
		kbasep_js_runpool_attempt_fast_start_ctx(kbdev, kctx);

		/* This context is becoming active */
		kbase_pm_context_active(kctx->kbdev);

		/* NOTE: Potentially, we can make the scheduling of the head context
		 * happen in a work-queue if we need to wait for the PM to power up.
		 * Also need logic to submit nothing until PM really has completed
		 * powering up. */

		/* Policy Queue was updated - caller must try to schedule the head
		 * context */
		policy_queue_updated = MALI_TRUE;
	}

	return policy_queue_updated;
}
/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Calls kbase_destroy_os_context() to free OS specific structures.
 * Will release all outstanding regions.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process */
	/* A suspend won't happen here, because we're in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);
	kbase_event_cleanup(kctx);

	kbase_gpu_vm_lock(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);

		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);

		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);

	/* Safe to call this one even when didn't initialize (assuming kctx was
	 * sufficiently zeroed) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	kbase_pm_context_idle(kbdev);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	vfree(kctx);
}