/* Complete an atom that has returned '1' from kbase_process_soft_job (i.e. has waited)
 *
 * @param katom The atom to complete
 */
static void complete_soft_job(kbase_jd_atom *katom)
{
    kbase_context *kctx = katom->kctx;

    mutex_lock(&kctx->jctx.lock);
    OSK_DLIST_REMOVE(&kctx->waiting_soft_jobs, katom, dep_item[0]);
    kbase_finish_soft_job(katom);
    if (jd_done_nolock(katom))
    {
        kbasep_js_try_schedule_head_ctx( kctx->kbdev );
    }
    mutex_unlock(&kctx->jctx.lock);
}
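/*
 * Illustrative sketch (not part of this file): the enqueue side that
 * complete_soft_job() undoes. It assumes kbase_process_soft_job() parks a
 * waiting atom on kctx->waiting_soft_jobs roughly like this before returning
 * 1; the function name and the exact OSK_DLIST_PUSH_BACK argument list are
 * assumptions, not confirmed by this excerpt.
 */
#if 0 /* sketch only */
static int kbase_process_soft_job_wait_sketch(kbase_jd_atom *katom)
{
    kbase_context *kctx = katom->kctx;

    mutex_lock(&kctx->jctx.lock);
    /* Park the atom until its wait condition is signalled; dep_item[0]
     * is the same link member that complete_soft_job() removes. */
    OSK_DLIST_PUSH_BACK(&kctx->waiting_soft_jobs, katom,
                        kbase_jd_atom, dep_item[0]);
    mutex_unlock(&kctx->jctx.lock);

    return 1; /* tell the caller the atom is now waiting */
}
#endif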
static void kbase_fence_cancel_wait(kbase_jd_atom *katom)
{
    if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0)
    {
        /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
        return;
    }

    /* Wait was cancelled - zap the atom */
    katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
    kbase_finish_soft_job(katom);
    if (jd_done_nolock(katom))
    {
        kbasep_js_try_schedule_head_ctx( katom->kctx->kbdev );
    }
}
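/*
 * Illustrative sketch (not part of this excerpt): the cleanup path that
 * kbase_fence_cancel_wait() defers to when the wait could not be cancelled.
 * It assumes the Android sync framework's sync_fence_waiter callback
 * signature. The worker function kbase_fence_wait_worker, the katom->work
 * member, and the jctx.job_done_wq workqueue are all assumptions: fence
 * callbacks may run in atomic context, so completion is deferred to process
 * context where complete_soft_job() can safely take kctx->jctx.lock.
 */
#if 0 /* sketch only */
static void kbase_fence_wait_callback(struct sync_fence *fence,
                                      struct sync_fence_waiter *waiter)
{
    kbase_jd_atom *katom = CONTAINER_OF(waiter, kbase_jd_atom, sync_waiter);

    /* Propagate a fence error into the atom's completion code */
    if (fence->status < 0)
    {
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
    }

    /* Defer completion to process context; the work item runs
     * complete_soft_job() on the atom. */
    INIT_WORK(&katom->work, kbase_fence_wait_worker);
    queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
}
#endif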
void kbasep_js_runpool_release_ctx( kbase_device *kbdev, kbase_context *kctx )
{
    kbasep_js_device_data *js_devdata;
    kbasep_js_kctx_info *js_kctx_info;
    kbasep_js_policy *js_policy;
    kbasep_js_per_as_data *js_per_as_data;

    mali_bool was_descheduled = MALI_FALSE;
    int saved_as_nr;
    kbase_as *current_as;
    int new_ref_count;
    mali_bool nss_state_changed = MALI_FALSE;

    OSK_ASSERT( kbdev != NULL );
    OSK_ASSERT( kctx != NULL );
    js_kctx_info = &kctx->jctx.sched_info;
    js_devdata = &kbdev->js_data;
    js_policy = &kbdev->js_data.policy;

    osk_mutex_lock( &js_kctx_info->ctx.jsctx_mutex );
    osk_mutex_lock( &js_devdata->runpool_mutex );

    /* Ensure context really is scheduled in */
    OSK_ASSERT( js_kctx_info->ctx.is_scheduled != MALI_FALSE );

    /* The saved_as_nr must be accessed under lock, but we also need to take a
     * sleeping mutex. Since the ctx is known to be busy-refcounted, we can
     * just take the runpool lock briefly, then take it again later (the as_nr
     * won't be reassigned due to being busy).
     *
     * We ASSERT on this fact */
    osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
    {
        saved_as_nr = kctx->as_nr;
        OSK_ASSERT( saved_as_nr != KBASEP_AS_NR_INVALID );
        js_per_as_data = &js_devdata->runpool_irq.per_as_data[saved_as_nr];
        OSK_ASSERT( js_per_as_data->as_busy_refcount > 0 );
    }
    osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );

    /*
     * Transaction begins on AS and runpool_irq
     *
     * Doubly-assert that our previous facts are still true
     */
    current_as = &kbdev->as[saved_as_nr];
    osk_mutex_lock( &current_as->transaction_mutex );
    osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
    OSK_ASSERT( saved_as_nr == kctx->as_nr );
    OSK_ASSERT( js_per_as_data->as_busy_refcount > 0 );

    /* Update refcount */
    new_ref_count = --(js_per_as_data->as_busy_refcount);

    /* Make a set of checks to see if the context should be scheduled out */
    if ( new_ref_count == 0
         && ( kctx->jctx.sched_info.ctx.nr_jobs == 0
              || kbasep_js_is_submit_allowed( js_devdata, kctx ) == MALI_FALSE ) )
    {
        /* Last reference, and we've been told to remove this context from the Run Pool */
        OSK_PRINT_INFO(OSK_BASE_JM, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d",
                       kctx,
                       new_ref_count,
                       js_kctx_info->ctx.nr_jobs,
                       kbasep_js_is_submit_allowed( js_devdata, kctx ) );

        kbasep_js_policy_runpool_remove_ctx( js_policy, kctx );

        /* Stop any more refcounts occurring on the context */
        js_per_as_data->kctx = NULL;

        /* Ensure we prevent the context from submitting any new jobs
         * e.g. from kbasep_js_try_run_next_job_on_slot_irq_nolock() */
        kbasep_js_clear_submit_allowed( js_devdata, kctx );

        /* Disable the MMU on the affected address space, and indicate it's invalid */
        kbase_mmu_disable( kctx );
        kctx->as_nr = KBASEP_AS_NR_INVALID;

        /* NSS handling */
        nss_state_changed = kbasep_js_check_and_deref_nss_running_ctx( js_devdata, kctx );

        /*
         * Transaction ends on AS and runpool_irq:
         *
         * By this point, the AS-related data is now clear and ready for re-use.
         *
         * Since releases only occur once for each previous successful retain, and no more
         * retains are allowed on this context, no other thread will be operating in this
         * code whilst we are
         */
        osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
        osk_mutex_unlock( &current_as->transaction_mutex );

        /* Free up the address space */
        js_devdata->as_free |= ((u16)(1u << saved_as_nr));
        /* Note: Don't reuse saved_as_nr now */

        /* update book-keeping info */
        --(js_devdata->nr_contexts_running);
        js_kctx_info->ctx.is_scheduled = MALI_FALSE;
        /* Signal any waiter that the context is not scheduled, so is safe for
         * termination - once the jsctx_mutex is also dropped, and jobs have
         * finished. */
        osk_waitq_set( &js_kctx_info->ctx.not_scheduled_waitq );

        /* Handle dying contexts */
        if ( js_kctx_info->ctx.is_dying != MALI_FALSE )
        {
            /* This happens asynchronously */
            OSK_PRINT_INFO(OSK_BASE_JM, "JS: ** Killing Context %p on RunPool Remove **", kctx );
            kbasep_js_policy_kill_all_ctx_jobs( js_policy, kctx );
        }

        /* Queue an action to occur after we've dropped the lock */
        was_descheduled = MALI_TRUE;
    }
    else
    {
        osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
        osk_mutex_unlock( &current_as->transaction_mutex );
    }
    osk_mutex_unlock( &js_devdata->runpool_mutex );

    /* Do we have an action queued whilst the lock was held? */
    if ( was_descheduled != MALI_FALSE )
    {
        /* Determine whether this context should be requeued on the policy queue */
        if ( js_kctx_info->ctx.nr_jobs > 0 && js_kctx_info->ctx.is_dying == MALI_FALSE )
        {
            OSK_PRINT_INFO(OSK_BASE_JM, "JS: Requeue Context %p", kctx );
            osk_mutex_lock( &js_devdata->queue_mutex );
            kbasep_js_policy_enqueue_ctx( js_policy, kctx );
            osk_mutex_unlock( &js_devdata->queue_mutex );
        }
        else
        {
            OSK_PRINT_INFO(OSK_BASE_JM, "JS: Idling Context %p (not requeued)", kctx );
            /* Notify PM that a context has gone idle */
            kbase_pm_context_idle(kctx->kbdev);
        }
    }

    /* We've finished with this context for now, so drop the lock for it. */
    osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );

    if ( was_descheduled != MALI_FALSE )
    {
        /* We've freed up an address space, so let's try to schedule in another
         * context
         *
         * Note: if there's a context to schedule in, then it also tries to run
         * another job, in case the new context has jobs satisfying requirements
         * that no other context/job in the runpool does */
        kbasep_js_try_schedule_head_ctx( kbdev );
    }

    if ( nss_state_changed != MALI_FALSE )
    {
        osk_mutex_lock( &js_devdata->runpool_mutex );
        kbasep_js_try_run_next_job( kbdev );
        osk_mutex_unlock( &js_devdata->runpool_mutex );
    }
}
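/*
 * Illustrative sketch (not part of this file): the caller pattern that the
 * busy-refcounting above relies on. Every kbasep_js_runpool_release_ctx()
 * pairs with a previous successful kbasep_js_runpool_retain_ctx(); while the
 * refcount is held, kctx->as_nr cannot be reassigned, which is what lets the
 * function above read it outside runpool_irq.lock. The wrapper function name
 * below is hypothetical; the retain counterpart is assumed to return a
 * mali_bool indicating whether the context was pinned in the runpool.
 */
#if 0 /* sketch only */
static void example_use_scheduled_ctx(kbase_device *kbdev, kbase_context *kctx)
{
    /* Fails (returns MALI_FALSE) if kctx is not currently in the runpool */
    if ( kbasep_js_runpool_retain_ctx( kbdev, kctx ) != MALI_FALSE )
    {
        /* ... safe to use kctx's address space here: it cannot be
         * descheduled or have its AS reassigned while pinned ... */

        /* Drop the refcount; if it hits zero and the context has no jobs
         * or is barred from submitting, this deschedules kctx as above. */
        kbasep_js_runpool_release_ctx( kbdev, kctx );
    }
}
#endif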