mali_bool kbase_security_has_capability(kbase_context *kctx, kbase_security_capability cap, u32 flags)
{
	/* Assume failure */
	mali_bool access_allowed = MALI_FALSE;
	mali_bool audit = (KBASE_SEC_FLAG_AUDIT & flags) ? MALI_TRUE : MALI_FALSE;

	KBASE_DEBUG_ASSERT(NULL != kctx);
	CSTD_UNUSED(kctx);

	/* Detect unsupported flags */
	KBASE_DEBUG_ASSERT(((~KBASE_SEC_FLAG_MASK) & flags) == 0);

	/* Determine if access is allowed for the given cap */
	switch (cap) {
	case KBASE_SEC_MODIFY_PRIORITY:
	case KBASE_SEC_INSTR_HW_COUNTERS_COLLECT:
		/* Access is granted only if the caller is privileged */
		access_allowed = kbasep_am_i_root();
		break;
	}

	/* Report problem if requested */
	if (MALI_FALSE == access_allowed) {
		if (MALI_FALSE != audit)
			KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Security capability failure: %d, %p", cap, (void *)kctx);
	}

	return access_allowed;
}
void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx)
{
	struct kbasep_js_kctx_info *js_kctx_info;
	mali_bool runpool_state_changed = MALI_FALSE;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);
	js_kctx_info = &kctx->jctx.sched_info;

	if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) != MALI_FALSE) {
		/* This context never submits, so don't track any scheduling attributes */
		return;
	}

	/* Transfer attributes held in the context flags for contexts that have submit enabled */
	if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_HINT_ONLY_COMPUTE) != MALI_FALSE) {
		/* Compute context */
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
	}
	/* NOTE: Whether this is a non-compute context depends on the jobs being
	 * run, e.g. it might be submitting jobs with BASE_JD_REQ_ONLY_COMPUTE */

	/* ... More attributes can be added here ... */

	/* The context should not have been scheduled yet, so ASSERT if this caused
	 * runpool state changes (note that other threads *can't* affect the value
	 * of runpool_state_changed, due to how it's calculated) */
	KBASE_DEBUG_ASSERT(runpool_state_changed == MALI_FALSE);
	CSTD_UNUSED(runpool_state_changed);
}
bool kbase_security_has_capability(struct kbase_context *kctx, enum kbase_security_capability cap, u32 flags)
{
	/* Assume failure */
	bool access_allowed = false;
	bool audit = KBASE_SEC_FLAG_AUDIT & flags;

	KBASE_DEBUG_ASSERT(NULL != kctx);
	CSTD_UNUSED(kctx);

	/* Detect unsupported flags */
	KBASE_DEBUG_ASSERT(((~KBASE_SEC_FLAG_MASK) & flags) == 0);

	/* Determine if access is allowed for the given cap */
	switch (cap) {
	case KBASE_SEC_MODIFY_PRIORITY:
	case KBASE_SEC_INSTR_HW_COUNTERS_COLLECT:
		/* Access is granted only if the caller is privileged */
		access_allowed = kbasep_am_i_root();
		break;
	}

	/* Report problem if requested */
	if (!access_allowed && audit)
		dev_warn(kctx->kbdev->dev, "Security capability failure: %d, %p", cap, (void *)kctx);

	return access_allowed;
}
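/* Example (hypothetical caller, not part of the driver): deny an operation
 * that needs a privileged capability, and request a warning on failure via
 * the audit flag:
 *
 *	if (!kbase_security_has_capability(kctx,
 *			KBASE_SEC_INSTR_HW_COUNTERS_COLLECT,
 *			KBASE_SEC_FLAG_AUDIT))
 *		return -EPERM;
 */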
static enum hrtimer_restart kbasep_pm_do_shader_poweroff_callback(struct hrtimer *timer)
{
	kbase_device *kbdev;
	unsigned long flags;

	kbdev = container_of(timer, kbase_device, pm.shader_poweroff_timer);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	if (kbdev->pm.shader_poweroff_pending != 0) {
		u64 prev_shader_state = kbdev->pm.desired_shader_state;
		mali_bool cores_are_available;

		kbdev->pm.desired_shader_state &= ~kbdev->pm.shader_poweroff_pending;
		kbdev->pm.shader_poweroff_pending = 0;

		if (prev_shader_state != kbdev->pm.desired_shader_state || kbdev->pm.ca_in_transition != MALI_FALSE) {
			KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
			cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
			KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);
		}
		/* Don't need 'cores_are_available', because we don't return anything */
		CSTD_UNUSED(cores_are_available);
	}

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	return HRTIMER_NORESTART;
}
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
{
	u64 desired_bitmap;
	mali_bool cores_are_available;

	lockdep_assert_held(&kbdev->pm.power_change_lock);

	if (kbdev->pm.pm_current_policy == NULL)
		return;

	desired_bitmap = kbdev->pm.pm_current_policy->get_core_mask(kbdev);
	desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);

	/* Enable core 0 if tiler required, regardless of core availability */
	if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
		desired_bitmap |= 1;

	if (kbdev->pm.desired_shader_state != desired_bitmap)
		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u, (u32)desired_bitmap);

	/* Are any cores being powered on? */
	if (~kbdev->pm.desired_shader_state & desired_bitmap || kbdev->pm.ca_in_transition != MALI_FALSE) {
		/* Check if we are powering off any cores before updating shader state */
		if (kbdev->pm.desired_shader_state & ~desired_bitmap) {
			/* Start timer to power off cores */
			kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap);
			kbdev->pm.shader_poweroff_pending_time = kbdev->pm.poweroff_shader_ticks;
		}

		kbdev->pm.desired_shader_state = desired_bitmap;

		/* If any cores are being powered on, transition immediately */
		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	} else if (kbdev->pm.desired_shader_state & ~desired_bitmap) {
		/* Start timer to power off cores */
		kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap);
		kbdev->pm.shader_poweroff_pending_time = kbdev->pm.poweroff_shader_ticks;
	} else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 && kbdev->pm.poweroff_timer_running) {
		/* If the power policy is keeping cores on despite there being no
		 * active contexts, then disable the poweroff timer as it isn't
		 * required */
		kbdev->pm.poweroff_timer_running = MALI_FALSE;
		hrtimer_cancel(&kbdev->pm.gpu_poweroff_timer);
	}

	/* Ensure timer does not power off wanted cores and make sure to power off unwanted cores */
	if (kbdev->pm.shader_poweroff_pending != 0) {
		kbdev->pm.shader_poweroff_pending &= ~(kbdev->pm.desired_shader_state & desired_bitmap);
		if (kbdev->pm.shader_poweroff_pending == 0)
			kbdev->pm.shader_poweroff_pending_time = 0;
	}

	/* Don't need 'cores_are_available', because we don't return anything */
	CSTD_UNUSED(cores_are_available);
}
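/* Worked example (illustrative values only): if the current
 * desired_shader_state is 0b0110 and the policy/availability masks yield
 * desired_bitmap = 0b0011, then ~0b0110 & 0b0011 = 0b0001, so core 0 is
 * powering on and the first branch transitions immediately; meanwhile
 * 0b0110 & ~0b0011 = 0b0100 means core 2 has become unwanted and is queued
 * on shader_poweroff_pending for the deferred poweroff timer.
 */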
/*
 * The output flags should be a combination of the following values:
 * KBASE_REG_CPU_CACHED: CPU cache should be enabled
 */
u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
{
	u32 cache_flags = 0;

	CSTD_UNUSED(nr_pages);

	if (flags & BASE_MEM_CACHED_CPU)
		cache_flags |= KBASE_REG_CPU_CACHED;

	return cache_flags;
}
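/* Example (illustrative): a CPU-cached allocation request maps to the
 * CPU-cached region flag, and any other input bits are ignored. The page
 * count does not affect the result:
 *
 *	u32 reg_flags = kbase_cache_enabled(BASE_MEM_CACHED_CPU, 16);
 *	reg_flags is now KBASE_REG_CPU_CACHED.
 */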
static enum hrtimer_restart kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
{
	kbase_device *kbdev;

	kbdev = container_of(timer, kbase_device, pm.gpu_poweroff_timer);

	/* It is safe for this call to do nothing if the work item is already
	 * queued. The worker function will read the most up-to-date state of
	 * kbdev->pm.gpu_poweroff_pending under lock.
	 *
	 * If a state change occurs while the worker function is processing,
	 * this call will succeed as a work item can be requeued once it has
	 * started processing. */
	if (kbdev->pm.gpu_poweroff_pending)
		queue_work(kbdev->pm.gpu_poweroff_wq, &kbdev->pm.gpu_poweroff_work);

	if (kbdev->pm.shader_poweroff_pending) {
		unsigned long flags;

		spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

		if (kbdev->pm.shader_poweroff_pending) {
			kbdev->pm.shader_poweroff_pending_time--;

			KBASE_DEBUG_ASSERT(kbdev->pm.shader_poweroff_pending_time >= 0);

			if (kbdev->pm.shader_poweroff_pending_time == 0) {
				u64 prev_shader_state = kbdev->pm.desired_shader_state;

				kbdev->pm.desired_shader_state &= ~kbdev->pm.shader_poweroff_pending;
				kbdev->pm.shader_poweroff_pending = 0;

				if (prev_shader_state != kbdev->pm.desired_shader_state || kbdev->pm.ca_in_transition != MALI_FALSE) {
					mali_bool cores_are_available;

					KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
					cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
					KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);

					/* Don't need 'cores_are_available', because we don't return anything */
					CSTD_UNUSED(cores_are_available);
				}
			}
		}

		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
	}

	hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);

	return HRTIMER_RESTART;
}
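/* Timing sketch (illustrative numbers, not taken from the source): this
 * callback re-arms itself every pm.gpu_poweroff_time and decrements
 * shader_poweroff_pending_time once per expiry. If poweroff_shader_ticks
 * were 3 and the timer period 400us, unwanted shader cores would actually
 * be released roughly 3 * 400us = 1.2ms after
 * kbase_pm_update_cores_state_nolock() queued them on
 * shader_poweroff_pending.
 */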
void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
{
	mali_bool runpool_state_changed;
	int i;

	/* Retain any existing attributes */
	for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
		if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != MALI_FALSE) {
			/* The context is being scheduled in, so update the runpool with the new attributes */
			runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);

			/* We don't need to know about state changed, because retaining a
			 * context occurs on scheduling it, and that itself will also try
			 * to run new atoms */
			CSTD_UNUSED(runpool_state_changed);
		}
	}
}
static void dvfs_callback(void *data)
{
	kbase_device *kbdev;
	kbase_pm_dvfs_action action;
	osk_error ret;

	OSK_ASSERT(data != NULL);

	kbdev = (kbase_device *)data;

#ifdef CONFIG_VITHAR_DVFS
	CSTD_UNUSED(action);
	kbase_platform_dvfs_event(kbdev, kbase_pm_get_dvfs_utilisation(kbdev));
#else
	action = kbase_pm_get_dvfs_action(kbdev);

	switch (action) {
	case KBASE_PM_DVFS_NOP:
		break;
	case KBASE_PM_DVFS_CLOCK_UP:
		/* Do whatever is required to increase the clock frequency */
		break;
	case KBASE_PM_DVFS_CLOCK_DOWN:
		/* Do whatever is required to decrease the clock frequency */
		break;
	}
#endif

	osk_spinlock_irq_lock(&kbdev->pm.metrics.lock);
	if (kbdev->pm.metrics.timer_active) {
		ret = osk_timer_start(&kbdev->pm.metrics.timer, KBASE_PM_DVFS_FREQUENCY);
		if (ret != OSK_ERR_NONE) {
			/* Handle the situation where the timer cannot be restarted */
		}
	}
	osk_spinlock_irq_unlock(&kbdev->pm.metrics.lock);
}
void kbase_pm_do_poweroff(kbase_device *kbdev)
{
	unsigned long flags;
	mali_bool cores_are_available;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	/* Force all cores off */
	kbdev->pm.desired_shader_state = 0;

	/* Force all cores to be unavailable, in the situation where
	 * transitions are in progress for some cores but not others,
	 * and kbase_pm_check_transitions_nolock can not immediately
	 * power off the cores */
	kbdev->shader_available_bitmap = 0;
	kbdev->tiler_available_bitmap = 0;
	kbdev->l2_available_bitmap = 0;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);

	/* Don't need 'cores_are_available', because we don't return anything */
	CSTD_UNUSED(cores_are_available);

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/* NOTE: We won't wait for the cores to reach their desired state, even
	 * if we're powering off the GPU itself too. It's safe to cut the power
	 * whilst they're transitioning to off, because the cores should be idle
	 * and all cache flushes should already have occurred */

	/* Consume any change-state events */
	kbase_timeline_pm_check_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

	/* Disable interrupts and turn the clock off */
	kbase_pm_clock_off(kbdev);
}
void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	mali_bool runpool_state_changed = MALI_FALSE;
	base_jd_core_req core_req;

	KBASE_DEBUG_ASSERT(katom);
	core_req = katom->core_req;

	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
	else
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);

	if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 &&
	    (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
		/* Atom that can run on slot1 or slot2, and can use all cores */
		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
	}

	/* We don't need to know about state changed, because retaining an
	 * atom occurs on adding it, and that itself will also try to run
	 * new atoms */
	CSTD_UNUSED(runpool_state_changed);
}
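/* Example (illustrative): an atom submitted with
 * core_req = BASE_JD_REQ_ONLY_COMPUTE and neither coherent-group bit set
 * retains both KBASEP_JS_CTX_ATTR_COMPUTE (first branch) and
 * KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES (second branch), since it can run
 * on either compute slot and is free to use all cores.
 */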
/**
 * Terminate the demand power policy.
 *
 * This frees the resources that were allocated by @ref demand_init.
 *
 * @param kbdev The kbase device structure for the device
 */
static void demand_term(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/**
 * Terminate the always_on power policy.
 *
 * This frees the resources that were allocated by @ref always_on_init.
 *
 * @param kbdev The kbase device structure for the device
 */
static void always_on_term(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/**
 * Terminate the coarse_demand power policy.
 *
 * This frees the resources that were allocated by @ref coarse_demand_init.
 *
 * @param kbdev The kbase device structure for the device
 */
static void coarse_demand_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/**
 * Terminate the fixed power policy.
 *
 * @param kbdev The kbase device structure for the device
 */
static void fixed_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
void oskp_validate_format_string(const char *format, ...)
{
#if MALI_DEBUG
	char c;
	static const char *supported[] = {
		"d", "ld", "lld",
		"x", "lx", "llx",
		"X", "lX", "llX",
		"u", "lu", "llu",
		"p", "c", "s",
	};
	static const unsigned char sizes[] = {
		1, 2, 3,
		1, 2, 3,
		1, 2, 3,
		1, 2, 3,
		1, 1, 1
	};
	unsigned int i;

	/* %[flags][width][.precision][length]specifier */
	while ((c = *format++)) {
		if (c == '%') {
			c = *format;
			if (c == '\0') {
				/* Unsupported format */
				OSK_PRINT_WARN(OSK_OSK, "OSK Format specification not complete (%% not followed by anything)\n");
				return;
			} else if (c != '%') {
				/* Skip to the [length]specifier part assuming it starts with
				 * an alphabetic character and flags, width, precision do not
				 * contain alphabetic characters. */
				do {
					if ((c >= 'a' && c <= 'z') || c == 'X') {
						/* Match supported formats with current position in format string */
						for (i = 0; i < NELEMS(supported); i++) {
							if (strncmp(format, supported[i], sizes[i]) == 0) {
								/* Supported format */
								break;
							}
						}

						if (i == NELEMS(supported)) {
							/* Unsupported format */
							OSK_PRINT_WARN(OSK_OSK, "OSK Format string specifier not supported (starting at '%s')\n", format);
							return;
						}

						/* Start looking for next '%' */
						break;
					}
				} while ((c = *++format));
			}
		}
	}
#else
	CSTD_UNUSED(format);
#endif
}
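/* Behaviour sketch (derived from the tables above; example strings are
 * hypothetical): "%d bytes at %p" passes, since "d" and "p" appear in
 * supported[]. "%f" and "%zu" trigger the "specifier not supported"
 * warning, as neither matches an entry. The comparison starts at the first
 * alphabetic character after '%', so for "%ld" the scan begins at 'l' and
 * matches the two-character entry "ld".
 */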
/**
 * Core status update callback for the fixed power policy.
 *
 * The fixed policy takes no action on core state changes, so all arguments
 * are unused.
 *
 * @param kbdev               The kbase device structure for the device
 * @param cores_ready         Bitmask of cores that are ready
 * @param cores_transitioning Bitmask of cores that are transitioning
 */
static void fixed_update_core_status(struct kbase_device *kbdev, u64 cores_ready, u64 cores_transitioning)
{
	CSTD_UNUSED(kbdev);
	CSTD_UNUSED(cores_ready);
	CSTD_UNUSED(cores_transitioning);
}
/**
 * Initialize the demand power policy.
 *
 * @param kbdev The kbase device structure for the device
 */
static void demand_init(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/**
 * Initialize the always_on power policy.
 *
 * @param kbdev The kbase device structure for the device
 */
static void always_on_init(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/**
 * Fast start a higher priority job when the runpool is full and contains a
 * non-running lower priority job. The evicted job will be returned to the
 * policy queue.
 *
 * The following locking conditions are made on the caller:
 * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
 */
STATIC void kbasep_js_runpool_attempt_fast_start_ctx(kbase_device *kbdev, kbase_context *kctx)
{
	kbasep_js_device_data *js_devdata;
	kbasep_js_policy *js_policy;
	kbasep_js_per_as_data *js_per_as_data;
	int evict_as_nr;
	kbase_as *current_as;
	mali_bool nss_state_changed = MALI_FALSE;
	mali_bool is_runpool_full;

	OSK_ASSERT(kbdev != NULL);
	OSK_ASSERT(kctx != NULL);

	js_devdata = &kbdev->js_data;
	js_policy = &kbdev->js_data.policy;

	osk_mutex_lock(&js_devdata->runpool_mutex);

	/* If the runpool is full, attempt to fast start our context */
	is_runpool_full = (mali_bool)(js_devdata->nr_contexts_running >= kbdev->nr_address_spaces);

	if (is_runpool_full != MALI_FALSE) {
		/* No free address spaces - attempt to evict a non-running lower priority context */
		osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);
		for (evict_as_nr = 0; evict_as_nr < kbdev->nr_address_spaces; evict_as_nr++) {
			current_as = &kbdev->as[evict_as_nr];
			js_per_as_data = &js_devdata->runpool_irq.per_as_data[evict_as_nr];

			/* Look for the AS which is not currently running */
			if (0 == js_per_as_data->as_busy_refcount) {
				kbase_context *kctx_evict = js_per_as_data->kctx;

				osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);

				/* Now compare the scheduled priority we are considering evicting with the new ctx priority
				 * and take into consideration if the scheduled priority is a realtime policy or not.
				 * Note that the lower the number, the higher the priority */
				if (((kctx_evict->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy == MALI_FALSE) && kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy) ||
				    ((kctx_evict->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy == kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy) &&
				     (kctx_evict->jctx.sched_info.runpool.policy_ctx.cfs.bag_priority > kctx->jctx.sched_info.runpool.policy_ctx.cfs.bag_priority))) {
					/* Evict the idle job in the runpool, as its priority is lower than the new job's */
					osk_mutex_lock(&current_as->transaction_mutex);
					osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);

					/* Remove the context from the runpool policy list (policy_info->scheduled_ctxs_head) */
					kbasep_js_policy_runpool_remove_ctx(js_policy, kctx_evict);

					/* Stop any more refcounts occurring on the context */
					js_per_as_data->kctx = NULL;

					/* Prevent the context from submitting more jobs on this policy */
					kbasep_js_clear_submit_allowed(js_devdata, kctx_evict);

					/* Disable the MMU on the affected address space, and indicate it's invalid */
					kbase_mmu_disable(kctx_evict);
					kctx_evict->as_nr = KBASEP_AS_NR_INVALID;

					/* NSS handling */
					nss_state_changed = kbasep_js_check_and_deref_nss_running_ctx(js_devdata, kctx_evict);
					CSTD_UNUSED(nss_state_changed);

					osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
					osk_mutex_unlock(&current_as->transaction_mutex);

					/* Free up the address space */
					js_devdata->as_free |= ((u16)(1u << evict_as_nr));

					/* Update book-keeping info */
					--(js_devdata->nr_contexts_running);
					kctx_evict->jctx.sched_info.ctx.is_scheduled = MALI_FALSE;

					/* Signal any waiter that the context is not scheduled */
					osk_waitq_set(&kctx_evict->jctx.sched_info.ctx.not_scheduled_waitq);

					osk_mutex_unlock(&js_devdata->runpool_mutex);

					/* Requeue onto the policy queue */
					OSK_PRINT_INFO(OSK_BASE_JM, "JS: Requeue Context %p", kctx_evict);

					osk_mutex_lock(&js_devdata->queue_mutex);
					kbasep_js_policy_enqueue_ctx(js_policy, kctx_evict);
					osk_mutex_unlock(&js_devdata->queue_mutex);

					/* ctx fast start has taken place */
					return;
				}
				osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);
			}
		}
		osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
	}

	/* ctx fast start has not taken place */
	osk_mutex_unlock(&js_devdata->runpool_mutex);
}
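/* Eviction rule sketch (illustrative restatement of the comparison above):
 * a candidate kctx_evict loses its address space when either
 *   - it is not realtime (process_rt_policy == MALI_FALSE) but the incoming
 *     kctx is, or
 *   - both have the same realtime status and kctx_evict has a numerically
 *     larger (i.e. lower) bag_priority than the incoming kctx.
 * So a realtime context always fast-starts over an idle non-realtime one.
 */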
void kbasep_js_try_run_next_job_on_slot(kbase_device *kbdev, int js)
{
	kbasep_js_device_data *js_devdata;
	mali_bool has_job;
	mali_bool cores_ready;

	OSK_ASSERT(kbdev != NULL);

	js_devdata = &kbdev->js_data;

#if BASE_HW_ISSUE_7347
	for (js = 0; js < kbdev->nr_job_slots; js++) {
#endif
		kbase_job_slot_lock(kbdev, js);

		/* Keep submitting while there's space to run a job on this job-slot,
		 * and there are jobs to get that match its requirements (see 'break'
		 * statement below) */
		if (kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE) {
			/* Only lock the Run Pool whilst there's work worth doing */
			osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);

			/* The caller of this function may not be aware of NSS status changes so we
			 * must recheck if the given slot is still valid. Otherwise do not try to run. */
			if (kbase_js_can_run_job_on_slot_no_lock(js_devdata, js)) {
				do {
					kbase_jd_atom *dequeued_atom;

					/* Dequeue a job that matches the requirements */
					has_job = kbasep_js_policy_dequeue_job(kbdev, js, &dequeued_atom);

					if (has_job != MALI_FALSE) {
						/* NOTE: since the runpool_irq lock is currently held and acts across
						 * all address spaces, any context whose busy refcount has reached
						 * zero won't yet be scheduled out whilst we're trying to run jobs
						 * from it */
						kbase_context *parent_ctx = dequeued_atom->kctx;
						mali_bool retain_success;

						/* Retain/power up the cores it needs, check if cores are ready */
						cores_ready = kbasep_js_job_check_ref_cores(kbdev, js, dequeued_atom);

						if (cores_ready != MALI_TRUE) {
							/* The job can't be submitted until the cores are ready */
							break;
						}

						/* ASSERT that the Policy picked a job from an allowed context */
						OSK_ASSERT(kbasep_js_is_submit_allowed(js_devdata, parent_ctx));

						/* Retain the context to stop it from being scheduled out.
						 * This is released when the job finishes */
						retain_success = kbasep_js_runpool_retain_ctx_nolock(kbdev, parent_ctx);
						OSK_ASSERT(retain_success != MALI_FALSE);
						CSTD_UNUSED(retain_success);

						/* Check if this job needs the cycle counter enabled before submission */
						kbasep_js_ref_permon_check_and_enable_cycle_counter(kbdev, dequeued_atom);

						/* Submit the job */
						kbase_job_submit_nolock(kbdev, dequeued_atom, js);
					}
				} while (kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE && has_job != MALI_FALSE);
			}

			osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
		}

		kbase_job_slot_unlock(kbdev, js);
#if BASE_HW_ISSUE_7347
	}
#endif
}
/*
 * Note: this function is quite similar to kbasep_js_try_run_next_job_on_slot()
 */
mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int js, s8 *submit_count)
{
	kbasep_js_device_data *js_devdata;
	mali_bool tried_to_dequeue_jobs_but_failed = MALI_FALSE;
	mali_bool cores_ready;

	OSK_ASSERT(kbdev != NULL);

	js_devdata = &kbdev->js_data;

#if BASE_HW_ISSUE_7347
	for (js = 0; js < kbdev->nr_job_slots; js++) {
#endif
		/* The caller of this function may not be aware of NSS status changes so we
		 * must recheck if the given slot is still valid. Otherwise do not try to run. */
		if (kbase_js_can_run_job_on_slot_no_lock(js_devdata, js)) {
			/* Keep submitting while there's space to run a job on this job-slot,
			 * and there are jobs to get that match its requirements (see 'break'
			 * statement below) */
			while (*submit_count < KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ && kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE) {
				kbase_jd_atom *dequeued_atom;
				mali_bool has_job = MALI_FALSE;

				/* Dequeue a job that matches the requirements */
				has_job = kbasep_js_policy_dequeue_job_irq(kbdev, js, &dequeued_atom);

				if (has_job != MALI_FALSE) {
					/* NOTE: since the runpool_irq lock is currently held and acts across
					 * all address spaces, any context whose busy refcount has reached
					 * zero won't yet be scheduled out whilst we're trying to run jobs
					 * from it */
					kbase_context *parent_ctx = dequeued_atom->kctx;
					mali_bool retain_success;

					/* Retain/power up the cores it needs, check if cores are ready */
					cores_ready = kbasep_js_job_check_ref_cores(kbdev, js, dequeued_atom);

					if (cores_ready != MALI_TRUE) {
						/* The job can't be submitted until the cores are ready */
						break;
					}

					/* ASSERT that the Policy picked a job from an allowed context */
					OSK_ASSERT(kbasep_js_is_submit_allowed(js_devdata, parent_ctx));

					/* Retain the context to stop it from being scheduled out.
					 * This is released when the job finishes */
					retain_success = kbasep_js_runpool_retain_ctx_nolock(kbdev, parent_ctx);
					OSK_ASSERT(retain_success != MALI_FALSE);
					CSTD_UNUSED(retain_success);

					/* Check if this job needs the cycle counter enabled before submission */
					kbasep_js_ref_permon_check_and_enable_cycle_counter(kbdev, dequeued_atom);

					/* Submit the job */
					kbase_job_submit_nolock(kbdev, dequeued_atom, js);

					++(*submit_count);
				} else {
					tried_to_dequeue_jobs_but_failed = MALI_TRUE;

					/* No more jobs - stop submitting for this slot */
					break;
				}
			}
		}
#if BASE_HW_ISSUE_7347
	}
#endif

	/* Indicate whether a retry in submission should be tried on a different
	 * dequeue function. These are the reasons why it *must* happen:
	 *
	 * - kbasep_js_policy_dequeue_job_irq() couldn't get any jobs. In this case,
	 *   kbasep_js_policy_dequeue_job() might be able to get jobs (must be done
	 *   outside of IRQ)
	 * - kbasep_js_policy_dequeue_job_irq() got some jobs, but failed to get a
	 *   job in the last call to it. Again, kbasep_js_policy_dequeue_job()
	 *   might be able to get jobs.
	 * - the KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ threshold was reached
	 *   and new scheduling must be performed outside of IRQ mode.
	 *
	 * Failure to indicate this correctly could stop further jobs being processed.
	 *
	 * However, we do not _need_ to indicate a retry for the following:
	 * - kbasep_jm_is_submit_slots_free() was MALI_FALSE, indicating jobs were
	 *   already running. When those jobs complete, that will still cause events
	 *   that cause us to resume job submission.
	 * - kbase_js_can_run_job_on_slot_no_lock() was MALI_FALSE - this is for
	 *   NSS handling. That _can_ change outside of IRQ context, but is handled
	 *   explicitly by kbasep_js_remove_job() and kbasep_js_runpool_release_ctx().
	 */
	return (mali_bool)(tried_to_dequeue_jobs_but_failed || *submit_count >= KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ);
}
/**
 * Terminate the fast_start power policy.
 *
 * @param kbdev The kbase device structure for the device
 */
static void fast_start_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}
/**
 * Initialize the coarse_demand power policy.
 *
 * @param kbdev The kbase device structure for the device
 */
static void coarse_demand_init(kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}