/**
 * Release the soft job system lock.
 *
 * Debug builds verify that the caller is the thread recorded as the
 * current lock owner, and clear the owner field before unlocking.
 *
 * @param system Soft job system whose lock is released (must be non-NULL
 *               and currently locked by the calling thread).
 */
MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system)
{
	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system));

	/* Only the owning thread may release; reset ownership (debug only). */
	MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
	MALI_DEBUG_CODE(system->lock_owner = 0);

	_mali_osk_spinlock_irq_unlock(system->lock);
}
/**
 * Release the global GP scheduler lock.
 *
 * The lock flavor matches the one taken by the corresponding lock
 * function: an IRQ spinlock when upper-half scheduling is enabled,
 * a plain spinlock otherwise.
 */
MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
{
	MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
#if defined(MALI_UPPER_HALF_SCHEDULING)
	_mali_osk_spinlock_irq_unlock(gp_scheduler_lock);
#else
	_mali_osk_spinlock_unlock(gp_scheduler_lock);
#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
}
/**
 * Release one level of a reentrant spinlock.
 *
 * Decrements the nesting counter; only when the count reaches zero is
 * the owner cleared and the underlying IRQ spinlock actually released.
 *
 * @param spinlock Reentrant spinlock to signal (must be non-NULL).
 * @param tid      Caller's thread id; debug builds assert it is non-zero
 *                 and matches the recorded owner.
 */
void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid)
{
	MALI_DEBUG_ASSERT_POINTER(spinlock);
	MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
	MALI_DEBUG_ASSERT(0 != tid && tid == spinlock->owner);

	/* Drop one nesting level; the last exit releases the real lock. */
	if (0 == --spinlock->counter) {
		spinlock->owner = 0;
		MALI_DEBUG_PRINT(5, ("%s release last\n", __FUNCTION__));
		_mali_osk_spinlock_irq_unlock(spinlock->lock);
	}
}
/*
 * calculate_gpu_utilization() - timer/work callback that computes GPU, GP
 * and PP utilization for the elapsed period under time_data_lock.
 *
 * Visible behavior: if no work was done this period, it reports zero usage
 * via mali_utilization_callback (when set), disables the GP-bound scheduler
 * hint, stops the timer and returns early. Otherwise it folds any
 * still-running work into the accumulators, then normalizes the 64-bit
 * accumulated-work and period values into 32-bit ranges (shifting both down
 * by the same amount) so utilization can be expressed in parts of 256
 * without a 64-bit divide.
 *
 * NOTE(review): this definition is truncated in this chunk — the body
 * continues past the last visible line. Code below is left byte-identical.
 */
static void calculate_gpu_utilization(void *arg) { u64 time_now; u64 time_period; u32 leading_zeroes; u32 shift_val; u32 work_normalized_gpu; u32 work_normalized_gp; u32 work_normalized_pp; u32 period_normalized; u32 utilization_gpu; u32 utilization_gp; u32 utilization_pp; #if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) u32 window_render_fps; #endif _mali_osk_spinlock_irq_lock(time_data_lock); if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) { /* * No work done for this period * - No need to reschedule timer * - Report zero usage */ timer_running = MALI_FALSE; last_utilization_gpu = 0; last_utilization_gp = 0; last_utilization_pp = 0; _mali_osk_spinlock_irq_unlock(time_data_lock); if (NULL != mali_utilization_callback) { struct mali_gpu_utilization_data data = { 0, }; mali_utilization_callback(&data); } mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND); return; } time_now = _mali_osk_time_get_ns(); time_period = time_now - period_start_time; /* If we are currently busy, update working period up to now */ if (work_start_time_gpu != 0) { accumulated_work_time_gpu += (time_now - work_start_time_gpu); work_start_time_gpu = time_now; /* GP and/or PP will also be busy if the GPU is busy at this point */ if (work_start_time_gp != 0) { accumulated_work_time_gp += (time_now - work_start_time_gp); work_start_time_gp = time_now; } if (work_start_time_pp != 0) { accumulated_work_time_pp += (time_now - work_start_time_pp); work_start_time_pp = time_now; } } /* * We have two 64-bit values, a dividend and a divisor. * To avoid dependencies to a 64-bit divider, we shift down the two values * equally first. * We shift the dividend up and possibly the divisor down, making the result X in 256. 
*/ /* Shift the 64-bit values down so they fit inside a 32-bit integer */ leading_zeroes = _mali_osk_clz((u32)(time_period >> 32)); shift_val = 32 - leading_zeroes; work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val); work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val); work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val); period_normalized = (u32)(time_period >> shift_val); /* * Now, we should report the usage in parts of 256 * this means we must shift up the dividend or down the divisor by 8 * (we could do a combination, but we just use one for simplicity, * but the end result should be good enough anyway) */ if (period_normalized > 0x00FFFFFF) { /* The divisor is so big that it is safe to shift it down */ period_normalized >>= 8; } else {
/* Release the global power-management state lock. */
static void mali_pm_state_unlock(void)
{
	_mali_osk_spinlock_irq_unlock(pm_lock_state);
}
/**
 * Release a power domain's lock.
 *
 * @param domain Power domain whose lock is released (must be non-NULL).
 */
static void mali_pm_domain_unlock(struct mali_pm_domain *domain)
{
	/* Sibling lock helpers in this driver assert their pointer argument
	 * before dereferencing it; do the same here for consistency and to
	 * catch a NULL domain in debug builds before domain->lock is read. */
	MALI_DEBUG_ASSERT_POINTER(domain);
	_mali_osk_spinlock_irq_unlock(domain->lock);
}