/**
 * Try to start the next queued GP job on the scheduler's single GP slot.
 *
 * Preconditions: caller holds both group->lock and gp_scheduler_lock.
 * gp_scheduler_lock is released before this function returns on BOTH paths
 * (the early-out path and after the slot is marked busy); group->lock is
 * left held as the caller acquired it.
 */
static void mali_gp_scheduler_schedule_on_group(struct mali_group *group)
{
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
	MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);

	/* Early out when there is nothing to do: scheduling is paused, the
	 * slot is already working, or the job queue is empty. */
	if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue)) {
		mali_gp_scheduler_unlock();
		MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
				     pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
		return; /* Nothing to do, so early out */
	}

	/* Get (and remove) next job in queue */
	job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
	_mali_osk_list_del(&job->list);

	/* Mark slot as busy */
	slot.state = MALI_GP_SLOT_STATE_WORKING;

	/* Scheduler lock is dropped before touching the hardware; the slot
	 * state transition above is what protects the slot from re-entry. */
	mali_gp_scheduler_unlock();

	/* NOTE(review): 0x%08X truncates the job pointer on 64-bit kernels —
	 * confirm whether %p should be used here. */
	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));

	/* NOTE(review): the job is started on slot.group rather than the
	 * 'group' argument — verify callers guarantee these are the same. */
	if (_MALI_OSK_ERR_OK != mali_group_start_gp_job(slot.group, job)) {
		MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n"));
		MALI_DEBUG_ASSERT(0); /* this cant fail on Mali-300+, no need to implement put back of job */
	}
}
/**
 * Release the backing memory of an allocation descriptor.
 *
 * Dispatches to the backend-specific release routine for the descriptor's
 * allocation type. Backends that are compiled out (DMA-BUF, UMP) become
 * silent no-ops for their type, matching the build configuration.
 *
 * session->memory_lock must be held when calling this function.
 */
static void mali_mem_release(mali_mem_allocation *descriptor)
{
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT_LOCK_HELD(descriptor->session->memory_lock);
	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	if (MALI_MEM_OS == descriptor->type) {
		mali_mem_os_release(descriptor);
	} else if (MALI_MEM_DMA_BUF == descriptor->type) {
#if defined(CONFIG_DMA_SHARED_BUFFER)
		mali_mem_dma_buf_release(descriptor);
#endif
	} else if (MALI_MEM_UMP == descriptor->type) {
#if defined(CONFIG_MALI400_UMP)
		mali_mem_ump_release(descriptor);
#endif
	} else if (MALI_MEM_EXTERNAL == descriptor->type) {
		mali_mem_external_release(descriptor);
	} else if (MALI_MEM_BLOCK == descriptor->type) {
		mali_mem_block_release(descriptor);
	} else {
		/* Unknown type: log it, but do not touch the descriptor further. */
		MALI_DEBUG_PRINT(1, ("mem type %d is not in the mali_mem_type enum.\n", descriptor->type));
	}
}
/** @brief Callback function that releases memory
 *
 * Releases the allocation stored in the descriptor table at the given index
 * and destroys the descriptor object itself.
 *
 * session->memory_lock must be held when calling this function.
 *
 * @param descriptor_id Descriptor table index being cleaned up (logging only).
 * @param map_target The mali_mem_allocation stored at that index; must not be NULL.
 */
static void descriptor_table_cleanup_callback(int descriptor_id, void *map_target)
{
	mali_mem_allocation *descriptor;

	descriptor = (mali_mem_allocation *)map_target;

	/* Validate the pointer BEFORE dereferencing it: the original code read
	 * descriptor->session in the lock-held assert first, which made the
	 * later NULL assert useless. */
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT_LOCK_HELD(descriptor->session->memory_lock);

	/* NOTE(review): 0x%x with a void* argument is a format mismatch and
	 * truncates on 64-bit — confirm whether %p should be used here. */
	MALI_DEBUG_PRINT(3, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));

	/* Release the backing memory, then destroy the descriptor object. */
	mali_mem_release(descriptor);
	mali_mem_descriptor_destroy(descriptor);
}
/* Group and scheduler must be locked when entering this function. Both will be unlocked before
 * exiting.
 *
 * Picks the next GP job (high-priority queue first) and starts it on the
 * slot's group. On the early-out path both locks are released before return.
 */
static void mali_gp_scheduler_schedule_internal_and_unlock(void)
{
	struct mali_gp_job *job = NULL;

	MALI_DEBUG_ASSERT_LOCK_HELD(slot.group->lock);
	MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);

	/* Early out when paused, slot busy, or both queues empty; releases
	 * the scheduler lock and the group lock before returning. */
	if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || (_mali_osk_list_empty(&job_queue) && _mali_osk_list_empty(&job_queue_high))) {
		mali_gp_scheduler_unlock();
		mali_group_unlock(slot.group);
		MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n", pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
		/* NOTE(review): 'group' is not declared in this function — this
		 * tracepoint call likely should use slot.group; confirm it
		 * compiles with CONFIG_GPU_TRACEPOINTS enabled. */
		trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(), 0, 0, 0);
#endif
		return; /* Nothing to do, so early out */
	}

	/* Get next job in queue (high-priority queue is served first) */
	if (!_mali_osk_list_empty(&job_queue_high)) {
		job = _MALI_OSK_LIST_ENTRY(job_queue_high.next, struct mali_gp_job, list);
	} else {
/** Debug-build check that the caller holds the GP scheduler lock. */
MALI_STATIC_INLINE void mali_gp_scheduler_assert_locked(void)
{
	MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
}
/*
 * Power up the PM domains selected by power_up_mask and collect the L2
 * caches and groups that must be notified afterwards.
 *
 * The caller passes empty output arrays (counters must be 0 on entry) and
 * must hold both pm_lock_exec and pm_lock_state; the notification lists are
 * filled in here so the caller can notify after dropping the state lock.
 */
static void mali_pm_domain_power_up(u32 power_up_mask,
				    struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS],
				    u32 *num_groups_up,
				    struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
				    u32 *num_l2_up)
{
	u32 domain_bit;
	u32 notify_mask = power_up_mask;

	MALI_DEBUG_ASSERT(0 != power_up_mask);
	MALI_DEBUG_ASSERT_POINTER(groups_up);
	MALI_DEBUG_ASSERT_POINTER(num_groups_up);
	MALI_DEBUG_ASSERT(0 == *num_groups_up);
	MALI_DEBUG_ASSERT_POINTER(l2_up);
	MALI_DEBUG_ASSERT_POINTER(num_l2_up);
	MALI_DEBUG_ASSERT(0 == *num_l2_up);
	MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
	MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);

	MALI_DEBUG_PRINT(5, ("PM update: Powering up domains: . [%s]\n", mali_pm_mask_to_string(power_up_mask)));

	/* Record the new current power-domain mask. */
	pd_mask_current |= power_up_mask;

	/* Walk the set bits of the mask from highest to lowest. */
	domain_bit = _mali_osk_fls(notify_mask);
	while (0 != domain_bit) {
		u32 domain_id = domain_bit - 1;
		struct mali_pm_domain *domain =
			mali_pm_domain_get_from_index(
				domain_id);
		struct mali_l2_cache_core *l2_cache;
		struct mali_l2_cache_core *l2_cache_tmp;
		struct mali_group *group;
		struct mali_group *group_tmp;

		/* Mark domain as powered up */
		mali_pm_domain_set_power_on(domain, MALI_TRUE);

		/*
		 * Make a note of the L2 and/or group(s) to notify
		 * (need to release the PM state lock before doing so)
		 */

		_MALI_OSK_LIST_FOREACHENTRY(l2_cache,
					    l2_cache_tmp,
					    mali_pm_domain_get_l2_cache_list(
						    domain),
					    struct mali_l2_cache_core,
					    pm_domain_list) {
			MALI_DEBUG_ASSERT(*num_l2_up <
					  MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
			l2_up[*num_l2_up] = l2_cache;
			(*num_l2_up)++;
		}

		_MALI_OSK_LIST_FOREACHENTRY(group,
					    group_tmp,
					    mali_pm_domain_get_group_list(domain),
					    struct mali_group,
					    pm_domain_list) {
			MALI_DEBUG_ASSERT(*num_groups_up <
					  MALI_MAX_NUMBER_OF_GROUPS);
			groups_up[*num_groups_up] = group;

			(*num_groups_up)++;
		}
/** Debug-build check that the caller holds the given group's lock. */
void mali_group_assert_locked(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
}