/*
 * Release one GPU voltage-lock reference.
 *
 * Returns the remaining reference count after the decrement, or -1 if the
 * count was already zero (nothing to pop).
 */
int mali_voltage_lock_pop(void)
{
	int current_count = _mali_osk_atomic_read(&voltage_lock_status);

	/* Refuse to underflow the lock count. */
	if (current_count <= 0) {
		MALI_PRINT(("gpu voltage lock status is not valid for pop\n"));
		return -1;
	}

	return _mali_osk_atomic_dec_return(&voltage_lock_status);
}
/*
 * Release one GPU DVFS bottom-lock reference.
 *
 * Returns the remaining reference count after the decrement, or -1 if the
 * count was already zero (nothing to pop).
 */
int mali_dvfs_bottom_lock_pop(void)
{
	if (_mali_osk_atomic_read(&bottomlock_status) <= 0) {
		/* Fix: message previously lacked its trailing newline, unlike the
		 * matching voltage-lock/bottom-lock pop messages elsewhere. */
		MALI_PRINT(("gpu bottom lock status is not valid for pop\n"));
		return -1;
	}

	return _mali_osk_atomic_dec_return(&bottomlock_status);
}
/*
 * Release one GPU DVFS bottom-lock reference.
 *
 * When the final reference is dropped, the bottom-lock DVFS step is reset
 * to zero and the release is logged. Returns the count remaining after the
 * decrement, or -1 if the count was already zero.
 */
int mali_dvfs_bottom_lock_pop(void)
{
	int count = _mali_osk_atomic_read(&bottomlock_status);

	/* Nothing to pop: report and bail out. */
	if (count <= 0) {
		MALI_PRINT(("gpu bottom lock status is not valid for pop\n"));
		return -1;
	}

	/* Last holder is releasing: clear the DVFS bottom-lock step. */
	if (1 == count) {
		bottom_lock_step = 0;
		MALI_PRINT(("gpu bottom lock release\n"));
	}

	return _mali_osk_atomic_dec_return(&bottomlock_status);
}
/* Can run in atomic context */
/*
 * Drop one power-management device reference.
 *
 * With CONFIG_PM_RUNTIME the reference is handed to the kernel runtime-PM
 * core (autosuspend on >= 2.6.37, immediate put otherwise). Without runtime
 * PM, when the local refcount hits zero the power-off is deferred either via
 * pm_timer or by queueing the PM bottom-half work item directly.
 */
void _mali_osk_pm_dev_ref_dec(void)
{
#ifdef CONFIG_PM_RUNTIME
	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
	_mali_osk_atomic_dec(&mali_pm_ref_count);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	/* Autosuspend API exists from 2.6.37: refresh the last-busy timestamp so
	 * the device is only suspended after the autosuspend delay elapses. */
	pm_runtime_mark_last_busy(&(mali_platform_device->dev));
	pm_runtime_put_autosuspend(&(mali_platform_device->dev));
#else
	/* Older kernels: plain put, no autosuspend deferral available. */
	pm_runtime_put(&(mali_platform_device->dev));
#endif
	MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#else /// CONFIG_PM_RUNTIME
	if(_mali_osk_atomic_dec_return(&mali_pm_ref_count) == 0) {
		if (NULL != pm_timer) {
			/* Defer power-off: 15 ms when the PM workqueue exists, 3000 ms
			 * otherwise (values taken from this call site; rationale for the
			 * specific delays is not visible here). */
			_mali_osk_timer_mod(pm_timer, _mali_osk_time_mstoticks(mali_pm_wq ? 15 : 3000));
		} else {
#if MALI_LICENSE_IS_GPL
			if (mali_pm_wq) {
				queue_work(mali_pm_wq, &mali_pm_wq_work_handle);
			} else {
				/* No dedicated workqueue: run the PM bottom half inline. */
				MALI_PRINTF(("mali_pm_wq is NULL !!!\n"));
				mali_bottom_half_pm(NULL);
			}
#else
			/* Non-GPL build cannot use a private workqueue; use the shared one. */
			schedule_work(&mali_pm_wq_work_handle);
#endif
		}
	}
	MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#endif
}
/*
 * Drop one reference on a soft job and destroy it when the count reaches zero.
 *
 * Destruction releases the pending activated notification (if any) and returns
 * the job to its owning soft-job system. Safe to call with job == NULL (no-op).
 *
 * Bug fix: the original dereferenced job (job->system in the assert, job->id in
 * the debug print) BEFORE its "NULL != job" check, which made the guard useless
 * and risked a NULL dereference in debug builds. The guard now comes first.
 */
void mali_soft_job_destroy(struct mali_soft_job *job)
{
	MALI_DEBUG_ASSERT_POINTER(job);

	if (NULL == job) {
		return;
	}

	MALI_DEBUG_ASSERT_POINTER(job->system);

	MALI_DEBUG_PRINT(4, ("Mali Soft Job: destroying soft job %u (0x%08X)\n", job->id, job));

	/* Other holders remain: just drop our reference. */
	if (0 < _mali_osk_atomic_dec_return(&job->refcount)) {
		return;
	}

	_mali_osk_atomic_term(&job->refcount);

	/* Release the activated notification if it was never delivered. */
	if (NULL != job->activated_notification) {
		_mali_osk_notification_delete(job->activated_notification);
		job->activated_notification = NULL;
	}

	mali_soft_job_system_free_job(job->system, job);
}
/* Can run in atomic context */
/*
 * Drop one power-management device reference.
 *
 * With CONFIG_PM_RUNTIME the reference is handed to the kernel runtime-PM
 * core (autosuspend on >= 2.6.37). Without runtime PM, reaching a refcount of
 * zero re-arms pm_timer to defer the actual power-off.
 *
 * NOTE(review): unlike the sibling variant of this function, pm_timer is used
 * here without a NULL check — presumably it is guaranteed to be initialized
 * before the first ref drop in this build configuration; confirm against the
 * init path.
 */
void _mali_osk_pm_dev_ref_dec(void)
{
#ifdef CONFIG_PM_RUNTIME
	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
	_mali_osk_atomic_dec(&mali_pm_ref_count);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	/* Autosuspend API exists from 2.6.37: refresh last-busy so the device
	 * suspends only after the autosuspend delay. */
	pm_runtime_mark_last_busy(&(mali_platform_device->dev));
	pm_runtime_put_autosuspend(&(mali_platform_device->dev));
#else
	pm_runtime_put(&(mali_platform_device->dev));
#endif
	MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#else /// CONFIG_PM_RUNTIME
	if(_mali_osk_atomic_dec_return(&mali_pm_ref_count) == 0) {
		/* Defer power-off: 15 ms when the PM workqueue exists, 3000 ms otherwise. */
		_mali_osk_timer_mod(pm_timer, _mali_osk_time_mstoticks(mali_pm_wq ? 15 : 3000));
	}
	MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#endif
}
/* Can run in atomic context */
/*
 * Drop one power-management device reference (MTK platform variant).
 *
 * With CONFIG_PM_RUNTIME the reference is handed to the kernel runtime-PM
 * core (autosuspend on >= 2.6.37, plain put otherwise). Without runtime PM,
 * when the local refcount reaches zero the MTK PM suspend bottom half is
 * queued on its workqueue, or run inline if the workqueue is absent.
 */
void _mali_osk_pm_dev_ref_put(void)
{
#ifdef CONFIG_PM_RUNTIME
	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	/* Autosuspend API exists from 2.6.37: refresh last-busy so the device
	 * suspends only after the autosuspend delay. */
	pm_runtime_mark_last_busy(&(mali_platform_device->dev));
	pm_runtime_put_autosuspend(&(mali_platform_device->dev));
#else
	pm_runtime_put(&(mali_platform_device->dev));
#endif
#else
	if(_mali_osk_atomic_dec_return(&mtk_mali_pm_ref_count) == 0) {
		if (mtk_mali_pm_wq) {
			queue_work(mtk_mali_pm_wq, &mtk_mali_pm_wq_work_handle);
		} else {
			/* No workqueue available: run the suspend bottom half inline. */
			MTK_mali_bottom_half_pm_suspend(NULL);
		}
	}
#endif
}
/**
 * Check if fence has been signaled.
 *
 * @param system Timeline system.
 * @param fence Timeline fence.
 * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
 */
static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
{
	int i;
	u32 tid = _mali_osk_get_tid();
	mali_bool ret = MALI_TRUE;
#if defined(CONFIG_SYNC)
	struct sync_fence *sync_fence = NULL;
#endif

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	/* Hold the system spinlock while inspecting the per-timeline points. */
	mali_spinlock_reentrant_wait(system->spinlock, tid);

	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
		struct mali_timeline *timeline;
		mali_timeline_point point;

		point = fence->points[i];

		if (likely(MALI_TIMELINE_NO_POINT == point)) {
			/* Fence contains no point on this timeline. */
			continue;
		}

		timeline = system->timelines[i];
		MALI_DEBUG_ASSERT_POINTER(timeline);

		/* An out-of-range point is only reported, not treated as unsignaled. */
		if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
			MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
		}

		if (!mali_timeline_is_point_released(timeline, point)) {
			/* One unreleased point is enough: the fence is not signaled. */
			ret = MALI_FALSE;
			goto exit;
		}
	}

#if defined(CONFIG_SYNC)
	/* The fence may additionally carry an Android sync fence fd; a sync fence
	 * with status 0 is still pending. */
	if (-1 != fence->sync_fd) {
		sync_fence = sync_fence_fdget(fence->sync_fd);
		if (likely(NULL != sync_fence)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
			if (0 == sync_fence->status) {
#else
			/* status became atomic_t in 3.17. */
			if (0 == atomic_read(&sync_fence->status)) {
#endif
				ret = MALI_FALSE;
			}
		} else {
			MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
		}
	}
#endif /* defined(CONFIG_SYNC) */

exit:
	mali_spinlock_reentrant_signal(system->spinlock, tid);

#if defined(CONFIG_SYNC)
	/* Drop the fd reference taken above; done outside the spinlock. */
	if (NULL != sync_fence) {
		sync_fence_put(sync_fence);
	}
#endif /* defined(CONFIG_SYNC) */

	return ret;
}

/*
 * Wait until the given timeline fence is signaled, or the timeout expires.
 *
 * A timeout of MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY performs a
 * non-blocking status check; MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER waits
 * without bound. Otherwise waits up to `timeout`.
 *
 * Returns MALI_TRUE if the fence was signaled (tracker activated),
 * MALI_FALSE on timeout or allocation failure.
 */
mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
{
	struct mali_timeline_fence_wait_tracker *wait;
	mali_timeline_point point;
	mali_bool ret;

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));

	/* Non-blocking mode: just report the current fence status. */
	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
		return mali_timeline_fence_wait_check_status(system, fence);
	}

	wait = mali_timeline_fence_wait_tracker_alloc();
	if (unlikely(NULL == wait)) {
		MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
		return MALI_FALSE;
	}

	wait->activated = MALI_FALSE;
	wait->system = system;

	/* Initialize refcount to two references. The reference first will be released by this
	 * function after the wait is over. The second reference will be released when the tracker
	 * is activated. */
	_mali_osk_atomic_init(&wait->refcount, 2);

	/* Add tracker to timeline system, but not to a timeline. */
	mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
	point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
	MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
	MALI_IGNORE(point);

	/* Wait for the tracker to be activated or time out. */
	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
		_mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
	} else {
		_mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
	}

	/* Read the result before possibly freeing the tracker below. */
	ret = wait->activated;

	/* Drop this function's reference; free when the activation side has
	 * already dropped its reference too. */
	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}

	return ret;
}

/*
 * Activate a fence-wait tracker: mark it activated, wake any waiter blocked
 * in mali_timeline_fence_wait(), release the underlying timeline tracker,
 * and drop the activation-side reference (freeing the tracker if the waiting
 * side has already dropped its reference).
 */
void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
{
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;

	MALI_DEBUG_ASSERT_POINTER(wait);
	MALI_DEBUG_ASSERT_POINTER(wait->system);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));

	MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
	wait->activated = MALI_TRUE;

	_mali_osk_wait_queue_wake_up(wait->system->wait_queue);

	/* Nothing can wait on this tracker, so nothing to schedule after release. */
	schedule_mask = mali_timeline_tracker_release(&wait->tracker);
	MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
	MALI_IGNORE(schedule_mask);

	/* Drop the activation-side reference (second of the two taken at init). */
	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}
}