/**
 * Validate the GPU min/max clock frequencies read from the configuration
 * attribute list.
 *
 * @param kbdev      Kbase device the configuration belongs to.
 * @param attributes Configuration attribute list to query.
 *
 * @return MALI_TRUE if both frequencies lie within
 *         [MIN_GPU_ALLOWED_FREQ_KHZ, MAX_GPU_ALLOWED_FREQ_KHZ] and
 *         min <= max; MALI_FALSE otherwise (a warning is printed).
 */
static mali_bool kbasep_validate_gpu_clock_freq(kbase_device *kbdev, const kbase_attribute *attributes)
{
	uintptr_t freq_min = kbasep_get_config_value(kbdev, attributes, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN);
	uintptr_t freq_max = kbasep_get_config_value(kbdev, attributes, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX);

	if ((freq_min > MAX_GPU_ALLOWED_FREQ_KHZ) ||
	    (freq_min < MIN_GPU_ALLOWED_FREQ_KHZ) ||
	    (freq_max > MAX_GPU_ALLOWED_FREQ_KHZ) ||
	    (freq_max < MIN_GPU_ALLOWED_FREQ_KHZ) ||
	    (freq_min > freq_max)) {
		/* Bug fix: freq_min/freq_max are unsigned (uintptr_t) but were
		 * printed with the signed %ld specifier — a format/argument
		 * mismatch, which is undefined behavior for printf-style
		 * formatting. Cast explicitly and use %lu. */
		KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "Invalid GPU frequencies found in configuration: min=%lukHz, max=%lukHz.", (unsigned long)freq_min, (unsigned long)freq_max);
		return MALI_FALSE;
	}

	return MALI_TRUE;
}
mali_error kbase_cpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_cpuprops * const kbase_props) { unsigned int max_cpu_freq; kbase_props->props.cpu_l1_dcache_line_size_log2 = L1_DCACHE_LINE_SIZE_LOG2; kbase_props->props.cpu_l1_dcache_size = L1_DCACHE_SIZE; kbase_props->props.cpu_flags = BASE_CPU_PROPERTY_FLAG_LITTLE_ENDIAN; kbase_props->props.nr_cores = NR_CPUS; kbase_props->props.cpu_page_size_log2 = PAGE_SHIFT; kbase_props->props.available_memory_size = totalram_pages << PAGE_SHIFT; kbasep_cpuprops_uk_get_cpu_id_info(kbase_props); /* check if kernel supports dynamic frequency scaling */ max_cpu_freq = cpufreq_quick_get_max(KBASE_DEFAULT_CPU_NUM); if (max_cpu_freq != 0) { /* convert from kHz to mHz */ kbase_props->props.max_cpu_clock_speed_mhz = max_cpu_freq / 1000; } else { /* fallback if CONFIG_CPU_FREQ turned off */ int result; kbase_cpuprops_clock_speed_function kbase_cpuprops_uk_get_clock_speed; kbase_cpuprops_uk_get_clock_speed = (kbase_cpuprops_clock_speed_function) kbasep_get_config_value(kctx->kbdev, kctx->kbdev->config_attributes, KBASE_CONFIG_ATTR_CPU_SPEED_FUNC); result = kbase_cpuprops_uk_get_clock_speed(&kbase_props->props.max_cpu_clock_speed_mhz); if (result != 0) return MALI_ERROR_FUNCTION_FAILED; } return MALI_ERROR_NONE; }
/**
 * Copy the device's GPU properties to a user-visible buffer, filling in the
 * current GPU speed first.
 *
 * @param kctx        Kbase context of the caller.
 * @param kbase_props Output structure; receives a copy of
 *                    kbdev->gpu_props.props.
 *
 * @return MALI_ERROR_NONE always (a failed speed query falls back to the
 *         configured maximum frequency rather than failing the call).
 */
mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops * const kbase_props)
{
	kbase_gpuprops_clock_speed_function get_gpu_speed_mhz;
	u32 gpu_speed_mhz;
	int rc = 1;	/* non-zero = "no valid speed obtained yet" */

	KBASE_DEBUG_ASSERT(NULL != kctx);
	KBASE_DEBUG_ASSERT(NULL != kbase_props);

	/* Current GPU speed is requested from the system integrator via the KBASE_CONFIG_ATTR_GPU_SPEED_FUNC function.
	 * If that function fails, or the function is not provided by the system integrator, we report the maximum
	 * GPU speed as specified by KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX.
	 */
	get_gpu_speed_mhz = (kbase_gpuprops_clock_speed_function) kbasep_get_config_value(kctx->kbdev, kctx->kbdev->config_attributes, KBASE_CONFIG_ATTR_GPU_SPEED_FUNC);
	if (get_gpu_speed_mhz != NULL) {
		rc = get_gpu_speed_mhz(&gpu_speed_mhz);
#ifdef CONFIG_MALI_DEBUG
		/* Issue a warning message when the reported GPU speed falls outside the min/max range */
		if (rc == 0) {
			u32 gpu_speed_khz = gpu_speed_mhz * 1000;
			if (gpu_speed_khz < kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min || gpu_speed_khz > kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max)
				KBASE_DEBUG_PRINT_WARN(KBASE_CORE, "GPU Speed is outside of min/max range (got %lu Khz, min %lu Khz, max %lu Khz)", (unsigned long)gpu_speed_khz, (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_min, (unsigned long)kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
		}
#endif				/* CONFIG_MALI_DEBUG */
	}

	/* Fallback: callback missing or failed — report the configured maximum */
	if (rc != 0)
		gpu_speed_mhz = kctx->kbdev->gpu_props.props.core_props.gpu_freq_khz_max / 1000;

	/* Cache the speed in the device properties before copying them out */
	kctx->kbdev->gpu_props.props.core_props.gpu_speed_mhz = gpu_speed_mhz;

	memcpy(&kbase_props->props, &kctx->kbdev->gpu_props.props, sizeof(kbase_props->props));

	return MALI_ERROR_NONE;
}
/**
 * Power the GPU off via the platform power-management callback, if the
 * integrator supplied one in the configuration attributes.
 *
 * @param kbdev Kbase device to power down.
 */
void kbase_pm_register_access_disable(kbase_device *kbdev)
{
	kbase_pm_callback_conf *pm_callbacks =
	    (kbase_pm_callback_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);

	/* No callback structure configured means nothing to do */
	if (pm_callbacks == NULL)
		return;

	pm_callbacks->power_off_callback(kbdev);
}
/**
 * Run the platform-specific termination hook, if the integrator provided
 * one via the KBASE_CONFIG_ATTR_PLATFORM_FUNCS attribute.
 *
 * @param kbdev Kbase device being torn down.
 */
void kbasep_platform_device_term(kbase_device *kbdev)
{
	kbase_platform_funcs_conf *funcs =
	    (kbase_platform_funcs_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PLATFORM_FUNCS);

	/* Both the structure and the individual hook are optional */
	if (funcs != NULL && funcs->platform_term_func != NULL)
		funcs->platform_term_func(kbdev);
}
/**
 * Run the platform-specific initialization hook, if the integrator
 * provided one via the KBASE_CONFIG_ATTR_PLATFORM_FUNCS attribute.
 *
 * @param kbdev Kbase device being initialized.
 *
 * @return The hook's result when a hook exists; MALI_TRUE (success) when
 *         no platform functions or no init hook were configured.
 */
mali_bool kbasep_platform_device_init(kbase_device *kbdev)
{
	kbase_platform_funcs_conf *funcs =
	    (kbase_platform_funcs_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PLATFORM_FUNCS);

	/* Absent structure or hook is not an error — treat as success */
	if (funcs == NULL || funcs->platform_init_func == NULL)
		return MALI_TRUE;

	return funcs->platform_init_func(kbdev);
}
/**
 * Initialize the power-management policy subsystem: the power-off
 * workqueue and timer, the initial policy, and the poweroff timing
 * parameters read from the configuration attributes.
 *
 * @param kbdev Kbase device to initialize.
 *
 * @return MALI_ERROR_NONE on success, MALI_ERROR_OUT_OF_MEMORY if the
 *         workqueue could not be allocated.
 */
mali_error kbase_pm_policy_init(kbase_device *kbdev)
{
	const kbase_attribute *attrs;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	kbdev->pm.gpu_poweroff_wq = alloc_workqueue("kbase_pm_do_poweroff", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (kbdev->pm.gpu_poweroff_wq == NULL)
		return MALI_ERROR_OUT_OF_MEMORY;
	INIT_WORK(&kbdev->pm.gpu_poweroff_work, kbasep_pm_do_gpu_poweroff_wq);

	hrtimer_init(&kbdev->pm.gpu_poweroff_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->pm.gpu_poweroff_timer.function = kbasep_pm_do_gpu_poweroff_callback;

	/* Start with the first policy in the list as the active one */
	kbdev->pm.pm_current_policy = policy_list[0];
	kbdev->pm.pm_current_policy->init(kbdev);

	/* Timing parameters come from the platform configuration */
	attrs = kbdev->config_attributes;
	kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(kbasep_get_config_value(kbdev, attrs, KBASE_CONFIG_ATTR_PM_GPU_POWEROFF_TICK_NS));
	kbdev->pm.poweroff_shader_ticks = kbasep_get_config_value(kbdev, attrs, KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_SHADER);
	kbdev->pm.poweroff_gpu_ticks = kbasep_get_config_value(kbdev, attrs, KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_GPU);

	return MALI_ERROR_NONE;
}
/**
 * Initialize the power-management policy subsystem (shader-poweroff
 * variant): the power-off workqueue, the shader-poweroff timer, the
 * initial policy, and the shader poweroff delay from the configuration.
 *
 * @param kbdev Kbase device to initialize.
 *
 * @return MALI_ERROR_NONE on success, MALI_ERROR_OUT_OF_MEMORY if the
 *         workqueue could not be allocated.
 */
mali_error kbase_pm_policy_init(kbase_device *kbdev)
{
	uintptr_t poweroff_time;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	kbdev->pm.gpu_poweroff_wq = alloc_workqueue("kbase_pm_do_poweroff", WQ_HIGHPRI, 1);
	if (kbdev->pm.gpu_poweroff_wq == NULL)
		return MALI_ERROR_OUT_OF_MEMORY;
	INIT_WORK(&kbdev->pm.gpu_poweroff_work, kbasep_pm_do_gpu_poweroff_wq);

	hrtimer_init(&kbdev->pm.shader_poweroff_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->pm.shader_poweroff_timer.function = kbasep_pm_do_shader_poweroff_callback;

	/* Policy index 2 is the default active policy in this variant */
	kbdev->pm.pm_current_policy = policy_list[2];
	kbdev->pm.pm_current_policy->init(kbdev);

	/* Configured value is scaled by 1000 before conversion to a timer delay */
	poweroff_time = kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PM_SHADER_POWEROFF_TIME);
	kbdev->pm.shader_poweroff_time = HR_TIMER_DELAY_NSEC(poweroff_time * 1000);

#if SLSI_INTEGRATION
	kbdev->hwcnt.prev_policy = policy_list[2];
#endif

	return MALI_ERROR_NONE;
}
/**
 * Initialize the power-management core state for a device (OSK variant).
 *
 * Sets the initial power flags, wires up the integrator's PM callbacks
 * (or NULLs when none are configured), starts the metrics subsystem,
 * initializes the wait queues and state flags, allocates the PM
 * workqueue, and initializes the PM spinlocks.
 *
 * @param kbdev Kbase device to initialize.
 *
 * @return MALI_ERROR_NONE on success; MALI_ERROR_FUNCTION_FAILED if the
 *         workqueue cannot be created (metrics are torn down again);
 *         otherwise whatever kbasep_pm_metrics_init returned.
 */
mali_error kbase_pm_init(kbase_device *kbdev)
{
	mali_error ret = MALI_ERROR_NONE;
	kbase_pm_callback_conf *callbacks;

	OSK_ASSERT(kbdev != NULL);

	kbdev->pm.gpu_powered = MALI_FALSE;
	atomic_set(&kbdev->pm.gpu_in_desired_state, MALI_TRUE);

	/* Integrator-supplied PM callbacks are optional; fall back to NULLs */
	callbacks = (kbase_pm_callback_conf*) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
	if (callbacks) {
		kbdev->pm.callback_power_on = callbacks->power_on_callback;
		kbdev->pm.callback_power_off = callbacks->power_off_callback;
		kbdev->pm.callback_power_runtime_init = callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term = callbacks->power_runtime_term_callback;
		kbdev->pm.callback_power_runtime_on = callbacks->power_runtime_on_callback;
		kbdev->pm.callback_power_runtime_off = callbacks->power_runtime_off_callback;
	} else {
		kbdev->pm.callback_power_on = NULL;
		kbdev->pm.callback_power_off = NULL;
		kbdev->pm.callback_power_runtime_init = NULL;
		kbdev->pm.callback_power_runtime_term = NULL;
		kbdev->pm.callback_power_runtime_on = NULL;
		kbdev->pm.callback_power_runtime_off = NULL;
	}

	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (MALI_ERROR_NONE != ret) {
		return ret;
	}

	init_waitqueue_head(&kbdev->pm.l2_powered_wait);
	kbdev->pm.l2_powered = 0;

	init_waitqueue_head(&kbdev->pm.power_state_wait);
	kbdev->pm.power_state = PM_POWER_STATE_TRANS;

	init_waitqueue_head(&kbdev->pm.no_outstanding_event_wait);
	kbdev->pm.no_outstanding_event = 1;

	/* Simulate failure to create the workqueue */
	if(OSK_SIMULATE_FAILURE(OSK_BASE_PM)) {
		kbdev->pm.workqueue = NULL;
		goto workq_fail;
	}

	kbdev->pm.workqueue = alloc_workqueue("kbase_pm", WQ_NON_REENTRANT | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
	if (NULL == kbdev->pm.workqueue) {
		goto workq_fail;
	}

	spin_lock_init(&kbdev->pm.power_change_lock);
	spin_lock_init(&kbdev->pm.active_count_lock);
	spin_lock_init(&kbdev->pm.gpu_cycle_counter_requests_lock);
	spin_lock_init(&kbdev->pm.gpu_powered_lock);

	return MALI_ERROR_NONE;

workq_fail:
	/* Undo the metrics init done above before reporting failure */
	kbasep_pm_metrics_term(kbdev);
	return MALI_ERROR_FUNCTION_FAILED;
}
/**
 * Initialize the per-device Job Scheduler data.
 *
 * Reads the JS timing/timeslice configuration attributes, logs them,
 * computes the IRQ throttle cycle count, clears the per-address-space
 * data, derives per-slot core requirements, then initializes the runpool
 * and queue mutexes, the runpool IRQ spinlock and the JS policy.
 * Progress is accumulated in js_devdata->init_status so that a partial
 * init can be safely terminated by the caller.
 *
 * @param kbdev Kbase device whose js_data is to be initialized.
 *
 * @return MALI_ERROR_NONE when every stage initialized
 *         (init_status == JS_DEVDATA_INIT_ALL),
 *         MALI_ERROR_FUNCTION_FAILED otherwise (no cleanup is performed
 *         here — the caller terminates the partially-initialized data).
 */
mali_error kbasep_js_devdata_init( kbase_device *kbdev )
{
	kbasep_js_device_data *js_devdata;
	mali_error err;
	int i;
	u16 as_present;
	osk_error osk_err;

	OSK_ASSERT( kbdev != NULL );

	js_devdata = &kbdev->js_data;

	/* Bitmask with one bit set per available address space */
	as_present = (1U << kbdev->nr_address_spaces) - 1;

	OSK_ASSERT( js_devdata->init_status == JS_DEVDATA_INIT_NONE );

	js_devdata->nr_contexts_running = 0;
	js_devdata->as_free = as_present; /* All ASs initially free */
	js_devdata->runpool_irq.nr_nss_ctxs_running = 0;
	js_devdata->runpool_irq.nr_permon_jobs_submitted = 0;
	js_devdata->runpool_irq.submit_allowed = 0u; /* No ctx allowed to submit */

	/* Config attributes */
	js_devdata->scheduling_tick_ns = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS );
	js_devdata->soft_stop_ticks = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS );
	js_devdata->hard_stop_ticks_ss = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS );
	js_devdata->hard_stop_ticks_nss = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS );
	js_devdata->gpu_reset_ticks_ss = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS );
	js_devdata->gpu_reset_ticks_nss = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS );
	js_devdata->ctx_timeslice_ns = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS );
	js_devdata->cfs_ctx_runtime_init_slices = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES );
	js_devdata->cfs_ctx_runtime_min_slices = (u32)kbasep_get_config_value( kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES );

	/* Log the effective configuration for debugging */
	OSK_PRINT_INFO( OSK_BASE_JM, "JS Config Attribs: " );
	OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->scheduling_tick_ns:%d", js_devdata->scheduling_tick_ns );
	OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->soft_stop_ticks:%d", js_devdata->soft_stop_ticks );
	OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->hard_stop_ticks_ss:%d", js_devdata->hard_stop_ticks_ss );
	OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->hard_stop_ticks_nss:%d", js_devdata->hard_stop_ticks_nss );
	OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->gpu_reset_ticks_ss:%d", js_devdata->gpu_reset_ticks_ss );
	OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->gpu_reset_ticks_nss:%d", js_devdata->gpu_reset_ticks_nss );
	OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->ctx_timeslice_ns:%d", js_devdata->ctx_timeslice_ns );
	OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->cfs_ctx_runtime_init_slices:%d", js_devdata->cfs_ctx_runtime_init_slices );
	OSK_PRINT_INFO( OSK_BASE_JM, "\tjs_devdata->cfs_ctx_runtime_min_slices:%d", js_devdata->cfs_ctx_runtime_min_slices );

#if MALI_BACKEND_KERNEL
	/* Only output on real kernel modules, otherwise it fills up multictx testing output */
#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0
	OSK_PRINT( OSK_BASE_JM,
			   "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%d at %dns per tick. Other soft-stops may still occur.",
			   js_devdata->soft_stop_ticks,
			   js_devdata->scheduling_tick_ns );
#endif
#if KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
	OSK_PRINT( OSK_BASE_JM,
			   "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_nss==%d at %dns per tick. Other hard-stops may still occur.",
			   js_devdata->hard_stop_ticks_ss,
			   js_devdata->hard_stop_ticks_nss,
			   js_devdata->scheduling_tick_ns );
#endif
#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0 && KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
	OSK_PRINT( OSK_BASE_JM, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing." );
#endif
#endif /* MALI_BACKEND_KERNEL */

	/* setup the number of irq throttle cycles base on given time */
	{
		u32 irq_throttle_time_us = kbdev->gpu_props.irq_throttle_time_us;
		u32 irq_throttle_cycles = kbasep_js_convert_us_to_gpu_ticks_max_freq(kbdev, irq_throttle_time_us);
		osk_atomic_set( &kbdev->irq_throttle_cycles, irq_throttle_cycles);
	}

	/* Clear the AS data, including setting NULL pointers */
	OSK_MEMSET( &js_devdata->runpool_irq.per_as_data[0], 0, sizeof(js_devdata->runpool_irq.per_as_data) );

	for ( i = 0; i < kbdev->nr_job_slots; ++i )
	{
		js_devdata->js_reqs[i] = core_reqs_from_jsn_features( kbdev->job_slot_features[i] );
	}
	js_devdata->init_status |= JS_DEVDATA_INIT_CONSTANTS;

	/* On error, we could continue on: providing none of the below resources
	 * rely on the ones above */
	osk_err = osk_mutex_init( &js_devdata->runpool_mutex, OSK_LOCK_ORDER_JS_RUNPOOL );
	if ( osk_err == OSK_ERR_NONE )
	{
		js_devdata->init_status |= JS_DEVDATA_INIT_RUNPOOL_MUTEX;
	}

	osk_err = osk_mutex_init( &js_devdata->queue_mutex, OSK_LOCK_ORDER_JS_QUEUE );
	if ( osk_err == OSK_ERR_NONE )
	{
		js_devdata->init_status |= JS_DEVDATA_INIT_QUEUE_MUTEX;
	}

	osk_err = osk_spinlock_irq_init( &js_devdata->runpool_irq.lock, OSK_LOCK_ORDER_JS_RUNPOOL_IRQ );
	if ( osk_err == OSK_ERR_NONE )
	{
		js_devdata->init_status |= JS_DEVDATA_INIT_RUNPOOL_IRQ_LOCK;
	}

	err = kbasep_js_policy_init( kbdev );
	if ( err == MALI_ERROR_NONE)
	{
		js_devdata->init_status |= JS_DEVDATA_INIT_POLICY;
	}

	/* On error, do no cleanup; this will be handled by the caller(s), since
	 * we've designed this resource to be safe to terminate on init-fail */
	if ( js_devdata->init_status != JS_DEVDATA_INIT_ALL)
	{
		return MALI_ERROR_FUNCTION_FAILED;
	}

	return MALI_ERROR_NONE;
}
/**
 * Initialize the power-management core state for a device.
 *
 * Initializes the PM mutex, flags and wait queues, wires up the
 * integrator's PM callbacks (or NULLs when none are configured), reads
 * the DVFS frequency from the configuration, starts the metrics
 * subsystem, initializes the PM spinlocks, then brings up core
 * availability (CA) and the PM policy, unwinding on failure.
 *
 * @param kbdev Kbase device to initialize.
 *
 * @return MALI_ERROR_NONE on success; the metrics-init error if that
 *         stage fails; MALI_ERROR_FUNCTION_FAILED if CA or policy init
 *         fails (earlier stages are unwound via the goto labels).
 */
mali_error kbase_pm_init(kbase_device *kbdev)
{
	mali_error ret = MALI_ERROR_NONE;
	kbase_pm_callback_conf *callbacks;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	kbdev->pm.gpu_powered = MALI_FALSE;
	kbdev->pm.suspending = MALI_FALSE;
#ifdef CONFIG_MALI_DEBUG
	kbdev->pm.driver_ready_for_irqs = MALI_FALSE;
#endif				/* CONFIG_MALI_DEBUG */
	kbdev->pm.gpu_in_desired_state = MALI_TRUE;
	init_waitqueue_head(&kbdev->pm.gpu_in_desired_state_wait);

	/* Integrator-supplied PM callbacks are optional; fall back to NULLs */
	callbacks = (kbase_pm_callback_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
	if (callbacks) {
		kbdev->pm.callback_power_on = callbacks->power_on_callback;
		kbdev->pm.callback_power_off = callbacks->power_off_callback;
		kbdev->pm.callback_power_runtime_init = callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term = callbacks->power_runtime_term_callback;
		kbdev->pm.callback_power_runtime_on = callbacks->power_runtime_on_callback;
		kbdev->pm.callback_power_runtime_off = callbacks->power_runtime_off_callback;
	} else {
		kbdev->pm.callback_power_on = NULL;
		kbdev->pm.callback_power_off = NULL;
		kbdev->pm.callback_power_runtime_init = NULL;
		kbdev->pm.callback_power_runtime_term = NULL;
		kbdev->pm.callback_power_runtime_on = NULL;
		kbdev->pm.callback_power_runtime_off = NULL;
	}

	kbdev->pm.platform_dvfs_frequency = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ);

	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (MALI_ERROR_NONE != ret)
		return ret;

	init_waitqueue_head(&kbdev->pm.l2_powered_wait);
	kbdev->pm.l2_powered = 0;

	init_waitqueue_head(&kbdev->pm.reset_done_wait);
	kbdev->pm.reset_done = MALI_FALSE;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.power_change_lock);
	spin_lock_init(&kbdev->pm.gpu_cycle_counter_requests_lock);
	spin_lock_init(&kbdev->pm.gpu_powered_lock);

	if (MALI_ERROR_NONE != kbase_pm_ca_init(kbdev))
		goto workq_fail;

	if (MALI_ERROR_NONE != kbase_pm_policy_init(kbdev))
		goto pm_policy_fail;

	return MALI_ERROR_NONE;

pm_policy_fail:
	/* Unwind CA init before falling through to metrics teardown */
	kbase_pm_ca_term(kbdev);
workq_fail:
	kbasep_pm_metrics_term(kbdev);
	return MALI_ERROR_FUNCTION_FAILED;
}
/**
 * Initialize the power-management core state for a device (MTK variant).
 *
 * Same as the generic kbase_pm_init but additionally wires up the
 * MediaTek GPU DVFS machinery: registers input-boost / power-limit
 * callbacks and exports boost / frequency-level / loading hooks to the
 * MTK HAL before the metrics, CA and policy stages.
 *
 * @param kbdev Kbase device to initialize.
 *
 * @return MALI_ERROR_NONE on success; the metrics-init error if that
 *         stage fails; MALI_ERROR_FUNCTION_FAILED if CA or policy init
 *         fails (earlier stages are unwound via the goto labels).
 */
mali_error kbase_pm_init(struct kbase_device *kbdev)
{
	mali_error ret = MALI_ERROR_NONE;
	struct kbase_pm_callback_conf *callbacks;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	kbdev->pm.gpu_powered = MALI_FALSE;
	kbdev->pm.suspending = MALI_FALSE;
#ifdef CONFIG_MALI_DEBUG
	kbdev->pm.driver_ready_for_irqs = MALI_FALSE;
#endif				/* CONFIG_MALI_DEBUG */
	kbdev->pm.gpu_in_desired_state = MALI_TRUE;
	init_waitqueue_head(&kbdev->pm.gpu_in_desired_state_wait);

	/* Integrator-supplied PM callbacks are optional; fall back to NULLs.
	 * This variant also carries suspend/resume callbacks. */
	callbacks = (struct kbase_pm_callback_conf *)kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
	if (callbacks) {
		kbdev->pm.callback_power_on = callbacks->power_on_callback;
		kbdev->pm.callback_power_off = callbacks->power_off_callback;
		kbdev->pm.callback_power_suspend = callbacks->power_suspend_callback;
		kbdev->pm.callback_power_resume = callbacks->power_resume_callback;
		kbdev->pm.callback_power_runtime_init = callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term = callbacks->power_runtime_term_callback;
		kbdev->pm.callback_power_runtime_on = callbacks->power_runtime_on_callback;
		kbdev->pm.callback_power_runtime_off = callbacks->power_runtime_off_callback;
	} else {
		kbdev->pm.callback_power_on = NULL;
		kbdev->pm.callback_power_off = NULL;
		kbdev->pm.callback_power_suspend = NULL;
		kbdev->pm.callback_power_resume = NULL;
		kbdev->pm.callback_power_runtime_init = NULL;
		kbdev->pm.callback_power_runtime_term = NULL;
		kbdev->pm.callback_power_runtime_on = NULL;
		kbdev->pm.callback_power_runtime_off = NULL;
	}

	/* MTK GPU DVFS init */
	_mtk_gpu_dvfs_init();

	/* MTK Register input boost and power limit call back function */
	mt_gpufreq_input_boost_notify_registerCB(mtk_gpu_input_boost_CB);
	mt_gpufreq_power_limit_notify_registerCB(mtk_gpu_power_limit_CB);

	/* Register gpu boost function to MTK HAL */
	mtk_boost_gpu_freq_fp = mtk_kbase_boost_gpu_freq;
	mtk_custom_boost_gpu_freq_fp = mtk_kbase_custom_boost_gpu_freq;	/* used for for performance service boost */
	mtk_set_bottom_gpu_freq_fp = mtk_kbase_ged_bottom_gpu_freq;	/* used for GED boost */
	mtk_custom_get_gpu_freq_level_count_fp = mtk_kbase_custom_get_gpu_freq_level_count;	/* MTK MET use */
	mtk_get_gpu_loading_fp = kbasep_get_gl_utilization;

	kbdev->pm.platform_dvfs_frequency = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ);

	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (MALI_ERROR_NONE != ret)
		return ret;

	init_waitqueue_head(&kbdev->pm.l2_powered_wait);
	kbdev->pm.l2_powered = 0;

	init_waitqueue_head(&kbdev->pm.reset_done_wait);
	kbdev->pm.reset_done = MALI_FALSE;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.power_change_lock);
	spin_lock_init(&kbdev->pm.gpu_cycle_counter_requests_lock);
	spin_lock_init(&kbdev->pm.gpu_powered_lock);

	if (MALI_ERROR_NONE != kbase_pm_ca_init(kbdev))
		goto workq_fail;

	if (MALI_ERROR_NONE != kbase_pm_policy_init(kbdev))
		goto pm_policy_fail;

	return MALI_ERROR_NONE;

pm_policy_fail:
	/* Unwind CA init before falling through to metrics teardown */
	kbase_pm_ca_term(kbdev);
workq_fail:
	kbasep_pm_metrics_term(kbdev);
	return MALI_ERROR_FUNCTION_FAILED;
}