/**
 * Pause or resume all global L2 cache cores.
 *
 * For every registered L2 core, takes both the counter lock and the command
 * lock so that neither counter accesses nor cache commands can run while the
 * pause status changes, then updates the core's status flag.
 *
 * @param pause MALI_TRUE to pause all cores, MALI_FALSE to resume them.
 */
void mali_l2_cache_pause_all(mali_bool pause)
{
	u32 i; /* was int: avoid signed/unsigned comparison against num_cores */
	struct mali_l2_cache_core *cache;
	u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
	MALI_L2_STATUS status = pause ? MALI_L2_PAUSE : MALI_L2_NORMAL;

	for (i = 0; i < num_cores; i++) {
		cache = mali_l2_cache_core_get_glob_l2_core(i);
		if (NULL != cache) {
			/* Hold both locks so the status flip is observed
			 * atomically by counter readers and command senders. */
			_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
			_mali_osk_lock_wait(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
			cache->mali_l2_status = status;
			_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
			_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
		}
	}

	/* Change from pause: invalidate here to compensate for any cache
	 * operation lost during the pause period, keeping the SW status
	 * consistent with the L2 cache state. */
	if (!pause) {
		mali_l2_cache_invalidate_all();
	}
}
/**
 * Stop profiling.
 *
 * Moves the profiling state machine from RUNNING to RETURN so that no new
 * events are accepted, waits for in-flight writers to finish inserting, then
 * reports the number of captured events.
 *
 * @param count Out: number of profiling events recorded.
 * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_INVALID_ARGS if
 *         profiling was not in the RUNNING state.
 */
inline _mali_osk_errcode_t _mali_osk_profiling_stop(u32 * count)
{
	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);

	if (prof_state != MALI_PROFILING_STATE_RUNNING)
	{
		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
	}

	/* go into return state (user to retrieve events), no more events will be added after this */
	prof_state = MALI_PROFILING_STATE_RETURN;

	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);

	/* wait for all elements to be completely inserted into array */
	/* NOTE(review): busy-wait with no yield — assumes concurrent writers run
	 * on other CPUs and always make progress; confirm this cannot spin
	 * forever if a writer is preempted on this CPU. */
	while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
	{
		/* do nothing */;
	}

	*count = _mali_osk_atomic_read(&profile_insert_index);

	return _MALI_OSK_ERR_OK;
}
/**
 * Select the event source for L2 performance counter 1.
 *
 * Records the source in the core state and, if the core is currently
 * powered, programs the hardware register as well.
 *
 * @param cache   The L2 cache core.
 * @param counter Event source id, or MALI_HW_CORE_NO_COUNTER to disable.
 * @return MALI_TRUE always.
 */
mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
{
	mali_bool powered;
	u32 reg_val;

	MALI_DEBUG_ASSERT_POINTER(cache);

	powered = mali_l2_cache_lock_power_state(cache);

	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	cache->counter_src1 = counter;
	/* A disabled counter is programmed as source 0. */
	reg_val = (MALI_HW_CORE_NO_COUNTER == counter) ? 0 : counter;

	if (MALI_TRUE == powered)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, reg_val);
	}

	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	mali_l2_cache_unlock_power_state(cache);

	return MALI_TRUE;
}
/**
 * Forward a user-space software counter to gator via the trace hook.
 *
 * Translates the DDK counter id into gator's numbering before emitting the
 * trace event; ids outside the known ranges are passed through unchanged.
 *
 * @param args Counter id and value from user space.
 * @return _MALI_OSK_ERR_OK always.
 */
_mali_osk_errcode_t _mali_ukk_transfer_sw_counters(_mali_uk_sw_counters_s *args)
{
	unsigned int gator_id;

	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);

	if (args->id >= MALI_EGL_COUNTER_OFFSET && args->id <= MALI_SHARED_COUNTER_OFFSET)
	{
		gator_id = args->id - MALI_EGL_COUNTER_OFFSET + GATOR_EGL_COUNTER_OFFSET;
	}
	else if (args->id >= MALI_GLES_COUNTER_OFFSET && args->id <= MALI_VG_COUNTER_OFFSET)
	{
		gator_id = args->id - MALI_GLES_COUNTER_OFFSET + GATOR_GLES_COUNTER_OFFSET;
	}
	else
	{
		/* Pass it straight through; gator will ignore it anyway. */
		gator_id = args->id;
	}

	trace_mali_sw_counter(gator_id, args->value);

	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);

	return _MALI_OSK_ERR_OK;
}
/**
 * Write a command to an L2 cache command register.
 *
 * Commands are serialized via command_lock. Before writing, the function
 * spins (bounded) until the hardware reports the command interface idle;
 * commands written while the interface is busy would be ignored.
 *
 * @param cache The L2 cache core to command.
 * @param reg   Command register offset.
 * @param val   Value to write.
 * @return _MALI_OSK_ERR_OK (via MALI_SUCCESS) on success;
 *         _MALI_OSK_ERR_FAULT if the interface never went idle.
 */
static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
{
	int i = 0;
	const int loop_count = 100000; /* bounded spin; this path must not sleep */

	/*
	 * Grab lock in order to send commands to the L2 cache in a serialized fashion.
	 * The L2 cache will ignore commands if it is busy.
	 */
	_mali_osk_lock_wait(cache->command_lock, _MALI_OSK_LOCKMODE_RW);

	/* First, wait for L2 cache command handler to go idle */
	for (i = 0; i < loop_count; i++)
	{
		if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
		{
			break;
		}
	}

	if (i == loop_count)
	{
		/* Timed out: release the lock and bail without touching the
		 * command register. */
		_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
		MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
		MALI_ERROR( _MALI_OSK_ERR_FAULT );
	}

	/* then issue the command */
	mali_hw_core_register_write(&cache->hw_core, reg, val);

	_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);

	MALI_SUCCESS;
}
/**
 * Record the active DVFS step in the global status, under the DVFS lock.
 *
 * @param step New DVFS step index.
 * @return MALI_TRUE always.
 */
mali_bool set_mali_dvfs_current_step(unsigned int step)
{
	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	maliDvfsStatus.currentStep = step;

	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	return MALI_TRUE;
}
/**
 * Attach backend-specific meminfo to a UMP allocation.
 *
 * Looks the allocation up by its secure id under the secure-id map lock and
 * forwards args to the backend's set() hook.
 *
 * @param memh UMP memory handle.
 * @param args Opaque backend arguments.
 * @return UMP_DD_SUCCESS on success; UMP_DD_INVALID if the id is unknown.
 */
UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_meminfo_set(ump_dd_handle memh, void* args)
{
	ump_dd_mem *mem;
	ump_secure_id secure_id;

	DEBUG_ASSERT_POINTER(memh);

	secure_id = ump_dd_secure_id_get(memh);

	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
	{
		/* Unknown id: release the lock before logging, as the original did. */
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(1, ("Failed to look up mapping in ump_meminfo_set(). ID: %u\n", (ump_secure_id)secure_id));
		return UMP_DD_INVALID;
	}

	device.backend->set(mem, args);

	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	return UMP_DD_SUCCESS;
}
/**
 * Read back both performance counter sources and current values.
 *
 * If the core is paused, returns immediately and leaves all four outputs
 * untouched. A value is only read from hardware when its source is enabled.
 *
 * Caller must hold the PM lock and guarantee the core is powered on.
 *
 * @param cache  The L2 cache core.
 * @param src0   Out: configured source for counter 0.
 * @param value0 Out: counter 0 value (only written if source enabled).
 * @param src1   Out: configured source for counter 1.
 * @param value1 Out: counter 1 value (only written if source enabled).
 */
void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
{
	MALI_DEBUG_ASSERT(NULL != src0);
	MALI_DEBUG_ASSERT(NULL != value0);
	MALI_DEBUG_ASSERT(NULL != src1);
	MALI_DEBUG_ASSERT(NULL != value1);

	/* Caller must hold the PM lock and know that we are powered on */
	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	/* While paused, skip the hardware reads entirely. */
	if (MALI_L2_PAUSE == cache->mali_l2_status)
	{
		_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
		return ;
	}

	*src0 = cache->counter_src0;
	*src1 = cache->counter_src1;

	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
	{
		*value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
	}

	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
	{
		*value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
	}

	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
}
void mali_clk_set_rate(unsigned int clk, unsigned int mhz) { int err; unsigned long rate = (unsigned long)clk * (unsigned long)mhz; _mali_osk_lock_wait(mali_freq_lock, _MALI_OSK_LOCKMODE_RW); MALI_DEBUG_PRINT(3, ("Mali platform: Setting frequency to %d mhz\n", clk)); if (mali_clk_get() == MALI_FALSE) { _mali_osk_lock_signal(mali_freq_lock, _MALI_OSK_LOCKMODE_RW); return; } err = clk_set_rate(mali_clock, rate); if (err) MALI_PRINT_ERROR(("Failed to set Mali clock: %d\n", err)); rate = clk_get_rate(mali_clock); GPU_MHZ = mhz; mali_gpu_clk = rate / mhz; MALI_PRINT(("Mali freq %dMhz\n", rate / mhz)); mali_clk_put(MALI_FALSE); _mali_osk_lock_signal(mali_freq_lock, _MALI_OSK_LOCKMODE_RW); }
void mali_regulator_set_voltage(int min_uV, int max_uV) { int voltage; _mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW); if( IS_ERR_OR_NULL(g3d_regulator) ) { MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n")); return; } MALI_DEBUG_PRINT(2, ("= regulator_set_voltage: %d, %d \n",min_uV, max_uV)); #if MALI_TIMELINE_PROFILING_ENABLED _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_VOLTS, min_uV, max_uV, 0, 0, 0); #endif regulator_set_voltage(g3d_regulator,min_uV,max_uV); voltage = regulator_get_voltage(g3d_regulator); #if MALI_TIMELINE_PROFILING_ENABLED _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_VOLTS, voltage, 0, 1, 0, 0); #endif mali_gpu_vol = voltage; MALI_DEBUG_PRINT(1, ("= regulator_get_voltage: %d \n",mali_gpu_vol)); _mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW); }
/**
 * Fetch one recorded profiling event by index.
 *
 * Only valid in the RETURN state, i.e. after _mali_osk_profiling_stop().
 *
 * @param index     Index of the event to fetch.
 * @param timestamp Out: event timestamp.
 * @param event_id  Out: event id.
 * @param data      Out: the event's five data words.
 * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_INVALID_ARGS when not
 *         in the RETURN state; _MALI_OSK_ERR_FAULT if index is out of range.
 */
inline _mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
{
	u32 i;

	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);

	if (prof_state != MALI_PROFILING_STATE_RETURN)
	{
		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
	}

	if (index >= _mali_osk_atomic_read(&profile_entries_written))
	{
		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
		return _MALI_OSK_ERR_FAULT;
	}

	*timestamp = profile_entries[index].timestamp;
	*event_id = profile_entries[index].event_id;
	for (i = 0; i < 5; i++)
	{
		data[i] = profile_entries[index].data[i];
	}

	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);

	return _MALI_OSK_ERR_OK;
}
/**
 * Bring an L2 cache core into a known state after power-up or reset.
 *
 * Invalidates the cache contents, enables cache access and read-allocate,
 * restores the max-reads setting, and re-programs any performance counters
 * that were active before the reset.
 *
 * @param cache The L2 cache core to reset.
 * @return _MALI_OSK_ERR_OK always.
 */
_mali_osk_errcode_t mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
	/* Invalidate cache (just to keep it in a known state at startup) */
	mali_l2_cache_invalidate_all(cache);

	/* Enable cache */
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);

	/* Restart any performance counters (if enabled) */
	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
	}

	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
	}

	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	return _MALI_OSK_ERR_OK;
}
/*
 * Periodic work item: compute GPU utilization over the elapsed period and
 * report it (in parts of 256) via mali_gpu_utilization_handler().
 *
 * NOTE(review): this definition is truncated in the visible chunk — the
 * remainder of the function (the division and timer rescheduling) is not
 * shown here.
 */
static void calculate_gpu_utilization(void* arg)
{
	u64 time_now;
	u64 time_period;
	u32 leading_zeroes;
	u32 shift_val;
	u32 work_normalized;
	u32 period_normalized;
	u32 utilization;

	_mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);

	if (accumulated_work_time == 0 && work_start_time == 0)
	{
		/* Don't reschedule timer, this will be started if new work arrives */
		timer_running = MALI_FALSE;

		_mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);

		/* No work done for this period, report zero usage */
		mali_gpu_utilization_handler(0);

		return;
	}

	time_now = _mali_osk_time_get_ns();
	time_period = time_now - period_start_time;

	/* If we are currently busy, update working period up to now */
	if (work_start_time != 0)
	{
		accumulated_work_time += (time_now - work_start_time);
		work_start_time = time_now;
	}

	/*
	 * We have two 64-bit values, a dividend and a divisor.
	 * To avoid dependencies to a 64-bit divider, we shift down the two values
	 * equally first.
	 * We shift the dividend up and possibly the divisor down, making the result X in 256.
	 */

	/* Shift the 64-bit values down so they fit inside a 32-bit integer */
	leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
	shift_val = 32 - leading_zeroes;
	work_normalized = (u32)(accumulated_work_time >> shift_val);
	period_normalized = (u32)(time_period >> shift_val);

	/*
	 * Now, we should report the usage in parts of 256
	 * this means we must shift up the dividend or down the divisor by 8
	 * (we could do a combination, but we just use one for simplicity,
	 * but the end result should be good enough anyway)
	 */
	if (period_normalized > 0x00FFFFFF)
	{
		/* The divisor is so big that it is safe to shift it down */
		period_normalized >>= 8;
	}
void mali_regulator_set_voltage(int min_uV, int max_uV) { int voltage; #if !MALI_DVFS_ENABLED min_uV = mali_gpu_vol; max_uV = mali_gpu_vol; #endif /* #if MALI_VOLTAGE_LOCK if (mali_vol_lock_flag == MALI_FALSE) { if (min_uV < MALI_BOTTOMLOCK_VOL || max_uV < MALI_BOTTOMLOCK_VOL) { min_uV = MALI_BOTTOMLOCK_VOL; max_uV = MALI_BOTTOMLOCK_VOL; } } else if (_mali_osk_atomic_read(&voltage_lock_status) > 0 ) { if (min_uV < mali_lock_vol || max_uV < mali_lock_vol) { #if MALI_DVFS_ENABLED int mali_vol_get; mali_vol_get = mali_vol_get_from_table(mali_lock_vol); if (mali_vol_get) { min_uV = mali_vol_get; max_uV = mali_vol_get; } #else min_uV = mali_lock_vol; max_uV = mali_lock_vol; #endif } } #endif */ _mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW); if( IS_ERR_OR_NULL(g3d_regulator) ) { MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n")); return; } MALI_DEBUG_PRINT(2, ("= regulator_set_voltage: %d, %d \n",min_uV, max_uV)); regulator_set_voltage(g3d_regulator,min_uV,max_uV); voltage = regulator_get_voltage(g3d_regulator); #if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED gVolt = voltage/1000; _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_GPU | MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, gFreq, gVolt, 0, 0, 0); #endif mali_gpu_vol = voltage; MALI_DEBUG_PRINT(1, ("= regulator_get_voltage: %d \n",mali_gpu_vol)); _mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW); }
MALI_STATIC_INLINE void mali_gp_scheduler_lock(void) { if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW)) { MALI_DEBUG_ASSERT(0); } MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n")); }
/**
 * Record the active DVFS step in the global status, under the DVFS lock.
 *
 * The stored step is always reduced modulo MAX_MALI_DVFS_STEPS.
 * NOTE(review): a step >= MAX_MALI_DVFS_STEPS additionally latches the
 * reduced step into mali_runtime_resumed — presumably callers bias the step
 * by MAX_MALI_DVFS_STEPS to flag a runtime-resume request; confirm against
 * the callers.
 *
 * @param step New DVFS step index (possibly biased, see note above).
 * @return MALI_TRUE always.
 */
mali_bool set_mali_dvfs_current_step(unsigned int step)
{
	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
	maliDvfsStatus.currentStep = step % MAX_MALI_DVFS_STEPS;
	if (step >= MAX_MALI_DVFS_STEPS)
		mali_runtime_resumed = maliDvfsStatus.currentStep;

	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
	return MALI_TRUE;
}
void mali_group_lock(struct mali_group *group) { if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(group->lock, _MALI_OSK_LOCKMODE_RW)) { /* Non-interruptable lock failed: this should never happen. */ MALI_DEBUG_ASSERT(0); } MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group)); }
MALI_STATIC_INLINE void mali_gp_scheduler_lock(void) { if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW)) { /* Non-interruptable lock failed: this should never happen. */ MALI_DEBUG_ASSERT(0); } MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n")); }
/**
 * Release a descriptor back to the mapping table.
 *
 * Out-of-range or already-free descriptors are silently ignored.
 *
 * @param map        The descriptor mapping table.
 * @param descriptor The descriptor to free.
 */
void mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
{
	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);

	if (descriptor >= 0 && descriptor < map->current_nr_mappings &&
	    _mali_osk_test_bit(descriptor, map->table->usage))
	{
		/* Clear the slot before marking the descriptor reusable. */
		map->table->mappings[descriptor] = NULL;
		_mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
	}

	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
}
/**
 * Take the PP scheduler lock (uninterruptible).
 *
 * In debug builds this also asserts the lock was not already owned and
 * records the owning thread id for lock-usage checks.
 */
MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
{
	if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
	{
		/* Non-interruptable lock failed: this should never happen. */
		MALI_DEBUG_ASSERT(0);
	}
	MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken\n"));
	/* Debug-only owner tracking: the lock must not already be owned. */
	MALI_DEBUG_ASSERT(0 == pp_scheduler_lock_owner);
	MALI_DEBUG_CODE(pp_scheduler_lock_owner = _mali_osk_get_tid());
}
/**
 * Bottom-half IRQ handler for the power management module (PMM).
 *
 * Processes queued PMM events, runs a pending policy check, and performs a
 * fatal reset when events were dropped or a fatal power error is flagged.
 * Returns early when the PMM has already been shut down.
 *
 * @param data Opaque pointer to the _mali_pmm_internal_state_t.
 */
void malipmm_irq_bhandler(void *data)
{
	_mali_pmm_internal_state_t *pmm;
	pmm = (_mali_pmm_internal_state_t *)data;
	MALI_DEBUG_ASSERT_POINTER(pmm);

#if PMM_OS_TEST
	if( power_test_check() ) return;
#endif

#ifdef CONFIG_SMP
	/* On SMP an additional global lock serializes bottom halves across CPUs,
	 * taken outside the per-PMM lock. */
	_mali_osk_lock_wait( mali_pmm_lock, _MALI_OSK_LOCKMODE_RW );
#endif /* CONFIG_SMP */

	MALI_PMM_LOCK(pmm);

	/* Quick out when we are shutting down */
	if( pmm->status == MALI_PMM_STATUS_OFF )
	{
		MALI_PMM_UNLOCK(pmm);
#ifdef CONFIG_SMP
		_mali_osk_lock_signal( mali_pmm_lock, _MALI_OSK_LOCKMODE_RW );
#endif /* CONFIG_SMP */
		return;
	}

	MALIPMM_DEBUG_PRINT( ("PMM: bhandler - Processing event\n") );

	if( pmm->missed > 0 )
	{
		/* Events were dropped: state may be inconsistent, force a reset. */
		MALI_PRINT_ERROR( ("PMM: Failed to send %d events", pmm->missed) );
		pmm_fatal_reset( pmm );
	}

	if( pmm->check_policy )
	{
		pmm->check_policy = MALI_FALSE;
		pmm_policy_check_policy(pmm);
	}
	else
	{
		/* Perform event processing */
		pmm_event_process();
		if( pmm->fatal_power_err )
		{
			/* Try a reset */
			pmm_fatal_reset( pmm );
		}
	}

	MALI_PMM_UNLOCK(pmm);
#ifdef CONFIG_SMP
	_mali_osk_lock_signal(mali_pmm_lock, _MALI_OSK_LOCKMODE_RW );
#endif /* CONFIG_SMP */
}
/**
 * Point an already-allocated descriptor at a new target object.
 *
 * NOTE(review): the write to mappings[descriptor] happens under the RO
 * (reader) side of map->lock, while mali_descriptor_mapping_free takes RW.
 * Confirm that concurrent set() calls on the same descriptor cannot occur
 * and that the table cannot be reallocated concurrently; otherwise this
 * should take the lock in RW mode.
 *
 * @param map        The descriptor mapping table.
 * @param descriptor Descriptor to update; must be valid and in use.
 * @param target     New object for the descriptor to reference.
 * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT if the
 *         descriptor is out of range or not allocated.
 */
_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
{
	_mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
	if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
	{
		/* Valid, in-use descriptor: swap in the new target. */
		map->table->mappings[descriptor] = target;
		result = _MALI_OSK_ERR_OK;
	}
	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
	MALI_ERROR(result);
}
mali_bool mali_orion_clk_set_rate(unsigned int clk, unsigned int mhz) { unsigned long rate = 0; mali_bool bis_vpll = MALI_FALSE; #ifdef CONFIG_VPLL_USE_FOR_TVENC bis_vpll = MALI_TRUE; #endif _mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW); if (mali_orion_clk_get(bis_vpll) == MALI_FALSE) return MALI_FALSE; rate = (unsigned long)clk * (unsigned long)mhz; MALI_DEBUG_PRINT(3,("= clk_set_rate : %d , %d \n",clk, mhz )); if (bis_vpll) { clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ); clk_set_parent(vpll_src_clock, ext_xtal_clock); clk_set_parent(sclk_vpll_clock, fout_vpll_clock); clk_set_parent(mali_parent_clock, sclk_vpll_clock); clk_set_parent(mali_clock, mali_parent_clock); } else { clk_set_parent(mali_parent_clock, mpll_clock); clk_set_parent(mali_clock, mali_parent_clock); } if (clk_enable(mali_clock) < 0) return MALI_FALSE; clk_set_rate(mali_clock, rate); rate = clk_get_rate(mali_clock); if (bis_vpll) mali_gpu_clk = (int)(rate / mhz); else mali_gpu_clk = (int)((rate + 500000) / mhz); GPU_MHZ = mhz; MALI_DEBUG_PRINT(3,("= clk_get_rate: %d \n",mali_gpu_clk)); mali_orion_clk_put(MALI_FALSE); _mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW); return MALI_TRUE; }
/**
 * Number of profiling events available for retrieval.
 *
 * @return The count of written events when profiling is in the RETURN
 *         state; 0 in any other state.
 */
inline u32 _mali_osk_profiling_get_count(void)
{
	u32 count = 0;

	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
	if (MALI_PROFILING_STATE_RETURN == prof_state)
	{
		count = _mali_osk_atomic_read(&profile_entries_written);
	}
	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);

	return count;
}
/**
 * Look up the object referenced by a descriptor.
 *
 * @param map        The descriptor mapping table.
 * @param descriptor Descriptor to look up.
 * @param target     Out: the mapped object, or NULL when invalid.
 * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT if the
 *         descriptor is out of range or not allocated.
 */
_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	int valid;

	MALI_DEBUG_ASSERT_POINTER(map);

	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);

	/* Short-circuit keeps test_bit from running on out-of-range values. */
	valid = (descriptor >= 0) && (descriptor < map->current_nr_mappings) &&
	        _mali_osk_test_bit(descriptor, map->table->usage);
	if (valid)
	{
		*target = map->table->mappings[descriptor];
		err = _MALI_OSK_ERR_OK;
	}
	else
	{
		*target = NULL;
	}

	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);

	MALI_ERROR(err);
}
/**
 * Deferred power-management work: suspend the GPU when it is idle.
 *
 * Suspends and enters deep sleep only when no PM references are held and a
 * suspend has not already been issued.
 */
static void mali_bottom_half_pm ( struct work_struct *work )
{
	_mali_osk_lock_wait(pm_lock, _MALI_OSK_LOCKMODE_RW);

	if (0 == _mali_osk_atomic_read(&mali_pm_ref_count) &&
	    0 == _mali_osk_atomic_read(&mali_suspend_called))
	{
		mali_pm_runtime_suspend();
		_mali_osk_atomic_inc(&mali_suspend_called);
		mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
	}

	_mali_osk_lock_signal(pm_lock, _MALI_OSK_LOCKMODE_RW);
}
/**
 * Set the G3D regulator voltage window and cache the achieved voltage in
 * mali_gpu_vol. No-op (with an error print) when the regulator is missing.
 *
 * @param min_uV Minimum acceptable voltage in microvolts.
 * @param max_uV Maximum acceptable voltage in microvolts.
 */
void mali_regulator_set_voltage(int min_uV, int max_uV)
{
	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	if(IS_ERR_OR_NULL(g3d_regulator))
	{
		MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
		/* Release the lock before bailing out. */
		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
		return;
	}

	MALI_DEBUG_PRINT(1, ("= regulator_set_voltage: %d, %d \n",min_uV, max_uV));

	regulator_set_voltage(g3d_regulator, min_uV, max_uV);
	/* Read back the voltage the regulator actually settled on. */
	mali_gpu_vol = regulator_get_voltage(g3d_regulator);
	MALI_DEBUG_PRINT(1, ("Mali voltage: %d\n", mali_gpu_vol));

	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
}
/**
 * Allocate one block from the block allocator for use as MMU page tables
 * and map it into kernel virtual address space.
 *
 * @param ctx   The block_allocator instance (opaque allocator context).
 * @param block Out: filled with physical base, size, mapping and release
 *              callback on success.
 * @return MALI_MEM_ALLOC_FINISHED on success, MALI_MEM_ALLOC_NONE when the
 *         free list is empty, MALI_MEM_ALLOC_INTERNAL_FAILURE on lock or
 *         mapping failure.
 */
static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
{
	block_allocator * info;
	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;

	MALI_DEBUG_ASSERT_POINTER(ctx);
	MALI_DEBUG_ASSERT_POINTER(block);
	info = (block_allocator*)ctx;

	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;

	if (NULL != info->first_free)
	{
		void * virt;
		u32 phys;
		u32 size;
		block_info * alloc;

		alloc = info->first_free;

		phys = get_phys(info, alloc); /* Does not modify info or alloc */
		size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
		virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );

		/* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
		 * because it's unlikely another allocator will be able to map in. */

		if ( NULL != virt )
		{
			/* Commit: unlink the block from the free list and fill in the
			 * caller's descriptor. */
			block->ctx = info; /* same as incoming ctx */
			block->handle = alloc;
			block->phys_base = phys;
			block->size = size;
			block->release = block_allocator_release_page_table_block;
			block->mapping = virt;

			info->first_free = alloc->next;

			alloc->next = NULL; /* Could potentially link many blocks together instead */

			result = MALI_MEM_ALLOC_FINISHED;
		}
		/* On mapping failure, the block stays on the free list and the
		 * default INTERNAL_FAILURE result is returned. */
	}
	else result = MALI_MEM_ALLOC_NONE;

	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);

	return result;
}
/** This function is called when Operating system wants to power down * the mali GPU device. */ static int mali_pm_suspend(struct device *dev) { int err = 0; _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW); if ((mali_device_state == _MALI_DEVICE_SUSPEND)) { _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return err; } #if MALI_DVFS_ENABLED mali_utilization_suspend(); #endif err = mali_device_suspend(MALI_PMM_EVENT_OS_POWER_DOWN, &pm_thread); mali_device_state = _MALI_DEVICE_SUSPEND; _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return err; }