Example #1
int mali_dvfs_bottom_lock_push(int lock_step)
{
	int prev_status = _mali_osk_atomic_read(&bottomlock_status);

	if (prev_status < 0) {
		MALI_PRINT(("gpu bottom lock status is not valid for push\n"));
		return -1;
	}
	// not a bad idea to limit locking to 4th step, so let's leave this -gm
	if (samsung_rev() < EXYNOS4412_REV_2_0)
		lock_step = min(lock_step, MALI_DVFS_STEPS - 2);
	else
		lock_step = min(lock_step, MALI_DVFS_STEPS - 1);

	if (bottom_lock_step < lock_step) {
		bottom_lock_step = lock_step;
		if (get_mali_dvfs_status() < lock_step) {
			mali_regulator_set_voltage(mali_dvfs[lock_step].vol,
						   mali_dvfs[lock_step].vol);
			mali_clk_set_rate(mali_dvfs[lock_step].clock,
					  mali_dvfs[lock_step].freq);
			set_mali_dvfs_current_step(lock_step);
		}
	}
	return _mali_osk_atomic_inc_return(&bottomlock_status);
}
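The matching pop is not shown in this example set. A minimal sketch of what a counterpart could look like, assuming the same bottomlock_status counter and bottom_lock_step bookkeeping as above (the function name and the reset behaviour are assumptions, not taken from this page):

int mali_dvfs_bottom_lock_pop(void)
{
	int prev_status = _mali_osk_atomic_read(&bottomlock_status);

	/* A pop without a matching push leaves the counter at zero or below. */
	if (prev_status <= 0) {
		MALI_PRINT(("gpu bottom lock status is not valid for pop\n"));
		return -1;
	}

	/* Last holder is releasing: forget the remembered step so that a
	 * later push can lock at a lower step again. */
	if (prev_status == 1)
		bottom_lock_step = 0;

	return _mali_osk_atomic_dec_return(&bottomlock_status);
}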
Example #2
inline _mali_osk_errcode_t _mali_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
{
	u32 cur_index = _mali_osk_atomic_inc_return(&profile_insert_index) - 1;

#if MALI_TRACEPOINTS_ENABLED
	_mali_osk_profiling_add_event(event_id, data0);
#endif

	if (prof_state != MALI_PROFILING_STATE_RUNNING || cur_index >= profile_entry_count)
	{
		/*
		 * Not in recording mode, or buffer is full
		 * Decrement index again, and early out
		 */
		_mali_osk_atomic_dec(&profile_insert_index);
		return _MALI_OSK_ERR_FAULT;
	}

	profile_entries[cur_index].timestamp = _mali_timestamp_get();
	profile_entries[cur_index].event_id = event_id;
	profile_entries[cur_index].data[0] = data0;
	profile_entries[cur_index].data[1] = data1;
	profile_entries[cur_index].data[2] = data2;
	profile_entries[cur_index].data[3] = data3;
	profile_entries[cur_index].data[4] = data4;

	_mali_osk_atomic_inc(&profile_entries_written);

	return _MALI_OSK_ERR_OK;
}
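Note the reservation pattern used here (and again in Example #7 below): a slot is claimed optimistically with _mali_osk_atomic_inc_return() and handed back with _mali_osk_atomic_dec() on the failure path. Because several callers can claim slots concurrently, profile_insert_index may transiently exceed profile_entry_count, which is why the bound check uses >= rather than ==.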
Example #3
int mali_dvfs_bottom_lock_push(void)
{
	int prev_status = _mali_osk_atomic_read(&bottomlock_status);

	if (prev_status < 0) {
		MALI_PRINT(("gpu bottom lock status is not valid for push\n"));
		return -1;
	}

	if (prev_status == 0) {
		mali_regulator_set_voltage(mali_dvfs[1].vol, mali_dvfs[1].vol);
		mali_clk_set_rate(mali_dvfs[1].clock, mali_dvfs[1].freq);
		set_mali_dvfs_current_step(1);
	}

	return _mali_osk_atomic_inc_return(&bottomlock_status);
}
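Unlike Examples #1 and #4, this variant takes no lock step argument: only the first push (prev_status == 0) raises the GPU to DVFS step 1, and nested pushes merely increment the reference count.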
Example #4
int mali_dvfs_bottom_lock_push(int lock_step)
{
	int prev_status = _mali_osk_atomic_read(&bottomlock_status);

	if (prev_status < 0) {
		MALI_PRINT(("gpu bottom lock status is not valid for push\n"));
		return -1;
	}
	if (bottom_lock_step < lock_step) {
		bottom_lock_step = lock_step;
		if (get_mali_dvfs_status() < lock_step) {
			mali_regulator_set_voltage(mali_dvfs[lock_step].vol, mali_dvfs[lock_step].vol);
			mali_clk_set_rate(mali_dvfs[lock_step].clock, mali_dvfs[lock_step].freq);
			set_mali_dvfs_current_step(lock_step);
		}
	}
	return _mali_osk_atomic_inc_return(&bottomlock_status);
}
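This is the same push as Example #1 minus the Exynos revision clamp. Without the clamp, nothing bounds lock_step here, so an out-of-range value would index past the mali_dvfs table; the caller is trusted to pass a valid step.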
Example #5
int mali_voltage_lock_push(int lock_vol)
{
	int prev_status = _mali_osk_atomic_read(&voltage_lock_status);

	if (prev_status < 0) {
		MALI_PRINT(("gpu voltage lock status is not valid for push\n"));
		return -1;
	}
	if (prev_status == 0) {
		mali_lock_vol = lock_vol;
		if (mali_gpu_vol < mali_lock_vol)
			mali_regulator_set_voltage(mali_lock_vol, mali_lock_vol);
	} else {
		MALI_PRINT(("gpu voltage lock status is already pushed, current lock voltage : %d\n", mali_lock_vol));
		return -1;
	}

	return _mali_osk_atomic_inc_return(&voltage_lock_status);
}
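As with the bottom lock, only the push side appears on this page. A sketch of a plausible pop, assuming mali_gpu_vol tracks the normal operating voltage (the function name and restore behaviour are assumptions):

int mali_voltage_lock_pop(void)
{
	int prev_status = _mali_osk_atomic_read(&voltage_lock_status);

	if (prev_status <= 0) {
		MALI_PRINT(("gpu voltage lock status is not valid for pop\n"));
		return -1;
	}

	/* Last holder released: drop back to the normal operating voltage. */
	if (prev_status == 1)
		mali_regulator_set_voltage(mali_gpu_vol, mali_gpu_vol);

	return _mali_osk_atomic_dec_return(&voltage_lock_status);
}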
Example #6
static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
{
	u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;

	profile_entries[cur_index].timestamp = _mali_timestamp_get();
	profile_entries[cur_index].event_id = event_id;
	profile_entries[cur_index].data[0] = data0;
	profile_entries[cur_index].data[1] = data1;
	profile_entries[cur_index].data[2] = data2;
	profile_entries[cur_index].data[3] = data3;
	profile_entries[cur_index].data[4] = data4;

	/* If event is "leave API function", add current memory usage to the event
	 * as data point 4.  This is used in timeline profiling to indicate how
	 * much memory was used when leaving a function. */
	if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) {
		profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
	}
}
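This lock-free ring buffer never rejects events; "& profile_mask" simply wraps the index in place. For that masking to behave as a modulo, the buffer size must be a power of two and profile_mask must be that size minus one. A hypothetical setup, purely for illustration (this page does not show how profile_entries or profile_mask are created, and the entry type name is assumed):

static mali_profiling_entry *profile_entries;
static u32 profile_mask;

static _mali_osk_errcode_t setup_profile_buffer(u32 requested_entries)
{
	u32 size = 1;

	/* Round up to the next power of two so that "& profile_mask"
	 * is equivalent to "% size". */
	while (size < requested_entries)
		size <<= 1;

	profile_entries = _mali_osk_calloc(size, sizeof(*profile_entries));
	if (NULL == profile_entries)
		return _MALI_OSK_ERR_NOMEM;

	profile_mask = size - 1;

	return _MALI_OSK_ERR_OK;
}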
Example #7
inline void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
{
	u32 cur_index = _mali_osk_atomic_inc_return(&profile_insert_index) - 1;

	if (prof_state != MALI_PROFILING_STATE_RUNNING || cur_index >= profile_entry_count)
	{
		/*
		 * Not in recording mode, or buffer is full
		 * Decrement index again, and early out
		 */
		_mali_osk_atomic_dec(&profile_insert_index);
		return;
	}

	profile_entries[cur_index].timestamp = _mali_timestamp_get();
	profile_entries[cur_index].event_id = event_id;
	profile_entries[cur_index].data[0] = data0;
	profile_entries[cur_index].data[1] = data1;
	profile_entries[cur_index].data[2] = data2;
	profile_entries[cur_index].data[3] = data3;
	profile_entries[cur_index].data[4] = data4;

	_mali_osk_atomic_inc(&profile_entries_written);
}
Example #8
u32 mali_scheduler_get_new_id(void)
{
	u32 job_id = _mali_osk_atomic_inc_return(&mali_job_autonumber);
	return job_id;
}
Example #9
u32 mali_scheduler_get_new_cache_order(void)
{
	u32 job_cache_order = _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
	return job_cache_order;
}
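Both generators simply wrap _mali_osk_atomic_inc_return(); since it returns a u32, the ids wrap around at 2^32 rather than staying globally unique. The counters are assumed to be initialised once at scheduler start-up; a plausible init, inferred from the OSK atomic API rather than taken from this page:

_mali_osk_errcode_t mali_scheduler_initialize(void)
{
	/* Hypothetical sketch: only _mali_osk_atomic_init()/_term() are
	 * standard OSK calls, the surrounding function is assumed. */
	if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_autonumber, 0))
		return _MALI_OSK_ERR_FAULT;

	if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0)) {
		_mali_osk_atomic_term(&mali_job_autonumber);
		return _MALI_OSK_ERR_FAULT;
	}

	return _MALI_OSK_ERR_OK;
}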