Example #1
mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
{
	MALI_DEBUG_ASSERT_POINTER(cache);

	if (NULL != cache)
	{
		/* If the last cache invalidation was done by a job with a higher id we
		 * don't have to flush. Since user space will store jobs with their
		 * corresponding memory in sequence (first job #0, then job #1, ...),
		 * we don't have to flush for job n-1 if job n has already invalidated
		 * the cache since we know for sure that job n-1's memory was already
		 * written when job n was started. */
		if (((s32)id) <= ((s32)cache->last_invalidated_id))
		{
			return MALI_FALSE;
		}
		else
		{
			cache->last_invalidated_id = mali_scheduler_get_new_id();
		}

		mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
	}
	return MALI_TRUE;
}
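The (s32) casts keep this comparison meaningful after the scheduler's id counter wraps past 0xFFFFFFFF. A minimal standalone sketch of the same test near the wrap point (typedefs assumed; relies on the two's-complement conversion the driver's targets use):

#include <stdio.h>

typedef unsigned int u32;
typedef int s32;

/* Mirrors the test in mali_l2_cache_invalidate_conditional:
 * "id is not newer than the last invalidation id". */
static int not_newer(u32 id, u32 last)
{
	return ((s32)id) <= ((s32)last);
}

int main(void)
{
	/* Ids issued just before and just after the 32-bit wrap keep their
	 * relative order under the signed view (0xFFFFFFFE reads as -2). */
	printf("%d\n", not_newer(0xFFFFFFFEu, 0x00000001u)); /* 1: older id, flush skipped */
	printf("%d\n", not_newer(0x00000001u, 0xFFFFFFFEu)); /* 0: newer id, flush needed */
	return 0;
}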
Example #2
void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
{
	MALI_DEBUG_ASSERT_POINTER(cache);

	if (NULL != cache)
	{
		cache->last_invalidated_id = mali_scheduler_get_new_id();
		mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
	}
}
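Since this variant also bumps last_invalidated_id, it interacts with the conditional form from Example #1. A hypothetical sequence (helper name assumed, not from the driver):

/* Hypothetical helper, not from the driver: after an unconditional
 * invalidate, a conditional call for an older job id becomes a no-op. */
static void invalidate_then_skip(struct mali_l2_cache_core *l2, u32 old_job_id)
{
	mali_l2_cache_invalidate(l2); /* bumps last_invalidated_id, clears all */

	/* old_job_id now compares as not-newer, so this returns MALI_FALSE
	 * without sending another CLEAR_ALL. */
	(void)mali_l2_cache_invalidate_conditional(l2, old_job_id);
}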
Example #3
_mali_osk_errcode_t mali_l2_cache_invalidate_pages(struct mali_l2_cache_core *cache, u32 *pages, u32 num_pages)
{
	u32 i;
	_mali_osk_errcode_t ret1, ret = _MALI_OSK_ERR_OK;

	for (i = 0; i < num_pages; i++)
	{
		ret1 = mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[i]);
		if (_MALI_OSK_ERR_OK != ret1)
		{
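			/* Remember the failure, but keep clearing the remaining pages. */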
			ret = ret1;
		}
	}

	return ret;
}
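Unlike the CLEAR_ALL command above, this helper issues one MALI400_L2_CACHE_REGISTER_CLEAR_PAGE write per physical page and reports the most recent failure. A hypothetical caller sketch (function and parameter names assumed, not from the driver):

/* Hypothetical caller, not from the driver: make sure the L2 holds no
 * lines for a physical allocation before its pages are reused. */
static _mali_osk_errcode_t release_backing_pages(struct mali_l2_cache_core *l2,
						 u32 *phys_pages, u32 num_pages)
{
	_mali_osk_errcode_t err;

	err = mali_l2_cache_invalidate_pages(l2, phys_pages, num_pages);
	if (_MALI_OSK_ERR_OK != err)
	{
		MALI_PRINT_ERROR(("Failed to clear pages from L2\n"));
	}
	return err;
}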
Example #4
void mali_l2_cache_invalidate_all(void)
{
	u32 i;
	for (i = 0; i < mali_global_num_l2_cache_cores; i++)
	{
		/* Additional check: only touch the core while its power state is locked on */
		if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i]))
		{
			_mali_osk_errcode_t ret;
			mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_id();
			ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
			if (_MALI_OSK_ERR_OK != ret)
			{
				MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
			}
		}
		mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
	}
}
Example #5
void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
{
	u32 i;
	for (i = 0; i < mali_global_num_l2_cache_cores; i++)
	{
		/* Additional check: only touch the core while its power state is locked on */
		if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i]))
		{
			u32 j;
			for (j = 0; j < num_pages; j++)
			{
				_mali_osk_errcode_t ret;
				ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
				if (_MALI_OSK_ERR_OK != ret)
				{
					MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
				}
			}
		}
		mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
	}
}
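Both of these last two examples share the same guard: commands are sent only while the core's power state is pinned on, and the unlock runs for every core whether or not the lock call returned MALI_TRUE. A generic sketch of that pattern (helper name assumed, not part of the driver):

/* Hypothetical helper, not from the driver: run an L2 operation only
 * while the core's power state is locked on; always release the lock. */
static void with_l2_powered(struct mali_l2_cache_core *l2,
			    void (*op)(struct mali_l2_cache_core *l2))
{
	if (MALI_TRUE == mali_l2_cache_lock_power_state(l2))
	{
		op(l2);
	}
	mali_l2_cache_unlock_power_state(l2);
}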
Example #6
void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
	if (cache && cache->pm_domain && cache->pm_domain->state == MALI_PM_DOMAIN_OFF)
	{
		return;
	}
	
	/* Invalidate cache (just to keep it in a known state at startup) */
	mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);

	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	if (MALI_L2_PAUSE == cache->mali_l2_status)
	{
		_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

		return;
	}

	/* Enable cache */
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);

	/* Restart any performance counters (if enabled) */
	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
	}

	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
	}

	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
}
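The variant below, apparently from a different driver drop, omits the power-domain and pause guards above and narrows counter_lock to just the performance-counter restore.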
Example #7
void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
	/* Invalidate cache (just to keep it in a known state at startup) */
	mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);

	/* Enable cache */
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);

	/* Restart any performance counters (if enabled) */
	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
	}

	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
	}

	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
}
Example #8
_mali_osk_errcode_t mali_l2_cache_invalidate_all(struct mali_l2_cache_core *cache)
{
	return mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
}