Example #1
void mali_l2_cache_pause_all(mali_bool pause)
{
	u32 i;
	struct mali_l2_cache_core *cache;
	u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
	MALI_L2_STATUS status = MALI_L2_NORMAL;

	if (pause)
	{
		status = MALI_L2_PAUSE;
	}

	for (i = 0; i < num_cores; i++)
	{
		cache = mali_l2_cache_core_get_glob_l2_core(i);
		if (NULL != cache)
		{
			_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
			_mali_osk_lock_wait(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
			cache->mali_l2_status = status;
			_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
			_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
		}
	}

	/* When resuming from pause, invalidate the caches here so that any
	** cache operations lost during the pause period are accounted for,
	** keeping the SW status consistent with the L2 cache status.
	*/
	if (!pause)
	{
		mali_l2_cache_invalidate_all();
	}
}
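A minimal sketch of the intended call pattern around this function; do_critical_operation() is a hypothetical placeholder, and the pause/resume pairing is inferred from the code above:

/* Hypothetical caller: pause all L2 caches, perform work that must not
 * race with cache commands or counter access, then resume. Resuming
 * triggers the invalidate-all inside mali_l2_cache_pause_all() above. */
static void example_pause_sequence(void)
{
	mali_l2_cache_pause_all(MALI_TRUE);   /* set MALI_L2_PAUSE on all cores */
	do_critical_operation();              /* hypothetical critical section */
	mali_l2_cache_pause_all(MALI_FALSE);  /* resume and invalidate all */
}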
_mali_osk_errcode_t mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
	/* Invalidate cache (just to keep it in a known state at startup) */
	mali_l2_cache_invalidate_all(cache);

	/* Enable cache */
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);

	/* Restart any performance counters (if enabled) */
	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
	}

	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
	}

	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	return _MALI_OSK_ERR_OK;
}
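A typical caller resets each L2 core during bring-up and checks the returned error code; a minimal sketch, reusing mali_l2_cache_core_get_glob_l2_core() from the first example (the surrounding init context is assumed):

/* Hypothetical init path: fetch core 0 and reset it, checking the result. */
struct mali_l2_cache_core *cache = mali_l2_cache_core_get_glob_l2_core(0);
if (NULL != cache)
{
	if (_MALI_OSK_ERR_OK != mali_l2_cache_reset(cache))
	{
		/* handle reset failure (assumption: policy is up to the caller) */
	}
}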
void mali_cluster_l2_cache_invalidate_all_force(struct mali_cluster *cluster)
{
	MALI_DEBUG_ASSERT_POINTER(cluster);

	if (NULL != cluster->l2)
	{
		mali_l2_cache_invalidate_all(cluster->l2);
	}
}
void mali_cluster_l2_cache_invalidate_all(struct mali_cluster *cluster, u32 id)
{
	MALI_DEBUG_ASSERT_POINTER(cluster);

	if (NULL != cluster->l2)
	{
		/* If the last cache invalidation was done by a job with a higher id we
		 * don't have to flush. Since user space will store jobs w/ their
		 * corresponding memory in sequence (first job #0, then job #1, ...),
		 * we don't have to flush for job n-1 if job n has already invalidated
		 * the cache since we know for sure that job n-1's memory was already
		 * written when job n was started (a call sketch follows this
		 * function). */
		if (cluster->last_invalidated_id > id)
		{
			return;
		}

		cluster->last_invalidated_id = mali_scheduler_get_new_id();

		mali_l2_cache_invalidate_all(cluster->l2);
	}
}
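To make the early-out above concrete, a hypothetical call sequence (the id values are illustrative only, not taken from the driver):

/* Assume job #5 has just invalidated, so last_invalidated_id >= 5. */
mali_cluster_l2_cache_invalidate_all(cluster, 5); /* invalidates, refreshes id */
mali_cluster_l2_cache_invalidate_all(cluster, 3); /* 5 > 3: skipped */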
_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
	u32 left = size;
	int i;
	mali_bool pd_changed = MALI_FALSE;
	u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
	u32 num_pages_inv = 0;
	mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */

	/* For all page directory entries in range. */
	for (i = first_pde; i <= last_pde; i++) {
		u32 size_in_pde, offset;

		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);

		/* Offset into page table, 0 if mali_address is 4MiB aligned */
		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
			size_in_pde = left;
		} else {
			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
		}

		pagedir->page_entries_usage_count[i]--;

		/* If entire page table is unused, free it */
		if (0 == pagedir->page_entries_usage_count[i]) {
			u32 page_phys;
			void *page_virt;
			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
			/* last reference removed, no need to zero out each PTE */

			page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
			page_virt = pagedir->page_entries_mapped[i];
			pagedir->page_entries_mapped[i] = NULL;
			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);

			mali_mmu_release_table_page(page_phys, page_virt);
			pd_changed = MALI_TRUE;
		} else {
			MALI_DEBUG_ASSERT(num_pages_inv < 2);
			if (num_pages_inv < 2) {
				pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
				num_pages_inv++;
			} else {
				invalidate_all = MALI_TRUE;
			}

			/* If part of the page table is still in use, zero the relevant PTEs */
			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
		}

		left -= size_in_pde;
		mali_address += size_in_pde;
	}
	_mali_osk_write_mem_barrier();

	/* Invalidate the modified pages in the L2 caches */
	if (MALI_TRUE == pd_changed) {
		MALI_DEBUG_ASSERT(num_pages_inv < 3);
		if (num_pages_inv < 3) {
			pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
			num_pages_inv++;
		} else {
			invalidate_all = MALI_TRUE;
		}
	}

	if (invalidate_all) {
		mali_l2_cache_invalidate_all();
	} else {
		mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
	}

	MALI_SUCCESS;
}
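The unmap path above uses a common batching pattern: gather the physical addresses of the few modified pages into a small fixed-size array and invalidate only those, falling back to a full invalidation if the bound would be exceeded. A generic sketch of the pattern with hypothetical helpers (invalidate_pages() and invalidate_everything() are not driver API):

#define MAX_INV_BATCH 3

static void batched_invalidate(const u32 *dirty_pages, u32 num_dirty)
{
	u32 batch[MAX_INV_BATCH];
	u32 n = 0;
	u32 i;

	for (i = 0; i < num_dirty; i++) {
		if (n < MAX_INV_BATCH) {
			batch[n++] = dirty_pages[i]; /* targeted invalidation is cheap */
		} else {
			invalidate_everything();     /* hypothetical full fallback */
			return;
		}
	}
	invalidate_pages(batch, n);              /* hypothetical batched call */
}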