Code Example #1
static int gralloc_lock(gralloc_module_t const *module, buffer_handle_t handle, int usage, int l, int t, int w, int h, void **vaddr)
{
	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Locking invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	private_handle_t *hnd = (private_handle_t *)handle;

	if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP || hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
		hnd->writeOwner = usage & GRALLOC_USAGE_SW_WRITE_MASK;
	}

	if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK))
	{
		*vaddr = (void *)hnd->base;
	}

	MALI_IGNORE(module);
	MALI_IGNORE(l);
	MALI_IGNORE(t);
	MALI_IGNORE(w);
	MALI_IGNORE(h);
	return 0;
}
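
For reference, MALI_IGNORE is the Mali driver's idiom for marking a deliberately unused variable or parameter so that release builds compile cleanly with warnings enabled (in many of the examples below the variable only feeds a MALI_DEBUG_ASSERT that disappears in release builds). A minimal sketch of the macro follows; the exact definition varies between driver trees, so treat both forms as assumptions rather than the verbatim header:

/* Sketch only: two definitions of MALI_IGNORE seen in different Mali trees. */
#define MALI_IGNORE(x) ((void)(x))  /* userspace-style: cast to void        */
/* #define MALI_IGNORE(x) x = x */  /* kernel-style: self-assignment        */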
Code Example #2
void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
{
#if defined(DEBUG)
	u32 pde_index, pte_index;
	u32 pde, pte;

	pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
	pte_index = MALI_MMU_PTE_ENTRY(fault_addr);


	pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
				     pde_index * sizeof(u32));


	if (pde & MALI_MMU_FLAGS_PRESENT) {
		u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);

		pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
					     pte_index * sizeof(u32));

		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
				     "\t\tPTE: %08x, page %08x is %s\n",
				     fault_addr, pte_addr, pte,
				     MALI_MMU_ENTRY_ADDRESS(pte),
				     pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
	} else {
		MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
				     fault_addr, pde));
	}
#else
	MALI_IGNORE(pagedir);
	MALI_IGNORE(fault_addr);
#endif
}
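
The walk above is a standard two-level page-table lookup: the faulting address selects a page directory entry (PDE); if the PDE is present, it points at a page table whose entry (PTE) maps the faulting page. A hypothetical decoding of the index macros, assuming 4 KB pages with 1024 entries per level (a 10/10/12 bit split); the real MALI_MMU_* definitions live in the MMU headers and may differ:

/* Assumed layout of a 32-bit Mali virtual address (sketch, not verbatim). */
#define MALI_MMU_PDE_ENTRY(addr)   (((addr) >> 22) & 0x03FF) /* top 10 bits     */
#define MALI_MMU_PTE_ENTRY(addr)   (((addr) >> 12) & 0x03FF) /* middle 10 bits  */
#define MALI_MMU_ENTRY_ADDRESS(v)  ((v) & 0xFFFFFC00)        /* strip flag bits */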
Code Example #3
static int gralloc_unlock(gralloc_module_t const* module, buffer_handle_t handle)
{
	MALI_IGNORE(module);
	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Unlocking invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	private_handle_t *hnd = (private_handle_t *)handle;

	if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP && hnd->writeOwner)
	{
#if GRALLOC_ARM_UMP_MODULE
		ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void *)hnd->base, hnd->size);
#else
		AERR("Buffer 0x%p is UMP type but it is not supported", hnd);
#endif
	} else if ( hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION && hnd->writeOwner)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		private_module_t *m = (private_module_t*)module;

		ion_sync_fd(m->ion_client, hnd->share_fd);
#endif
	}

	return 0;
}
Code Example #4
_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
{
	_mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
	MALI_IGNORE(event); /* event is not used for release code, and that is OK */

#if defined(CONFIG_MALI400_PROFILING)
	/*
	 * Manually generate user space events in kernel space.
	 * This saves user space from calling kernel space twice in this case.
	 * We just need to remember to add pid and tid manually.
	 */
	if ( event==_MALI_UK_VSYNC_EVENT_BEGIN_WAIT)
	{
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
		                              MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
		                              MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
		                              _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
	}

	if (event==_MALI_UK_VSYNC_EVENT_END_WAIT)
	{

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
		                              MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
		                              MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
		                              _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
	}
#endif

	MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
	MALI_SUCCESS;
}
Code Example #5
/** see comment below inside _flush() **/
MALI_STATIC void projob_cb_wrapper(mali_base_ctx_handle ctx, void * cb_param)
{
	struct mali_projob* projob = cb_param;
	MALI_DEBUG_ASSERT_POINTER(projob);
	MALI_IGNORE(ctx);
	projob->cb_func(projob->cb_param); /* call the real callback */
}
Code Example #6
File: mali_kernel_vsync.c  Project: Ante0/xxICSKernel
_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
{
	_mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
	MALI_IGNORE(event); /* event is not used for release code, and that is OK */
/*	u64 ts = _mali_timestamp_get();
 */

#if MALI_TIMELINE_PROFILING_ENABLED
	if ( event==_MALI_UK_VSYNC_EVENT_BEGIN_WAIT)
	{
		_mali_profiling_add_event(  MALI_PROFILING_EVENT_TYPE_SUSPEND |
		                            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
		                            MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
		                            0, 0, 0, 0, 0);
	}

	if ( event==_MALI_UK_VSYNC_EVENT_END_WAIT)
	{

		_mali_profiling_add_event(  MALI_PROFILING_EVENT_TYPE_RESUME |
		                            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
		                            MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
		                            0, 0, 0, 0, 0);
	}
#endif

	MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
	MALI_SUCCESS;
}
Code Example #7
_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	stall_success = mali_mmu_enable_stall(mmu);

	/* The stall cannot fail in the current HW state */
	MALI_DEBUG_ASSERT(stall_success);
	MALI_IGNORE(stall_success);

	MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));

	if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu))
	{
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
		/* no session is active, so just activate the empty page directory */
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
		mali_mmu_enable_paging(mmu);
		err = _MALI_OSK_ERR_OK;
	}
	mali_mmu_disable_stall(mmu);

	return err;
}
Code Example #8
_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
{
	_mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
	MALI_IGNORE(event); /* event is not used for release code, and that is OK */
/*	u64 ts = _mali_timestamp_get();
 */

	MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));

	MALI_SUCCESS;
}
Code Example #9
void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
{
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));

	stall_success = mali_mmu_enable_stall(mmu);
	MALI_DEBUG_ASSERT(stall_success);
	MALI_IGNORE(stall_success);
	mali_mmu_activate_address_space(mmu, pagedir->page_directory);
	mali_mmu_disable_stall(mmu);
}
Code Example #10
File: arm.c  Project: LCameron/linux-xlnx
int mali_platform_device_deinit(struct platform_device *device)
{
	MALI_IGNORE(device);

	MALI_DEBUG_PRINT(4, ("mali_platform_device_deinit() called\n"));

	mali_core_scaling_term();

#if defined(CONFIG_ARCH_REALVIEW)
	mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
#endif

	return 0;
}
Code Example #11
static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	void __user *address;
	mali_mem_allocation *descriptor;

	address = vmf->virtual_address;
	descriptor = (mali_mem_allocation *)vma->vm_private_data;

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	/*
	 * We always fail the call since all memory is pre-faulted when assigned to the process.
	 * Only the Mali cores can use page faults to extend buffers.
	*/

	MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
	MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void *)address));

	MALI_IGNORE(address);
	MALI_IGNORE(descriptor);

	return VM_FAULT_SIGBUS;
}
Code Example #12
void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
{
	mali_bool stall_success;

	MALI_DEBUG_ASSERT_POINTER(mmu);
	MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));

	stall_success = mali_mmu_enable_stall(mmu);

	/* This function can only be called when the core is idle, so the stall cannot fail. */
	MALI_DEBUG_ASSERT(stall_success);
	MALI_IGNORE(stall_success);

	mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
	mali_mmu_disable_stall(mmu);
}
Code Example #13
static int gralloc_unlock(gralloc_module_t const *module, buffer_handle_t handle)
{
	MALI_IGNORE(module);

	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Unlocking invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	private_handle_t *hnd = (private_handle_t *)handle;

	if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP && hnd->writeOwner)
	{
#if GRALLOC_ARM_UMP_MODULE
		ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void *)hnd->base, hnd->size);
#else
		AERR("Buffer 0x%p is UMP type but it is not supported", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION && hnd->writeOwner)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		hw_module_t *pmodule = NULL;
		private_module_t *m = NULL;

		if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0)
		{
			m = reinterpret_cast<private_module_t *>(pmodule);
			//ion_sync_fd(m->ion_client, hnd->share_fd);
		}
		else
		{
			AERR("Could not get gralloc module for handle 0x%p\n", handle);
		}

#endif
	}

	return 0;
}
Code Example #14
static void do_scaling(struct work_struct *work)
{
	mali_dvfs_threshold_table * pdvfs = pmali_plat->dvfs_table;
	int err = mali_perf_set_num_pp_cores(num_cores_enabled);
	scalingdbg(1, "set pp cores to %d\n", num_cores_enabled);
	MALI_DEBUG_ASSERT(0 == err);
	MALI_IGNORE(err);
	scalingdbg(1, "pdvfs[%d].freq_index=%d, pdvfs[%d].freq_index=%d\n",
			currentStep, pdvfs[currentStep].freq_index,
			lastStep, pdvfs[lastStep].freq_index);
	mali_clk_exected();
#ifdef CONFIG_MALI400_PROFILING
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
			MALI_PROFILING_EVENT_CHANNEL_GPU |
			MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
			get_current_frequency(),
			0,	0,	0,	0);
#endif
}
Code Example #15
File: scaling.c  Project: kozmo2k4/linux-amlogic
static void do_scaling(struct work_struct *work)
{
	mali_dvfs_threshold_table * pdvfs = pmali_plat->dvfs_table;
	int err = mali_perf_set_num_pp_cores(num_cores_enabled);
	MALI_DEBUG_ASSERT(0 == err);
	MALI_IGNORE(err);
	if (pdvfs[currentStep].freq_index != pdvfs[lastStep].freq_index) {
		mali_dev_pause();
		mali_clock_set(pdvfs[currentStep].freq_index);
		mali_dev_resume();
		lastStep = currentStep;
	}
#ifdef CONFIG_MALI400_PROFILING
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
					MALI_PROFILING_EVENT_CHANNEL_GPU |
					MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
					get_current_frequency(),
					0,	0,	0,	0);
#endif
}
Code Example #16
static int gralloc_lock_ycbcr(struct gralloc_module_t const* module,
            buffer_handle_t handle, int usage,
            int l, int t, int w, int h,
            struct android_ycbcr *ycbcr)
{
	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Locking invalid buffer 0x%p, returning error", handle );
		return -EINVAL;
	}
	private_handle_t* hnd = (private_handle_t*)handle;
	int ystride;
	int err=0;

	switch (hnd->format) {
		case HAL_PIXEL_FORMAT_YCrCb_420_SP:
			ystride = GRALLOC_ALIGN(hnd->width, 16);
			ycbcr->y  = (void*)hnd->base;
			ycbcr->cr = (void*)((uintptr_t)hnd->base + ystride * hnd->height);
			ycbcr->cb = (void*)((uintptr_t)hnd->base + ystride * hnd->height + 1);
			ycbcr->ystride = ystride;
			ycbcr->cstride = ystride;
			ycbcr->chroma_step = 2;
			memset(ycbcr->reserved, 0, sizeof(ycbcr->reserved));
			break;
		case HAL_PIXEL_FORMAT_YCbCr_420_SP:
			ystride = GRALLOC_ALIGN(hnd->width, 16);
			ycbcr->y  = (void*)hnd->base;
			ycbcr->cb = (void*)((uintptr_t)hnd->base + ystride * hnd->height);
			ycbcr->cr = (void*)((uintptr_t)hnd->base + ystride * hnd->height + 1);
			ycbcr->ystride = ystride;
			ycbcr->cstride = ystride;
			ycbcr->chroma_step = 2;
			memset(ycbcr->reserved, 0, sizeof(ycbcr->reserved));
			break;
		default:
			ALOGD("%s: Invalid format passed: 0x%x", __FUNCTION__,
			       hnd->format);
			err = -EINVAL;
	}

	MALI_IGNORE(module);
	MALI_IGNORE(usage);
	MALI_IGNORE(l);
	MALI_IGNORE(t);
	MALI_IGNORE(w);
	MALI_IGNORE(h);
	return err;
}
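
Both semi-planar branches above produce a single interleaved chroma plane immediately after the luma plane; only the V/U byte order differs between YCrCb (NV21) and YCbCr (NV12). A worked illustration of the pointer arithmetic, with assumed 640x480 dimensions chosen purely for the example:

#include <stddef.h>
#include <stdint.h>

static void nv21_layout_example(uint8_t *base)
{
	int width = 640, height = 480;
	int ystride = (width + 15) & ~15;           /* GRALLOC_ALIGN(width, 16) == 640 */
	uint8_t *y  = base;                         /* 640*480 bytes of luma           */
	uint8_t *cr = y + (size_t)ystride * height; /* start of interleaved VU plane   */
	uint8_t *cb = cr + 1;                       /* U samples sit one byte after V  */
	/* chroma_step == 2: consecutive samples of the same chroma channel are
	 * two bytes apart because V and U bytes alternate. */
	(void)cb;
}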
Code Example #17
void mali_pm_os_suspend(mali_bool os_suspend)
{
	int ret;

	MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));

	/* Suspend execution of all jobs, and go to inactive state */
	mali_executor_suspend();

	if (os_suspend) {
		mali_control_timer_suspend(MALI_TRUE);
	}

	mali_pm_exec_lock();

	ret = mali_pm_common_suspend();

	MALI_DEBUG_ASSERT(MALI_TRUE == ret);
	MALI_IGNORE(ret);

	mali_pm_exec_unlock();
}
Code Example #18
/**
 * Draw a quad onto a given master tile list. 
 *
 * This function adds one drawcall to a PP tile list memory area.
 *
 * It writes 16 bytes of command data to the given pointer at the given offset.
 * Sizes are passed in for safety reasons. The next 8 bytes are then written as an "end of list"
 * command.
 *
 * @param master_tile_list_mem - A CPU memory pointer. 
 *                               Should probably point to some mapped GPU memory 
 * @param tile_list_mem_offset - The offset of the memory to place the commands onto. 
 *                               Inout parameter, will be increased by 16 bytes after calling. 
 * @param tile_list_mem_size   - The size of the master_tile_list_mem
 * @param mali_vertex_address  - The GPU address of the vertex buffer
 * @param mali_rsw_address     - The GPU address of the RSW 
 *
 * Note: There must be at least 16+8 available bytes in the master tile list mem block. 
 * An assert will trigger otherwise. 
 *
 */
MALI_STATIC void _mali200_draw_pp_job_quad( mali_tilelist_cmd*      master_tile_list_mem,
                                            u32*    /*inout*/       tile_list_mem_offset, 
                                            u32                     tile_list_mem_size,
                                            mali_addr               mali_vertex_address,
                                            mali_addr               mali_rsw_address)
{
	mali_tilelist_cmd* ptr = (mali_tilelist_cmd*) (((u8*)master_tile_list_mem) + *tile_list_mem_offset);

	MALI_DEBUG_ASSERT_POINTER(master_tile_list_mem);
	MALI_DEBUG_ASSERT(*tile_list_mem_offset + 3*sizeof(mali_tilelist_cmd) <= tile_list_mem_size, 
	                  ("Not enough room in master tile list: space=%i, need %i", tile_list_mem_size-(*tile_list_mem_offset), 3*sizeof(mali_tilelist_cmd))); 
	MALI_IGNORE(tile_list_mem_size); /* for release builds */
	MALI_PL_CMD_SET_BASE_VB_RSW(ptr[0], mali_vertex_address, mali_rsw_address);
	MALI_PL_PRIMITIVE(ptr[1], 
	                  MALI_PRIMITIVE_TYPE_PIXEL_RECT, 
					  0, /* rsw index, always equal to the base RSW at this point */
					  0, /* vertex0 index*/
					  1, /* vertex1 index*/ 
					  2  /* vertex2 index*/);
	MALI_PL_CMD_END_OF_LIST(ptr[2]);
	/* update the inout ptr */
	(*tile_list_mem_offset) += 2*sizeof(mali_tilelist_cmd);

}
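
Note that the offset advances by only 16 bytes even though 24 bytes of commands were written: the END_OF_LIST command is deliberately left outside the consumed range, so the next drawcall's SET_BASE command overwrites it and the list is re-terminated. A hypothetical usage sketch, where tilelist, tilelist_size and the vb*/rsw* GPU addresses are placeholders rather than values from the source:

u32 offset = 0;
/* First quad: writes bytes 0..23 (SET_BASE, PRIMITIVE, END_OF_LIST) and
 * advances offset to 16, leaving END_OF_LIST outside the consumed range. */
_mali200_draw_pp_job_quad(tilelist, &offset, tilelist_size, vb0_addr, rsw0_addr);
/* Second quad: its SET_BASE lands on the previous END_OF_LIST at byte 16;
 * a fresh END_OF_LIST is written at bytes 32..39 and offset becomes 32. */
_mali200_draw_pp_job_quad(tilelist, &offset, tilelist_size, vb1_addr, rsw1_addr);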
Code Example #19
static int gralloc_unregister_buffer(gralloc_module_t const *module, buffer_handle_t handle)
{
	MALI_IGNORE(module);

	if (private_handle_t::validate(handle) < 0)
	{
		AERR("unregistering invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	private_handle_t *hnd = (private_handle_t *)handle;

	AERR_IF(hnd->lockState & private_handle_t::LOCK_STATE_READ_MASK, "[unregister] handle %p still locked (state=%08x)", hnd, hnd->lockState);

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		AERR("Can't unregister buffer 0x%p as it is a framebuffer", handle);
	}
	else if (hnd->pid == getpid()) // never unmap buffers that were not registered in this process
	{
		pthread_mutex_lock(&s_map_lock);

		if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
		{
#if GRALLOC_ARM_UMP_MODULE
			ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
			hnd->ump_mem_handle = (int)UMP_INVALID_MEMORY_HANDLE;
#else
			AERR("Can't unregister UMP buffer for handle 0x%p. Not supported", handle);
#endif
		}
		else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
		{
#if GRALLOC_ARM_DMA_BUF_MODULE
			void *base = (void *)hnd->base;
			size_t size = hnd->size;

			if (munmap(base, size) < 0)
			{
				AERR("Could not munmap base:0x%p size:%lu '%s'", base, (unsigned long)size, strerror(errno));
			}

#else
			AERR("Can't unregister DMA_BUF buffer for hnd %p. Not supported", hnd);
#endif

		}
		else
		{
			AERR("Unregistering unknown buffer is not supported. Flags = %d", hnd->flags);
		}

		hnd->base = 0;
		hnd->lockState  = 0;
		hnd->writeOwner = 0;

		pthread_mutex_unlock(&s_map_lock);
	}
	else
	{
		AERR("Trying to unregister buffer 0x%p from process %d that was not created in current process: %d", hnd, hnd->pid, getpid());
	}

	return 0;
}
Code Example #20
File: mali_pmm_state.c  Project: xbai043/zt280-kernel
void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm )
{
	int n;
	mali_pmm_core_mask pd, ad;
	_mali_osk_errcode_t err;
	volatile mali_pmm_core_mask *pregistered;

	MALI_DEBUG_ASSERT_POINTER(pmm);

	MALIPMM_DEBUG_PRINT( ("PMM: Cancelling power down\n") );

	pd = pmm->cores_pend_down;
	ad = pmm->cores_ack_down;
	/* Clear the pending cores so that they don't move to the off
	 * queue if they haven't already
	 */
	pmm->cores_pend_down = 0;
	pmm->cores_ack_down = 0;
	pregistered = &(pmm->cores_registered);

	/* Power up all the pending power down cores - just so
	 * we make sure the system is in a known state, as a
	 * pending core might have sent an acknowledged message
	 * which hasn't been read yet.
	 */
	for( n = 0; n < SIZEOF_CORES_LIST; n++ )
	{
		if( (cores_list[n] & pd) != 0 )
		{
			/* Can't hold the power lock when accessing the subsystem mutex via
			 * the core power call: terminating the driver requires the subsystem
			 * mutex and then the power lock to be held to unregister a core.
			 * This does mean that the following power up function could fail,
			 * as the core is unregistered before we tell it to power
			 * up, but that does not matter as we are terminating.
			 */
			MALI_PMM_UNLOCK(pmm);
			/* As we are cancelling - only move the cores back to the queue - 
			 * no reset needed
			 */
			err = mali_core_signal_power_up( cores_list[n], MALI_TRUE );
			MALI_PMM_LOCK(pmm);

			/* Update pending list with the current registered cores */
			pd &= (*pregistered);

			if( err != _MALI_OSK_ERR_OK )
			{
				MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_BUSY && 
										((cores_list[n] & ad) == 0))  ||
										(err == _MALI_OSK_ERR_FAULT &&
										(*pregistered & cores_list[n]) == 0) );
				/* If we didn't power up a core - it must be active and 
				 * hasn't actually tried to power down - this is expected
				 * for cores that haven't acknowledged
				 * Alternatively we are shutting down and the core has
				 * been unregistered
				 */
			}
		}
	}
	/* Only used in debug builds */
	MALI_IGNORE(ad);
}
Code Example #21
static void mali_scheduler_wq_schedule_gp(void *arg)
{
	MALI_IGNORE(arg);

	mali_gp_scheduler_schedule();
}
Code Example #22
static void set_num_cores(struct work_struct *work)
{
	int err = mali_perf_set_num_pp_cores(num_cores_enabled);
	MALI_DEBUG_ASSERT(0 == err);
	MALI_IGNORE(err);
}
Code Example #23
static int gralloc_register_buffer(gralloc_module_t const *module, buffer_handle_t handle)
{
	MALI_IGNORE(module);

	if (private_handle_t::validate(handle) < 0)
	{
		AERR("Registering invalid buffer 0x%p, returning error", handle);
		return -EINVAL;
	}

	// if this handle was created in this process, then we keep it as is.
	private_handle_t *hnd = (private_handle_t *)handle;

	int retval = -EINVAL;

	pthread_mutex_lock(&s_map_lock);

#if GRALLOC_ARM_UMP_MODULE

	if (!s_ump_is_open)
	{
		ump_result res = ump_open(); // MJOLL-4012: UMP implementation needs a ump_close() for each ump_open

		if (res != UMP_OK)
		{
			pthread_mutex_unlock(&s_map_lock);
			AERR("Failed to open UMP library with res=%d", res);
			return retval;
		}

		s_ump_is_open = 1;
	}

#endif

	hnd->pid = getpid();

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		AERR("Can't register buffer 0x%p as it is a framebuffer", handle);
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
	{
#if GRALLOC_ARM_UMP_MODULE
		hnd->ump_mem_handle = (int)ump_handle_create_from_secure_id(hnd->ump_id);

		if (UMP_INVALID_MEMORY_HANDLE != (ump_handle)hnd->ump_mem_handle)
		{
			hnd->base = ump_mapped_pointer_get((ump_handle)hnd->ump_mem_handle);

			if (0 != hnd->base)
			{
				hnd->lockState = private_handle_t::LOCK_STATE_MAPPED;
				hnd->writeOwner = 0;
				hnd->lockState = 0;

				pthread_mutex_unlock(&s_map_lock);
				return 0;
			}
			else
			{
				AERR("Failed to map UMP handle 0x%x", hnd->ump_mem_handle);
			}

			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}
		else
		{
			AERR("Failed to create UMP handle 0x%x", hnd->ump_mem_handle);
		}

#else
		AERR("Gralloc does not support UMP. Unable to register UMP memory for handle 0x%p", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		int ret;
		unsigned char *mappedAddress;
		size_t size = hnd->size;
		hw_module_t *pmodule = NULL;
		private_module_t *m = NULL;

		if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0)
		{
			m = reinterpret_cast<private_module_t *>(pmodule);
		}
		else
		{
			AERR("Could not get gralloc module for handle: 0x%p", hnd);
			retval = -errno;
			goto cleanup;
		}

		/* The test condition is m->ion_client <= 0 here, because:
		 * 1) the module structure is initialized to 0 if no initial value is applied, and
		 * 2) a second user process should get an ion fd greater than 0.
		 */
		if (m->ion_client <= 0)
		{
			/* a second user process must obtain a client handle first via ion_open before it can obtain the shared ion buffer */
			m->ion_client = ion_open();

			if (m->ion_client < 0)
			{
				AERR("Could not open ion device for handle: 0x%p", hnd);
				retval = -errno;
				goto cleanup;
			}
		}

		mappedAddress = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

		if (MAP_FAILED == mappedAddress)
		{
			AERR("mmap( share_fd:%d ) failed with %s",  hnd->share_fd, strerror(errno));
			retval = -errno;
			goto cleanup;
		}

		hnd->base = mappedAddress + hnd->offset;
		pthread_mutex_unlock(&s_map_lock);
		return 0;
#endif
	}
	else
	{
		AERR("registering non-UMP buffer not supported. flags = %d", hnd->flags);
	}

cleanup:
	pthread_mutex_unlock(&s_map_lock);
	return retval;
}
Code Example #24
MALI_STATIC mali_err_code allocate_and_setup_pp_job(struct mali_projob* projob, mali_mem_pool* pool, mali_frame_builder* frame_builder, mali_internal_frame* frame)
{
	void* outbuf;
	mali_tilelist_cmd* tilebuf;
	mali_addr out_addr;
	mali_addr tile_addr;
	void* new_jobarray;
	mali_pp_job_handle new_job;
	u32 num_tilebuf_commands;

	MALI_DEBUG_ASSERT_POINTER(projob);
	MALI_DEBUG_ASSERT_POINTER(pool);
	MALI_DEBUG_ASSERT_POINTER(frame_builder);

	/* grow PP job array size by one */
	new_jobarray = _mali_sys_realloc(projob->projob_pp_job_array,
	                  sizeof(mali_pp_job_handle) * (projob->projob_pp_job_array_size + 1));
	if(new_jobarray == NULL) return MALI_ERR_OUT_OF_MEMORY;
	/* use new array regardless of future errors, but don't grow the size until we use it*/
	projob->projob_pp_job_array = new_jobarray;

	/* allocate new empty job. Will mostly be set up on flush!
	 * We allocate this now to avoid memfail issues on flush! */
	new_job = _mali_pp_job_new(frame_builder->base_ctx, 1); /* split count 1 */ 
	if(new_job == NULL) return MALI_ERR_OUT_OF_MEMORY;
	
	/* we need to set up all registers for this job. The WBx registers and the 
	 * tilelist pointers in particular. Both memory blocks must be allocated right here */

	/* allocate a new output buffer. This is allocated on the fbuilder framepool. 
	 * Need space for PROJOB_DRAWCALL_LIMIT drawcalls * vec4 * fp16 bytes outputs.
	 * The space also need to be a full tile worth of outputs, so using MALI_ALIGN
	 * to expand the limit to the next MALI200_TILE_SIZE boundary. */
	outbuf = _mali_mem_pool_alloc(pool,
	             MALI_ALIGN(PROJOB_DRAWCALL_LIMIT, MALI200_TILE_SIZE*MALI200_TILE_SIZE)
				 * 4 * sizeof(u16), &out_addr);	
	if(outbuf == NULL) 
	{
		_mali_pp_job_free(new_job);
		return MALI_ERR_OUT_OF_MEMORY;
	}

	/* allocate new tilelist buffer. This is allocated on the fbuilder framepool. 
	 * Need space for PROJOB_DRAWCALL_LIMIT * 2 commands of 8 bytes each, plus initialization
	 * commands and an end command */
	num_tilebuf_commands = PROJOB_DRAWCALL_LIMIT*2 + 2; /*+2 for "begin tile" and "end list" */
	tilebuf = _mali_mem_pool_alloc(pool,
	             num_tilebuf_commands*sizeof(mali_tilelist_cmd), &tile_addr);	
	if(tilebuf == NULL) 
	{
		_mali_pp_job_free(new_job);
		return MALI_ERR_OUT_OF_MEMORY;
	}

	/* setup registers */
	_m200_frame_reg_write_common(new_job, M200_FRAME_REG_REND_LIST_ADDR, tile_addr);
	_m200_frame_reg_write_common(new_job, M200_FRAME_REG_REND_RSW_BASE, _mali_mem_mali_addr_get( frame_builder->dummy_rsw_mem, 0));
	_m200_frame_reg_write_common(new_job, M200_FRAME_REG_FEATURE_ENABLE, M200_FEATURE_FP_TILEBUF_ENABLE );
	/* Not set: Stack addr, offsets. Currently all projobs don't use stack, so it's not needed. */
	_m200_wb_reg_write( new_job, 0, M200_WBx_REG_SOURCE_SELECT, M200_WBx_SOURCE_ARGB );
	_m200_wb_reg_write( new_job, 0, M200_WBx_REG_TARGET_ADDR, out_addr );
	_m200_wb_reg_write( new_job, 0, M200_WBx_REG_TARGET_PIXEL_FORMAT, MALI_PIXEL_FORMAT_ARGB_FP16 );
	_m200_wb_reg_write( new_job, 0, M200_WBx_REG_TARGET_LAYOUT, MALI_PIXEL_LAYOUT_LINEAR);
	_m200_wb_reg_write( new_job, 0, M200_WBx_REG_TARGET_SCANLINE_LENGTH, MALI_ALIGN(PROJOB_DRAWCALL_WIDTH, MALI200_TILE_SIZE)*sizeof(u16)*4 / 8);
	_m200_wb_reg_write( new_job, 0, M200_WBx_REG_TARGET_FLAGS, M200_WBx_TARGET_FLAGS_SWAP_RED_BLUE_ENABLE);

#if MALI_TIMELINE_PROFILING_ENABLED
	_mali_pp_job_set_identity(new_job, frame_builder->identifier | MALI_FRAME_BUILDER_TYPE_PROJOB_BIT, frame->flush_id);
#else 
	MALI_IGNORE(frame);
#endif

	/* and set this new job as the current frame */
	projob->projob_pp_job_array[projob->projob_pp_job_array_size] = new_job;
	projob->projob_pp_job_array_size++;

	projob->mapped_pp_tilelist = tilebuf;
	projob->mapped_pp_tilelist_bytesize = num_tilebuf_commands*sizeof(mali_tilelist_cmd);
	projob->mapped_pp_tilelist_byteoffset = 1*sizeof(mali_tilelist_cmd); /* adding 1 command: "begin tile" */
	projob->pp_tilelist_addr = tile_addr;
	projob->output_mali_addr = out_addr;

	projob->num_pp_drawcalls_added_to_the_current_pp_job = 0;

	/* fill in begin tile / end list commands in the tile buffer */
	MALI_PL_CMD_BEGIN_NEW_TILE( tilebuf[0], 0, 0 );
	MALI_PL_CMD_END_OF_LIST(tilebuf[1]);

	/* and set up the new job to trigger the sync handle when done */
	_mali_pp_job_add_to_sync_handle(projob->sync_handle, new_job);

	return MALI_ERR_NO_ERROR;
}
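
The tile list sizing ties directly into _mali200_draw_pp_job_quad from Code Example #18: each drawcall retains two commands (SET_BASE plus PRIMITIVE) because its END_OF_LIST is overwritten by the next call. A sizing check, using an assumed PROJOB_DRAWCALL_LIMIT of 64 purely to make the arithmetic concrete:

/* Assumed PROJOB_DRAWCALL_LIMIT == 64 (illustration only):
 *   64 drawcalls * 2 retained commands + 2 = 130 commands,
 * where the "+2" covers the initial BEGIN_NEW_TILE and the final
 * END_OF_LIST, giving 130 * sizeof(mali_tilelist_cmd) = 1040 bytes. */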
Code Example #25
void _mali_osk_atomic_term(_mali_osk_atomic_t *atom)
{
	MALI_IGNORE(atom);
}
Code Example #26
/**
 * Check if fence has been signaled.
 *
 * @param system Timeline system.
 * @param fence Timeline fence.
 * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
 */
static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
{
	int i;
	u32 tid = _mali_osk_get_tid();
	mali_bool ret = MALI_TRUE;
#if defined(CONFIG_SYNC)
	struct sync_fence *sync_fence = NULL;
#endif

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	mali_spinlock_reentrant_wait(system->spinlock, tid);

	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
		struct mali_timeline *timeline;
		mali_timeline_point   point;

		point = fence->points[i];

		if (likely(MALI_TIMELINE_NO_POINT == point)) {
			/* Fence contains no point on this timeline. */
			continue;
		}

		timeline = system->timelines[i];
		MALI_DEBUG_ASSERT_POINTER(timeline);

		if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
			MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
		}

		if (!mali_timeline_is_point_released(timeline, point)) {
			ret = MALI_FALSE;
			goto exit;
		}
	}

#if defined(CONFIG_SYNC)
	if (-1 != fence->sync_fd) {
		sync_fence = sync_fence_fdget(fence->sync_fd);
		if (likely(NULL != sync_fence)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
			if (0 == sync_fence->status) {
#else
			if (0 == atomic_read(&sync_fence->status)) {
#endif
				ret = MALI_FALSE;
			}
		} else {
			MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
		}
	}
#endif /* defined(CONFIG_SYNC) */

exit:
	mali_spinlock_reentrant_signal(system->spinlock, tid);

#if defined(CONFIG_SYNC)
	if (NULL != sync_fence) {
		sync_fence_put(sync_fence);
	}
#endif /* defined(CONFIG_SYNC) */

	return ret;
}

mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
{
	struct mali_timeline_fence_wait_tracker *wait;
	mali_timeline_point point;
	mali_bool ret;

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));

	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
		return mali_timeline_fence_wait_check_status(system, fence);
	}

	wait = mali_timeline_fence_wait_tracker_alloc();
	if (unlikely(NULL == wait)) {
		MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
		return MALI_FALSE;
	}

	wait->activated = MALI_FALSE;
	wait->system = system;

	/* Initialize refcount to two references.  The reference first will be released by this
	 * function after the wait is over.  The second reference will be released when the tracker
	 * is activated. */
	_mali_osk_atomic_init(&wait->refcount, 2);

	/* Add tracker to timeline system, but not to a timeline. */
	mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
	point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
	MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
	MALI_IGNORE(point);

	/* Wait for the tracker to be activated or time out. */
	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
		_mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
	} else {
		_mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
	}

	ret = wait->activated;

	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}

	return ret;
}

void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
{
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;

	MALI_DEBUG_ASSERT_POINTER(wait);
	MALI_DEBUG_ASSERT_POINTER(wait->system);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));

	MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
	wait->activated = MALI_TRUE;

	_mali_osk_wait_queue_wake_up(wait->system->wait_queue);

	/* Nothing can wait on this tracker, so nothing to schedule after release. */
	schedule_mask = mali_timeline_tracker_release(&wait->tracker);
	MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
	MALI_IGNORE(schedule_mask);

	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}
}
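
The refcount initialized to 2 in mali_timeline_fence_wait implements a two-owner hand-off: the waiter and the activator each drop one reference, and whichever side finishes last frees the tracker, so neither can touch freed memory regardless of ordering. A self-contained C11 sketch of the idiom; this is an illustration, not driver code:

#include <stdatomic.h>
#include <stdlib.h>

struct wait_tracker {
	atomic_int refcount;
	/* ... payload: fence, activation flag, wait queue, ... */
};

static struct wait_tracker *wait_tracker_new(void)
{
	struct wait_tracker *t = calloc(1, sizeof(*t));
	if (NULL != t)
		atomic_init(&t->refcount, 2); /* one ref for the waiter, one for the activator */
	return t;
}

static void wait_tracker_put(struct wait_tracker *t)
{
	/* fetch_sub returns the previous value; whoever sees 1 held the last reference */
	if (1 == atomic_fetch_sub(&t->refcount, 1))
		free(t);
}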