_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void )
{
	_mali_osk_wait_queue_t* ret = NULL;

	ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);

	if (NULL == ret) {
		return ret;
	}

	init_waitqueue_head(&ret->wait_queue);
	MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));

	return ret;
}
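/*
 * The matching teardown and wake-up helpers are used throughout the
 * snippets below (mali_gp_scheduler_terminate, the PP scheduler, the
 * soft-job system) but are not shown. A minimal sketch, assuming
 * _mali_osk_wait_queue_t simply wraps a wait_queue_head_t as implied by
 * _mali_osk_wait_queue_init() above; not verbatim driver code.
 */
void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue)
{
	MALI_DEBUG_ASSERT_POINTER(queue);
	kfree(queue); /* allocated with kmalloc() in _mali_osk_wait_queue_init() */
}

void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue)
{
	MALI_DEBUG_ASSERT_POINTER(queue);
	wake_up(&queue->wait_queue); /* wake all non-exclusive waiters */
}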
void mali_gp_scheduler_terminate(void)
{
	MALI_DEBUG_ASSERT(   MALI_GP_SLOT_STATE_IDLE     == slot.state
	                     || MALI_GP_SLOT_STATE_DISABLED == slot.state);
	MALI_DEBUG_ASSERT_POINTER(slot.group);
	mali_group_delete(slot.group);

	_mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);

#if defined(MALI_UPPER_HALF_SCHEDULING)
	_mali_osk_spinlock_irq_term(gp_scheduler_lock);
#else
	_mali_osk_spinlock_term(gp_scheduler_lock);
#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
}
void mali_ump_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
{
	ump_dd_handle ump_mem;

	ump_mem = descriptor->ump_mem.handle;

	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);

	mali_mem_mali_map_free(descriptor);

	ump_dd_reference_release(ump_mem);
}
_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
{
	u32 size = descriptor->size;
	struct mali_session_data *session = descriptor->session;

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	/* Reserve the Mali virtual address range in this session's page tables */

	if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
		size += MALI_MMU_PAGE_SIZE;
	}

	return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_mapping.addr, size);
}
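/*
 * Hypothetical usage sketch (example_map is an assumed name, not driver
 * code): mali_mem_mali_map_prepare() only reserves the Mali virtual
 * range, including the optional guard page, so a failed prepare needs
 * no undo, while a successful one must later be released by
 * mali_mem_mali_map_free() over the same guard-page-inclusive size.
 */
static _mali_osk_errcode_t example_map(mali_mem_allocation *descriptor)
{
	_mali_osk_errcode_t err = mali_mem_mali_map_prepare(descriptor);
	if (_MALI_OSK_ERR_OK != err) {
		return err; /* nothing reserved, nothing to undo */
	}

	/* ... commit physical pages into the reserved range here ... */

	return _MALI_OSK_ERR_OK;
}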
static void dump_lock_tracking_list(void)
{
	struct _mali_osk_lock_debug_s *l;
	u32 n = 1;

	/* print list for debugging purposes */
	l = lock_lookup_list;

	while (NULL != l) {
		printk(" [lock: %p, tid_owner: %d, order: %d] ->", l, l->owner, l->order);
		l = l->next;
		MALI_DEBUG_ASSERT(n++ < 100);
	}
	printk(" NULL\n");
}
/*
 * Function _mali_ukk_mem_unbind -- unbind external memory from a GPU address.
 * This function unbinds the backend memory and frees the allocation;
 * there is no ref_count for this type of memory.
 */
_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args)
{
	struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;
	u32 mali_addr = args->vaddr;
	MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! \n", args->vaddr));

	/* find the allocation by vaddr */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
	if (likely(mali_vma_node)) {
		MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
		mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
	} else {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_unbind: allocation not found for vaddr=0x%x!\n", mali_addr));
		return _MALI_OSK_ERR_INVALID_ARGS;
	}

	if (mali_allocation)
		/* no ref_count for this type of memory; unbind the backend and free the allocation */
		mali_allocation_unref(&mali_allocation);

	return _MALI_OSK_ERR_OK;
}
struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
{
	struct mali_gp_core *core = NULL;

	MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
	MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));

	core = _mali_osk_malloc(sizeof(struct mali_gp_core));
	if (NULL != core) {
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) {
			_mali_osk_errcode_t ret;

			ret = mali_gp_reset(core);

			if (_MALI_OSK_ERR_OK == ret) {
				ret = mali_group_add_gp_core(group, core);
				if (_MALI_OSK_ERR_OK == ret) {
					/* Setup IRQ handlers (which will do IRQ probing if needed) */
					core->irq = _mali_osk_irq_init(resource->irq,
								       mali_group_upper_half_gp,
								       group,
								       mali_gp_irq_probe_trigger,
								       mali_gp_irq_probe_ack,
								       core,
								       resource->description);
					if (NULL != core->irq) {
						MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
						mali_global_gp_core = core;

						return core;
					} else {
						MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
					}
					mali_group_remove_gp_core(group);
				} else {
					MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
				}
			}
			mali_hw_core_delete(&core->hw_core);
		}

		_mali_osk_free(core);
	} else {
		MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
	}

	return NULL;
}
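/*
 * A sketch of the matching teardown, assuming it unwinds the
 * mali_gp_create() error ladder above in reverse order (IRQ handler,
 * hardware core, global pointer, allocation); not verbatim driver code.
 */
void mali_gp_delete(struct mali_gp_core *core)
{
	MALI_DEBUG_ASSERT_POINTER(core);

	_mali_osk_irq_term(core->irq);
	mali_hw_core_delete(&core->hw_core);
	mali_global_gp_core = NULL;
	_mali_osk_free(core);
}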
static _mali_osk_errcode_t mali_pmu_send_command(struct mali_pmu_core *pmu, const u32 command, const u32 mask)
{
	u32 stat;

	if (0 == mask) return _MALI_OSK_ERR_OK;

	stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
	stat &= pmu->registered_cores_mask;

	switch (command)
	{
		case PMU_REG_ADDR_MGMT_POWER_DOWN:
			if (mask == stat) return _MALI_OSK_ERR_OK;
			break;
		case PMU_REG_ADDR_MGMT_POWER_UP:
			if (0 == (stat & mask)) return _MALI_OSK_ERR_OK;
			break;
		default:
			MALI_DEBUG_ASSERT(0);
			break;
	}

	mali_pmu_send_command_internal(pmu, command, mask);

#if defined(DEBUG)
	{
		/* Get power status of cores */
		stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
		stat &= pmu->registered_cores_mask;

		switch (command)
		{
			case PMU_REG_ADDR_MGMT_POWER_DOWN:
				MALI_DEBUG_ASSERT(mask == (stat & mask));
				MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
				MALI_DEBUG_ASSERT((pmu->registered_cores_mask & ~pmu->active_cores_mask) == stat);
				break;
			case PMU_REG_ADDR_MGMT_POWER_UP:
				MALI_DEBUG_ASSERT(0 == (stat & mask));
				MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
				break;
			default:
				MALI_DEBUG_ASSERT(0);
				break;
		}
	}
#endif /* defined(DEBUG) */

	return _MALI_OSK_ERR_OK;
}
void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
{
	mali_bool stall_success;

	MALI_DEBUG_ASSERT_POINTER(mmu);
	MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));

	stall_success = mali_mmu_enable_stall(mmu);

	/* This function is only called when the core is idle, so the stall cannot fail. */
	MALI_DEBUG_ASSERT(stall_success);
	MALI_IGNORE(stall_success);

	mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
	mali_mmu_disable_stall(mmu);
}
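/*
 * A simplified sketch of the stall handshake relied on above: issue the
 * stall command, then poll the status register until the MMU reports
 * the stall as active. Register and bit names follow the driver's
 * naming; the poll count here is illustrative and the helper name is
 * assumed.
 */
MALI_STATIC_INLINE mali_bool example_mmu_enable_stall(struct mali_mmu_core *mmu)
{
	int i;

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND,
				    MALI_MMU_COMMAND_ENABLE_STALL);

	for (i = 0; i < 100; ++i) {
		u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
		if (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) {
			return MALI_TRUE; /* MMU acknowledged the stall */
		}
	}

	return MALI_FALSE; /* MMU stayed busy; caller must handle the failure */
}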
/**
 * Checks whether the criteria are met for removing a physical core from the virtual group
 */
MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
{
	MALI_ASSERT_PP_SCHEDULER_LOCKED();
	MALI_DEBUG_ASSERT(NULL != virtual_group);
	MALI_ASSERT_GROUP_LOCKED(virtual_group);
	/*
	 * The criteria for taking a physical group out of the virtual group are:
	 * - The virtual group is idle
	 * - There are currently no physical groups (neither idle nor working)
	 * - There are physical jobs to be scheduled (without a barrier)
	 */
	return (!virtual_group_working) &&
	       _mali_osk_list_empty(&group_list_idle) &&
	       _mali_osk_list_empty(&group_list_working) &&
	       (NULL != mali_pp_scheduler_get_physical_job());
}
void mali_mem_mali_map_free(mali_mem_allocation *descriptor)
{
	u32 size = descriptor->size;
	struct mali_session_data *session = descriptor->session;

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
		size += MALI_MMU_PAGE_SIZE;
	}

	/* Unmap and flush L2; use the guard-page-inclusive size computed above */
	mali_mmu_pagedir_unmap(session->page_directory, descriptor->mali_mapping.addr, size);

	mali_executor_zap_all_active(session);
}
void mali_pmu_power_down_all(struct mali_pmu_core *pmu)
{
	u32 stat;

	MALI_DEBUG_ASSERT_POINTER(pmu);
	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);

	mali_pm_exec_lock();

	/* Now simply power down the domains which are marked as powered up */
	stat = mali_hw_core_register_read(&pmu->hw_core,
					  PMU_REG_ADDR_MGMT_STATUS);
	mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask);

	mali_pm_exec_unlock();
}
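/*
 * In PMU_REG_ADDR_MGMT_STATUS a set bit means "domain powered down", so
 * (~stat) & registered_cores_mask above selects exactly the powered-up
 * domains. A hypothetical helper (name assumed) making that explicit:
 */
MALI_STATIC_INLINE u32 example_pmu_powered_up_mask(struct mali_pmu_core *pmu)
{
	u32 stat = mali_hw_core_register_read(&pmu->hw_core,
					      PMU_REG_ADDR_MGMT_STATUS);

	return (~stat) & pmu->registered_cores_mask;
}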
static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
{
	struct _mali_osk_lock_debug_s *curr;
	struct _mali_osk_lock_debug_s *prev = NULL;
	unsigned long local_lock_flag;
	u32 len;
	u32 n = 0;

	spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
	len = tracking_list_length();
	curr = lock_lookup_list;

	if (NULL == curr) {
		printk(KERN_ERR "Error: Lock tracking list was empty on call to remove_lock_from_log\n");
		dump_lock_tracking_list();
	}

	MALI_DEBUG_ASSERT_POINTER(curr);

	while (lock != curr) {
		prev = curr;

		MALI_DEBUG_ASSERT_POINTER(curr);
		curr = curr->next;
		MALI_DEBUG_ASSERT(n++ < 100);
	}

	if (NULL == prev) {
		lock_lookup_list = curr->next;
	} else {
		MALI_DEBUG_ASSERT_POINTER(curr);
		MALI_DEBUG_ASSERT_POINTER(prev);
		prev->next = curr->next;
	}

	lock->next = NULL;

	if (len - 1 != tracking_list_length()) {
		printk(KERN_ERR "************ lock: %p\n", lock);
		printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
		dump_lock_tracking_list();
		MALI_DEBUG_ASSERT_POINTER(NULL);
	}

	spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
}
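/*
 * A sketch of the tracking_list_length() helper assumed above: it walks
 * the same singly linked lock_lookup_list and must be called with
 * lock_tracking_lock held. Not verbatim driver code.
 */
static u32 tracking_list_length(void)
{
	struct _mali_osk_lock_debug_s *l = lock_lookup_list;
	u32 n = 0;

	while (NULL != l) {
		n++;
		l = l->next;
	}

	return n;
}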
void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success)
{
	u32 i;
	mali_bool job_is_done;

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) part %u/%u completed (%s)\n", mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job), success ? "success" : "failure"));

	mali_pp_scheduler_lock();

	/* Find slot which was running this job */
	for (i = 0; i < num_slots; i++)
	{
		if (slots[i].group == group)
		{
			MALI_DEBUG_ASSERT(MALI_PP_SLOT_STATE_WORKING == slots[i].state);
			slots[i].state = MALI_PP_SLOT_STATE_IDLE;
			slots[i].session = NULL;
			num_slots_idle++;
			mali_pp_job_mark_sub_job_completed(job, success);
		}
	}

	/* If paused, then this was the last job, so wake up sleeping workers */
	if (pause_count > 0)
	{
		/* Wake up sleeping workers. Their wake-up condition is that
		 * num_slots == num_slots_idle, so unless we are done working, no
		 * threads will actually be woken up.
		 */
		_mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);
	}
	else
	{
		mali_pp_scheduler_schedule();
	}

	job_is_done = mali_pp_job_is_complete(job);

	mali_pp_scheduler_unlock();

	if (job_is_done)
	{
		/* Send notification back to user space */
		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for job %u (0x%08X)\n", mali_pp_job_get_id(job), job));
		mali_pp_scheduler_return_job_to_user(job);
	}
}
mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job)
{
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;

	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_POINTER(job->system);
	MALI_DEBUG_ASSERT_POINTER(job->system->session);

	MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline activation for soft job %u (0x%08X).\n", job->id, job));

	mali_soft_job_system_lock(job->system);

	if (unlikely(job->system->session->is_aborting)) {
		MALI_DEBUG_PRINT(3, ("Mali Soft Job: Soft job %u (0x%08X) activated while session is aborting.\n", job->id, job));

		mali_soft_job_system_unlock(job->system);

		/* Since we are in shutdown, we can ignore the scheduling bitmask. */
		mali_timeline_tracker_release(&job->tracker);
		mali_soft_job_destroy(job);
		return schedule_mask;
	}

	/* Send activated notification. */
	mali_soft_job_send_activated_notification(job);

	/* Wake up sleeping signaler. */
	job->activated = MALI_TRUE;

	/* If the job type is self-signaled, release the tracker, move the soft job to the free list, and schedule at once */
	if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) {
		MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);

		job->state = MALI_SOFT_JOB_STATE_SIGNALED;
		mali_soft_job_system_unlock(job->system);

		schedule_mask |= mali_timeline_tracker_release(&job->tracker);

		mali_soft_job_destroy(job);
	} else {
		_mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);

		mali_soft_job_system_unlock(job->system);
	}

	return schedule_mask;
}
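/*
 * A sketch of the waiting side implied by "wake up sleeping signaler"
 * above: the signaler sleeps on the tracker system's wait queue until
 * job->activated is set. The condition-callback shape follows the
 * _mali_osk_wait_queue_wait_event() API; the example_* names are
 * assumed.
 */
static mali_bool example_soft_job_is_activated(void *data)
{
	struct mali_soft_job *job = (struct mali_soft_job *)data;

	return job->activated;
}

static void example_wait_for_activation(struct mali_soft_job *job)
{
	_mali_osk_wait_queue_wait_event(job->tracker.system->wait_queue,
					example_soft_job_is_activated, job);
}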
_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
{
	
#if USING_MALI_PMM
	u32 stat;
	u32 timeout;
	u32 cores_pmu;
	
	MALI_DEBUG_ASSERT_POINTER(pmu_info);
	MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
	MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power down (0x%x)\n", cores) );

	
	cores_pmu = pmu_translate_cores_to_pmu(cores);
	pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_POWER_DOWN, cores_pmu );

	/* Wait for cores to be powered down */
	timeout = 10; /* 10ms */ 
	do
	{
		/* Get status of sleeping cores */
		stat = pmu_reg_read( pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS );
		stat &= cores_pmu;
		if( stat == cores_pmu ) break; /* All cores we wanted are now asleep */
		_mali_osk_time_ubusydelay(1000); /* 1ms */
		timeout--;
	} while( timeout > 0 );

	if( timeout == 0 ) MALI_ERROR(_MALI_OSK_ERR_TIMEOUT);
	
	/* close mali axi/apb clock */
	if( mali_clk_flag == 1 )
	{
		mali_clk_flag = 0;
		clk_disable(h_mali_clk);
		clk_disable(h_ahb_mali);
	}
	
	MALI_SUCCESS;

#else
	/* Nothing to do when not using PMM */
	MALI_SUCCESS;
#endif
}
void _mali_pmm_pmu_power_up( platform_pmu_t *pmu, u32 cores, mali_bool immediate_only )
{
	u32 cores_pmu;
	u32 stat;
	u32 timeout;

	MALI_DEBUG_ASSERT_POINTER( pmu );
	MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
	MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power up (0x%x)\n", cores) );

	/* use interrupts */
	pmu_reg_write( pmu, (u32)PMU_REG_ADDR_MGMT_INT_MASK, 1 );
	pmu_reg_write( pmu, (u32)PMU_REG_ADDR_MGMT_INT_CLEAR, 0 );

	cores_pmu = pmu_translate_cores_to_pmu(cores);
	pmu_reg_write( pmu, (u32)PMU_REG_ADDR_MGMT_POWER_UP, cores_pmu );

	/* Wait for the cores to power up (mirroring the wait loop in
	 * mali_platform_powerdown above), unless the caller asked for an
	 * immediate, non-blocking power up */
	if( !immediate_only )
	{
		timeout = 10; /* 10ms */
		do
		{
			/* Get status of sleeping cores */
			stat = pmu_reg_read( pmu, (u32)PMU_REG_ADDR_MGMT_STATUS );
			stat &= cores_pmu;
			if( stat == 0 ) break; /* All cores we wanted are now awake */
			_mali_osk_time_ubusydelay(1000); /* 1ms */
			timeout--;
		} while( timeout > 0 );
	}
}
void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
{
	u32 startcmd = 0;
	u32 *frame_registers = mali_gp_job_get_frame_registers(job);

	MALI_DEBUG_ASSERT_POINTER(core);

	core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job);
	core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job);

	if (mali_gp_job_has_vs_job(job))
	{
		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
	}

	if (mali_gp_job_has_plbu_job(job))
	{
		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
	}

	MALI_DEBUG_ASSERT(0 != startcmd);

	mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);

	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
	{
		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
	}
	if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
	{
		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
	}

	MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));

	/* Barrier to make sure the previous register write is finished */
	_mali_osk_write_mem_barrier();

	/* This is the command that starts the core. */
	mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);

	/* Barrier to make sure the previous register write is finished */
	_mali_osk_write_mem_barrier();
}
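/*
 * A minimal sketch of the relaxed register write used above: a raw
 * store with no implicit barrier, which is why explicit
 * _mali_osk_write_mem_barrier() calls bracket the start command. It
 * assumes the hw core keeps an ioremap'ed base in mapped_registers;
 * the helper name is assumed, not the driver's.
 */
MALI_STATIC_INLINE void example_register_write_relaxed(struct mali_hw_core *core,
						       u32 relative_address, u32 new_val)
{
	__raw_writel(cpu_to_le32(new_val),
		     core->mapped_registers + relative_address);
}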
void mali_mem_external_release(mali_mem_backend *mem_backend)
{
	mali_mem_allocation *alloc;
	struct mali_session_data *session;
	MALI_DEBUG_ASSERT_POINTER(mem_backend);
	alloc = mem_backend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);
	MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == mem_backend->type);

	session = alloc->session;
	MALI_DEBUG_ASSERT_POINTER(session);
	mali_session_memory_lock(session);
	mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
			       alloc->flags);
	session->mali_mem_array[mem_backend->type] -= mem_backend->size;
	mali_session_memory_unlock(session);
}
_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu)
{
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_POINTER(pmu);
	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);

	mali_pmu_lock(pmu);

	/* Setup the desired defaults in case we were called before mali_pmu_reset() */
	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);

	err = mali_pmu_power_up_internal(pmu, pmu->active_cores_mask);

	mali_pmu_unlock(pmu);
	return err;
}
mali_pmm_core_mask pmm_cores_to_power_up( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
{
	mali_pmm_core_mask cores_subset;
	MALI_DEBUG_ASSERT_POINTER(pmm);
	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );

	/* Check that cores aren't pending power down when asked for power up */
	MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );

	cores_subset = (~(pmm->cores_powered) & cores);
	if( cores_subset != 0 )
	{
		/* There are some cores that need powering up */
		pmm->cores_pend_up = cores_subset;
	}

	return cores_subset;
}
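/*
 * A mirrored sketch of the power-down counterpart, assuming it follows
 * the same pattern with the complemented test: cores that are currently
 * powered and requested to go down become pending. Not verbatim driver
 * code; the example_ name is assumed.
 */
mali_pmm_core_mask example_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
{
	mali_pmm_core_mask cores_subset;
	MALI_DEBUG_ASSERT_POINTER(pmm);
	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );

	cores_subset = (pmm->cores_powered & cores);
	if( cores_subset != 0 )
	{
		/* There are some cores that need powering down */
		pmm->cores_pend_down = cores_subset;
	}

	return cores_subset;
}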
void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter)
{
	MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);

	if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src1[sub_job]) {
		/* increment count since existing counter was disabled */
		_mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
	}

	if (MALI_HW_CORE_NO_COUNTER == counter) {
		/* decrement count since new counter is disabled */
		_mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
	}

	/* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER increments and then decrements, so the net change is zero */

	pp_counter_per_sub_job_src1[sub_job] = counter;
}
_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args)
{
	struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	u32 vaddr = args->gpu_vaddr;
	mali_mem_allocation *mali_alloc = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	/* find mali allocation structure by vaddr */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0);
	if (NULL == mali_vma_node) {
		/* container_of() on a NULL node would yield a garbage non-NULL
		 * pointer in release builds, so fail the request instead */
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: allocation not found for vaddr=0x%x\n", vaddr));
		return _MALI_OSK_ERR_INVALID_ARGS;
	}

	mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);

	/* check ref_count */
	mali_allocation_unref(&mali_alloc);

	return _MALI_OSK_ERR_OK;
}
void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker)
{
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;

	MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker);
	MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker->flag);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for sync fence tracker\n"));

	/* Signal flag and release reference. */
	mali_sync_flag_signal(sync_fence_tracker->flag, 0);
	mali_sync_flag_put(sync_fence_tracker->flag);

	/* Nothing can wait on this tracker, so nothing to schedule after release. */
	schedule_mask = mali_timeline_tracker_release(&sync_fence_tracker->tracker);
	MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);

	_mali_osk_free(sync_fence_tracker);
}
/**
 * Returns a physical job if a physical job is ready to run (no barrier present)
 */
MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_physical_job(void)
{
	MALI_ASSERT_PP_SCHEDULER_LOCKED();

	if (!_mali_osk_list_empty(&job_queue))
	{
		struct mali_pp_job *job;

		MALI_DEBUG_ASSERT(job_queue_depth > 0);
		job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_pp_job, list);

		if (!mali_pp_job_has_active_barrier(job))
		{
			return job;
		}
	}

	return NULL;
}
mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address)
{
	struct mali_vma_node *mali_vma_node = NULL;
	mali_mem_backend *mem_bkend = NULL;
	mali_mem_allocation *mali_alloc = NULL;
	MALI_DEBUG_ASSERT_POINTER(session);
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0);
	if (NULL == mali_vma_node)  {
		MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search:vma node was NULL\n"));
		return NULL;
	}
	mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
	/* Get backend memory & Map on CPU */
	mutex_lock(&mali_idr_mutex);
	mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
	mutex_unlock(&mali_idr_mutex);
	MALI_DEBUG_ASSERT(NULL != mem_bkend);
	return mem_bkend;
}
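/*
 * A sketch of the insertion side assumed by the lookup above: backends
 * are registered in mali_backend_idr under mali_idr_mutex and the
 * returned id becomes the allocation's backend_handle. The idr_alloc()
 * bounds here are illustrative and the helper name is assumed.
 */
static int example_backend_struct_insert(mali_mem_allocation *mali_alloc,
					 mali_mem_backend *mem_bkend)
{
	int id;

	mutex_lock(&mali_idr_mutex);
	id = idr_alloc(&mali_backend_idr, mem_bkend, 1, INT_MAX, GFP_KERNEL);
	mutex_unlock(&mali_idr_mutex);

	if (id < 0)
		return id; /* -ENOMEM or -ENOSPC from idr_alloc() */

	mali_alloc->backend_handle = id;
	return 0;
}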
static void do_scaling(struct work_struct *work)
{
	mali_dvfs_threshold_table *pdvfs = pmali_plat->dvfs_table;
	int err = mali_perf_set_num_pp_cores(num_cores_enabled);
	scalingdbg(1, "set pp cores to %d\n", num_cores_enabled);
	MALI_DEBUG_ASSERT(0 == err);
	MALI_IGNORE(err);
	scalingdbg(1, "pdvfs[%d].freq_index=%d, pdvfs[%d].freq_index=%d\n",
			currentStep, pdvfs[currentStep].freq_index,
			lastStep, pdvfs[lastStep].freq_index);
	mali_clk_exected();
#ifdef CONFIG_MALI400_PROFILING
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
			MALI_PROFILING_EVENT_CHANNEL_GPU |
			MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
			get_current_frequency(),
			0,	0,	0,	0);
#endif
}
void malipmm_kernel_subsystem_terminate( mali_kernel_subsystem_identifier id )
{
	/* Check this is the right system */
	MALI_DEBUG_ASSERT( id == mali_subsystem_pmm_id );
	MALI_DEBUG_ASSERT_POINTER(pmm_state);

	if( pmm_state )
	{
#if PMM_OS_TEST
		power_test_end();
#endif
		/* Get the lock so we can shutdown */
		MALI_PMM_LOCK(pmm_state);
#if MALI_STATE_TRACKING
		pmm_state->mali_pmm_lock_acquired = 1;
#endif /* MALI_STATE_TRACKING */
		pmm_state->status = MALI_PMM_STATUS_OFF;
#if MALI_STATE_TRACKING
		pmm_state->mali_pmm_lock_acquired = 0;
#endif /* MALI_STATE_TRACKING */
		MALI_PMM_UNLOCK(pmm_state);
		_mali_osk_pmm_ospmm_cleanup();
		pmm_policy_term(pmm_state);
		_mali_osk_irq_term( pmm_state->irq );
		_mali_osk_notification_queue_term( pmm_state->queue );
		_mali_osk_notification_queue_term( pmm_state->iqueue );
		if (pmm_state->cores_registered) malipmm_powerdown(pmm_state->cores_registered, MALI_POWER_MODE_LIGHT_SLEEP);
#if USING_MALI_PMU
		if( pmm_state->pmu_initialized )
		{
			_mali_osk_resource_type_t t = PMU;
			mali_pmm_pmu_deinit(&t);
		}
#endif /* USING_MALI_PMU */

		_mali_osk_atomic_term( &(pmm_state->messages_queued) );
		MALI_PMM_LOCK_TERM(pmm_state);
		_mali_osk_free(pmm_state);
		pmm_state = NULL; 
	}

	MALIPMM_DEBUG_PRINT( ("PMM: subsystem terminated\n") );
}
_mali_osk_errcode_t _mali_osk_wq_init(void)
{
#if MALI_LICENSE_IS_GPL
	MALI_DEBUG_ASSERT(NULL == mali_wq);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
	mali_wq = alloc_workqueue("mali", WQ_UNBOUND, 0);
#else
	mali_wq = create_workqueue("mali");
#endif
	if (NULL == mali_wq)
	{
		MALI_PRINT_ERROR(("Unable to create Mali workqueue\n"));
		return _MALI_OSK_ERR_FAULT;
	}
#endif

	return _MALI_OSK_ERR_OK;
}
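/*
 * The matching teardown, sketched under the same MALI_LICENSE_IS_GPL
 * guard: flush pending work items before destroying the queue. Not
 * verbatim driver code.
 */
void _mali_osk_wq_term(void)
{
#if MALI_LICENSE_IS_GPL
	MALI_DEBUG_ASSERT(NULL != mali_wq);

	flush_workqueue(mali_wq);
	destroy_workqueue(mali_wq);
	mali_wq = NULL;
#endif
}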
_mali_osk_errcode_t mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
{
	struct mali_session_data *session;
	enum mali_group_activate_pd_status activate_status;

	MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);

	mali_pm_core_event(MALI_CORE_EVENT_PP_START);

	session = mali_pp_job_get_session(job);

	mali_group_lock(group);

	mali_cluster_l2_cache_invalidate_all(group->cluster, mali_pp_job_get_id(job));

	activate_status = mali_group_activate_page_directory(group, session);
	if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
	{
		/* if session is NOT kept Zapping is done as part of session switch */
		if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
		{
			MALI_DEBUG_PRINT(3, ("PP starting job PD_Switch 0 Flush 1 Zap 1\n"));
			mali_mmu_zap_tlb_without_stall(group->mmu);
		}
		mali_pp_job_start(group->pp_core, job, sub_job);
		group->pp_running_job = job;
		group->pp_running_sub_job = sub_job;
		group->pp_state = MALI_GROUP_CORE_STATE_WORKING;

		mali_group_unlock(group);

		return _MALI_OSK_ERR_OK;
	}

#if defined(USING_MALI200)
	group->pagedir_activation_failed = MALI_TRUE;
#endif

	mali_group_unlock(group);

	mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* Failed to start, so "cancel" the MALI_CORE_EVENT_PP_START */
	return _MALI_OSK_ERR_FAULT;
}