void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
{
	u32 startcmd = 0;
	u32 *frame_registers = mali_gp_job_get_frame_registers(job);
	core->counter_src0_used = core->counter_src0;
	core->counter_src1_used = core->counter_src1;

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_ASSERT_GROUP_LOCKED(core->group);

	if (mali_gp_job_has_vs_job(job))
	{
		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
	}

	if (mali_gp_job_has_plbu_job(job))
	{
		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
	}

	MALI_DEBUG_ASSERT(0 != startcmd);

	mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);

	/* This selects which performance counters we are reading */
	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
	{
		/* global_config has enabled HW counters, this will override anything specified by user space */
		if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
		{
			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
		}
		if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
		{
			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
		}
	}
	else
	{
		/* Use HW counters from job object, if any */
		u32 perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
		if (0 != perf_counter_flag)
		{
			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
			{
				core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job);
				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
			}

			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
			{
				core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job);
				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
			}
		}
	}

	MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));

	/* Barrier to make sure the previous register write is finished */
	_mali_osk_write_mem_barrier();

	/* This is the command that starts the core. */
	mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);

	/* Barrier to make sure the previous register write is finished */
	_mali_osk_write_mem_barrier();

	/* Setup the timeout timer value and save the job id for the job running on the gp core */

	_mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
	core->timeout_job_id = mali_gp_job_get_id(job);

#if MALI_TIMELINE_PROFILING_ENABLED
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
	                          job->frame_builder_id, job->flush_id, 0, 0, 0);
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), job->pid, job->tid, 0, 0, 0);
#endif

	core->running_job = job;
}
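
A note on the start sequence above: the driver programs all job state with relaxed register writes, issues a write memory barrier, and only then writes the start command, with a second barrier after the kick. The sketch below illustrates that ordering pattern in isolation; the mmio_write32 helper and the HYP_* register offsets and bit values are hypothetical stand-ins, not part of the Mali driver API.

#include <stdint.h>

/* Hypothetical MMIO helper and register layout, for illustration only. */
static inline void mmio_write32(volatile uint32_t *base, uint32_t offset, uint32_t value)
{
	base[offset / sizeof(uint32_t)] = value;
}

#define HYP_REG_FRAME_BASE   0x00u      /* assumed start of the frame register block */
#define HYP_REG_CMD          0x20u      /* assumed command register */
#define HYP_CMD_START_VS     (1u << 0)
#define HYP_CMD_START_PLBU   (1u << 1)

static void hyp_gp_start(volatile uint32_t *base, const uint32_t *frame_regs, int has_vs, int has_plbu)
{
	uint32_t i, startcmd = 0;

	if (has_vs)   startcmd |= HYP_CMD_START_VS;
	if (has_plbu) startcmd |= HYP_CMD_START_PLBU;

	/* 1. Program job state; ordering between these writes does not matter. */
	for (i = 0; i < 6; i++)
		mmio_write32(base, HYP_REG_FRAME_BASE + i * (uint32_t)sizeof(uint32_t), frame_regs[i]);

	/* 2. Barrier: all state writes must be visible to the device before the kick. */
	__sync_synchronize();

	/* 3. Kick the core; this single write starts the job. */
	mmio_write32(base, HYP_REG_CMD, startcmd);

	/* 4. Barrier again so the start command is posted before the CPU continues. */
	__sync_synchronize();
}
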
void mali_pp_stop_bus(struct mali_pp_core *core)
{
    MALI_DEBUG_ASSERT_POINTER(core);

    mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
}
void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
{
    u32 *frame_registers = mali_pp_job_get_frame_registers(job);
    u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
    u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
    u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
    core->counter_src0_used = core->counter_src0;
    core->counter_src1_used = core->counter_src1;

    MALI_DEBUG_ASSERT_POINTER(core);
    MALI_ASSERT_GROUP_LOCKED(core->group);

    mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, frame_registers, MALI200_NUM_REGS_FRAME);
    if (0 != sub_job)
    {
        /* Two frame registers differ for each sub job: the renderer list address and the
         * FS stack address. They are correct in the frame register array for the first
         * sub job, but need to be patched here for all other sub jobs. */
        mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job));
        mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job));
    }

    if (wb0_registers[0])
    {
        mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, MALI200_NUM_REGS_WBx);
    }

    if (wb1_registers[0])
    {
        mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, MALI200_NUM_REGS_WBx);
    }

    if (wb2_registers[0])
    {
        mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, MALI200_NUM_REGS_WBx);
    }

    /* This selects which performance counters we are reading */
    if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
    {
        /* global_config has enabled HW counters, this will override anything specified by user space */
        if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
        {
            mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
            mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
        }
        if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
        {
            mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
            mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
        }
    }
    else
    {
        /* Use HW counters from job object, if any */
        u32 perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
        if (0 != perf_counter_flag)
        {
            if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
            {
                core->counter_src0_used = mali_pp_job_get_perf_counter_src0(job);
                mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
                mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
            }

            if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
            {
                core->counter_src1_used = mali_pp_job_get_perf_counter_src1(job);
                mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
                mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
            }
        }
    }

    MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));

    /* Barrier to make sure all register writes are finished */
    _mali_osk_write_mem_barrier();

    /* This is the command that starts the core. */
    mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);

    /* Barrier to make sure the previous register write is finished */
    _mali_osk_write_mem_barrier();

    /* Set up the timeout timer and save the job id for the job running on the pp core */
    _mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
    core->timeout_job_id = mali_pp_job_get_id(job);

#if MALI_TIMELINE_PROFILING_ENABLED
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, job->frame_builder_id, job->flush_id, 0, 0, 0);
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id), job->pid, job->tid, 0, 0, 0);
#endif

    core->running_job = job;
    core->running_sub_job = sub_job;
}
void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm )
{
	int n;
	mali_pmm_core_mask pd, ad;
	_mali_osk_errcode_t err;
	volatile mali_pmm_core_mask *pregistered;

	MALI_DEBUG_ASSERT_POINTER(pmm);

	MALIPMM_DEBUG_PRINT( ("PMM: Cancelling power down\n") );

	pd = pmm->cores_pend_down;
	ad = pmm->cores_ack_down;
	/* Clear the pending cores so that they don't move to the off
	 * queue if they haven't already
	 */
	pmm->cores_pend_down = 0;
	pmm->cores_ack_down = 0;
	pregistered = &(pmm->cores_registered);

	/* Power up all the pending power down cores - just so
	 * we make sure the system is in a known state, as a
	 * pending core might have sent an acknowledged message
	 * which hasn't been read yet.
	 */
	for( n = 0; n < SIZEOF_CORES_LIST; n++ )
	{
		if( (cores_list[n] & pd) != 0 )
		{
			/* We can't hold the power lock while accessing the subsystem mutex via
			 * the core power call, because driver termination requires the subsystem
			 * mutex and then the power lock to be held in order to unregister a core.
			 * This does mean that the following power up call could fail if the core
			 * is unregistered before we tell it to power up, but that does not matter
			 * as we are terminating.
			 */
#if MALI_STATE_TRACKING
			pmm->mali_pmm_lock_acquired = 0;
#endif /* MALI_STATE_TRACKING */

			MALI_PMM_UNLOCK(pmm);
			/* As we are cancelling - only move the cores back to the queue - 
			 * no reset needed
			 */
			err = mali_core_signal_power_up( cores_list[n], MALI_TRUE );
			MALI_PMM_LOCK(pmm);
#if MALI_STATE_TRACKING
			pmm->mali_pmm_lock_acquired = 1;
#endif /* MALI_STATE_TRACKING */

			/* Update pending list with the current registered cores */
			pd &= (*pregistered);

			if( err != _MALI_OSK_ERR_OK )
			{
				MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_BUSY && 
										((cores_list[n] & ad) == 0))  ||
										(err == _MALI_OSK_ERR_FAULT &&
										(*pregistered & cores_list[n]) == 0) );
				/* If we didn't power up a core - it must be active and 
				 * hasn't actually tried to power down - this is expected
				 * for cores that haven't acknowledged
				 * Alternatively we are shutting down and the core has
				 * been unregistered
				 */
			}
		}
	}
	/* Only used in debug builds */
	MALI_IGNORE(ad);
}
mali_bool pmm_invoke_power_up( _mali_pmm_internal_state_t *pmm )
{
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_POINTER(pmm);

	/* Check that cores are pending power up during power up invoke */
	MALI_DEBUG_ASSERT( pmm->cores_pend_up != 0 );
	/* Check that cores are not pending power down during power up invoke */
	MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );

	if( pmm_power_up_okay( pmm ) )
	{
		/* Power up has completed - sort out subsystem core status */
		
		int n;
		/* Use volatile to access, so that it is updated if any cores are unregistered */
		volatile mali_pmm_core_mask *ppendup = &(pmm->cores_pend_up);
#if MALI_PMM_TRACE
		mali_pmm_core_mask old_power = pmm->cores_powered;
#endif
		/* Move cores into idle queues */
		for( n = 0; n < SIZEOF_CORES_LIST; n++ )
		{
			if( (cores_list[n] & (*ppendup)) != 0 )
			{
				/* We can't hold the power lock while accessing the subsystem mutex via
				 * the core power call, because driver termination requires the subsystem
				 * mutex and then the power lock to be held in order to unregister a core.
				 * This does mean that the following call could fail if the core is
				 * unregistered before we tell it to power up, but that does not matter
				 * as we are terminating.
				 */
#if MALI_STATE_TRACKING
				pmm->mali_pmm_lock_acquired = 0;
#endif /* MALI_STATE_TRACKING */

				MALI_PMM_UNLOCK(pmm);
				err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
				MALI_PMM_LOCK(pmm);

#if MALI_STATE_TRACKING
				pmm->mali_pmm_lock_acquired = 1;
#endif /* MALI_STATE_TRACKING */


				if( err != _MALI_OSK_ERR_OK )
				{
					MALI_DEBUG_PRINT(1,("In pmm_invoke_power_up:: The error and pending cores to be powered up are...%x...%x",err,*ppendup));
					MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_FAULT &&
										(*ppendup & cores_list[n]) == 0) );
					/* We only expect this to fail when we are shutting down 
					 * and the core has been unregistered
					 */
				}
			}
		}
		/* Finished power up - add cores to idle and powered list */
		pmm->cores_powered |= (*ppendup);
		pmm->cores_idle |= (*ppendup);
		/* Reset pending/acknowledge status */
		pmm->cores_pend_up = 0;
		pmm->cores_ack_up = 0;

#if MALI_PMM_TRACE
		_mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
#endif
		return MALI_TRUE;
	}
	else
	{
#if !MALI_PMM_NO_PMU
		/* Power up must now be done */
		err = malipmm_powerup( pmm->cores_pend_up );
#else
		err = _MALI_OSK_ERR_OK;
#endif
		if( err != _MALI_OSK_ERR_OK )
		{
			MALI_PRINT_ERROR( ("PMM: Failed to get PMU to power up cores - (0x%x) %s", 
					pmm->cores_pend_up, pmm_trace_get_core_name(pmm->cores_pend_up)) );
			pmm->fatal_power_err = MALI_TRUE;
		}
		else
		{
			/* TBD - Update core status immediately rather than use event message */
			_mali_uk_pmm_message_s event = {
				NULL,
				MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK,
				0 };
			/* All the cores that were pending power up, have now completed power up */
			event.data = pmm->cores_pend_up;
			_mali_ukk_pmm_event_message( &event );
			MALIPMM_DEBUG_PRINT( ("PMM: Sending ACK to power up") );
		}
	}

	/* Always return false, as we need an interrupt to acknowledge
	 * when power up is complete
	 */
	return MALI_FALSE;
}
void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
{
	u32 num_frame_registers;
	u32 relative_address;
	u32 start_index;
	u32 nr_of_regs;
	u32 *frame_registers = mali_pp_job_get_frame_registers(job);
	u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
	u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
	u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
	core->counter_src0_used = mali_pp_job_get_perf_counter_src0(job);
	core->counter_src1_used = mali_pp_job_get_perf_counter_src1(job);

	MALI_DEBUG_ASSERT_POINTER(core);

	/* Write frame registers */
	num_frame_registers = (_MALI_PRODUCT_ID_MALI200 == mali_kernel_core_get_product_id()) ? MALI_PP_MALI200_NUM_FRAME_REGISTERS : MALI_PP_MALI400_NUM_FRAME_REGISTERS;

	/*
	 * There are two frame registers which are different for each sub job:
	 * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
	 * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
	 */
	mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);

	/* For virtual jobs, the stack address shouldn't be broadcast but written individually */
	if (!mali_pp_job_is_virtual(job) || restart_virtual)
	{
		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
	}

	/* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
	relative_address = MALI200_REG_ADDR_RSW;
	start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
	nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);

	mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
	        relative_address, &frame_registers[start_index],
	        nr_of_regs, &mali_frame_registers_reset_values[start_index]);

	/* MALI200_REG_ADDR_STACK_SIZE */
	relative_address = MALI200_REG_ADDR_STACK_SIZE;
	start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);

	mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
	        relative_address, frame_registers[start_index],
	        mali_frame_registers_reset_values[start_index]);

	/* Skip 2 reserved registers */

	/* Write remaining registers */
	relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
	start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
	nr_of_regs = num_frame_registers - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);

	mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
	        relative_address, &frame_registers[start_index],
	        nr_of_regs, &mali_frame_registers_reset_values[start_index]);

	/* Write WBx registers */
	if (wb0_registers[0]) /* M200_WB0_REG_SOURCE_SELECT register */
	{
		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
	}

	if (wb1_registers[0]) /* M200_WB1_REG_SOURCE_SELECT register */
	{
		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
	}

	if (wb2_registers[0]) /* M200_WB2_REG_SOURCE_SELECT register */
	{
		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
	}

	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
	{
		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
	}
	if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
	{
		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
	}

	MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));

	/* Barrier to make sure all register writes are finished */
	_mali_osk_write_mem_barrier();

	/* This is the command that starts the core. */
	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);

	/* Barrier to make sure the previous register write is finished */
	_mali_osk_write_mem_barrier();
}
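
The *_conditional write helpers used above take the register's reset value as an extra argument; the apparent intent is that a write can be skipped when the requested value already matches the state the register holds after the core was reset. A minimal sketch of that idea is below; hyp_register_write_relaxed and the behaviour ascribed to the conditional variant are assumptions for illustration, not the driver's actual implementation.

#include <stdint.h>

/* Hypothetical relaxed MMIO write, standing in for mali_hw_core_register_write_relaxed(). */
void hyp_register_write_relaxed(void *core, uint32_t relative_address, uint32_t value);

/* Skip the write when the new value equals the known post-reset value of the register. */
static void hyp_register_write_relaxed_conditional(void *core, uint32_t relative_address,
                                                   uint32_t new_val, uint32_t reset_val)
{
	if (new_val == reset_val)
	{
		return; /* register already holds this value after the preceding reset */
	}
	hyp_register_write_relaxed(core, relative_address, new_val);
}
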
_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
{
    memory_engine * engine = (memory_engine*)mem_engine;

    MALI_DEBUG_ASSERT_POINTER(engine);
    MALI_DEBUG_ASSERT_POINTER(descriptor);
    MALI_DEBUG_ASSERT_POINTER(physical_allocators);
    /* ASSERT that the list member has been initialized, even if it won't be
     * used for tracking. We need it to be initialized to see if we need to
     * delete it from a list in the release function. */
    MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );

    if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
    {
        _mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
        if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
        {
            res = engine->process_address->allocate(descriptor);
        }
        if ( _MALI_OSK_ERR_OK == res )
        {
            /* address space setup OK, commit physical memory to the allocation */
            mali_physical_memory_allocator * active_allocator = physical_allocators;
            struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
            u32 offset = 0;

            while ( NULL != active_allocator )
            {
                switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
                {
                case MALI_MEM_ALLOC_FINISHED:
                    if ( NULL != tracking_list )
                    {
                        /* Insert into the memory session list */
                        /* ASSERT that it is not already part of a list */
                        MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
                        _mali_osk_list_add( &descriptor->list, tracking_list );
                    }

                    MALI_SUCCESS; /* all done */
                case MALI_MEM_ALLOC_NONE:
                    /* reuse current active_allocation_tracker */
                    MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
                                          ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
                                          ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
                    active_allocator = active_allocator->next;
                    break;
                case MALI_MEM_ALLOC_PARTIAL:
                    if (NULL != active_allocator->next)
                    {
                        /* need a new allocation tracker */
                        active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
                        if (NULL != active_allocation_tracker->next)
                        {
                            active_allocation_tracker = active_allocation_tracker->next;
                            MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
                                                  ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
                                                  ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
                            active_allocator = active_allocator->next;
                            break;
                        }
                    }
                /* FALL THROUGH */
                case MALI_MEM_ALLOC_INTERNAL_FAILURE:
                    active_allocator = NULL; /* end the while loop */
                    break;
                }
            }

            MALI_PRINT(("Memory allocate failed, could not allocate size %d kB.\n", descriptor->size/1024));

            /* allocation failure, start cleanup */
            /* loop over any potential partial allocations */
            active_allocation_tracker = &descriptor->physical_allocation;
            while (NULL != active_allocation_tracker)
            {
                /* handle blank trackers which will show up during failure */
                if (NULL != active_allocation_tracker->release)
                {
                    active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
                }
                active_allocation_tracker = active_allocation_tracker->next;
            }

            /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
            for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
            {
                void * buf = active_allocation_tracker;
                active_allocation_tracker = active_allocation_tracker->next;
                _mali_osk_free(buf);
            }

            /* release the address spaces */

            if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
            {
                engine->process_address->release(descriptor);
            }
        }
        engine->mali_address->release(descriptor);
    }

    MALI_ERROR(_MALI_OSK_ERR_FAULT);
}
u32 mali_pp_core_get_id(struct mali_pp_core *core)
{
	MALI_DEBUG_ASSERT_POINTER(core);
	return core->core_id;
}
u32 mali_pp_core_get_counter_src1(struct mali_pp_core *core)
{
	MALI_DEBUG_ASSERT_POINTER(core);
	return core->counter_src1;
}
_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
{
	int i;
	const int request_loop_count = 20;

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
	MALI_ASSERT_GROUP_LOCKED(core->group);

	mali_pp_post_process_job(core); /* @@@@ are there cases where it is unsafe to post process the job here? */

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */

#if defined(USING_MALI200)

	/* On Mali-200, stop the  bus, then do a hard reset of the core */

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);

	for (i = 0; i < request_loop_count; i++)
	{
		if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
		{
			break;
		}
		_mali_osk_time_ubusydelay(10);
	}

	if (request_loop_count == i)
	{
		MALI_PRINT_ERROR(("Mali PP: Failed to stop bus for core %s, unable to recover\n", core->hw_core.description));
		return _MALI_OSK_ERR_FAULT ;
	}

	/* the bus was stopped OK, do the hard reset */
	mali_pp_hard_reset(core);

#elif defined(USING_MALI400)

	/* Mali-300 and Mali-400 have a safe reset command which we use */

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI400PP_REG_VAL_IRQ_RESET_COMPLETED);
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);

	for (i = 0; i < request_loop_count; i++)
	{
		if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED)
		{
			break;
		}
		_mali_osk_time_ubusydelay(10);
	}

	if (request_loop_count == i)
	{
		MALI_DEBUG_PRINT(2, ("Mali PP: Failed to reset core %s, Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
		return _MALI_OSK_ERR_FAULT;
	}
#else
#error "no supported mali core defined"
#endif

	/* Re-enable interrupts */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);

	return _MALI_OSK_ERR_OK;
}
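
Both reset paths in mali_pp_reset() use the same bounded polling idiom: write a command, poll a status bit up to request_loop_count times with a short busy delay, and treat the loop counter reaching the limit as a timeout. A generic sketch of that idiom is shown below; hyp_read_status and hyp_busy_delay_us are hypothetical stand-ins for the register read and _mali_osk_time_ubusydelay().

#include <stdint.h>

uint32_t hyp_read_status(void *core);   /* hypothetical: reads a status/rawstat register */
void hyp_busy_delay_us(uint32_t us);    /* hypothetical: short busy-wait delay */

/* Returns 0 on success, -1 if the expected bit never appeared within the retry budget. */
static int hyp_poll_for_bit(void *core, uint32_t bit, int max_attempts, uint32_t delay_us)
{
	int i;

	for (i = 0; i < max_attempts; i++)
	{
		if (hyp_read_status(core) & bit)
		{
			break; /* condition met */
		}
		hyp_busy_delay_us(delay_us);
	}

	/* Loop ran to completion without seeing the bit: timeout. */
	if (i == max_attempts)
	{
		return -1;
	}

	return 0;
}
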
void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
{
	u32 *frame_registers = mali_pp_job_get_frame_registers(job);
	u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
	u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
	u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
	core->counter_src0_used = core->counter_src0;
	core->counter_src1_used = core->counter_src1;

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_ASSERT_GROUP_LOCKED(core->group);

	mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, frame_registers, MALI200_NUM_REGS_FRAME);
	if (0 != sub_job)
	{
		/*
		 * There are two frame registers which are different for each sub job.
		 * For the first sub job, these are correctly represented in the frame register array,
		 * but we need to patch these for all other sub jobs
		 */
		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job));
		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job));
	}

	if (wb0_registers[0]) /* M200_WB0_REG_SOURCE_SELECT register */
	{
		mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, MALI200_NUM_REGS_WBx);
	}

	if (wb1_registers[0]) /* M200_WB1_REG_SOURCE_SELECT register */
	{
		mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, MALI200_NUM_REGS_WBx);
	}

	if (wb2_registers[0]) /* M200_WB2_REG_SOURCE_SELECT register */
	{
		mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, MALI200_NUM_REGS_WBx);
	}

	/* This selects which performance counters we are reading */
	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
	{
		/* global_config has enabled HW counters, this will override anything specified by user space */
		if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
		{
			mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
			mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
		}
		if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
		{
			mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
			mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
		}
	}
	else
	{
		/* Use HW counters from job object, if any */
		u32 perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
		if (0 != perf_counter_flag)
		{
			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
			{
				core->counter_src0_used = mali_pp_job_get_perf_counter_src0(job);
				mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
				mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
			}

			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
			{
				core->counter_src1_used = mali_pp_job_get_perf_counter_src1(job);
				mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
				mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
			}
		}
	}

	MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));

	/* Barrier to make sure all register writes are finished */
	_mali_osk_write_mem_barrier();

	/* This is the command that starts the core. */
	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);

	/* Barrier to make sure the previous register write is finished */
	_mali_osk_write_mem_barrier();

	/* Setup the timeout timer value and save the job id for the job running on the pp core */
	_mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
	core->timeout_job_id = mali_pp_job_get_id(job);

#if MALI_TIMELINE_PROFILING_ENABLED
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, job->frame_builder_id, job->flush_id, 0, 0, 0);
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id), job->pid, job->tid, 0, 0, 0);
#endif

	core->running_job = job;
	core->running_sub_job = sub_job;
}
MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system)
{
	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
}
_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args )
{
	_mali_core_info * current_core;
	_mali_mem_info * current_mem;
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	void * current_write_pos, ** current_patch_pos;
    u32 adjust_ptr_base;

	/* check input */
	MALI_DEBUG_ASSERT_POINTER(args);
    MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
    MALI_CHECK_NON_NULL(args->system_info, _MALI_OSK_ERR_INVALID_ARGS);

	/* lock the system info */
	_mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );

	/* first check size */
	if (args->size < system_info_size) goto exit_when_locked;

	/* we build a copy of system_info in the user space buffer specified by the user and
     * patch up the pointers. The ukk_private members of _mali_uk_get_system_info_s may
     * indicate a different base address for patching the pointers (normally the
     * address of the provided system_info buffer would be used). This is helpful when
     * the system_info buffer needs to get copied to user space and the pointers need
     * to be in user space.
     */
    if (0 == args->ukk_private)
    {
        adjust_ptr_base = (u32)args->system_info;
    }
    else
    {
        adjust_ptr_base = args->ukk_private;
    }

	/* copy each struct into the buffer, and update its pointers */
	current_write_pos = (void *)args->system_info;

	/* first, the master struct */
	_mali_osk_memcpy(current_write_pos, system_info, sizeof(_mali_system_info));

	/* advance write pointer */
	current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_system_info));

	/* first we write the core info structs, patch starts at master's core_info pointer */
	current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, core_info));

	for (current_core = system_info->core_info; NULL != current_core; current_core = current_core->next)
	{

		/* patch the pointer pointing to this core */
		*current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));

		/* copy the core info */
		_mali_osk_memcpy(current_write_pos, current_core, sizeof(_mali_core_info));

		/* update patch pos */
		current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_core_info, next));

		/* advance write pos in memory */
		current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_core_info));
	}
	/* patching of last patch pos is not needed, since we wrote NULL there in the first place */

	/* then we write the mem info structs, patch starts at master's mem_info pointer */
	current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, mem_info));

	for (current_mem = system_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
	{
		/* patch the pointer pointing to this core */
		*current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));

		/* copy the core info */
		_mali_osk_memcpy(current_write_pos, current_mem, sizeof(_mali_mem_info));

		/* update patch pos */
		current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_mem_info, next));

		/* advance write pos in memory */
		current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_mem_info));
	}
	/* patching of last patch pos is not needed, since we wrote NULL there in the first place */

	err = _MALI_OSK_ERR_OK;
exit_when_locked:
	_mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
    MALI_ERROR(err);
}
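
The pointer patching in _mali_ukk_get_system_info() rebases every linked-list pointer so it is valid at the address the buffer will eventually occupy (adjust_ptr_base), rather than at the kernel address it is being written to: each patched pointer is adjust_ptr_base plus the object's offset from the start of args->system_info. The worked example below illustrates that arithmetic; all addresses are made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative addresses only. */
	uint32_t system_info_buf   = 0xC0100000u; /* buffer currently being filled */
	uint32_t adjust_ptr_base   = 0x40000000u; /* address the buffer will end up at (e.g. in user space) */
	uint32_t current_write_pos = system_info_buf + 0x40u; /* where the next struct is copied to */

	/* Patched pointer = destination base + offset of the object inside the buffer. */
	uint32_t patched = adjust_ptr_base + (current_write_pos - system_info_buf);

	printf("patched pointer = 0x%08X\n", patched); /* prints 0x40000040 */
	return 0;
}
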
_mali_osk_errcode_t _mali_ukk_get_system_info_size(_mali_uk_get_system_info_size_s *args)
{
    MALI_DEBUG_ASSERT_POINTER(args);
    args->size = system_info_size;
    MALI_SUCCESS;
}
void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
{
	MALI_DEBUG_ASSERT_POINTER(bcast_unit);
	mali_hw_core_delete(&bcast_unit->hw_core);
	_mali_osk_free(bcast_unit);
}
void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
{
    MALI_DEBUG_ASSERT_POINTER(dlbu);
    mali_hw_core_delete(&dlbu->hw_core);
    _mali_osk_free(dlbu);
}
void mali_pp_stop_bus(struct mali_pp_core *core)
{
	MALI_DEBUG_ASSERT_POINTER(core);
	/* Will only send the stop bus command, and not wait for it to complete */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
}
void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu)
{
    MALI_DEBUG_ASSERT_POINTER(dlbu);

    mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
}
void mali_allocation_engine_destroy(mali_allocation_engine engine)
{
    MALI_DEBUG_ASSERT_POINTER(engine);
    _mali_osk_free(engine);
}
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
{
	struct mali_pp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
	if (NULL != job) {
		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
			goto fail;
		}

		if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
			MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
			goto fail;
		}

		if (!mali_pp_job_use_no_notification(job)) {
			job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
			if (NULL == job->finished_notification) goto fail;
		}

		perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

		/* case when no counters came from user space
		 * so pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
		      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
			u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);

			/* These counters apply for all virtual jobs, and where no per sub job counter is specified */
			job->uargs.perf_counter_src0 = pp_counter_src0;
			job->uargs.perf_counter_src1 = pp_counter_src1;

			/* We only copy the per sub job array if it is enabled with at least one counter */
			if (0 < sub_job_count) {
				job->perf_counter_per_sub_job_count = sub_job_count;
				_mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
				_mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
			}
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		_mali_osk_list_init(&job->session_list);
		job->id = id;

		job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();

		job->num_memory_cookies = job->uargs.num_memory_cookies;
		if (job->num_memory_cookies > 0) {
			u32 size;

			if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
				MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
				goto fail;
			}

			size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;

			job->memory_cookies = _mali_osk_malloc(size);
			if (NULL == job->memory_cookies) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
				goto fail;
			}

			if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size)) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
				goto fail;
			}

#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
			job->num_dma_bufs = job->num_memory_cookies;
			job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
			if (NULL == job->dma_bufs) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
				goto fail;
			}
#endif
		}

		/* Prepare DMA command buffer to start job, if it is virtual. */
		if (mali_pp_job_is_virtual(job)) {
			struct mali_pp_core *core;
			_mali_osk_errcode_t err =  mali_dma_get_cmd_buf(&job->dma_cmd_buf);

			if (_MALI_OSK_ERR_OK != err) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
				goto fail;
			}

			core = mali_pp_scheduler_get_virtual_pp();
			MALI_DEBUG_ASSERT_POINTER(core);

			mali_pp_job_dma_cmd_prepare(core, job, 0, MALI_FALSE, &job->dma_cmd_buf);
		}

		if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
			/* Not a valid job. */
			goto fail;
		}

		mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
		mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));

		return job;
	}

fail:
	if (NULL != job) {
		mali_pp_job_delete(job);
	}

	return NULL;
}
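
mali_pp_job_create() follows the usual kernel-side creation pattern: allocate and zero the object, copy the arguments from user space, validate each field, and route every failure to a single fail: label that calls one teardown function able to free a partially built job. A condensed sketch of that shape is below; the hyp_* types and helpers are hypothetical.

#include <stdlib.h>

struct hyp_args { unsigned int num_cookies; };
struct hyp_job  { struct hyp_args uargs; int *cookies; };

/* Hypothetical copy from user space; assumed to return 0 on success. */
int hyp_copy_from_user(void *dst, const void *src, unsigned long n);

static void hyp_job_delete(struct hyp_job *job)
{
	if (job) {
		free(job->cookies); /* free(NULL) is a no-op, so partial construction is fine */
		free(job);
	}
}

struct hyp_job *hyp_job_create(const void *uargs)
{
	struct hyp_job *job = calloc(1, sizeof(*job));
	if (!job)
		return NULL;

	/* Every failure path funnels through the same cleanup. */
	if (hyp_copy_from_user(&job->uargs, uargs, sizeof(job->uargs)) != 0)
		goto fail;

	if (job->uargs.num_cookies > 0) {
		job->cookies = calloc(job->uargs.num_cookies, sizeof(*job->cookies));
		if (!job->cookies)
			goto fail;
	}

	return job;

fail:
	hyp_job_delete(job);
	return NULL;
}
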
mali_pmm_core_mask pmm_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores, mali_bool immediate_only )
{
	mali_pmm_core_mask cores_subset;
	_mali_osk_errcode_t err;
	MALI_DEBUG_ASSERT_POINTER(pmm);
	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );

	/* Check that cores aren't pending power up when asked for power down */
	MALI_DEBUG_ASSERT( pmm->cores_pend_up == 0 );

	cores_subset = (pmm->cores_powered & cores);
	if( cores_subset != 0 )
	{
		int n;
		volatile mali_pmm_core_mask *ppowered = &(pmm->cores_powered);

		/* There are some cores that need powering down, but we may
		 * need to wait until they are idle
		 */
		for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
		{
			if( (cores_list[n] & cores_subset) != 0 )
			{
				/* Core is to be powered down */
				pmm->cores_pend_down |= cores_list[n];

				/* We can't hold the power lock while accessing the subsystem mutex via
				 * the core power call, because driver termination requires the subsystem
				 * mutex and then the power lock to be held in order to unregister a core.
				 * This does mean that the following call could fail if the core is
				 * unregistered before we tell it to power down, but that does not matter
				 * as we are terminating.
				 */
#if MALI_STATE_TRACKING
                pmm->mali_pmm_lock_acquired = 0;
#endif /* MALI_STATE_TRACKING */

				MALI_PMM_UNLOCK(pmm);
				/* Signal the core to power down
				 * If it is busy (not idle) it will set a pending power down flag 
				 * (as long as we don't want to only immediately power down). 
				 * If it isn't busy it will move out of the idle queue right
				 * away
				 */
				err = mali_core_signal_power_down( cores_list[n], immediate_only );
				MALI_PMM_LOCK(pmm);

#if MALI_STATE_TRACKING
                pmm->mali_pmm_lock_acquired = 1;
#endif /* MALI_STATE_TRACKING */
			

				/* Re-read cores_subset in case it has changed */
				cores_subset = (*ppowered & cores);

				if( err == _MALI_OSK_ERR_OK )
				{
					/* We moved an idle core to the power down queue
					 * which means it is now acknowledged (if it is still 
					 * registered)
					 */
					pmm->cores_ack_down |= (cores_list[n] & cores_subset);
				}
				else
				{
					MALI_DEBUG_PRINT(1,("PMM: In pmm_cores_to_power_down, the error and cores powered are..%x....%x",err,*ppowered));
					MALI_DEBUG_ASSERT( err == _MALI_OSK_ERR_BUSY ||
										(err == _MALI_OSK_ERR_FAULT &&
										(*ppowered & cores_list[n]) == 0) );
					/* If we didn't move a core - it must be active, so
					 * leave it pending, so we get an acknowledgement (when
					 * not in immediate only mode)
					 * Alternatively we are shutting down and the core has
					 * been unregistered
					 */
				}
			}
		}
	}

	return cores_subset;
}
_mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args)
{
	struct mali_session_data *session;
	mali_mem_allocation * descriptor;
	int md;
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);

	session = (struct mali_session_data *)args->ctx;
	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);

	/* check arguments */
	/* NULL might be a valid Mali address */
	if (! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	/* size must be a multiple of the system page size */
	if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	MALI_DEBUG_PRINT(3,
	                 ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
	                  (void*)args->phys_addr,
	                  (void*)(args->phys_addr + args->size -1),
	                  (void*)args->mali_address)
	                );

	/* Validate the mali physical range */
	if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size)) {
		return _MALI_OSK_ERR_FAULT;
	}

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_EXTERNAL);
	if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);

	descriptor->mali_mapping.addr = args->mali_address;
	descriptor->size = args->size;

	if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
		descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
	}

	_mali_osk_mutex_wait(session->memory_lock);
	{
		u32 virt = descriptor->mali_mapping.addr;
		u32 phys = args->phys_addr;
		u32 size = args->size;

		err = mali_mem_mali_map_prepare(descriptor);
		if (_MALI_OSK_ERR_OK != err) {
			_mali_osk_mutex_signal(session->memory_lock);
			mali_mem_descriptor_destroy(descriptor);
			return _MALI_OSK_ERR_NOMEM;
		}

		mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);

		if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
			mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
		}
	}
	_mali_osk_mutex_signal(session->memory_lock);

	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_external_release(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	args->cookie = md;

	MALI_SUCCESS;
}
mali_bool pmm_power_up_okay( _mali_pmm_internal_state_t *pmm )
{
	MALI_DEBUG_ASSERT_POINTER(pmm);

	return ( pmm->cores_pend_up == pmm->cores_ack_up ? MALI_TRUE : MALI_FALSE );
}
void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data )
{
	MALI_DEBUG_ASSERT_POINTER( queue );
	MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
	wait_event(queue->wait_queue, condition(data));
}
void pmm_fatal_reset( _mali_pmm_internal_state_t *pmm )
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
	_mali_osk_notification_t *msg = NULL;
	mali_pmm_status status;
	MALI_DEBUG_ASSERT_POINTER(pmm);
	MALIPMM_DEBUG_PRINT( ("PMM: Fatal Reset called") ); 

	MALI_DEBUG_ASSERT( pmm->status != MALI_PMM_STATUS_OFF );

	/* Reset the common status */
	pmm->waiting = 0;
	pmm->missed = 0;
	pmm->fatal_power_err = MALI_FALSE;
	pmm->no_events = 0;
	pmm->check_policy = MALI_FALSE;
	pmm->cores_pend_down = 0;
	pmm->cores_pend_up = 0;
	pmm->cores_ack_down = 0;
	pmm->cores_ack_up = 0;
	pmm->is_dvfs_active = 0; 
#if MALI_PMM_TRACE
	pmm->messages_sent = 0;
	pmm->messages_received = 0;
	pmm->imessages_sent = 0;
	pmm->imessages_received = 0;
	MALI_PRINT( ("PMM Trace: *** Fatal reset occurred ***") );
#endif

	/* Set that we are unavailable whilst resetting */
	pmm->state = MALI_PMM_STATE_UNAVAILABLE;
	status = pmm->status;
	pmm->status = MALI_PMM_STATUS_OFF;

	/* We want all cores powered */
	pmm->cores_powered = pmm->cores_registered;
	/* The cores may not be idle, but this state will be rectified later */
	pmm->cores_idle = pmm->cores_registered;
	
	/* So power on any cores that are registered */
	if( pmm->cores_registered != 0 )
	{
		int n;
		volatile mali_pmm_core_mask *pregistered = &(pmm->cores_registered);
#if !MALI_PMM_NO_PMU
		err = malipmm_powerup( pmm->cores_registered );
#endif
		if( err != _MALI_OSK_ERR_OK )
		{
			/* This is very bad as we can't even be certain the cores are now 
			 * powered up
			 */
			MALI_PRINT_ERROR( ("PMM: Failed to perform PMM reset!\n") );
			/* TBD driver exit? */
		}

		for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
		{
			if( (cores_list[n] & (*pregistered)) != 0 )
			{
#if MALI_STATE_TRACKING
				pmm->mali_pmm_lock_acquired = 0;
#endif /* MALI_STATE_TRACKING */

				MALI_PMM_UNLOCK(pmm);
				/* Core is now active - so try putting it in the idle queue */
				err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
				MALI_PMM_LOCK(pmm);
#if MALI_STATE_TRACKING
                pmm->mali_pmm_lock_acquired = 1;
#endif /* MALI_STATE_TRACKING */

				/* We either succeeded, or we were not off anyway, or we have
				 * just been deregistered
				 */
				MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_OK) ||
									(err == _MALI_OSK_ERR_BUSY) ||
									(err == _MALI_OSK_ERR_FAULT && 
									(*pregistered & cores_list[n]) == 0) );
			}
		}
	}

	/* Unblock any pending OS event */
	if( status == MALI_PMM_STATUS_OS_POWER_UP )
	{
		/* Get the OS data and respond to the power up */
		_mali_osk_pmm_power_up_done( pmm_retrieve_os_event_data( pmm ) );
	}
	if( status == MALI_PMM_STATUS_OS_POWER_DOWN )
	{
		/* Get the OS data and respond to the power down
		 * NOTE: We are not powered down at this point due to power problems,
		 * so we are lying to the system, but something bad has already
		 * happened and we are trying to unstick things.
		 * TBD - Add busy loop to power down cores?
		 */
		_mali_osk_pmm_power_down_done( pmm_retrieve_os_event_data( pmm ) );
	}
		
	/* Purge the event queues - drain each one until it is empty */
	while( _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg ) == _MALI_OSK_ERR_OK )
	{
		_mali_osk_notification_delete( msg );
	}

	while( _mali_osk_notification_queue_dequeue( pmm->queue, &msg ) == _MALI_OSK_ERR_OK )
	{
		_mali_osk_notification_delete( msg );
	}

	/* Return status/state to normal */
	pmm->status = MALI_PMM_STATUS_IDLE;
	pmm_update_system_state(pmm);
}
void _mali_osk_wait_queue_wait_event_timeout( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data, u32 timeout )
{
	MALI_DEBUG_ASSERT_POINTER( queue );
	MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
	wait_event_timeout(queue->wait_queue, condition(data), _mali_osk_time_mstoticks(timeout));
}
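
The two wait-queue wrappers above take a predicate callback plus an opaque data pointer, which the underlying wait_event()/wait_event_timeout() macros re-evaluate every time the queue is woken. A minimal usage sketch follows; the hyp_wait_ctx structure, the job_done flag and the way the queue pointer is obtained are assumptions for illustration.

/* Hypothetical shared state; the waker sets job_done and wakes the queue. */
struct hyp_wait_ctx
{
	volatile int job_done;
};

/* Predicate matching the mali_bool (*condition)(void *) callback signature. */
static mali_bool hyp_job_is_done(void *data)
{
	struct hyp_wait_ctx *ctx = data;
	return ctx->job_done ? MALI_TRUE : MALI_FALSE;
}

/* Caller side: block until the flag is set, or give up after 500 ms. */
static void hyp_wait_for_job(_mali_osk_wait_queue_t *queue, struct hyp_wait_ctx *ctx)
{
	_mali_osk_wait_queue_wait_event_timeout(queue, hyp_job_is_done, ctx, 500);
}
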
_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
{
    int i;
    const int request_loop_count = 20;

    MALI_DEBUG_ASSERT_POINTER(core);
    MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
    MALI_ASSERT_GROUP_LOCKED(core->group);

    mali_pp_post_process_job(core);

    mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0);

#if defined(USING_MALI200)

    /* On Mali-200, stop the bus, then do a hard reset of the core */
    mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);

    for (i = 0; i < request_loop_count; i++)
    {
        if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
        {
            break;
        }
        _mali_osk_time_ubusydelay(10);
    }

    if (request_loop_count == i)
    {
        MALI_PRINT_ERROR(("Mali PP: Failed to stop bus for core %s, unable to recover\n", core->hw_core.description));
        return _MALI_OSK_ERR_FAULT ;
    }

    /* The bus was stopped OK, do the hard reset */
    mali_pp_hard_reset(core);

#elif defined(USING_MALI400)

    /* Mali-300 and Mali-400 have a safe reset command which we use */
    mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI400PP_REG_VAL_IRQ_RESET_COMPLETED);
    mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);

    for (i = 0; i < request_loop_count; i++)
    {
        if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED)
        {
            break;
        }
        _mali_osk_time_ubusydelay(10);
    }

    if (request_loop_count == i)
    {
        MALI_DEBUG_PRINT(2, ("Mali PP: Failed to reset core %s, Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
        return _MALI_OSK_ERR_FAULT;
    }
#else
#error "no supported mali core defined"
#endif

    /* Re-enable interrupts */
    mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
    mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);

    return _MALI_OSK_ERR_OK;
}
static void pmm_event_process( void )
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
	_mali_osk_notification_t *msg = NULL;
	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
	mali_pmm_message_t *event;
	u32 process_messages;

	MALI_DEBUG_ASSERT_POINTER(pmm);


	/* Max number of messages to process before exiting - as we shouldn't stay
	 * processing the messages for a long time
	 */
	process_messages = _mali_osk_atomic_read( &(pmm->messages_queued) );

	while( process_messages > 0 )
	{
		/* Check internal message queue first */
		err = _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg );

		if( err != _MALI_OSK_ERR_OK )
		{
			if( pmm->status == MALI_PMM_STATUS_IDLE || pmm->status == MALI_PMM_STATUS_OS_WAITING || pmm->status == MALI_PMM_STATUS_DVFS_PAUSE) 	
			{
				if( pmm->waiting > 0 ) pmm->waiting--;

				/* We aren't busy changing state, so look at real events */
				err = _mali_osk_notification_queue_dequeue( pmm->queue, &msg );

				if( err != _MALI_OSK_ERR_OK )
				{
					pmm->no_events++;
					MALIPMM_DEBUG_PRINT( ("PMM: event_process - No message to process\n") );
					/* Nothing to do - so return */
					return;
				}
				else
				{
					#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
						pmm->messages_received++;
					#endif
				}
			}
			else
			{
				/* Waiting for an internal message */
				pmm->waiting++;
				MALIPMM_DEBUG_PRINT( ("PMM: event_process - Waiting for internal message, messages queued=%d\n", pmm->waiting) );
				return;
			}
		}
		else
		{
			#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
				pmm->imessages_received++;
			#endif
		}

		MALI_DEBUG_ASSERT_POINTER( msg );
		/* Check the message type matches */
		MALI_DEBUG_ASSERT( msg->notification_type == MALI_PMM_NOTIFICATION_TYPE );

		event = msg->result_buffer;

		_mali_osk_atomic_dec( &(pmm->messages_queued) );
		process_messages--;

		#if MALI_PMM_TRACE
			/* Trace before we process the event in case we have an error */
			_mali_pmm_trace_event_message( event, MALI_TRUE );
		#endif
		err = pmm_policy_process( pmm, event );

		
		if( err != _MALI_OSK_ERR_OK )
		{
			MALI_PRINT_ERROR( ("PMM: Error(%d) in policy %d when processing event message with id: %d", 
					err, pmm->policy, event->id) );
		}
		
		/* Delete notification */
		_mali_osk_notification_delete ( msg );

		if( pmm->fatal_power_err )
		{
			/* Nothing good has happened - exit */
			return;
		}

			
		#if MALI_PMM_TRACE
			MALI_PRINT( ("PMM Trace: Event processed, msgs (sent/read) = %d/%d, int msgs (sent/read) = %d/%d, no events = %d, waiting = %d\n", 
					pmm->messages_sent, pmm->messages_received, pmm->imessages_sent, pmm->imessages_received, pmm->no_events, pmm->waiting) );
		#endif
	}

	if( pmm->status == MALI_PMM_STATUS_IDLE && pmm->waiting > 0 )
	{
		/* For events we ignored whilst we were busy, add a new
		 * scheduled time to look at them */
		_mali_osk_irq_schedulework( pmm->irq );
	}
}
u32 mali_pp_core_get_version(struct mali_pp_core *core)
{
    MALI_DEBUG_ASSERT_POINTER(core);
    return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
}
void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
{
	u32 relative_address;
	u32 start_index;
	u32 nr_of_regs;
	u32 *frame_registers = mali_pp_job_get_frame_registers(job);
	u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
	u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
	u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
	u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
	u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);

	MALI_DEBUG_ASSERT_POINTER(core);

	/* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
	relative_address = MALI200_REG_ADDR_RSW;
	start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
	nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);

	mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
	        relative_address, &frame_registers[start_index],
	        nr_of_regs, &mali_frame_registers_reset_values[start_index]);

	/* MALI200_REG_ADDR_STACK_SIZE */
	relative_address = MALI200_REG_ADDR_STACK_SIZE;
	start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);

	mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
	        relative_address, frame_registers[start_index],
	        mali_frame_registers_reset_values[start_index]);

	/* Skip 2 reserved registers */

	/* Write remaining registers */
	relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
	start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
	nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);

	mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
	        relative_address, &frame_registers[start_index],
	        nr_of_regs, &mali_frame_registers_reset_values[start_index]);

	/* Write WBx registers */
	if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
	}

	if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
	}

	if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
	}

	if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
	}
	if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
	}

#ifdef CONFIG_MALI400_HEATMAPS_ENABLED
	if(job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) {
		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1);
		mali_hw_core_register_write_relaxed(&core->hw_core,  MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8);
	}
#endif /* CONFIG_MALI400_HEATMAPS_ENABLED */

	MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));

	/* Barrier to make sure all register writes are finished */
	_mali_osk_write_mem_barrier();

	/* This is the command that starts the core. */
	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);

	/* Barrier to make sure the previous register write is finished */
	_mali_osk_write_mem_barrier();
}
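
The heatmap configuration in the last function packs the horizontal tile count into bits 16..25 of the PERFMON control register (tilesx is masked to 10 bits), sets bit 0 as the enable, and writes the heatmap buffer address with its low three bits cleared (8-byte alignment). A worked example of that packing, using made-up values, is shown below.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tilesx = 80;                /* illustrative: 80 tiles across the frame */
	uint32_t heatmap_mem = 0x1234567Cu;  /* illustrative, deliberately not 8-byte aligned */

	uint32_t contr = ((tilesx & 0x3FFu) << 16) | 1u; /* tile count in bits 16..25, enable in bit 0 */
	uint32_t base  = heatmap_mem & 0xFFFFFFF8u;      /* force 8-byte alignment of the buffer base */

	printf("PERFMON_CONTR = 0x%08X\n", contr); /* prints 0x00500001 */
	printf("PERFMON_BASE  = 0x%08X\n", base);  /* prints 0x12345678 */
	return 0;
}
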