/*
 * Resume a GP job that raised a PLBU out-of-memory interrupt, handing it a
 * newly allocated heap region [start_addr, end_addr).  The resume is only
 * attempted if the OUT_OF_MEM bit is still pending in RAWSTAT; otherwise the
 * core was reset in the meantime and the response is silently dropped.
 */
void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
{
	u32 irq_readout;

	MALI_DEBUG_ASSERT_POINTER(core);

	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
	if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
	{
		/* Acknowledge the out-of-mem (and any latched hang) interrupt first */
		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
		/* Heap bounds use relaxed writes; the non-relaxed CMD write plus the
		 * explicit barrier below completes the sequence */
		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);

		MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));

		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
		_mali_osk_write_mem_barrier();
	}
	/*
	 * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
	 * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
	 */
}
static void mali_mmu_enable_paging(struct mali_mmu_core *mmu) { int i; mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING); for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) { if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) { break; } } if (MALI_REG_POLL_COUNT_FAST == i) { MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); } }
/*
 * Flush the MMU TLB by issuing ZAP_CACHE while the MMU is stalled.
 * Returns MALI_FALSE when the stall could not be taken (MMU reported a page
 * fault); in that case the stall is not released here.
 */
mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
{
	const mali_bool stalled = mali_mmu_enable_stall(mmu);

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);

	if (MALI_FALSE == stalled)
	{
		/* MMU is in page fault state; disabling the stall is not possible */
		return MALI_FALSE;
	}

	mali_mmu_disable_stall(mmu);
	return MALI_TRUE;
}
/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */ static _mali_osk_errcode_t mali_mmu_probe_ack(void *data) { struct mali_mmu_core *mmu = (struct mali_mmu_core *)data; u32 int_stat; int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS); MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat)); if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) { MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n")); mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT); } else { MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n")); } if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) { MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n")); mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR); } else { MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n")); } if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) == (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) { return _MALI_OSK_ERR_OK; } return _MALI_OSK_ERR_FAULT; }
/*
 * Bring the L2 cache to a known state: invalidate all lines, enable access
 * and read-allocate, program the max-reads limit, and re-program any active
 * performance counter sources (protected by counter_lock).
 */
void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
	/* Invalidate cache (just to keep it in a known state at startup) */
	mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);

	/* Enable cache */
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);

	/* Restart any performance counters (if enabled) */
	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
	/* MALI_HW_CORE_NO_COUNTER marks a disabled source; only re-program active ones */
	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
	}
	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
	}
	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
}
_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core) { /* Bus must be stopped before calling this function */ const u32 reset_invalid_value = 0xC0FFE000; const u32 reset_check_value = 0xC01A0000; int i; MALI_DEBUG_ASSERT_POINTER(core); MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description)); /* Set register to a bogus value. The register will be used to detect when reset is complete */ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value); mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE); /* Force core to reset */ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET); /* Wait for reset to be complete */ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value); if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW)) { break; } } if (MALI_REG_POLL_COUNT_FAST == i) { MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n")); } mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */ /* Re-enable interrupts */ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL); mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); return _MALI_OSK_ERR_OK; }
/*
 * Full MMU reset: stall, raw-reset, re-enable page fault / bus error
 * interrupts, activate the empty page directory, and re-enable paging.
 * NOTE(review): if the stall fails err is set to _MALI_OSK_ERR_BUSY, but a
 * subsequent successful raw reset overwrites it with _MALI_OSK_ERR_OK — so
 * the BUSY code only survives when the raw reset also fails.
 */
_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	mali_bool stall_success;

	MALI_DEBUG_ASSERT_POINTER(mmu);

	stall_success = mali_mmu_enable_stall(mmu);
	if (!stall_success)
	{
		err = _MALI_OSK_ERR_BUSY;
	}

	MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));

	if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu))
	{
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
		/* no session is active, so just activate the empty page directory */
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys);
		mali_mmu_enable_paging(mmu);
		err = _MALI_OSK_ERR_OK;
	}
	mali_mmu_disable_stall(mmu);

	return err;
}
static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data) { struct mali_pp_core *core = (struct mali_pp_core *)data; u32 irq_readout; irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS); if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout) { mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG); _mali_osk_mem_barrier(); return _MALI_OSK_ERR_OK; } return _MALI_OSK_ERR_FAULT; }
/*
 * Request the MMU to stall so its registers can be modified safely.
 * Returns MALI_TRUE when the stall is in effect (or implicit because paging
 * is disabled), MALI_FALSE when the MMU is in (or enters) page fault state
 * or the stall never takes effect within the poll budget.
 */
mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
{
	int i;
	u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);

	if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED) )
	{
		MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enebled.\n"));
		return MALI_TRUE;
	}

	if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
	{
		MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
		return MALI_FALSE;
	}

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);

	/* Poll until the stall (or a page fault) is reported */
	for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; ++i)
	{
		mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
		/* Done when STALL_ACTIVE or PAGE_FAULT_ACTIVE is set and
		 * STALL_NOT_ACTIVE is clear ('&' binds tighter than '&&') */
		if (mmu_status & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE)))
		{
			break;
		}
		/* Give up polling if paging was disabled underneath us */
		if (0 == (mmu_status & ( MALI_MMU_STATUS_BIT_PAGING_ENABLED )))
		{
			break;
		}
	}

	if (MALI_REG_POLL_COUNT_SLOW == i)
	{
		MALI_PRINT_ERROR(("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
		return MALI_FALSE;
	}

	/* A page fault may have raced the stall request; report failure then */
	if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
	{
		MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it has a pagefault.\n"));
		return MALI_FALSE;
	}

	return MALI_TRUE;
}
_mali_osk_errcode_t mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group) { _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT; u32 wval, rval; struct mali_pp_core *pp_core = mali_group_get_pp_core(group); /* find the core id and set the mask */ if (NULL != pp_core) { wval = mali_pp_core_get_id(pp_core); rval = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK); mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, (wval << 0x1) | rval); err = _MALI_OSK_ERR_OK; } return err; }
void mali_mmu_disable_stall(struct mali_mmu_core *mmu) { const int max_loop_count = 100; const int delay_in_usecs = 1; int i; u32 mmu_status; /* There are no group when it is called from mali_mmu_create */ if ( mmu->group ) MALI_ASSERT_GROUP_LOCKED(mmu->group); mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED )) { MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n")); return; } if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n")); return; } mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL); for (i = 0; i < max_loop_count; ++i) { u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( 0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) ) { break; } if ( status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) { break; } if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED )) { break; } _mali_osk_time_ubusydelay(delay_in_usecs); } if (max_loop_count == i) MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); }
/*
 * Write a command value to an L2 cache register, serialized by command_lock.
 * Waits (bounded busy-poll) for the command interface to go idle first.
 * Returns _MALI_OSK_ERR_BUSY if the L2 is paused, _MALI_OSK_ERR_FAULT if the
 * interface never becomes idle, otherwise success (via MALI_SUCCESS).
 */
static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
{
	int i = 0;
	const int loop_count = 100000;

	/*
	 * Grab lock in order to send commands to the L2 cache in a serialized fashion.
	 * The L2 cache will ignore commands if it is busy.
	 */
	_mali_osk_lock_wait(cache->command_lock, _MALI_OSK_LOCKMODE_RW);

	if (MALI_L2_PAUSE == cache->mali_l2_status)
	{
		/* L2 is paused (e.g. around power transitions); refuse the command */
		_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
		MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for L2 come back\n"));
		MALI_ERROR( _MALI_OSK_ERR_BUSY );
	}

	/* First, wait for L2 cache command handler to go idle */
	for (i = 0; i < loop_count; i++)
	{
		if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
		{
			break;
		}
	}

	if (i == loop_count)
	{
		/* Poll budget exhausted; drop the lock before reporting failure */
		_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
		MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
		MALI_ERROR( _MALI_OSK_ERR_FAULT );
	}

	/* then issue the command */
	mali_hw_core_register_write(&cache->hw_core, reg, val);

	_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);

	MALI_SUCCESS;
}
static void mali_mmu_enable_paging(struct mali_mmu_core *mmu) { const int max_loop_count = 100; const int delay_in_usecs = 1; int i; mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING); for (i = 0; i < max_loop_count; ++i) { if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) { break; } _mali_osk_time_ubusydelay(delay_in_usecs); } if (max_loop_count == i) { MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); } }
/*
 * Select the event source for L2 performance counter 1.  A counter value of
 * MALI_HW_CORE_NO_COUNTER disables the source (register written as 0).
 * Always returns MALI_TRUE.
 */
mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
{
	u32 reg_val;

	MALI_DEBUG_ASSERT_POINTER(cache);
	MALI_DEBUG_ASSERT(counter < (1 << 7)); /* the possible values are 0-127 */

	reg_val = (MALI_HW_CORE_NO_COUNTER == counter) ? 0 : counter;

	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
	cache->counter_src1 = counter;
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, reg_val);
	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	return MALI_TRUE;
}
/*
 * Poll the PMU interrupt RAWSTAT until the command-complete IRQ bit is set
 * or the poll budget runs out, then acknowledge the interrupt.
 * Returns _MALI_OSK_ERR_TIMEOUT on exhaustion, _MALI_OSK_ERR_OK otherwise.
 * NOTE(review): the DEBUG_ASSERT fires on timeout in debug builds before the
 * graceful _MALI_OSK_ERR_TIMEOUT return is reached — confirm this is the
 * intended debug-build behaviour.
 */
static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(struct mali_pmu_core *pmu)
{
	u32 rawstat;
	u32 timeout = MALI_REG_POLL_COUNT_SLOW;

	MALI_DEBUG_ASSERT(pmu);

	/* Wait for the command to complete */
	do
	{
		rawstat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT);
		--timeout;
	} while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);

	MALI_DEBUG_ASSERT(0 < timeout);
	if (0 == timeout)
	{
		return _MALI_OSK_ERR_TIMEOUT;
	}

	/* Acknowledge the command-complete interrupt */
	mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);

	return _MALI_OSK_ERR_OK;
}
/* ------------- interrupt handling below ------------------ */

/*
 * GP upper-half (hard) IRQ handler.  If any interrupt bit is pending, mask
 * all GP interrupts and defer the real handling to the bottom half.
 * Returns _MALI_OSK_ERR_OK if the IRQ was ours, _MALI_OSK_ERR_FAULT if not
 * (allows shared-IRQ dispatch to try the next handler).
 */
static _mali_osk_errcode_t mali_gp_upper_half(void *data)
{
	struct mali_gp_core *core = (struct mali_gp_core *)data;
	u32 irq_readout;

	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
	if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout)
	{
		/* Mask out all IRQs from this core until IRQ is handled */
		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
#endif
		/* We do need to handle this in a bottom half */
		_mali_osk_irq_schedulework(core->irq);
		return _MALI_OSK_ERR_OK;
	}
	return _MALI_OSK_ERR_FAULT;
}
/*
 * Select the event source for L2 performance counter 1 (power-state aware
 * variant).  The cached counter_src1 is always updated, but the hardware
 * register is only written while the core is powered and the L2 is not
 * paused.  Always returns MALI_TRUE.
 */
mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
{
	u32 reg_val = 0; /* disabled source */
	mali_bool powered;

	MALI_DEBUG_ASSERT_POINTER(cache);

	powered = mali_l2_cache_lock_power_state(cache);
	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	if (MALI_L2_PAUSE == cache->mali_l2_status)
	{
		/* L2 is paused; skip the update entirely */
		_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
		mali_l2_cache_unlock_power_state(cache);
		return MALI_TRUE;
	}

	cache->counter_src1 = counter;
	if (MALI_HW_CORE_NO_COUNTER != counter)
	{
		reg_val = counter;
	}

	/* Only touch the hardware while the core is powered */
	if (MALI_TRUE == powered)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, reg_val);
	}

	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
	mali_l2_cache_unlock_power_state(cache);
	return MALI_TRUE;
}
/*
 * Enable (val == 1) or disable (val == 0) a single PP core in the DLBU's
 * PP_ENABLE_MASK.  pp_core_enable is the core index; it must be below the
 * global PP core count.  The updated mask is written to hardware and cached
 * in dlbu->pp_cores_mask.  Returns _MALI_OSK_ERR_FAULT on invalid input.
 */
_mali_osk_errcode_t mali_dlbu_enable_pp_core(struct mali_dlbu_core *dlbu, u32 pp_core_enable, u32 val)
{
	u32 mask = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK);

	if ((pp_core_enable < mali_pp_get_glob_num_pp_cores()) && ((0 == val) || (1 == val))) /* check for valid input parameters */
	{
		/* BUG FIX (two defects in the original):
		 * 1) "pp_core_enable <<= 0x1" mutated the parameter and shifted the
		 *    core index left by one, instead of forming the per-core bit
		 *    (0x1 << pp_core_enable).
		 * 2) The disable path computed (wval & ~bit) but then did
		 *    "wval |= val", OR-ing the original mask back in — so disabling
		 *    a core was a no-op.  Set/clear the bit directly instead. */
		if (1 == val)
		{
			mask |= (0x1 << pp_core_enable);
		}
		else
		{
			mask &= ~(0x1 << pp_core_enable);
		}

		mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, mask);
		dlbu->pp_cores_mask = mask;

		return _MALI_OSK_ERR_OK;
	}
	return _MALI_OSK_ERR_FAULT;
}
void mali_mmu_disable_stall(struct mali_mmu_core *mmu) { int i; u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED )) { MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n")); return; } if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n")); return; } mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL); for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) { u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( 0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) ) { break; } if ( status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) { break; } if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED )) { break; } } if (MALI_REG_POLL_COUNT_FAST == i) MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); }
/*
 * Hard reset of a PP core (group-locked variant).  Post-processes the
 * current job first, then uses the WRITE_BOUNDARY_LOW register as a scratch
 * location to detect reset completion: write a bogus value, force the reset,
 * and poll (with 10 us delays) until a fresh write sticks.
 */
_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
{
	/* Bus must be stopped before calling this function */
	const int reset_finished_loop_count = 15;
	const u32 reset_invalid_value = 0xC0FFE000;
	const u32 reset_check_value = 0xC01A0000;
	int i;

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
	MALI_ASSERT_GROUP_LOCKED(core->group);

	mali_pp_post_process_job(core); /* @@@@?is there some cases where it is unsafe to post process the job here? */

	/* Set register to a bogus value. The register will be used to detect when reset is complete */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);

	/* Force core to reset */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);

	/* Wait for reset to be complete */
	for (i = 0; i < reset_finished_loop_count; i++)
	{
		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
		if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW))
		{
			break;
		}
		_mali_osk_time_ubusydelay(10);
	}

	if (i == reset_finished_loop_count)
	{
		MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
	}

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */

	/* Re-enable interrupts */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);

	return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core) { const int reset_finished_loop_count = 15; const u32 reset_invalid_value = 0xC0FFE000; const u32 reset_check_value = 0xC01A0000; int i; MALI_DEBUG_ASSERT_POINTER(core); MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description)); MALI_ASSERT_GROUP_LOCKED(core->group); mali_pp_post_process_job(core); mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value); mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET); for (i = 0; i < reset_finished_loop_count; i++) { mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value); if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW)) { break; } _mali_osk_time_ubusydelay(10); } if (i == reset_finished_loop_count) { MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n")); } mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL); mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); return _MALI_OSK_ERR_OK; }
_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu) { _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT; MALI_DEBUG_ASSERT_POINTER(dlbu); MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description)); mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, mali_dlbu_phys_addr); mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, 0x00); mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_FB_DIM, 0x00); mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_CONF, 0x00); mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_START_TILE_POS, 0x00); mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask); err = _MALI_OSK_ERR_OK; return err; }
void mali_gp_hard_reset(struct mali_gp_core *core) { const int reset_finished_loop_count = 15; const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW; const u32 reset_invalid_value = 0xC0FFE000; const u32 reset_check_value = 0xC01A0000; const u32 reset_default_value = 0; int i; MALI_DEBUG_ASSERT_POINTER(core); MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description)); MALI_ASSERT_GROUP_LOCKED(core->group); mali_gp_post_process_job(core, MALI_FALSE); mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET); for (i = 0; i < reset_finished_loop_count; i++) { mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value); if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) { break; } } if (i == reset_finished_loop_count) { MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n")); } mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); }
/*
 * Request the PP core to stop its bus.  Fire-and-forget: the stop-bus
 * command is issued without waiting for completion.
 */
void mali_pp_stop_bus(struct mali_pp_core *core)
{
	MALI_DEBUG_ASSERT_POINTER(core);

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
}
/* Push the cached PP core enable mask out to the DLBU hardware register. */
void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu)
{
	MALI_DEBUG_ASSERT_POINTER(dlbu);

	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
}
/*
 * Reset the PMU: mask all PMU interrupts so status is polled instead of
 * delivered via IRQ.  Always returns _MALI_OSK_ERR_OK.
 */
_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
{
	/* Don't use interrupts - just poll status */
	mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);

	return _MALI_OSK_ERR_OK;
}
/*
 * Invalidate (zap) the single TLB line covering the given Mali virtual
 * address.  Caller must hold the group lock.
 * NOTE(review): the value written is MALI_MMU_PDE_ENTRY(mali_address) —
 * presumably the page-directory index derived from the address; confirm
 * against the MMU register specification.
 */
void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
{
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
}
/* Program the tile-list virtual base address register of the DLBU. */
void mali_dlbu_set_tllist_base_address(struct mali_dlbu_core *dlbu, u32 val)
{
	const u32 vbase = val;

	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, vbase);
}
/* Is called when we want the mmu to give an interrupt */ static void mali_mmu_probe_trigger(void *data) { struct mali_mmu_core *mmu = (struct mali_mmu_core *)data; mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR); }
void mali_dlbu_disable_all_pp_cores(struct mali_dlbu_core *dlbu) { mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, 0x0); }