/*************************************************************************
 * FUNCTION
 *  RTFSYSFreeMutex
 *
 * DESCRIPTION
 *  This function releases the filesystem mutex.
 *
 * PARAMETERS
 *
 * RETURNS
 *
 * GLOBALS AFFECTED
 *
 *************************************************************************/
void RTFAPI RTFSYSFreeMutex(RTFMutex * Mutex)
{
    kal_taskid current_task;

#ifdef __P_PROPRIETARY_COPYRIGHT__
    /* under construction !*/
    /* under construction !*/
#endif

    current_task = kal_get_current_thread_ID();

    // task not ready
    if (current_task == KAL_NILTASK_ID)
        return;
    /*
     * Bypass all FS lock operations in exception mode to avoid unexpected suspend behavior. (W10.37)
     *
     * NOTE 1. If a "HISR" triggers an exception, bypass the lock operation to avoid another fatal error,
     *         because only a "task" can take a semaphore (kal_take_sem) successfully.
     *
     * NOTE 2. A HISR should NOT use FS in normal mode. It only reaches here when an exception is triggered in a HISR.
     */
    else if (INT_QueryExceptionStatus() == KAL_TRUE)
        return;

#ifndef WIN32
    if (INT_QueryExceptionStatus() == KAL_FALSE)    /* Should only be possible for exception cases */
        fs_assert_local(Mutex->rtf_sem_owner == current_task);
#else
    fs_assert_local(Mutex->rtf_sem_owner == current_task);
#endif

    Mutex->rtf_lock_count--;

    if (Mutex->rtf_lock_count == 0)
    {
        Mutex->rtf_sem_owner = KAL_NILTASK_ID;
        /* Solve MMI hang for waiting lock, Karen Hsu, 2004/04/23, ADD START */
        Mutex->DeviceNum_1 = 0;
        Mutex->DeviceNum_2 = 0;
        /* Solve MMI hang for waiting lock, Karen Hsu, 2004/04/23, ADD END */
        kal_give_sem(Mutex->rtf_sem);
#if defined(__AUDIO_DSP_LOWPOWER__)
        AUDMA_UNLOCK(AUDMA_ID_FS);
#endif
    }
}
void nvram_util_give_mutex(kal_mutexid ext_mutex_id_ptr)
{
    if (!INT_QueryExceptionStatus() && !kal_query_systemInit() && ext_mutex_id_ptr)
    {
        kal_give_mutex(ext_mutex_id_ptr);
    }
}
void idp_internal_crz_usel_dsel(kal_uint32 scenario, kal_uint32 source_w, kal_uint32 source_h,
                                kal_uint32 target_w, kal_uint32 target_h,
                                kal_uint32* usel, kal_uint32* dsel)
{
    idp_custom_crz_usel_dsel(((CUSTOM_SCENARIO_ID)scenario), (source_w), (source_h), (target_w), (target_h), (usel), (dsel));

    if ((INT_QueryExceptionStatus() == KAL_FALSE) &&
        (kal_if_hisr() == KAL_FALSE) &&
        (kal_if_lisr() == KAL_FALSE))
    {
        kal_trace(TRACE_INFO, IDP_CUSTOM_CRZ_U_D, (scenario), (source_w), (source_h), (target_w), (target_h), *(usel), *(dsel));
    }
}
/*************************************************************************
 * FUNCTION
 *  RTFSYSLockMutex
 *
 * DESCRIPTION
 *  This function takes the filesystem mutex, waiting up to the given
 *  timeout period.
 *
 * PARAMETERS
 *
 * RETURNS
 *
 * GLOBALS AFFECTED
 *
 *************************************************************************/
int RTFAPI RTFSYSLockMutex(RTFMutex * Mutex, UINT Timeout)
{
    /* XXX!!! Timeout lock is not supported by Nucleus Plus => wrong */
    kal_uint32 lr;
    kal_taskid current_task;
    int ret_val = RTF_NO_ERROR;

    FS_GET_RETURN_ADDRESS(lr);

#ifdef __P_PROPRIETARY_COPYRIGHT__
    /* under construction !*/
    /* under construction !*/
#endif

    current_task = kal_get_current_thread_ID();

    // task not ready
    if (current_task == KAL_NILTASK_ID)
        return RTF_NO_ERROR;
    /*
     * Bypass all FS lock operations in exception mode to avoid unexpected suspend behavior. (W10.37)
     *
     * NOTE 1. If a "HISR" triggers an exception, bypass the lock operation to avoid another fatal error,
     *         because only a "task" can take a semaphore (kal_take_sem) successfully.
     *
     * NOTE 2. A HISR should NOT use FS in normal mode. It only reaches here when an exception is triggered in a HISR.
     */
    else if (INT_QueryExceptionStatus() == KAL_TRUE)
        return RTF_NO_ERROR;

    /* Nested lock by the owning task: just bump the lock count */
    if (Mutex->rtf_sem_owner == current_task)
    {
        Mutex->rtf_lock_count++;
        return (ret_val);
    }

#if defined(__AUDIO_DSP_LOWPOWER__)
    AUDMA_LOCK(AUDMA_ID_FS);
#endif

    ret_val = ((kal_take_sem(Mutex->rtf_sem, (kal_wait_mode)Timeout) == KAL_SUCCESS) ? RTF_NO_ERROR : MT_LOCK_MUTEX_FAIL);
    if (ret_val != RTF_NO_ERROR)
    {
#if defined(__AUDIO_DSP_LOWPOWER__)
        AUDMA_UNLOCK(AUDMA_ID_FS);
#endif
        return ret_val;
    }

    /* Update owner and lock count */
    Mutex->rtf_sem_owner = current_task;
    Mutex->rtf_sem_owner_lr = lr;
    Mutex->rtf_lock_count = 1;

    return (ret_val);
}
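/*
 * Usage sketch (illustrative only, not part of the original source): how a
 * hypothetical FS routine might pair RTFSYSLockMutex()/RTFSYSFreeMutex().
 * Assumes an RTFMutex named fs_mutex created elsewhere and a wait-forever
 * timeout value (KAL_INFINITE_WAIT is assumed here). Recursive locking by the
 * same task only increments rtf_lock_count, so every successful lock must be
 * balanced by one RTFSYSFreeMutex() call.
 */
static int fs_do_protected_work(RTFMutex *fs_mutex)
{
    int ret = RTFSYSLockMutex(fs_mutex, KAL_INFINITE_WAIT);

    if (ret != RTF_NO_ERROR)
    {
        return ret;                 /* semaphore could not be taken */
    }

    /* ... critical section: the calling task now owns fs_mutex ... */

    RTFSYSFreeMutex(fs_mutex);      /* gives back the semaphore when the count drops to 0 */
    return RTF_NO_ERROR;
}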
/*************************************************************************
 * FUNCTION
 *  mdci_read_and_wait
 *
 * DESCRIPTION
 *  This function reads data through either a mailbox channel or a stream
 *  channel. ONLY USED WHEN NO INTERRUPT is available (init stage or
 *  exception mode).
 *
 * PARAMETERS
 *  channel - logical channel
 *  buff    - pointer to channel buffer
 *  ticks   - number of busy-wait ticks before giving up
 *
 * RETURNS
 *  MDIF error code.
 *
 *************************************************************************/
kal_uint32 mdci_read_and_wait(MDCI_CHANNEL_T channel, MDCI_BUFF_T *buff, kal_uint32 ticks)
{
    MDCI_RETURNVAL_T ret;
    MDCI_BUFF_T *chdata;
    kal_uint32 index, cur_phych;
    volatile kal_uint32 reserved_value = 0;
    volatile kal_uint32 chk_channel = 0;

    /* only allowed in the init stage or in exception mode */
    if (KAL_TRUE != kal_query_systemInit() && KAL_TRUE != INT_QueryExceptionStatus())
    {
        return MDCI_API_INVALID;
    }

    /* check parameters */
    if (channel >= MDCI_MAX_CHANNEL)
        return MDCI_INVALID_PARAM;
    if (buff == NULL)
        return MDCI_INVALID_PARAM;

    /* check state */
    ret = MDCI_NOT_RECEIVE;

    while (ticks > 0)
    {
        for (index = mdci_readindx_for_MD; index < mdci_readindx_for_MD + MDIF_MAX_PHY; index++)
        {
            cur_phych = index % MDIF_MAX_PHY;
            chdata = (MDCI_BUFF_T *)MDIF_RXCHDATA + cur_phych;
            reserved_value = *(volatile kal_uint32 *)(&(chdata->reserved));
            chk_channel    = *(volatile kal_uint32 *)(&(chdata->channel));
            if (reserved_value == MDCI_EXCEPTION_CHECK_ID && chk_channel == channel)
            {
                kal_mem_cpy(buff, chdata, sizeof(MDCI_BUFF_T));
                mdci_readindx_for_MD = cur_phych;
                *(volatile kal_uint32 *)(&(chdata->reserved)) = 0;
                return MDCI_SUCCESS;
            }
        }
        ust_busy_wait(1);
        ticks--;
    }

    return ret;
}
void ccci_exception_info_passed_pre(void)
{
#if defined(__MODEM_CCCI_EXIST__) && !defined(__MODEM_CARD__)
    CCCI_BUFF_T *buff;
    qbm_gpd *p_first_gpd, *p_last_gpd;
    kal_uint32 gpd_num;

    // ensure we are in the exception state
    if (INT_QueryExceptionStatus() == KAL_FALSE)
        return;

    //- Already init in ccci_exception_handshake
    //- ccci_init(CCCI_CONTROL_CHANNEL, ccci_except_ack);
    //- ccci_init(CCCI_CONTROL_CHANNEL_ACK, ccci_except_ack);

    buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
    buff->data[0]  = MD_EX_MAGIC;
    buff->data[1]  = CCMSG_ID_EXCEPTION_REC_OK;
    buff->channel  = CCCI_CONTROL_CHANNEL;
    buff->reserved = MD_EX_REC_OK_CHK_ID;
    ccci_debug_add_seq(buff, CCCI_DEBUG_ASSERT_BIT);    // add ccci seq

    kal_mem_cpy((void*)(buff + 1), ex_log_ptr, sizeof(EX_LOG_T));

    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx, sizeof(CCCI_BUFF_T) + sizeof(EX_LOG_T));
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(CCCI_BUFF_T) + sizeof(EX_LOG_T));
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx);
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx->p_data_tbd);
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(buff, sizeof(CCCI_BUFF_T) + sizeof(EX_LOG_T));

    // TODO: Need to revise the API, shall use exception API (after channel reset flow is done)
#ifdef SWITCH_TO_EXCEPTION_IO
    p_first_gpd = p_last_gpd = ccci_except_polling_gpd_tx;
    ccci_except_set_gpd(CCCI_CONTROL_CHANNEL, p_first_gpd, p_last_gpd);
    gpd_num = 0;
    do {
        ccci_except_hif_st(CCCI_CONTROL_CHANNEL);
        ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
    } while (0 == gpd_num);
    ex_set_step_logging(EX_AUTO_STEP);  //0x45
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
#else
    ccci_exception_info_write_result = ccci_polling_io(CCCI_CONTROL_CHANNEL, ccci_except_polling_gpd_tx, KAL_TRUE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
#endif

    ccci_exception_state = CCCI_EXPT_INFO_PASS_PRE_ST;

#ifdef WDT_ISR_TEST
    wdt_test();
#endif
#endif
}
void drv_trace4(trace_class_enum trc_class, kal_uint32 msg_index, const char *arg_type,
                kal_uint32 data1, kal_uint32 data2, kal_uint32 data3, kal_uint32 data4)
{
    if (INT_QueryExceptionStatus())
        return;
    if (kal_if_lisr() == KAL_TRUE)
        return;

    if (kal_if_hisr())
    {
        kal_dev_trace(trc_class, msg_index, arg_type, data1, data2, data3, data4);
    }
    else
    {
        kal_trace(trc_class, msg_index, arg_type, data1, data2, data3, data4);
    }
}
/*************************************************************************
 * FUNCTION
 *  mdci_is_chanel_need_check_owner
 *
 * DESCRIPTION
 *  This function returns whether the owner of the given channel needs to
 *  be checked.
 *
 * PARAMETERS
 *  channel - logical channel
 *
 * RETURNS
 *  KAL_TRUE if the owner must be checked, KAL_FALSE otherwise.
 *
 *************************************************************************/
static kal_bool mdci_is_chanel_need_check_owner(MDCI_CHANNEL_T channel)
{
#ifndef __IVP__
    if (INT_QueryExceptionStatus() == KAL_TRUE || kal_query_systemInit() == KAL_TRUE)
    {
        return KAL_FALSE;
    }
    else
#endif /* __IVP__ */
    {
        /* Note: owner checking is currently disabled; both paths return KAL_FALSE. */
        return KAL_FALSE;
    }
}
G2D_STATUS_ENUM g2dFontStart(G2D_HANDLE_STRUCT *handle)
{
    switch (handle->type)
    {
        case G2D_CODEC_TYPE_HW:
        {
#if defined(__MTK_TARGET__)
            {
                G2D_CANVAS_INFO_STRUCT *srcCanvas = &(handle->fontFunc.srcCanvas);
                G2D_CANVAS_INFO_STRUCT *dstCanvas = &(handle->dstCanvas);

                ASSERT(INT_QueryIsNonCachedRAM((kal_uint32)(srcCanvas->RGBBufferAddr), srcCanvas->RGBBufferSize));
                ASSERT(INT_QueryIsNonCachedRAM((kal_uint32)(dstCanvas->RGBBufferAddr), dstCanvas->RGBBufferSize));
            }
#endif

            g2d_drv_font_start(handle);

#if defined(__MTK_TARGET__)
            if (KAL_TRUE != INT_QueryExceptionStatus())
            {
                handle->g2dState = G2D_STATE_BUSY;
            }
            else
#endif
            {
                handle->g2dState = G2D_STATE_BUSY;
            }
        }
        break;

        default:
            ASSERT(0);
            break;
    }

    return G2D_STATUS_OK;
}
G2D_STATUS_ENUM g2dReleaseHandle(G2D_HANDLE_STRUCT *handle)
{
    switch (handle->type)
    {
        case G2D_CODEC_TYPE_HW:
#if (defined(G2D_HW_SUPPORT) || defined(G2D_HW_SHARE_WITH_GOVL_SUPPORT))
        {
#if defined(__DYNAMIC_SWITCH_CACHEABILITY__) && defined(__MTK_TARGET__)
            {
                kal_int32 colorSize, colorSizeMax;
                G2D_CANVAS_INFO_STRUCT *dstCanvas = &(handle->dstCanvas);

                if (GFX_CACHE_SWITCH_BEHAVIOR_TRY_SWITCH == handle->dstCanvasCacheSwitchBehavior)
                {
                    if (G2D_MEMORY_TYPE_SWITCH_TO_NONCACHEABLE == (handle->dstCanvasType))
                    {
                        G2D_CANVAS_INFO_STRUCT *dstCanvas = &(handle->dstCanvas);
                        drv_gfx_dynamic_switch_cacheable(&(dstCanvas->RGBBufferAddr), dstCanvas->RGBBufferSize, KAL_FALSE);
                        handle->dstCanvasType = G2D_MEMORY_TYPE_NO_SWITCH;
                    }
                }

                if (GFX_CACHE_SWITCH_BEHAVIOR_TRY_SWITCH == handle->srcCanvasCacheSwitchBehavior)
                {
#if defined(G2D_HW_SUPPORT)
                    if (G2D_MEMORY_TYPE_SWITCH_TO_NONCACHEABLE == (handle->srcBitbltRGBCanvasType))
                    {
                        G2D_CANVAS_INFO_STRUCT *srcCanvas = &(handle->bitbltFunc.srcCanvas);
                        drv_gfx_dynamic_switch_cacheable(&(srcCanvas->RGBBufferAddr), srcCanvas->RGBBufferSize, KAL_FALSE);
                        handle->srcBitbltRGBCanvasType = G2D_MEMORY_TYPE_NO_SWITCH;
                    }
#endif
                    if (G2D_MEMORY_TYPE_SWITCH_TO_NONCACHEABLE == (handle->srcRectFillOverlayRGBCanvasType))
                    {
                        G2D_CANVAS_INFO_STRUCT *srcCanvas = &(handle->rectFillFunc.srcCanvas);
                        drv_gfx_dynamic_switch_cacheable(&(srcCanvas->RGBBufferAddr), srcCanvas->RGBBufferSize, KAL_FALSE);
                        handle->srcRectFillOverlayRGBCanvasType = G2D_MEMORY_TYPE_NO_SWITCH;
                    }

                    if (G2D_MEMORY_TYPE_SWITCH_TO_NONCACHEABLE == (handle->srcLtRGBCanvasType))
                    {
                        G2D_CANVAS_INFO_STRUCT *srcCanvas = &(handle->ltFunc.srcCanvas);
                        drv_gfx_dynamic_switch_cacheable(&(srcCanvas->RGBBufferAddr), srcCanvas->RGBBufferSize, KAL_FALSE);
                        handle->srcLtRGBCanvasType = G2D_MEMORY_TYPE_NO_SWITCH;
                    }

                    if (G2D_COLOR_FORMAT_UYVY422 == handle->ltFunc.srcCanvas.colorFormat)
                    {
                        colorSizeMax = 1;
                    }
                    else
                    {
                        colorSizeMax = 3;
                    }

                    for (colorSize = 0; colorSize < colorSizeMax; colorSize++)
                    {
                        if (G2D_MEMORY_TYPE_SWITCH_TO_NONCACHEABLE == (handle->srcLtYUVCanvasType[colorSize]))
                        {
                            G2D_CANVAS_INFO_STRUCT *srcCanvas = &(handle->ltFunc.srcCanvas);

                            if (0 != (srcCanvas->YUVBufferSize[colorSize]))
                            {
                                drv_gfx_dynamic_switch_cacheable(&(srcCanvas->YUVBufferAddr[colorSize]), srcCanvas->YUVBufferSize[colorSize], KAL_FALSE);
                                handle->srcLtYUVCanvasType[colorSize] = G2D_MEMORY_TYPE_NO_SWITCH;
                            }
                        }
                    }
                }
            }
#endif

#if defined(__MTK_TARGET__)
            if (g2dGetStatus(handle))
                return G2D_STATUS_BUSY;

            g2d_drv_power_off();

            if (KAL_TRUE != INT_QueryExceptionStatus())
            {
                handle->g2dState = G2D_STATE_IDLE;
                g2d_mutex_unlock(&g2d_mutex);
            }
            else
            {
                handle->g2dState = G2D_STATE_IDLE;
            }
            g2d_mutex_wakeup_wait_task();
#else
            handle->g2dState = G2D_STATE_IDLE;
#endif
        }
        break;
#else
        return G2D_STATUS_NOT_SUPPORT;
#endif /// G2D_HW_SUPPORT

#if !defined(G2D_FPGA)
        case G2D_CODEC_TYPE_SW:
            free(handle);
            break;
#endif

        default:
            ASSERT(0);
            break;
    }

    return G2D_STATUS_OK;
}
G2D_STATUS_ENUM g2dGetHandle(G2D_HANDLE_STRUCT **handlePtr, G2D_CODEC_TYPE_ENUM codecType, G2D_GET_HANDLE_MODE_ENUM handleMode)
{
    G2D_HANDLE_STRUCT *handle = NULL;

    switch (codecType)
    {
        case G2D_CODEC_TYPE_HW:
#if (defined(G2D_HW_SUPPORT) || defined(G2D_HW_SHARE_WITH_GOVL_SUPPORT))
        {
            *handlePtr = &_g2d_hw_handle;
            handle = *handlePtr;

#if defined(__MTK_TARGET__)
            if (G2D_GET_HANDLE_MODE_BLOCKING_UNTIL_GET_HANDLE == handleMode)
            {
                if (KAL_TRUE != INT_QueryExceptionStatus())
                {
                    while (1)
                    {
                        g2d_mutex_lock(&g2d_mutex);

                        if (G2D_STATE_IDLE != (handle->g2dState))
                        {
                            ASSERT(0);
                        }
                        else
                        {
                            memset(handle, 0, sizeof(G2D_HANDLE_STRUCT));
                            handle->g2dState = G2D_STATE_SETTING_PARAMETER;
                            break;
                        }
                    }
                }
                else
                {
                    while (1)
                    {
                        if (G2D_STATE_IDLE != (handle->g2dState))
                        {
                            kal_sleep_task(1);  // 4.615 ms
                            continue;
                        }
                        else
                        {
                            break;
                        }
                    }
                    memset(handle, 0, sizeof(G2D_HANDLE_STRUCT));
                    handle->g2dState = G2D_STATE_SETTING_PARAMETER;
                }
                g2d_drv_power_on();
            }
            else
            {
                if (KAL_TRUE != INT_QueryExceptionStatus())
                {
                    if (KAL_FALSE == g2d_mutex_lock_fast(&g2d_mutex))
                    {
                        return G2D_STATUS_BUSY;
                    }
                    if (G2D_STATE_IDLE != (handle->g2dState))
                    {
                        ASSERT(0);
                        return G2D_STATUS_BUSY;
                    }
                    memset(handle, 0, sizeof(G2D_HANDLE_STRUCT));
                    handle->g2dState = G2D_STATE_SETTING_PARAMETER;
                }
                else
                {
                    if (G2D_STATE_IDLE != (handle->g2dState))
                    {
                        return G2D_STATUS_BUSY;
                    }
                    memset(handle, 0, sizeof(G2D_HANDLE_STRUCT));
                    handle->g2dState = G2D_STATE_SETTING_PARAMETER;
                }
                g2d_drv_power_on();
            }
#else
            if (G2D_GET_HANDLE_MODE_BLOCKING_UNTIL_GET_HANDLE == handleMode)
            {
                while (1)
                {
                    if (G2D_STATE_IDLE != (handle->g2dState))
                    {
                        continue;
                    }
                    else
                    {
                        break;
                    }
                }
            }
            else
            {
                if (G2D_STATE_IDLE != (handle->g2dState))
                {
                    return G2D_STATUS_BUSY;
                }
            }
            memset(handle, 0, sizeof(G2D_HANDLE_STRUCT));
            handle->g2dState = G2D_STATE_SETTING_PARAMETER;
#endif
        }
        break;
#else
        return G2D_STATUS_NOT_SUPPORT;
#endif /// G2D_HW_SUPPORT

#if !defined(G2D_FPGA)
        case G2D_CODEC_TYPE_SW:
            handle = (G2D_HANDLE_STRUCT *)malloc(sizeof(G2D_HANDLE_STRUCT));
            /// Can not memset to hw handle to 0
            memset(handle, 0, sizeof(G2D_HANDLE_STRUCT));
            handle->g2dState = G2D_STATE_SETTING_PARAMETER;
            *handlePtr = handle;
            break;
#endif

        default:
            ASSERT(0);
            *handlePtr = NULL;
            break;
    }

    handle->type = codecType;

    return G2D_STATUS_OK;
}
/*************************************************************************
 * FUNCTION
 *  ccci_exception_handshake
 *
 * DESCRIPTION
 *  This function performs the exception-mode handshake on the CCCI
 *  control channel using polling-mode GPDs.
 *
 * PARAMETERS
 *  None.
 *
 * RETURNS
 *  None.
 *
 *************************************************************************/
void ccci_exception_handshake(void)
{
#if defined(__MODEM_CCCI_EXIST__) && !defined(__MODEM_CARD__)
    CCCI_BUFF_T *buff;
    kal_uint32 p_cache_aligned;
    kal_uint32 gpd_num;
    qbm_gpd *p_first_gpd, *p_last_gpd;
    //kal_uint32 rcv_size = 0;

    // ensure we are in the exception state
    if (INT_QueryExceptionStatus() == KAL_FALSE)
        return;

    //we block here for debugging
    //if(ccci_exception_state != CCCI_EXPT_CLEAR_CH_ST) while(1);

    //- Avoid to use kal_mem_cpy
    //- HW bug
    ccci_init(CCCI_CONTROL_CHANNEL, ccci_except_ack);
    ccci_init(CCCI_CONTROL_CHANNEL_ACK, ccci_except_ack);

    // exception mode has only a single thread, so polling mode must be used

    /* initialize polling mode GPDs */
    ASSERT(CCCI_EXCEPT_POLLING_MODE_BUF_SZ >= 2 * CPU_CACHE_LINE_SIZE);

    /* make p_gpd aligned to CPU_CACHE_LINE_SIZE_MASK */
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_tx;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK)
    {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_tx) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }

    // The reason for + QBM_HEAD_SIZE is for ROME E1: cldma needs to record whether this buffer is cacheable or non-cacheable
    ccci_except_polling_gpd_tx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);

    //format Rx GPD
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK)
    {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }

    // The reason for + QBM_HEAD_SIZE is for ROME E1: cldma needs to record whether this buffer is cacheable or non-cacheable
    ccci_except_polling_gpd_rx = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx);

    //format Rx 2nd GPD
    p_cache_aligned = (kal_uint32)g_ccci_expect_polling_buf_rx2;
    if (p_cache_aligned & CPU_CACHE_LINE_SIZE_MASK)
    {
        p_cache_aligned = ((kal_uint32)(g_ccci_expect_polling_buf_rx2) & ~CPU_CACHE_LINE_SIZE_MASK);
        p_cache_aligned += CPU_CACHE_LINE_SIZE;
    }

    // The reason for + QBM_HEAD_SIZE is for ROME E1: cldma needs to record whether this buffer is cacheable or non-cacheable
    ccci_except_polling_gpd_rx2 = (qbm_gpd *)(p_cache_aligned + QBM_HEAD_SIZE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_rx2, ccci_except_polling_gpd_rx2);

    //step 0. config rx gpd next pointer
    QBM_DES_SET_NEXT(ccci_except_polling_gpd_rx, ccci_except_polling_gpd_rx2);

    //step 1. send TX handshake pkt
    buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
    buff->data[0]  = MD_EX_MAGIC;
    buff->data[1]  = CCMSG_ID_EXCEPTION_CHECK;
    buff->channel  = CCCI_CONTROL_CHANNEL;
    buff->reserved = MD_EX_CHK_ID;
    ccci_debug_add_seq(buff, CCCI_DEBUG_ASSERT_BIT);    // add ccci seq
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx, sizeof(CCCI_BUFF_T));
    QBM_DES_SET_DATALEN(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(CCCI_BUFF_T));
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx);
    qbm_cal_set_checksum((kal_uint8 *)ccci_except_polling_gpd_tx->p_data_tbd);
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(ccci_except_polling_gpd_tx->p_data_tbd, sizeof(qbm_gpd));
    QBM_CACHE_FLUSH(buff, sizeof(CCCI_BUFF_T));

    ex_set_step_logging(EX_AUTO_STEP);  //0x41

    //step 2. polling echoed rx handshake pkt (needs two GPDs, one for tail)
    // TODO: Need to revise the API, shall use exception API (after channel reset flow is done)
#ifdef SWITCH_TO_EXCEPTION_IO
    p_first_gpd = p_last_gpd = ccci_except_polling_gpd_tx;
    ccci_except_set_gpd(CCCI_CONTROL_CHANNEL, p_first_gpd, p_last_gpd);
    gpd_num = 0;
    do {
        ccci_except_hif_st(CCCI_CONTROL_CHANNEL);
        ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
    } while (0 == gpd_num);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
    ex_set_step_logging(EX_AUTO_STEP);  //0x42

    p_first_gpd = ccci_except_polling_gpd_rx;
    p_last_gpd  = ccci_except_polling_gpd_rx2;
    do {
        ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd);
        gpd_num = 0;
        do {
            ccci_except_hif_st(CCCI_CONTROL_CHANNEL_ACK);
            ccci_except_poll_gpd(CCCI_CONTROL_CHANNEL_ACK, (void **)&p_first_gpd, (void **)&p_last_gpd, &gpd_num);
        } while (0 == gpd_num);

        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_rx);
        //rcv_size = CCCIDEV_GET_QBM_DATALEN(ccci_except_polling_gpd_rx); // FIXME
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T));
        ccci_debug_check_seq(buff); // check ccci seq
        if (buff->reserved == MD_EX_CHK_ID)
        {
            ex_set_step_logging(EX_AUTO_STEP);  //0x43
            ccci_exception_handshake_done = KAL_TRUE;
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
            ccci_except_set_gpd(CCCI_CONTROL_CHANNEL_ACK, p_first_gpd, p_last_gpd);    //reload to CCCI_CONTROL_CHANNEL_ACK
            break;
        }
        else
        {
            //dump data
            ex_fire_extern_step_logging(0xFFFFFFFF);
            ex_fire_extern_step_logging(buff->data[0]);
            ex_fire_extern_step_logging(buff->data[1]);
            ex_fire_extern_step_logging(buff->channel);
            ex_fire_extern_step_logging(buff->reserved);
            ex_fire_extern_step_logging(0xFFFFFFFF);
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(p_first_gpd, p_last_gpd);
    } while (1);
#else
    ccci_exception_check_write_result = ccci_polling_io(CCCI_CONTROL_CHANNEL, ccci_except_polling_gpd_tx, KAL_TRUE);
    CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
    while (CCCI_SUCCESS == (ccci_exception_handshake_done = ccci_polling_io(CCCI_CONTROL_CHANNEL_ACK, ccci_except_polling_gpd_tx, KAL_FALSE)))
    {
        buff = CCCIDEV_GET_QBM_DATAPTR(ccci_except_polling_gpd_tx);
        QBM_CACHE_INVALID(buff, sizeof(CCCI_BUFF_T));
        ccci_debug_check_seq(buff); // check ccci seq
        if (buff->reserved == MD_EX_CHK_ID)
        {
            CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
            break;
        }
        CCCIDEV_RST_CCCI_COMM_GPD_LIST(ccci_except_polling_gpd_tx, ccci_except_polling_gpd_tx);
    }
#endif

    ex_set_step_logging(EX_AUTO_STEP);  //0x44
    ccci_exception_state = CCCI_EXPT_HANDSHAKE_ST;

#ifdef CCCI_EXCETION_PRE_TEST
    ccci_exception_info_passed_pre();
#endif
#endif
}
G2D_STATUS_ENUM g2dBitBltStart(G2D_HANDLE_STRUCT *handle)
{
    switch (handle->type)
    {
        case G2D_CODEC_TYPE_HW:
        {
#if defined(__DYNAMIC_SWITCH_CACHEABILITY__) && defined(__MTK_TARGET__)
            {
                G2D_CANVAS_INFO_STRUCT *dstCanvas = &(handle->dstCanvas);
                G2D_CANVAS_INFO_STRUCT *srcCanvas = &(handle->bitbltFunc.srcCanvas);

                /// Source
                /// RGB
                if (GFX_CACHE_SWITCH_BEHAVIOR_TRY_SWITCH == handle->srcCanvasCacheSwitchBehavior)
                {
                    if (KAL_TRUE == drv_gfx_dynamic_switch_noncacheable(&(srcCanvas->RGBBufferAddr), srcCanvas->RGBBufferSize, KAL_TRUE))
                    {
                        handle->srcBitbltRGBCanvasType = G2D_MEMORY_TYPE_SWITCH_TO_NONCACHEABLE;
                    }
                }

                /// Destination
                /// RGB
                if (GFX_CACHE_SWITCH_BEHAVIOR_TRY_SWITCH == handle->dstCanvasCacheSwitchBehavior)
                {
                    if (KAL_TRUE == drv_gfx_dynamic_switch_noncacheable(&(dstCanvas->RGBBufferAddr), dstCanvas->RGBBufferSize, KAL_TRUE))
                    {
                        handle->dstCanvasType = G2D_MEMORY_TYPE_SWITCH_TO_NONCACHEABLE;
                    }
                }
            }
#endif

#if defined(__MTK_TARGET__)
            {
                G2D_CANVAS_INFO_STRUCT *dstCanvas = &(handle->dstCanvas);
                G2D_CANVAS_INFO_STRUCT *srcCanvas = &(handle->bitbltFunc.srcCanvas);

                ASSERT(INT_QueryIsNonCachedRAM((kal_uint32)(srcCanvas->RGBBufferAddr), srcCanvas->RGBBufferSize));
                ASSERT(INT_QueryIsNonCachedRAM((kal_uint32)(dstCanvas->RGBBufferAddr), dstCanvas->RGBBufferSize));
            }
#endif

            g2d_drv_bitblt_start(handle);

#if defined(__MTK_TARGET__)
            if (KAL_TRUE != INT_QueryExceptionStatus())
            {
                handle->g2dState = G2D_STATE_BUSY;
            }
            else
#endif
            {
                handle->g2dState = G2D_STATE_BUSY;
            }
        }
        break;

        default:
            ASSERT(0);
            break;
    }

    return G2D_STATUS_OK;
}
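/*
 * Usage sketch (illustrative only, not part of the original source): the handle
 * lifecycle implied by g2dGetHandle()/g2dBitBltStart()/g2dReleaseHandle() above.
 * Assumptions: g2dGetStatus() returns non-zero while the HW is still busy (as
 * suggested by its use in g2dReleaseHandle), and the caller fills in the canvas
 * and bitblt parameters in code not shown here.
 */
static G2D_STATUS_ENUM g2d_bitblt_once(void)
{
    G2D_HANDLE_STRUCT *handle = NULL;

    if (G2D_STATUS_OK != g2dGetHandle(&handle, G2D_CODEC_TYPE_HW,
                                      G2D_GET_HANDLE_MODE_BLOCKING_UNTIL_GET_HANDLE))
    {
        return G2D_STATUS_BUSY;
    }

    /* ... caller fills handle->dstCanvas and handle->bitbltFunc here ... */

    g2dBitBltStart(handle);             /* state becomes G2D_STATE_BUSY */

    while (g2dGetStatus(handle))        /* assumed: non-zero means still busy */
    {
        kal_sleep_task(1);
    }

    return g2dReleaseHandle(handle);    /* powers off the HW and unlocks g2d_mutex */
}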
/*************************************************************************
 * FUNCTION
 *  mdci_write_and_wait
 *
 * DESCRIPTION
 *  This function writes data through either a mailbox channel or a stream
 *  channel. ONLY USED WHEN NO INTERRUPT is available (init stage or
 *  exception mode).
 *
 * PARAMETERS
 *  channel - logical channel
 *  buff    - pointer to channel buffer
 *  ticks   - number of busy-wait ticks before giving up
 *
 * RETURNS
 *  MDIF error code.
 *
 *************************************************************************/
kal_int32 mdci_write_and_wait(MDCI_CHANNEL_T channel, MDCI_BUFF_T *buff, kal_uint32 ticks)
{
    kal_uint32 mdci_busy, phy_chann, saveaddr = 0;
    kal_int32 ret;
    MDCI_BUFF_T *chdata;
    volatile kal_uint32 reserved_value = 0;

    /* only allowed in the init stage or in exception mode */
    if (KAL_TRUE != kal_query_systemInit() && KAL_TRUE != INT_QueryExceptionStatus())
    {
        return MDCI_API_INVALID;
    }

    /* check parameters */
    if (channel >= MDCI_MAX_CHANNEL)
        return MDCI_INVALID_PARAM;
    if (buff == NULL)
        return MDCI_INVALID_PARAM;

    *MDIF_ACK = 0xFFFFFFFF;

    /* check state */
    ret = MDCI_NOT_RECEIVE;
    buff->reserved = MDCI_EXCEPTION_CHECK_ID;

    /* get one physical channel */
    mdci_busy = *MDIF_BUSY;
    if (0xFF == mdci_busy)
    {
        return MDCI_NO_PHY_CHANNEL;
    }

    for (phy_chann = mdci_writeindx_for_FC; phy_chann < (mdci_writeindx_for_FC + MDIF_MAX_PHY); phy_chann++)
    {
        phy_chann %= MDIF_MAX_PHY;
        chdata = (MDCI_BUFF_T *)MDIF_TXCHDATA + phy_chann;
        reserved_value = *(volatile kal_uint32 *)(&(chdata->reserved));
        if (reserved_value == MDCI_EXCEPTION_CHECK_ID)
        {
            return MDCI_NO_PHY_CHANNEL;
        }
        else
        {
            /* set BUSY bit */
            *MDIF_BUSY |= (1 << phy_chann);
            mdci_writeindx_for_FC = (phy_chann + 1) % MDIF_MAX_PHY;
            break;
        }
    }

    /* set logical channel number */
    buff->channel = channel;

    /* copy channel buffer */
    chdata = (MDCI_BUFF_T *)MDIF_TXCHDATA + phy_chann;
    kal_mem_cpy(chdata, buff, sizeof(MDCI_BUFF_T));

    /* restore the stream buffer address */
    if (!MDCI_IS_MAILBOX(buff))
    {
        MDCI_STREAM_ADDR(buff) = saveaddr;
    }

    /* start MDIF */
    *MDIF_TCHNUM = phy_chann;

    while (ticks > 0)
    {
        reserved_value = *(volatile kal_uint32 *)(&(chdata->reserved));
        if (reserved_value != MDCI_EXCEPTION_CHECK_ID)
        {
            ret = MDCI_SUCCESS;
            break;
        }
        ust_busy_wait(1);
        ticks--;
    }

    return ret;
}
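/*
 * Usage sketch (illustrative only, not part of the original source): an
 * exception-mode request/response exchange built on the two polling helpers
 * above. The tick budgets are arbitrary assumptions; only mdci_write_and_wait(),
 * mdci_read_and_wait() and their return codes come from the code above.
 */
static kal_int32 mdci_exchange_in_exception(MDCI_CHANNEL_T channel, MDCI_BUFF_T *req, MDCI_BUFF_T *rsp)
{
    kal_int32 ret;

    /* busy-wait up to 1000 ticks for the peer to consume the TX buffer */
    ret = mdci_write_and_wait(channel, req, 1000);
    if (ret != MDCI_SUCCESS)
    {
        return ret;     /* MDCI_API_INVALID, MDCI_NO_PHY_CHANNEL, MDCI_NOT_RECEIVE, ... */
    }

    /* poll the RX physical channels for a reply on the same logical channel */
    return (kal_int32)mdci_read_and_wait(channel, rsp, 1000);
}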