void MTKMALI_DumpRegister( void ) { #define DUMP_REG_INFO( addr ) MALIK_MSG("REG: %s = 0x%08x\n", #addr, M_READ32( addr, 0 )) unsigned long dummy; MALIK_MSG("MTKMALI_DumpRegister-------:\n"); MALIK_MSG("MT_CG_MFG_G3D is %d\n", clock_is_on(MT_CG_MFG_G3D)); MALIK_MSG("MT_CG_DISP0_SMI_COMMON is %d\n", clock_is_on(MT_CG_DISP0_SMI_COMMON)); /*Dump Clock Gating Register*/ DUMP_REG_INFO( REG_SMI_CG_TEMP ); DUMP_REG_INFO( REG_MFG_CG_CON ); DUMP_REG_INFO( REG_MFG_RESET ); DUMP_REG_INFO( REG_MFG_DEBUG_SEL ); /*Test Mali Register*/ dummy = ( 0x1F-M_READ32( REG_MFG_DEBUG_SEL, 0x0 ) ); MALIK_MSG("Write 0x%02X to REG_MFG_DEBUG_SEL\n", (unsigned int)dummy ); M_WRITE32( REG_MFG_DEBUG_SEL, 0x0, dummy ); DUMP_REG_INFO( REG_MFG_DEBUG_SEL ); MALIK_MSG("---------------------------:\n"); /*Dump Call stack*/ dump_stack(); }
/*
 * React to Mali power mode events by gating/ungating the MFG clocks.
 * bPoweroff latches the current state (1 = clocks off) so repeated events
 * of the same kind are ignored; clock toggling happens under mfg_pwr_lock.
 */
void mali_platform_power_mode_change(mali_power_mode power_mode)
{
	unsigned long lock_flags;

	if (MALI_POWER_MODE_ON == power_mode) {
		MALI_DEBUG_PRINT(3, ("Mali platform: Got MALI_POWER_MODE_ON event, %s\n",
				     atomic_read((atomic_t *)&bPoweroff) ? "powering on" : "already on"));
		if (atomic_read((atomic_t *)&bPoweroff) == 1) {
			MALI_DEBUG_PRINT(3,("[+]MFG enable_clock \n"));
			mfg_pwr_lock(lock_flags);
			/* SMI common is enabled before the G3D core clock. */
			if (!clock_is_on(MT_CG_MFG_G3D)) {
				enable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
				enable_clock(MT_CG_MFG_G3D, "MFG");
			}
			mfg_pwr_unlock(lock_flags);
			MALI_DEBUG_PRINT(3,("[-]MFG enable_clock \n"));
#if defined(CONFIG_MALI400_PROFILING)
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
						      MALI_PROFILING_EVENT_CHANNEL_GPU |
						      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
						      500, 1200/1000, 0, 0, 0);
#endif
			atomic_set((atomic_t *)&bPoweroff, 0);
		}
	} else if (MALI_POWER_MODE_LIGHT_SLEEP == power_mode ||
		   MALI_POWER_MODE_DEEP_SLEEP == power_mode) {
		MALI_DEBUG_PRINT(3, ("Mali platform: Got %s event, %s\n",
				     power_mode == MALI_POWER_MODE_LIGHT_SLEEP ? "MALI_POWER_MODE_LIGHT_SLEEP" : "MALI_POWER_MODE_DEEP_SLEEP",
				     atomic_read((atomic_t *)&bPoweroff) ? "already off" : "powering off"));
		if (atomic_read((atomic_t *)&bPoweroff) == 0) {
			MALI_DEBUG_PRINT(3,("[+]MFG disable_clock \n"));
			mfg_pwr_lock(lock_flags);
			/* Disable in reverse order: core clock first, then SMI common. */
			if (clock_is_on(MT_CG_MFG_G3D)) {
				disable_clock(MT_CG_MFG_G3D, "MFG");
				disable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
			}
			mfg_pwr_unlock(lock_flags);
			MALI_DEBUG_PRINT(3,("[-]MFG disable_clock \n"));
#if defined(CONFIG_MALI400_PROFILING)
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
						      MALI_PROFILING_EVENT_CHANNEL_GPU |
						      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
						      0, 0, 0, 0, 0);
#endif
			atomic_set((atomic_t *)&bPoweroff, 1);
		}
	}
}
/*
 * disp_bls_set_backlight - set the backlight brightness via the BLS PWM.
 *
 * @level: requested brightness level (0 turns the backlight/PWM off).
 *
 * Serialized by backlight_mutex.  For non-zero levels the PWM MM clock is
 * ungated first; the level is passed through brightness_mapping() and written
 * to the PWM duty register, and the PWM generator enable bit (bit16 of
 * DISP_REG_BLS_EN) is set or cleared accordingly.  For level 0 the PWM MM
 * clock is gated again afterwards.  Always returns 0.
 */
int disp_bls_set_backlight(unsigned int level)
{
    unsigned int mapped_level;
    DISP_MSG("disp_bls_set_backlight: %d, gBLSPowerOn = %d\n", level, gBLSPowerOn);
    mutex_lock(&backlight_mutex);
    /* make sure the PWM MM clock is running before touching BLS registers */
    if(level && !clock_is_on(MT_CG_PWM_MM_SW_CG))
        enable_clock(MT_CG_PWM_MM_SW_CG, "DDP");
    if (level && !clock_is_on(MT_CG_MDP_BLS_26M_SW_CG))
    {
        // remove CG control to DDP path
        /* NOTE(review): ASSERT fires when the BLS 26M gate is off; the
         * disp_bls_config() call below is only reached in that error path —
         * confirm this nesting is intentional. */
        ASSERT(0);
        if (!gBLSPowerOn)
        {
            // config BLS parameter
            disp_bls_config();
        }
    }
#ifdef USE_DISP_BLS_MUTEX
    disp_bls_get_mutex();
#else
    /* fence register updates with the BLS debug register when no HW mutex */
    DISP_REG_SET(DISP_REG_BLS_DEBUG, 0x3);
#endif
    mapped_level = brightness_mapping(level);
    DISP_MSG("after mapping, mapped_level: %d\n", mapped_level);
    DISP_REG_SET(DISP_REG_BLS_PWM_DUTY, mapped_level);
    if (mapped_level)   // enable PWM generator
        DISP_REG_SET(DISP_REG_BLS_EN, DISP_REG_GET(DISP_REG_BLS_EN) | 0x10000);
    else                // disable PWM generator
        DISP_REG_SET(DISP_REG_BLS_EN, DISP_REG_GET(DISP_REG_BLS_EN) & 0xFFFEFFFF);
    DISP_MSG("after SET, PWM_DUTY: %d\n", DISP_REG_GET(DISP_REG_BLS_PWM_DUTY));
#ifdef USE_DISP_BLS_MUTEX
    disp_bls_release_mutex();
#else
    DISP_REG_SET(DISP_REG_BLS_DEBUG, 0x0);
#endif
    /* backlight fully off: gate the PWM MM clock again */
    if(!level && clock_is_on(MT_CG_PWM_MM_SW_CG))
        disable_clock(MT_CG_PWM_MM_SW_CG, "DDP");
    if (!level && gBLSPowerOn)
    {
        DISP_MSG("disp_bls_set_backlight: disable clock\n");
        // disable_clock(MT_CG_DISP0_SMI_LARB0 , "DDP");
        gBLSPowerOn = 0;
    }
    mutex_unlock(&backlight_mutex);
    return 0;
}
void mali_platform_power_mode_change(mali_power_mode power_mode) { unsigned long flags; switch (power_mode) { case MALI_POWER_MODE_ON: MALI_DEBUG_PRINT(4, ("[+] MFG enable_clock \n")); spin_lock_irqsave(&mali_pwr_lock, flags); if (!clock_is_on(MT_CG_BG3D)) { enable_clock(MT_CG_BG3D, "MFG"); } spin_unlock_irqrestore(&mali_pwr_lock, flags); atomic_set(&g_is_power_enabled, 1); mali_dispatch_dvfs_work(); MALI_DEBUG_PRINT(4, ("[-] MFG enable_clock \n")); #if defined(CONFIG_MALI400_PROFILING) _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_GPU | MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, 500, 1200 / 1000, 0, 0, 0); #endif break; case MALI_POWER_MODE_LIGHT_SLEEP: case MALI_POWER_MODE_DEEP_SLEEP: MALI_DEBUG_PRINT(4, ("[+] MFG disable_clock \n")); atomic_set(&g_is_power_enabled, 0); mali_cancel_dvfs_work(); spin_lock_irqsave(&mali_pwr_lock, flags); if (clock_is_on(MT_CG_BG3D)) { disable_clock(MT_CG_BG3D, "MFG"); mtk_set_input_boost_duration(0); } spin_unlock_irqrestore(&mali_pwr_lock, flags); MALI_DEBUG_PRINT(4, ("[-] MFG disable_clock \n")); #if defined(CONFIG_MALI400_PROFILING) _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_GPU | MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, 0, 0, 0, 0, 0); #endif break; } }
/*
 * spm_module_init - one-time SPM (System Power Manager) bring-up.
 *
 * Sets up the SPM debugfs/procfs interface, programs the bring-up clock-gate
 * configuration under spm_lock, and registers the two SPM interrupt lines.
 * IRQ registration failures are logged and WARN_ON'd but not fatal.
 */
void spm_module_init(void)
{
    int r;
    unsigned long flags;

    spm_fs_init();

    spin_lock_irqsave(&spm_lock, flags);
#if 1//def SPM_CLOCK_INIT
    /*Only set during bringup init. No need to be changed.*/
    if(clock_is_on(MT_CG_SPM_52M_SW_CG))
        disable_clock(MT_CG_SPM_52M_SW_CG, "SPM");
    if(!clock_is_on(MT_CG_SC_26M_CK_SEL_EN))
        enable_clock(MT_CG_SC_26M_CK_SEL_EN, "SPM");//Enable the feature that SPM can switch bus and audio clock to be 26Mhz
    if(clock_is_on(MT_CG_SC_MEM_CK_OFF_EN))
        disable_clock(MT_CG_SC_MEM_CK_OFF_EN, "SPM");
    /*Dynamic on/off before entering suspend/DPidle and after leaving suspend/DPidle*/
    if(!clock_is_on(MT_CG_MEMSLP_DLYER_SW_CG))
        enable_clock(MT_CG_MEMSLP_DLYER_SW_CG, "SPM");
    if(!clock_is_on(MT_CG_SPM_SW_CG))//need check with mtcmos owner for spm clk init gating
        enable_clock(MT_CG_SPM_SW_CG, "SPM");
#endif
    spin_unlock_irqrestore(&spm_lock, flags);

    /* SPM IRQ line 0: always a normal (low-level triggered) IRQ */
    r = request_irq(MT_SPM0_IRQ_ID, spm0_irq_handler, IRQF_TRIGGER_LOW,"mt-spm", NULL);
    if (r) {
        spm_error("SPM IRQ[0] register failed (%d)\n", r);
        WARN_ON(1);
    }

    /*
     * SPM IRQ line 1: used by the SPM watchdog when CONFIG_KICK_SPM_WDT is
     * set; registered as an FIQ when FIQ glue support is available,
     * otherwise as a regular IRQ.
     */
#ifdef CONFIG_KICK_SPM_WDT
#ifndef CONFIG_FIQ_GLUE
    //printk("******** MTK WDT register irq ********\n" );
    r = request_irq(MT_SPM1_IRQ_ID, spm1_irq_handler, IRQF_TRIGGER_LOW,"[SPM WDT]", NULL);
#else
    //printk("******** MTK WDT register fiq ********\n" );
    r = request_fiq(MT_SPM1_IRQ_ID, spm1_fiq_handler, IRQF_TRIGGER_LOW, NULL);
#endif
#else
    r = request_irq(MT_SPM1_IRQ_ID, spm1_irq_handler, IRQF_TRIGGER_LOW,"mt-spm", NULL);
#endif
    if (r) {
        spm_error("SPM IRQ[1] register failed (%d)\n", r);
        WARN_ON(1);
    }
}
/*
 * disp_bls_init - initialise the BLS (backlight/brightness) engine.
 *
 * @srcWidth:  source frame width in pixels
 * @srcHeight: source frame height in pixels
 *
 * Reads the customized LED configuration for the LCD backlight; when it is
 * in CUST_BLS_PWM mode, selects the configured PWM clock mux source and the
 * PWM clock divider (gPWMDiv, 10 bits, falling back to the default when the
 * customized divider is 0).  Then programs the BLS source size, PWM control,
 * interrupt enables, gamma LUT and full configuration.
 */
void disp_bls_init(unsigned int srcWidth, unsigned int srcHeight)
{
    struct cust_mt65xx_led *cust_led_list = get_cust_led_list();
    struct cust_mt65xx_led *cust = NULL;
    struct PWM_config *config_data = NULL;

    if(cust_led_list)
    {
        cust = &cust_led_list[MT65XX_LED_TYPE_LCD];
        if((strcmp(cust->name,"lcd-backlight") == 0) && (cust->mode == MT65XX_LED_MODE_CUST_BLS_PWM))
        {
            config_data = &cust->config_data;
            /* valid mux sources are 0..3 — anything else keeps the default */
            if (config_data->clock_source >= 0 && config_data->clock_source <= 3)
            {
                unsigned int regVal = DISP_REG_GET(CLK_CFG_1);
                clkmux_sel(MT_MUX_PWM, config_data->clock_source, "DISP_PWM");
                BLS_DBG("disp_bls_init : CLK_CFG_1 0x%x => 0x%x\n", regVal, DISP_REG_GET(CLK_CFG_1));
            }
            /* divider is 10 bits wide; 0 means "use the default" */
            gPWMDiv = (config_data->div == 0) ? PWM_DEFAULT_DIV_VALUE : config_data->div;
            gPWMDiv &= 0x3FF;
            BLS_MSG("disp_bls_init : PWM config data (%d,%d)\n", config_data->clock_source, config_data->div);
        }
    }

    BLS_DBG("disp_bls_init : srcWidth = %d, srcHeight = %d\n", srcWidth, srcHeight);
    BLS_MSG("disp_bls_init : BLS_EN=0x%x, PWM_DUTY=%d, PWM_DUTY_RD=%d, CG=0x%x, %d, %d\n",
            DISP_REG_GET(DISP_REG_BLS_EN), DISP_REG_GET(DISP_REG_BLS_PWM_DUTY),
            DISP_REG_GET(DISP_REG_BLS_PWM_DUTY_RD), DISP_REG_GET(DISP_REG_CONFIG_MMSYS_CG_CON0),
            clock_is_on(MT_CG_DISP0_MDP_BLS_26M), clock_is_on(MT_CG_DISP0_DISP_BLS));

    /* height in the high halfword, width in the low halfword */
    DISP_REG_SET(DISP_REG_BLS_SRC_SIZE, (srcHeight << 16) | srcWidth);
    DISP_REG_SET(DISP_REG_BLS_PWM_CON, 0x0 | (gPWMDiv << 16));
    DISP_REG_SET(DISP_REG_BLS_BLS_SETTING, 0x0);
    DISP_REG_SET(DISP_REG_BLS_INTEN, 0xF);

    /* if the PWM generator (bit16) is not running, park the duty at 0 */
    if (!(DISP_REG_GET(DISP_REG_BLS_EN) & 0x10000))
        DISP_REG_SET(DISP_REG_BLS_PWM_DUTY, 0);

    disp_bls_update_gamma_lut();
    //disp_bls_update_pwm_lut();
    disp_bls_config_full(srcWidth, srcHeight);

    if (dbg_log)
        disp_dump_reg(DISP_MODULE_BLS);
}
/*****************************************************************************
 * FUNCTION
 *  hal_btif_dump_reg
 * DESCRIPTION
 *  dump BTIF module's information when needed
 * PARAMETERS
 *  p_btif   [IN] pointer to BTIF module's information structure
 *  flag     [IN] register id flag
 * RETURNS
 *  0 means success, negative means fail
 *  NOTE(review): i_ret is never set to 0 on the success path, so this
 *  function currently always returns -1 — confirm callers ignore it.
 *****************************************************************************/
int hal_btif_dump_reg(P_MTK_BTIF_INFO_STR p_btif, ENUM_BTIF_REG_ID flag)
{
	/*Chaozhong: To be implement*/
	int i_ret = -1;
	int idx = 0;
	unsigned long irq_flag = 0;
	unsigned int base = p_btif->base;
	/* one byte per 32-bit register over the 0xE0-byte register window */
	unsigned char reg_map[0xE0/4] = {0};
	unsigned int lsr = 0x0;
	unsigned int dma_en = 0;

	/* snapshot the registers under the CG spinlock; bail if clock is off */
	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
	if (0 == clock_is_on(MTK_BTIF_CG_BIT)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
		BTIF_ERR_FUNC("%s: clock is off, this should never happen!!!\n",
			      __FILE__);
		return i_ret;
	}
	lsr = BTIF_READ32(BTIF_LSR(base));
	dma_en = BTIF_READ32(BTIF_DMA_EN(base));
	/* NOTE(review): only the low byte of each 32-bit register is captured
	 * (8-bit read at a 4-byte stride) — confirm this is intentional. */
	for (idx = 0 ; idx < sizeof (reg_map); idx++) {
		reg_map[idx] = BTIF_READ8(p_btif->base + (4 * idx));
	}
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);

	BTIF_INFO_FUNC("BTIF's clock is on\n");
	BTIF_INFO_FUNC("base address: 0x%x\n", base);
	switch (flag) {
	case REG_BTIF_ALL:
#if 0
		BTIF_INFO_FUNC("BTIF_IER:0x%x\n", BTIF_READ32(BTIF_IER(base)));
		BTIF_INFO_FUNC("BTIF_IIR:0x%x\n", BTIF_READ32(BTIF_IIR(base)));
		BTIF_INFO_FUNC("BTIF_FAKELCR:0x%x\n", BTIF_READ32(BTIF_FAKELCR(base)));
		BTIF_INFO_FUNC("BTIF_LSR:0x%x\n", BTIF_READ32(BTIF_LSR(base)));
		BTIF_INFO_FUNC("BTIF_SLEEP_EN:0x%x\n", BTIF_READ32(BTIF_SLEEP_EN(base)));
		BTIF_INFO_FUNC("BTIF_DMA_EN:0x%x\n", BTIF_READ32(BTIF_DMA_EN(base)));
		BTIF_INFO_FUNC("BTIF_RTOCNT:0x%x\n", BTIF_READ32(BTIF_RTOCNT(base)));
		BTIF_INFO_FUNC("BTIF_TRI_LVL:0x%x\n", BTIF_READ32(BTIF_TRI_LVL(base)));
		BTIF_INFO_FUNC("BTIF_WAT_TIME:0x%x\n", BTIF_READ32(BTIF_WAT_TIME(base)));
		BTIF_INFO_FUNC("BTIF_HANDSHAKE:0x%x\n", BTIF_READ32(BTIF_HANDSHAKE(base)));
#endif
		btif_dump_array("BTIF register", reg_map, sizeof (reg_map));
		break;
	default:
		break;
	}
	/* decode the cached DMA-enable and line-status snapshots */
	BTIF_INFO_FUNC("Tx DMA %s\n", (dma_en & BTIF_DMA_EN_TX) ? "enabled" : "disabled");
	BTIF_INFO_FUNC("Rx DMA %s\n", (dma_en & BTIF_DMA_EN_RX) ? "enabled" : "disabled");
	BTIF_INFO_FUNC("Rx data is %s\n", (lsr & BTIF_LSR_DR_BIT) ? "not empty" : "empty");
	BTIF_INFO_FUNC("Tx data is %s\n", (lsr & BTIF_LSR_TEMT_BIT) ? "empty" : "not empty");
	return i_ret;
}
/*
 * usb_enable_clock - reference-counted enable/disable of the USB clock gate.
 *
 * @enable: TRUE to take a reference (clock on), FALSE to drop one.
 *
 * The static count tracks outstanding enables under musb_reg_clock_lock;
 * the hardware gate is only touched on the 0->1 and 1->0 transitions.
 *
 * NOTE(review): always returns 1 (TRUE); the enable_clock()/disable_clock()
 * result in `res` is only logged — confirm callers do not rely on failure
 * reporting from this function.
 */
bool usb_enable_clock(bool enable)
{
	static int count = 0;
	bool res = TRUE;
	unsigned long flags;

	spin_lock_irqsave(&musb_reg_clock_lock, flags);

	if (enable && count == 0) {
		/*
		 * USB CG may default on.
		 * To prevent clk_mgr reference count error. MUST CHECK is clock on?
		 */
		if(!clock_is_on(MT_CG_USB_SW_CG)){
			res = enable_clock(MT_CG_USB_SW_CG, "PERI_USB");
		}
	} else if (!enable && count == 1) {
		res = disable_clock(MT_CG_USB_SW_CG, "PERI_USB");
	}

	/* count never goes below zero on unbalanced disables */
	if (enable)
		count++;
	else
		count = (count==0) ? 0 : (count-1);

	spin_unlock_irqrestore(&musb_reg_clock_lock, flags);

	printk(KERN_DEBUG "enable(%d), count(%d) res=%d\n", enable, count, res);

	return 1;
}
/*****************************************************************************
 * FUNCTION
 *  hal_btif_raise_wak_sig
 * DESCRIPTION
 *  raise wakeup signal to counterpart (pulse ap_wakeup_consyss low->high)
 * PARAMETERS
 *  p_btif   [IN] pointer to BTIF module's information structure
 * RETURNS
 *  0 means success, negative means fail
 *****************************************************************************/
int hal_btif_raise_wak_sig(P_MTK_BTIF_INFO_STR p_btif)
{
	int i_ret = -1;
	unsigned int base = p_btif->base;

#if MTK_BTIF_ENABLE_CLK_CTL
	/* the BTIF register file is only accessible while its clock is on */
	if (0 == clock_is_on(MTK_BTIF_CG_BIT)) {
		BTIF_ERR_FUNC("%s: clock is off before send wakeup signal!!!\n",
			      __FILE__);
		return i_ret;
	}
#endif
	/*write 0 to BTIF_WAK to pull ap_wakeup_consyss low */
	BTIF_CLR_BIT(BTIF_WAK(base), BTIF_WAK_BIT);
	/*wait for a period for longer than 1/32k period, here we use 40us*/
	/* NOTE(review): usleep_range() manages the task state itself, so this
	 * explicit set_current_state() appears redundant — confirm. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	usleep_range(64, 96);
	/*according to linux/documentation/timers/timers-how-to, we choose usleep_range
	   SLEEPING FOR ~USECS OR SMALL MSECS ( 10us - 20ms): * Use usleep_range */

	/*write 1 to pull ap_wakeup_consyss high*/
	BTIF_SET_BIT(BTIF_WAK(base), BTIF_WAK_BIT);
	i_ret = 0;
	return i_ret;
}
/* Print the GCE (CMDQ) clock-gate state into the given seq_file. */
void cmdq_core_print_status_seq_clock(struct seq_file *m)
{
#if defined(CONFIG_MTK_LEGACY) && defined(CMDQ_PWR_AWARE)
	/* MT_CG_DISP0_MUTEX_32K is removed in this platform */
	seq_printf(m, "MT_CG_INFRA_GCE: %d\n", clock_is_on(MT_CG_INFRA_GCE));
#endif
}
/*****************************************************************************
 * FUNCTION
 *  hal_btif_irq_handler
 * DESCRIPTION
 *  lower level interrupt handler: drains pending RX data and services a
 *  pending TX-empty interrupt, all under the CG spinlock
 * PARAMETERS
 *  p_btif   [IN]     pointer to BTIF module's information structure
 *  p_buf    [IN/OUT] pointer to rx data buffer
 *  max_len  [IN]     max length of rx buffer
 * RETURNS
 *  0 means success; negative means fail; positive means rx data length
 *****************************************************************************/
int hal_btif_irq_handler(P_MTK_BTIF_INFO_STR p_btif, unsigned char *p_buf, const unsigned int max_len)
{
	/*Chaozhong: To be implement*/
	int i_ret = -1;
	unsigned int iir = 0;
	unsigned int rx_len = 0;
	unsigned int base = p_btif->base;
	unsigned long irq_flag = 0;

#if 0
	/*check parameter valid or not*/
	if ((NULL == p_buf) || (max_len == 0)) {
		i_ret = ERR_INVALID_PAR;
		return i_ret;
	}
#endif
	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
#if MTK_BTIF_ENABLE_CLK_CTL
	/* register access is invalid while the BTIF clock is gated */
	if (0 == clock_is_on(MTK_BTIF_CG_BIT)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
		BTIF_ERR_FUNC("%s: clock is off before irq handle done!!!\n",
			      __FILE__);
		return i_ret;
	}
#endif
	/*read interrupt identifier register*/
	iir = BTIF_READ32(BTIF_IIR(base));

	/*is rx interrupt exist?*/
#if 0
	while ((iir & BTIF_IIR_RX) && (rx_len < max_len)) {
		rx_len += btif_rx_irq_handler(p_btif, (p_buf + rx_len), (max_len - rx_len));
		/*update IIR*/
		iir = BTIF_READ32(BTIF_IIR(base));
	}
#endif
	/* drain RX (data and timeout interrupts) until the IIR is clear */
	while (iir & (BTIF_IIR_RX | BTIF_IIR_RX_TIMEOUT)) {
		rx_len += btif_rx_irq_handler(p_btif, p_buf, max_len);
		/*update IIR*/
		iir = BTIF_READ32(BTIF_IIR(base));
	}

	/*is tx interrupt exist?*/
	if (iir & BTIF_IIR_TX_EMPTY) {
		i_ret = btif_tx_irq_handler(p_btif);
	}
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);

	/* prefer reporting the received byte count when any data arrived */
	i_ret = rx_len != 0 ? rx_len : i_ret;
	return i_ret;
}
/*
 * DPI_PowerOff - gate the DPI interface and engine clocks if powered on.
 *
 * Only acts when s_isDpiPowerOn is set; each clock is disabled only if it
 * is currently running.  disable_clock() results are accumulated and a
 * non-zero sum (assumed to indicate failure) is logged.  Always returns
 * DPI_STATUS_OK and clears s_isDpiPowerOn.
 *
 * Fix: parameter list declared as (void) — an empty () in C is an
 * old-style unprototyped declaration, not "takes no arguments".
 */
DPI_STATUS DPI_PowerOff(void)
{
	if (s_isDpiPowerOn) {
		int ret = 0;

		if (clock_is_on(MT_CG_DISP_DPI_IF_SW_CG))
			ret += disable_clock(MT_CG_DISP_DPI_IF_SW_CG, "DPI");
		if (clock_is_on(MT_CG_DISP_DPI_ENGINE_SW_CG))
			ret += disable_clock(MT_CG_DISP_DPI_ENGINE_SW_CG, "DPI");

		if (ret > 0) {
			DISP_LOG_PRINT(ANDROID_LOG_ERROR, "DPI", "power manager API return FALSE\n");
		}
		s_isDpiPowerOn = FALSE;
	}
	return DPI_STATUS_OK;
}
int disp_bls_config(void) { #if !defined(MTK_AAL_SUPPORT) if (!clock_is_on(MT_CG_MDP_BLS_26M_SW_CG)) { // remove CG control to DDP path ASSERT(0); } if (!gBLSPowerOn) { DISP_MSG("disp_bls_config: enable clock\n"); // enable_clock(MT_CG_DISP0_SMI_LARB0, "DDP"); gBLSPowerOn = 1; } #ifdef USE_DISP_BLS_MUTEX DISP_MSG("disp_bls_config : gBLSMutexID = %d\n", gBLSMutexID); DISP_REG_SET(DISP_REG_CONFIG_MUTEX_RST(gBLSMutexID), 1); DISP_REG_SET(DISP_REG_CONFIG_MUTEX_RST(gBLSMutexID), 0); DISP_REG_SET(DISP_REG_CONFIG_MUTEX_MOD(gBLSMutexID), 0x200); // BLS DISP_REG_SET(DISP_REG_CONFIG_MUTEX_SOF(gBLSMutexID), 0); // single mode if (disp_bls_get_mutex() == 0) { #else DISP_MSG("disp_bls_config\n"); DISP_REG_SET(DISP_REG_BLS_DEBUG, 0x3); #endif DISP_REG_SET(DISP_REG_BLS_PWM_DUTY, DISP_REG_GET(DISP_REG_BLS_PWM_DUTY)); DISP_REG_SET(DISP_REG_BLS_PWM_CON, 0x0); DISP_REG_SET(DISP_REG_BLS_EN, 0x00010001); // enable BLS_EN #ifdef USE_DISP_BLS_MUTEX if (disp_bls_release_mutex() == 0) return 0; } return -1; #else DISP_REG_SET(DISP_REG_BLS_DEBUG, 0x0); #endif #endif DISP_MSG("disp_bls_config:-\n"); return 0; return 0; }
/* Print GCE clock state (and the legacy mutex-32k gate) into the seq_file. */
void cmdq_virtual_print_status_seq_clock(struct seq_file *m)
{
#ifdef CMDQ_PWR_AWARE
	/* MT_CG_DISP0_MUTEX_32K is removed in this platform */
	seq_printf(m, "MT_CG_INFRA_GCE: %d", cmdq_dev_gce_clock_is_enable());
#if !defined(CMDQ_USE_CCF) && defined(CMDQ_USE_LEGACY)
	seq_printf(m, ", MT_CG_DISP0_MUTEX_32K: %d",
		   clock_is_on(MT_CG_DISP0_MUTEX_32K));
#endif
	seq_printf(m, "\n");
#endif
}
/*
 * Format the GCE clock-gate state into buf.
 * Returns the number of bytes written (0 when compiled out).
 */
ssize_t cmdq_core_print_status_clock(char *buf)
{
	char *cursor = buf;

#if defined(CONFIG_MTK_LEGACY) && defined(CMDQ_PWR_AWARE)
	/* MT_CG_DISP0_MUTEX_32K is removed in this platform */
	cursor += sprintf(cursor, "MT_CG_INFRA_GCE: %d\n", clock_is_on(MT_CG_INFRA_GCE));
#endif

	return cursor - buf;
}
/*
 * Early-suspend "late resume" hook: ungate the MFG clocks (SMI common
 * first, then the G3D core clock), then resume the Mali PM core.
 */
static void mali_late_resume_handler(struct early_suspend *h)
{
#if 1
	MALI_DEBUG_PRINT(1, ("[%s] enable_clock\n", __func__));
	trace_printk("[GPU power] MFG ON\n");
	enable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
	enable_clock(MT_CG_MFG_G3D, "MFG");
#else
	/* disabled alternative: single BG3D gate, toggled only when off */
	if (!clock_is_on(MT_CG_MFG_PDN_BG3D_SW_CG)) {
		enable_clock(MT_CG_MFG_PDN_BG3D_SW_CG, "G3D_DRV");
		MALI_DEBUG_PRINT(1, ("[%s] enable_clock\n", __func__));
	}
#endif
	mali_pm_os_resume();
}
/*
 * Early-suspend hook: suspend the Mali PM core first, then gate the MFG
 * clocks (G3D core clock first, then SMI common).
 */
static void mali_early_suspend_handler(struct early_suspend *h)
{
	mali_pm_os_suspend();
#if 1
	MALI_DEBUG_PRINT(1, ("[%s] disable_clock\n", __func__));
	trace_printk("[GPU power] MFG OFF\n");
	disable_clock(MT_CG_MFG_G3D, "MFG");
	disable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
#else
	/* disabled alternative: single BG3D gate, toggled only when on */
	if (clock_is_on(MT_CG_MFG_PDN_BG3D_SW_CG)) {
		disable_clock(MT_CG_MFG_PDN_BG3D_SW_CG, "G3D_DRV");
		MALI_DEBUG_PRINT(1, ("[%s] disable_clock\n", __func__));
	}
#endif
}
/*
 * DPI_MIPI_PowerOff - gate the MIPI 26M debug clock if powered on.
 *
 * Only acts when s_isDpiMipiPowerOn is set; the clock is disabled only if
 * currently running.  A non-zero disable_clock() result (assumed failure)
 * is logged.  Always returns DPI_STATUS_OK and clears s_isDpiMipiPowerOn.
 *
 * Fix: parameter list declared as (void) — an empty () in C is an
 * old-style unprototyped declaration, not "takes no arguments".
 */
DPI_STATUS DPI_MIPI_PowerOff(void)
{
	if (s_isDpiMipiPowerOn) {
		int ret = 0;

		if (clock_is_on(MT_CG_MIPI_26M_DBG_EN))
			ret += disable_clock(MT_CG_MIPI_26M_DBG_EN, "DSI");

		if (ret > 0) {
			DISP_LOG_PRINT(ANDROID_LOG_ERROR, "DPI", "power manager API return FALSE\n");
		}
		s_isDpiMipiPowerOn = FALSE;
	}
	return DPI_STATUS_OK;
}
/*
 * cmdq_virtual_print_status_clock - format the GCE clock state (and, on
 * legacy non-CCF builds, the DISP0 mutex-32k gate) into buf as ONE line.
 *
 * Returns the number of bytes written (0 when CMDQ_PWR_AWARE is not set).
 *
 * Fix: the first sprintf embedded a "\n", which broke the line before the
 * ", MT_CG_DISP0_MUTEX_32K: %d" continuation and produced a blank line on
 * non-legacy builds.  The seq_file twin (cmdq_virtual_print_status_seq_clock)
 * emits a single line; this now matches it.
 */
ssize_t cmdq_virtual_print_status_clock(char *buf)
{
	int32_t length = 0;
	char *pBuffer = buf;

#ifdef CMDQ_PWR_AWARE
	/* MT_CG_DISP0_MUTEX_32K is removed in this platform */
	pBuffer += sprintf(pBuffer, "MT_CG_INFRA_GCE: %d", cmdq_dev_gce_clock_is_enable());
#if !defined(CMDQ_USE_CCF) && defined(CMDQ_USE_LEGACY)
	pBuffer += sprintf(pBuffer, ", MT_CG_DISP0_MUTEX_32K: %d",
			   clock_is_on(MT_CG_DISP0_MUTEX_32K));
#endif
	pBuffer += sprintf(pBuffer, "\n");
#endif

	length = pBuffer - buf;
	return length;
}
/*****************************************************************************
 * FUNCTION
 *  hal_rx_dma_irq_handler
 * DESCRIPTION
 *  lower level rx interrupt handler: drains the RX virtual FIFO ring
 *  buffer, delivering data to the registered rx callback
 * PARAMETERS
 *  p_dma_info   [IN]     pointer to BTIF dma channel's information
 *  p_buf        [IN/OUT] pointer to rx data buffer
 *  max_len      [IN]     max length of rx buffer
 * RETURNS
 *  0 means success, negative means fail
 *  NOTE(review): on the success path i_ret accumulates the number of bytes
 *  delivered, so a positive byte count is also returned — confirm callers.
 *****************************************************************************/
int hal_rx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info,
			   unsigned char *p_buf, const unsigned int max_len)
{
	int i_ret = -1;
	unsigned int valid_len = 0;
	unsigned int wpt_wrap = 0;
	unsigned int rpt_wrap = 0;
	unsigned int wpt = 0;
	unsigned int rpt = 0;
	unsigned int tail_len = 0;
	unsigned int real_len = 0;
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	dma_rx_buf_write rx_cb = p_dma_info->rx_cb;
	unsigned char *p_vff_buf = NULL;
	unsigned char *vff_base = p_vfifo->p_vir_addr;
	unsigned int vff_size = p_vfifo->vfifo_size;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo, MTK_BTIF_DMA_VFIFO, vfifo);
	unsigned long flag = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
	/* DMA register access is invalid while the AP-DMA clock is gated */
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC("%s: clock is off before irq handle done!!!\n",
			      __FILE__);
		return i_ret;
	}
	/*disable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, false);

	/*clear Rx DMA's interrupt status*/
	BTIF_SET_BIT(RX_DMA_INT_FLAG(base), RX_DMA_INT_DONE | RX_DMA_INT_THRE);

	valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));
	rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
	wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	if ((0 == valid_len) && (rpt == wpt)) {
		BTIF_DBG_FUNC
		    ("rx interrupt, no data available in Rx DMA, wpt(0x%08x), rpt(0x%08x)\n",
		     rpt, wpt);
	}
	i_ret = 0;
	/* loop until the hardware reports the ring buffer fully consumed */
	while ((0 < valid_len) || (rpt != wpt)) {
		/* split the pointers into wrap flag + offset-within-buffer */
		rpt_wrap = rpt & DMA_RPT_WRAP;
		wpt_wrap = wpt & DMA_WPT_WRAP;
		rpt &= DMA_RPT_MASK;
		wpt &= DMA_WPT_MASK;

		/*calcaute length of available data in vFIFO*/
		if (wpt_wrap != p_mtk_vfifo->last_wpt_wrap) {
			/* writer wrapped past the reader: data spans the end */
			real_len = wpt + vff_size - rpt;
		} else {
			real_len = wpt - rpt;
		}

		if (NULL != rx_cb) {
			tail_len = vff_size - rpt;
			p_vff_buf = vff_base + rpt;
			if (tail_len >= real_len) {
				(*rx_cb) (p_dma_info, p_vff_buf, real_len);
			} else {
				/* deliver in two chunks around the wrap point */
				(*rx_cb) (p_dma_info, p_vff_buf, tail_len);
				p_vff_buf = vff_base;
				(*rx_cb) (p_dma_info, p_vff_buf,
					  real_len - tail_len);
			}
			i_ret += real_len;
		} else {
			BTIF_ERR_FUNC
			    ("no rx_cb found, please check your init process\n");
		}
		dsb();
		rpt += real_len;
		if (rpt >= vff_size) {
			/*read wrap bit should be revert*/
			rpt_wrap ^= DMA_RPT_WRAP;
			rpt %= vff_size;
		}
		rpt |= rpt_wrap;

		/*record wpt, last_wpt_wrap, rpt, last_rpt_wrap*/
		p_mtk_vfifo->wpt = wpt;
		p_mtk_vfifo->last_wpt_wrap = wpt_wrap;
		p_mtk_vfifo->rpt = rpt;
		p_mtk_vfifo->last_rpt_wrap = rpt_wrap;

		/*update rpt information to DMA controller*/
		btif_reg_sync_writel(rpt, RX_DMA_VFF_RPT(base));

		/*get vff valid size again and check if rx data is processed completely*/
		valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));
		rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
		wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	}

	/*enable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, true);
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
/*
 * spm_i2c_control - (disabled) SPM-side I2C channel power/init control.
 *
 * NOTE: intentionally neutered — the unconditional "return;" below makes
 * the whole body dead code; the former implementation is kept in the
 * "#if 0" region for reference.
 */
void spm_i2c_control(u32 channel, bool onoff)
{
	return;
#if 0
	//static int pdn = 0;
	static bool i2c_onoff = 0;
#ifdef CONFIG_OF
	void __iomem *base;
#else
	u32 base;//, i2c_clk;
#endif

	/* map the requested channel to its register base (falls back to I2C2) */
	switch(channel) {
	case 0:
		base = SPM_I2C0_BASE;
		//i2c_clk = MT_CG_INFRA_I2C0;
		break;
	case 1:
		base = SPM_I2C1_BASE;
		//i2c_clk = MT_CG_INFRA_I2C1;
		break;
	case 2:
		base = SPM_I2C2_BASE;
		//i2c_clk = MT_CG_INFRA_I2C2;
		break;
	default:
		base = SPM_I2C2_BASE;
		break;
	}

	/* only transition on distinct on/off edges, latched in i2c_onoff */
	if ((1 == onoff) && (0 == i2c_onoff)) {
		i2c_onoff = 1;
#if 0
#if 1
		pdn = spm_read(INFRA_PDN_STA0) & (1U << i2c_clk);
		spm_write(INFRA_PDN_CLR0, pdn);     /* power on I2C */
#else
		pdn = clock_is_on(i2c_clk);
		if (!pdn)
			enable_clock(i2c_clk, "spm_i2c");
#endif
#endif
		spm_write(base + OFFSET_CONTROL, 0x0);      /* init I2C_CONTROL */
		spm_write(base + OFFSET_TRANSAC_LEN, 0x1);  /* init I2C_TRANSAC_LEN */
		spm_write(base + OFFSET_EXT_CONF, 0x0);     /* init I2C_EXT_CONF */
		spm_write(base + OFFSET_IO_CONFIG, 0x0);    /* init I2C_IO_CONFIG */
		spm_write(base + OFFSET_HS, 0x102);         /* init I2C_HS */
	} else if ((0 == onoff) && (1 == i2c_onoff)) {
		i2c_onoff = 0;
#if 0
#if 1
		spm_write(INFRA_PDN_SET0, pdn);     /* restore I2C power */
#else
		if (!pdn)
			disable_clock(i2c_clk, "spm_i2c");
#endif
#endif
	} else
		/* NOTE(review): ASSERT(1) never fires; ASSERT(0) was likely intended */
		ASSERT(1);
#endif
}
/*****************************************************************************
 * FUNCTION
 *  hal_btif_dma_clk_ctrl
 * DESCRIPTION
 *  control clock output enable/disable of DMA module
 * PARAMETERS
 *  p_dma_info   [IN] pointer to BTIF dma channel's information
 *  flag         [IN] CLK_OUT_ENABLE or CLK_OUT_DISABLE
 * RETURNS
 *  0 means success, negative means fail
 *
 *  Four compile-time variants: legacy clkmgr vs. CCF (CONFIG_MTK_LEGACY),
 *  crossed with reference-counted vs. single-state tracking
 *  (MTK_BTIF_ENABLE_CLK_REF_COUNTER).  With MTK_BTIF_ENABLE_CLK_CTL off,
 *  this is a no-op that only tracks the requested state.
 *****************************************************************************/
int hal_btif_dma_clk_ctrl(P_MTK_DMA_INFO_STR p_dma_info, ENUM_CLOCK_CTRL flag)
{
/*In MTK DMA BTIF channel, there's only one global CG on AP_DMA, no sub channel's CG bit*/
/*according to Artis's comment, clock of DMA and BTIF is default off, so we assume it to be off by default*/
	int i_ret = 0;
	unsigned long irq_flag = 0;

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	/* outstanding enable references; HW is touched on 0<->1 edges only */
	static atomic_t s_clk_ref = ATOMIC_INIT(0);
#else
	/* last successfully applied state */
	static ENUM_CLOCK_CTRL status = CLK_OUT_DISABLE;
#endif

#if defined(CONFIG_MTK_LEGACY)
	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
#endif

#if MTK_BTIF_ENABLE_CLK_CTL
#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	if (CLK_OUT_ENABLE == flag) {
		/* first reference: actually ungate the AP-DMA clock */
		if (1 == atomic_inc_return(&s_clk_ref)) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret = enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			/* CCF: prepare may sleep, so it stays outside the spinlock */
			clk_prepare(clk_btif_apdma);
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_enable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			BTIF_INFO_FUNC("[CCF]enable clk_btif_apdma\n");
#endif /* defined(CONFIG_MTK_LEGACY) */
		}
	} else if (CLK_OUT_DISABLE == flag) {
		/* last reference dropped: gate the AP-DMA clock again */
		if (0 == atomic_dec_return(&s_clk_ref)) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret = disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_disable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			clk_unprepare(clk_btif_apdma);
			BTIF_INFO_FUNC("[CCF] clk_disable_unprepare(clk_btif_apdma) calling\n");
#endif /* defined(CONFIG_MTK_LEGACY) */
		}
	} else {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid clock ctrl flag (%d)\n", flag);
	}
#else
	if (status == flag) {
		/* requested state already applied: nothing to do */
		i_ret = 0;
		BTIF_DBG_FUNC("dma clock already %s\n",
			      CLK_OUT_ENABLE == status ? "enabled" : "disabled");
	} else {
		if (CLK_OUT_ENABLE == flag) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret = enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			/* only latch the new state on success */
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			clk_prepare(clk_btif_apdma);
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_enable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			BTIF_INFO_FUNC("[CCF]enable clk_btif_apdma\n");
#endif /* defined(CONFIG_MTK_LEGACY) */
		} else if (CLK_OUT_DISABLE == flag) {
#if defined(CONFIG_MTK_LEGACY)
			i_ret = disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
#else
			spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
			clk_disable(clk_btif_apdma);
			spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
			clk_unprepare(clk_btif_apdma);
			BTIF_INFO_FUNC("[CCF] clk_disable_unprepare(clk_btif_apdma) calling\n");
#endif /* defined(CONFIG_MTK_LEGACY) */
		} else {
			i_ret = ERR_INVALID_PAR;
			BTIF_ERR_FUNC("invalid clock ctrl flag (%d)\n", flag);
		}
	}
#endif
#else
	/* clock control disabled at build time: only track the state */
#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
#else
	status = flag;
#endif
	i_ret = 0;
#endif

#if defined(CONFIG_MTK_LEGACY)
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);
#endif

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	if (0 == i_ret) {
		BTIF_DBG_FUNC("dma clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s dma clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable", i_ret);
	}
#else
	if (0 == i_ret) {
		BTIF_DBG_FUNC("dma clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s dma clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable", i_ret);
	}
#endif

#if defined(CONFIG_MTK_LEGACY)
	BTIF_DBG_FUNC("DMA's clock is %s\n",
		      (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) ? "off" : "on");
#endif
	return i_ret;
}
/*
 * spm_i2c_control - power the selected I2C controller on/off around SPM
 * usage and initialise its registers for SPM transactions.
 *
 * @channel: I2C controller index (0..3)
 * @onoff:   1 to power on and initialise, 0 to restore the previous state
 *
 * The on/off state is latched in i2c_onoff so repeated calls in the same
 * direction are ignored; pdn remembers the original PERI power-down bit so
 * the off path can restore it.
 *
 * Fix: the switch previously hit "default: break;" with base and i2c_clk
 * left uninitialized, then used both — undefined behavior for any invalid
 * channel.  Invalid channels are now rejected up front.
 */
static void spm_i2c_control(u32 channel, bool onoff)
{
    static int pdn = 0;
    static bool i2c_onoff = 0;
#ifdef CONFIG_OF
    void __iomem *base;
#else
    u32 base;
#endif
    u32 i2c_clk;

    switch(channel) {
    case 0:
#ifdef CONFIG_OF
        base = SPM_I2C0_BASE;
#else
        base = I2C0_BASE;
#endif
        i2c_clk = MT_CG_PERI_I2C0;
        break;
    case 1:
#ifdef CONFIG_OF
        base = SPM_I2C1_BASE;
#else
        base = I2C1_BASE;
#endif
        i2c_clk = MT_CG_PERI_I2C1;
        break;
    case 2:
#ifdef CONFIG_OF
        base = SPM_I2C2_BASE;
#else
        base = I2C2_BASE;
#endif
        i2c_clk = MT_CG_PERI_I2C2;
        break;
    case 3:
#ifdef CONFIG_OF
        base = SPM_I2C3_BASE;
#else
        base = I2C3_BASE;
#endif
        i2c_clk = MT_CG_PERI_I2C3;
        break;
    //FIXME: I2C4 is defined in 6595 dts but not in 6795 dts.
#if 0
    case 4:
        base = I2C4_BASE;
        i2c_clk = MT_CG_PERI_I2C4;
        break;
#endif
    default:
        /* unknown channel: base/i2c_clk would be used uninitialized below */
        return;
    }

    if ((1 == onoff) && (0 == i2c_onoff)) {
        i2c_onoff = 1;
#if 1
        pdn = spm_read(PERI_PDN0_STA) & (1U << i2c_clk);
        spm_write(PERI_PDN0_CLR, pdn);          /* power on I2C */
#else
        pdn = clock_is_on(i2c_clk);
        if (!pdn)
            enable_clock(i2c_clk, "spm_i2c");
#endif
        spm_write(base + OFFSET_CONTROL, 0x0);      /* init I2C_CONTROL */
        spm_write(base + OFFSET_TRANSAC_LEN, 0x1);  /* init I2C_TRANSAC_LEN */
        spm_write(base + OFFSET_EXT_CONF, 0x1800);  /* init I2C_EXT_CONF */
        spm_write(base + OFFSET_IO_CONFIG, 0x3);    /* init I2C_IO_CONFIG */
        spm_write(base + OFFSET_HS, 0x102);         /* init I2C_HS */
    } else if ((0 == onoff) && (1 == i2c_onoff)) {
        i2c_onoff = 0;
#if 1
        spm_write(PERI_PDN0_SET, pdn);              /* restore I2C power */
#else
        if (!pdn)
            disable_clock(i2c_clk, "spm_i2c");
#endif
    } else
        /* NOTE(review): ASSERT(1) never fires; ASSERT(0) was likely intended */
        ASSERT(1);
}
/*****************************************************************************
 * FUNCTION
 *  hal_btif_clk_ctrl
 * DESCRIPTION
 *  control clock output enable/disable of BTIF module
 * PARAMETERS
 *  p_btif   [IN] pointer to BTIF module's information structure
 *  flag     [IN] CLK_OUT_ENABLE or CLK_OUT_DISABLE
 * RETURNS
 *  0 means success, negative means fail
 *
 *  Two tracking variants selected at build time: reference-counted
 *  (MTK_BTIF_ENABLE_CLK_REF_COUNTER) where the gate is only touched on
 *  0<->1 reference edges, or single-state where the last applied state is
 *  latched.  With MTK_BTIF_ENABLE_CLK_CTL off, this only tracks state.
 *****************************************************************************/
int hal_btif_clk_ctrl(P_MTK_BTIF_INFO_STR p_btif, ENUM_CLOCK_CTRL flag)
{
/*In MTK BTIF, there's only one global CG on AP_DMA, no sub channel's CG bit*/
/*according to Artis's comment, clock of DMA and BTIF is default off, so we assume it to be off by default*/
	int i_ret = 0;
	unsigned long irq_flag = 0;

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	/* outstanding enable references */
	static atomic_t s_clk_ref = ATOMIC_INIT(0);
#else
	/* last successfully applied state */
	static ENUM_CLOCK_CTRL status = CLK_OUT_DISABLE;
#endif

	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);
#if MTK_BTIF_ENABLE_CLK_CTL
#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	if (CLK_OUT_ENABLE == flag) {
		/* first reference: actually ungate the BTIF clock */
		if (1 == atomic_inc_return(&s_clk_ref)) {
			i_ret = enable_clock(MTK_BTIF_CG_BIT, BTIF_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_CG_BIT failed, ret:%d",
				     i_ret);
			}
		}
	} else if (CLK_OUT_DISABLE == flag) {
		/* last reference dropped: gate the BTIF clock again */
		if (0 == atomic_dec_return(&s_clk_ref)) {
			i_ret = disable_clock(MTK_BTIF_CG_BIT, BTIF_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_CG_BIT failed, ret:%d",
				     i_ret);
			}
		}
	} else {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid clock ctrl flag (%d)\n", flag);
	}
#else
	if (status == flag) {
		/* requested state already applied: nothing to do */
		i_ret = 0;
		BTIF_DBG_FUNC("btif clock already %s\n",
			      CLK_OUT_ENABLE == status ? "enabled" : "disabled");
	} else {
		if (CLK_OUT_ENABLE == flag) {
			i_ret = enable_clock(MTK_BTIF_CG_BIT, BTIF_USER_ID);
			/* only latch the new state on success */
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_CG_BIT failed, ret:%d",
				     i_ret);
			}
		} else if (CLK_OUT_DISABLE == flag) {
			i_ret = disable_clock(MTK_BTIF_CG_BIT, BTIF_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_CG_BIT failed, ret:%d",
				     i_ret);
			}
		} else {
			i_ret = ERR_INVALID_PAR;
			BTIF_ERR_FUNC("invalid clock ctrl flag (%d)\n", flag);
		}
	}
#endif
#else
	/* clock control disabled at build time: only track the state */
#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
#else
	status = flag;
#endif
	i_ret = 0;
#endif
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	if (0 == i_ret) {
		BTIF_DBG_FUNC("btif clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s btif clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable", i_ret);
	}
#else
	if (0 == i_ret) {
		BTIF_DBG_FUNC("btif clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s btif clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable", i_ret);
	}
#endif

#if MTK_BTIF_ENABLE_CLK_CTL
	BTIF_DBG_FUNC("BTIF's clock is %s\n",
		      (0 == clock_is_on(MTK_BTIF_CG_BIT)) ? "off" : "on");
#endif
	return i_ret;
}
/*
 * disp_bls_config - configure the display backlight (BLS) PWM engine.
 *
 * Selects the PWM clock mux and divider from the custom LED config,
 * ensures the BLS 26M / SMI LARB0 clocks are on, then programs the BLS
 * PWM control register (optionally under the display HW mutex).
 *
 * Returns 0 on success; -1 when the BLS mutex cannot be acquired
 * (USE_DISP_BLS_MUTEX builds only). When CONFIG_MTK_AAL_SUPPORT is
 * defined the whole body is compiled out and the function just logs
 * and returns 0 (AAL owns backlight control in that configuration).
 *
 * NOTE(review): the braces of "if (disp_bls_get_mutex() == 0) {" open
 * and close in different #ifdef regions below — edit with care.
 */
int disp_bls_config(void)
{
#if !defined(CONFIG_MTK_AAL_SUPPORT)
    struct cust_mt65xx_led *cust_led_list = get_cust_led_list();
    struct cust_mt65xx_led *cust = NULL;
    struct PWM_config *config_data = NULL;

    if(cust_led_list)
    {
        /* only the LCD backlight entry in BLS-PWM mode is relevant here */
        cust = &cust_led_list[MT65XX_LED_TYPE_LCD];
        if((strcmp(cust->name,"lcd-backlight") == 0) && (cust->mode == MT65XX_LED_MODE_CUST_BLS_PWM))
        {
            config_data = &cust->config_data;
            /* clock_source is a 2-bit mux selector; reject out-of-range values */
            if (config_data->clock_source >= 0 && config_data->clock_source <= 3)
            {
                unsigned int regVal = DISP_REG_GET(CLK_CFG_1);
                clkmux_sel(MT_MUX_PWM, config_data->clock_source, "DISP_PWM");
                BLS_DBG("disp_bls_init : CLK_CFG_1 0x%x => 0x%x\n", regVal, DISP_REG_GET(CLK_CFG_1));
            }
            /* div == 0 means "use default"; divider field is 10 bits wide */
            gPWMDiv = (config_data->div == 0) ? PWM_DEFAULT_DIV_VALUE : config_data->div;
            gPWMDiv &= 0x3FF;
            BLS_MSG("disp_bls_config : PWM config data (%d,%d)\n",
                config_data->clock_source, config_data->div);
        }
    }

    /* power the BLS block before touching its registers */
    if (!clock_is_on(MT_CG_DISP0_MDP_BLS_26M) || !gBLSPowerOn)
    {
        BLS_MSG("disp_bls_config: enable clock\n");
        enable_clock(MT_CG_DISP0_SMI_LARB0, "DDP");
        enable_clock(MT_CG_DISP0_MDP_BLS_26M , "DDP");
        gBLSPowerOn = 1;
    }

    BLS_MSG("disp_bls_config : BLS_EN=0x%x, PWM_DUTY=%d, PWM_DUTY_RD=%d, CG=0x%x, %d, %d\n",
        DISP_REG_GET(DISP_REG_BLS_EN), DISP_REG_GET(DISP_REG_BLS_PWM_DUTY),
        DISP_REG_GET(DISP_REG_BLS_PWM_DUTY_RD), DISP_REG_GET(DISP_REG_CONFIG_MMSYS_CG_CON0),
        clock_is_on(MT_CG_DISP0_MDP_BLS_26M), clock_is_on(MT_CG_DISP0_DISP_BLS));

#ifdef USE_DISP_BLS_MUTEX
    /* reset and arm the display HW mutex before register programming */
    BLS_MSG("disp_bls_config : gBLSMutexID = %d\n", gBLSMutexID);
    DISP_REG_SET(DISP_REG_CONFIG_MUTEX_RST(gBLSMutexID), 1);
    DISP_REG_SET(DISP_REG_CONFIG_MUTEX_RST(gBLSMutexID), 0);
    DISP_REG_SET(DISP_REG_CONFIG_MUTEX_MOD(gBLSMutexID), 0x200);    // BLS
    DISP_REG_SET(DISP_REG_CONFIG_MUTEX_SOF(gBLSMutexID), 0);        // single mode
    DISP_REG_SET(DISP_REG_CONFIG_MUTEX_EN(gBLSMutexID), 1);

    if (disp_bls_get_mutex() == 0)
    {
#else
    BLS_MSG("disp_bls_config\n");
    DISP_REG_SET(DISP_REG_BLS_DEBUG, 0x3);
#endif

        /* don't zero the duty cycle if BLS is already enabled (bit 16) */
        if (!(DISP_REG_GET(DISP_REG_BLS_EN) & 0x10000))
            DISP_REG_SET(DISP_REG_BLS_PWM_DUTY, 0);
        DISP_REG_SET(DISP_REG_BLS_PWM_CON, 0x0 | (gPWMDiv << 16));
        //DISP_REG_SET(DISP_REG_BLS_EN, 0x00010001);    //Enable BLS_EN

#ifdef USE_DISP_BLS_MUTEX
        if (disp_bls_release_mutex() == 0)
            return 0;
    }
    return -1;
#else
    DISP_REG_SET(DISP_REG_BLS_DEBUG, 0x0);
#endif
#endif

    BLS_MSG("disp_bls_config:-\n");
    return 0;
}
/*****************************************************************************
* FUNCTION
*  hal_tx_dma_irq_handler
* DESCRIPTION
*  lower level tx interrupt handler
* PARAMETERS
*  p_dma_info   [IN]    pointer to BTIF dma channel's information
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_tx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info)
{
#define MAX_CONTINIOUS_TIMES 512
	/* Fix: was "unsigned int i_ret = -1"; the function returns int and
	 * -1 is the failure sentinel, so a signed type is correct. */
	int i_ret = -1;
	unsigned int valid_size = 0;
	unsigned int vff_len = 0;
	unsigned int left_len = 0;
	unsigned int base = p_dma_info->base;
	/* track how many flush IRQs fired back-to-back, and when the run started */
	static int flush_irq_counter;
	static struct timeval start_timer;
	static struct timeval end_timer;
	unsigned long flag = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
	/* registers are unreadable with the DMA clock gated; bail out early */
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC
		    ("%s: clock is off before irq status clear done!!!\n",
		     __FILE__);
		return i_ret;
	}
/*check if Tx VFF Left Size equal to VFIFO size or not*/
	vff_len = BTIF_READ32(TX_DMA_VFF_LEN(base));
	valid_size = BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base));
	left_len = BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base));
	if (0 == flush_irq_counter) {
		do_gettimeofday(&start_timer);
	}
	if ((0 < valid_size) && (8 > valid_size)) {
		/* fewer than 8 bytes pending: HW won't move them, flush manually */
		i_ret = _tx_dma_flush(p_dma_info);
		flush_irq_counter++;
		if (MAX_CONTINIOUS_TIMES <= flush_irq_counter) {
			do_gettimeofday(&end_timer);
/*when btif tx fifo cannot accept any data and counts of bytes left in tx vfifo < 8 for a while
we assume that btif cannot send data for a long time
in order not to generate interrupt continiously, which may effect system's performance.
we clear tx flag and disable btif tx interrupt
*/
/*clear interrupt flag*/
			BTIF_CLR_BIT(TX_DMA_INT_FLAG(base),
				     TX_DMA_INT_FLAG_MASK);
/*vFIFO data has been read by DMA controller, just disable tx dma's irq*/
			i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
			BTIF_ERR_FUNC
			    ("**********************ERROR, ERROR, ERROR**************************\n");
			/*
			 * Fix: the original printed end_timer.tv_usec twice for
			 * the second timestamp instead of tv_sec/tv_usec, and
			 * used %d for the long timeval fields.
			 */
			BTIF_ERR_FUNC
			    ("BTIF Tx IRQ happened %d times (continiously), between %ld.%ld and %ld.%ld\n",
			     MAX_CONTINIOUS_TIMES,
			     (long)start_timer.tv_sec, (long)start_timer.tv_usec,
			     (long)end_timer.tv_sec, (long)end_timer.tv_usec);
			/* Fix: restart the storm detector so the next flush IRQ
			 * after the IRQ is re-enabled does not instantly
			 * re-trigger this path. */
			flush_irq_counter = 0;
		}
	} else if (vff_len == left_len) {
		/* vFIFO fully drained: the storm (if any) is over */
		flush_irq_counter = 0;
/*clear interrupt flag*/
		BTIF_CLR_BIT(TX_DMA_INT_FLAG(base), TX_DMA_INT_FLAG_MASK);
/*vFIFO data has been read by DMA controller, just disable tx dma's irq*/
		i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
	} else {
#if 0
		BTIF_ERR_FUNC
		    ("**********************WARNING**************************\n");
		BTIF_ERR_FUNC("invalid irq condition, dump register\n");
		hal_dma_dump_reg(p_dma_info, REG_TX_DMA_ALL);
#endif
		BTIF_DBG_FUNC
		    ("superious IRQ occurs, vff_len(%d), valid_size(%d), left_len(%d)\n",
		     vff_len, valid_size, left_len);
	}
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
/*
 * spm_go_to_sodi - enter Screen-On-Display-Idle (SODI) low-power state.
 *
 * Sequence: record progress breadcrumbs (AEE), pick MEMPLL mode, mask
 * IRQs and clone them into CIRQ, put the UART to sleep, load and kick
 * the SPM PCM firmware, execute WFI, then unwind everything in reverse
 * order. The ordering of these steps is hardware-mandated — do not
 * reorder.
 *
 * spm_flags: PCM behavior flags (modified locally before use)
 * spm_data:  unused in this path
 */
void spm_go_to_sodi(u32 spm_flags, u32 spm_data)
{
    struct wake_status wakesta;     /* filled by __spm_get_wakeup_status() after wakeup */
    unsigned long flags;
    struct mtk_irq_mask mask;       /* saved IRQ mask, restored at RESTORE_IRQ */
    wake_reason_t wr = WR_NONE;
    struct pcm_desc *pcmdesc = __spm_sodi.pcmdesc;
    struct pwr_ctrl *pwrctrl = __spm_sodi.pwrctrl;
    int vcore_status = 0;           //0:disable, 1:HPM, 2:LPM

#if SPM_AEE_RR_REC
    /* breadcrumb for post-mortem: we entered the SODI flow */
    aee_rr_rec_sodi_val(1<<SPM_SODI_ENTER);
#endif

#if defined (CONFIG_ARM_PSCI)||defined(CONFIG_MTK_PSCI)
    spm_flags &= ~SPM_DISABLE_ATF_ABORT;
#else
    spm_flags |= SPM_DISABLE_ATF_ABORT;
#endif

    if(gSpm_SODI_mempll_pwr_mode == 1)
    {
        spm_flags |= SPM_MEMPLL_CG_EN;   //MEMPLL CG mode
    }
    else
    {
        spm_flags &= ~SPM_MEMPLL_CG_EN;  //DDRPHY power down mode
    }

    set_pwrctrl_pcm_flags(pwrctrl, spm_flags);

    //If Vcore DVFS is disable, force to disable SODI internal Vcore DVS
    if (pwrctrl->pcm_flags_cust == 0)
    {
        if ((pwrctrl->pcm_flags & SPM_VCORE_DVFS_EN) == 0)
        {
            pwrctrl->pcm_flags |= SPM_VCORE_DVS_EVENT_DIS;
        }
    }

    //SODI will not decrease Vcore voltage in HPM mode.
    if ((pwrctrl->pcm_flags & SPM_VCORE_DVS_EVENT_DIS) == 0)
    {
        /* DDR not at the S1 frequency implies Vcore is in HPM */
        if (get_ddr_khz() != FDDR_S1_KHZ)
        {
#if SPM_AEE_RR_REC
            aee_rr_rec_sodi_val(aee_rr_curr_sodi_val()|(1<<SPM_SODI_VCORE_HPM));
#endif
            /*
            //modify by mtk
            //if DRAM freq is high,SPM will not enter event_vector to enter EMI self-refresh
            if(pwrctrl->pcm_flags_cust == 0)
            {
                pwrctrl->pcm_flags|=0x80;
            }
            */
            vcore_status = 1;
            //printk("SODI: get_ddr_khz() = %d\n", get_ddr_khz());
        }
        else
        {
#if SPM_AEE_RR_REC
            aee_rr_rec_sodi_val(aee_rr_curr_sodi_val()|(1<<SPM_SODI_VCORE_LPM));
#endif
            vcore_status = 2;
        }
    }

    //enable APxGPT timer
    soidle_before_wfi(0);

    /* lockdep is disabled across the idle transition: the spinlock is
     * released on a different "path" than lockdep can model here */
    lockdep_off();
    spin_lock_irqsave(&__spm_lock, flags);
    mt_irq_mask_all(&mask);
    mt_irq_unmask_for_sleep(SPM_IRQ0_ID);
    /* mirror GIC state into CIRQ so wake events are not lost during WFI */
    mt_cirq_clone_gic();
    mt_cirq_enable();

#if SPM_AEE_RR_REC
    aee_rr_rec_sodi_val(aee_rr_curr_sodi_val()|(1<<SPM_SODI_ENTER_UART_SLEEP));
#endif

    /* a busy UART vetoes SODI entry; unwind via the IRQ-restore path */
    if (request_uart_to_sleep()) {
        wr = WR_UART_BUSY;
        goto RESTORE_IRQ;
    }

#if SPM_AEE_RR_REC
    aee_rr_rec_sodi_val(aee_rr_curr_sodi_val()|(1<<SPM_SODI_ENTER_SPM_FLOW));
#endif

    __spm_reset_and_init_pcm(pcmdesc);

    /*
    * When commond-queue is in shut-down mode, SPM will hang if it tries to access commond-queue status.
    * Follwoing patch is to let SODI driver to notify SPM that commond-queue is in shut-down mode or not to avoid above SPM hang issue.
    * But, now display can automatically notify SPM that command-queue is shut-down or not, so following code is not needed anymore.
    */
#if 0
    //check GCE
    if(clock_is_on(MT_CG_INFRA_GCE))
    {
        pwrctrl->pcm_flags &= ~SPM_DDR_HIGH_SPEED;
    }
    else
    {
        pwrctrl->pcm_flags |= SPM_DDR_HIGH_SPEED;
    }
#endif

    __spm_kick_im_to_fetch(pcmdesc);
    __spm_init_pcm_register();
    __spm_init_event_vector(pcmdesc);

#if 0
    //In D2, can not set apsrc_req bit in SODI. It is used by Vcore DVS for GPU 550M in HPM mode
    //Display set SPM_PCM_SRC_REQ[0]=1'b1 to force DRAM not enter self-refresh mode
    if((spm_read(SPM_PCM_SRC_REQ)&0x00000001))
    {
        pwrctrl->pcm_apsrc_req = 1;
    }
    else
    {
        pwrctrl->pcm_apsrc_req = 0;
    }
#endif

    __spm_set_power_control(pwrctrl);
    __spm_set_wakeup_event(pwrctrl);

#if SODI_DVT_PCM_TIMER_DISABLE
    //PCM_Timer is enable in above '__spm_set_wakeup_event(pwrctrl);', disable PCM Timer here
    spm_write(SPM_PCM_CON1 ,spm_read(SPM_PCM_CON1)&(~CON1_PCM_TIMER_EN));
#endif

    spm_sodi_pre_process();

    __spm_kick_pcm_to_run(pwrctrl);

#if SPM_SODI_DUMP_REGS
    printk("============SODI Before============\n");
    spm_sodi_dump_regs(); //dump debug info
#endif

#if SPM_AEE_RR_REC
    aee_rr_rec_sodi_val(aee_rr_curr_sodi_val()|(1<<SPM_SODI_ENTER_WFI));
#endif

#ifdef SPM_SODI_PROFILE_TIME
    gpt_get_cnt(SPM_SODI_PROFILE_APXGPT,&soidle_profile[1]);
#endif

    /* actual low-power residency happens inside this call */
    spm_trigger_wfi_for_sodi(pwrctrl);

#ifdef SPM_SODI_PROFILE_TIME
    gpt_get_cnt(SPM_SODI_PROFILE_APXGPT,&soidle_profile[2]);
#endif

#if SPM_AEE_RR_REC
    aee_rr_rec_sodi_val(aee_rr_curr_sodi_val()|(1<<SPM_SODI_LEAVE_WFI));
#endif

#if SPM_SODI_DUMP_REGS
    printk("============SODI After=============\n");
    spm_sodi_dump_regs(); //dump debug info
#endif

    spm_sodi_post_process();

    __spm_get_wakeup_status(&wakesta);

    sodi_debug("emi-selfrefrsh cnt = %d, pcm_flag = 0x%x, SPM_PCM_RESERVE2 = 0x%x, vcore_status = %d, %s\n",
                spm_read(SPM_PCM_PASR_DPD_3), spm_read(SPM_PCM_FLAGS),
                spm_read(SPM_PCM_RESERVE2), vcore_status, pcmdesc->version);

    __spm_clean_after_wakeup();

#if SPM_AEE_RR_REC
    aee_rr_rec_sodi_val(aee_rr_curr_sodi_val()|(1<<SPM_SODI_ENTER_UART_AWAKE));
#endif

    request_uart_to_wakeup();

    wr = __spm_output_wake_reason(&wakesta, pcmdesc, false);
    if (wr == WR_PCM_ASSERT)
    {
        sodi_err("PCM ASSERT AT %u (%s), r13 = 0x%x, debug_flag = 0x%x\n",
                  wakesta.assert_pc, pcmdesc->version, wakesta.r13, wakesta.debug_flag);
    }

#if SPM_AEE_RR_REC
    aee_rr_rec_sodi_val(aee_rr_curr_sodi_val()|(1<<SPM_SODI_LEAVE_SPM_FLOW));
#endif

RESTORE_IRQ:
    /* unwind in reverse order of the setup above */
    mt_cirq_flush();
    mt_cirq_disable();
    mt_irq_mask_restore(&mask);
    spin_unlock_irqrestore(&__spm_lock, flags);
    lockdep_on();

    //stop APxGPT timer and enable core0 local timer
    soidle_after_wfi(0);

#if SODI_DVT_SPM_MEM_RW_TEST
    /* DVT-only: verify SPM did not corrupt the magic-number array */
    {
        static u32 magic_init = 0;
        int i =0;
        if(magic_init == 0){
            magic_init++;
            printk("magicNumArray:0x%p",magicArray);
        }
        for(i=0;i<16;i++)
        {
            if(magicArray[i]!=SODI_DVT_MAGIC_NUM)
            {
                printk("Error: sodi magic number no match!!!");
                ASSERT(0);
            }
        }
        if (i>=16)
            printk("SODI_DVT_SPM_MEM_RW_TEST pass (count = %d)\n", magic_init);
    }
#endif

#if SPM_AEE_RR_REC
    /* clear breadcrumbs: SODI cycle completed cleanly */
    aee_rr_rec_sodi_val(0);
#endif
}
/*
 * Handle a Mali GPU power-mode transition request.
 *
 * power_mode: MALI_POWER_MODE_ON, MALI_POWER_MODE_LIGHT_SLEEP or
 *             MALI_POWER_MODE_DEEP_SLEEP (light and deep sleep are
 *             treated identically here: gate the G3D clock).
 *
 * bPoweroff tracks the driver-visible power state (1 = off); clock
 * toggling is done under mfg_pwr_lock and only when clock_is_on()
 * disagrees with the requested state, so repeated requests are no-ops.
 *
 * Returns 0. Fix: the original had no return statement at all in a
 * non-void function — undefined behavior if the caller reads the value.
 */
int mali_platform_power_mode_change(mali_power_mode power_mode)
{
	unsigned long flags;

	switch (power_mode) {
	case MALI_POWER_MODE_ON:
		MALI_DEBUG_PRINT(3, ("Mali platform: Got MALI_POWER_MODE_ON event, %s\n",
		                     atomic_read((atomic_t *)&bPoweroff) ? "powering on" : "already on"));
		if (atomic_read((atomic_t *)&bPoweroff) == 1) {
			MALI_DEBUG_PRINT(2,("[+]MFG enable_clock \n"));
			mfg_pwr_lock(flags);
			if (!clock_is_on(MT_CG_MFG_PDN_BG3D_SW_CG)) {
				enable_clock(MT_CG_MFG_PDN_BG3D_SW_CG, "G3D_DRV");
				/// enable WHPLL and set the GPU freq. to 500MHz
				if(get_gpu_level() != GPU_LEVEL_0){
					clkmux_sel(MT_CLKMUX_MFG_MUX_SEL, MT_CG_GPU_500P5M_EN, "G3D_DRV");
				}
			}
			mfg_pwr_unlock(flags);
			MALI_DEBUG_PRINT(2,("[-]MFG enable_clock \n"));
#if defined(CONFIG_MALI400_PROFILING)
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
						      MALI_PROFILING_EVENT_CHANNEL_GPU |
						      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
						      500, 1200/1000, 0, 0, 0);
#endif
			atomic_set((atomic_t *)&bPoweroff, 0);
		}
		break;
	case MALI_POWER_MODE_LIGHT_SLEEP:
	case MALI_POWER_MODE_DEEP_SLEEP:
		MALI_DEBUG_PRINT(1, ("Mali platform: Got %s event, %s\n",
		                     power_mode == MALI_POWER_MODE_LIGHT_SLEEP ?
		                     "MALI_POWER_MODE_LIGHT_SLEEP" : "MALI_POWER_MODE_DEEP_SLEEP",
		                     atomic_read((atomic_t *)&bPoweroff) ? "already off" : "powering off"));
		if (atomic_read((atomic_t *)&bPoweroff) == 0) {
			MALI_DEBUG_PRINT(2,("[+]MFG disable_clock \n"));
			mfg_pwr_lock(flags);
			if (clock_is_on(MT_CG_MFG_PDN_BG3D_SW_CG)) {
				disable_clock(MT_CG_MFG_PDN_BG3D_SW_CG, "G3D_DRV");
			}
			mfg_pwr_unlock(flags);
			MALI_DEBUG_PRINT(2,("[-]MFG disable_clock \n"));
#if defined(CONFIG_MALI400_PROFILING)
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
						      MALI_PROFILING_EVENT_CHANNEL_GPU |
						      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
						      0, 0, 0, 0, 0);
#endif
			atomic_set((atomic_t *)&bPoweroff, 1);
		}
		break;
	default:
		/* unknown mode: leave the current power state untouched */
		break;
	}

	return 0;
}
/*
 * hal_tx_dma_dump_reg - dump the Tx DMA channel's registers for debugging.
 *
 * p_dma_info: the Tx DMA channel to dump
 * flag:       which register set to dump (only REG_TX_DMA_ALL is supported)
 *
 * Returns 0 on success, -1 when the DMA clock is off or flag is unknown.
 * Reads registers without holding g_clk_cg_spinlock (locking deliberately
 * commented out by the original author — presumably dump-only best effort).
 */
static int hal_tx_dma_dump_reg(P_MTK_DMA_INFO_STR p_dma_info,
			       ENUM_BTIF_REG_ID flag)
{
	int i_ret = -1;
	unsigned int base = p_dma_info->base;
	unsigned int int_flag = 0;
	unsigned int enable = 0;
	unsigned int stop = 0;
	unsigned int flush = 0;
	unsigned int wpt = 0;
	unsigned int rpt = 0;
	unsigned int int_buf = 0;
	unsigned int valid_size = 0;
/*	unsigned long irq_flag = 0;*/

/*	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);*/
	/* reading registers with the clock gated would return garbage / hang */
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
/*		spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);*/
		BTIF_ERR_FUNC("%s: clock is off, this should never happen!!!\n",
			      __FILE__);
		return i_ret;
	}
	/* snapshot the interesting registers up front */
	int_flag = BTIF_READ32(TX_DMA_INT_FLAG(base));
	enable = BTIF_READ32(TX_DMA_EN(base));
	stop = BTIF_READ32(TX_DMA_STOP(base));
	flush = BTIF_READ32(TX_DMA_FLUSH(base));
	wpt = BTIF_READ32(TX_DMA_VFF_WPT(base));
	rpt = BTIF_READ32(TX_DMA_VFF_RPT(base));
	int_buf = BTIF_READ32(TX_DMA_INT_BUF_SIZE(base));
	valid_size = BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base));
/*	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);*/

	BTIF_INFO_FUNC("DMA's clock is on\n");
	BTIF_INFO_FUNC("Tx DMA's base address: 0x%x\n", base);

	if (REG_TX_DMA_ALL == flag) {
		/* Fix: was "TX_EN(:0x%x" — stray '(' in the log message */
		BTIF_INFO_FUNC("TX_EN:0x%x\n", enable);
		BTIF_INFO_FUNC("INT_FLAG:0x%x\n", int_flag);
		BTIF_INFO_FUNC("TX_STOP:0x%x\n", stop);
		BTIF_INFO_FUNC("TX_FLUSH:0x%x\n", flush);
		BTIF_INFO_FUNC("TX_WPT:0x%x\n", wpt);
		BTIF_INFO_FUNC("TX_RPT:0x%x\n", rpt);
		BTIF_INFO_FUNC("INT_BUF_SIZE:0x%x\n", int_buf);
		BTIF_INFO_FUNC("VALID_SIZE:0x%x\n", valid_size);
		BTIF_INFO_FUNC("INT_EN:0x%x\n",
			       BTIF_READ32(TX_DMA_INT_EN(base)));
		BTIF_INFO_FUNC("TX_RST:0x%x\n",
			       BTIF_READ32(TX_DMA_RST(base)));
		BTIF_INFO_FUNC("VFF_ADDR:0x%x\n",
			       BTIF_READ32(TX_DMA_VFF_ADDR(base)));
		BTIF_INFO_FUNC("VFF_LEN:0x%x\n",
			       BTIF_READ32(TX_DMA_VFF_LEN(base)));
		BTIF_INFO_FUNC("TX_THRE:0x%x\n",
			       BTIF_READ32(TX_DMA_VFF_THRE(base)));
		BTIF_INFO_FUNC("W_INT_BUF_SIZE:0x%x\n",
			       BTIF_READ32(TX_DMA_W_INT_BUF_SIZE(base)));
		BTIF_INFO_FUNC("LEFT_SIZE:0x%x\n",
			       BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base)));
		BTIF_INFO_FUNC("DBG_STATUS:0x%x\n",
			       BTIF_READ32(TX_DMA_DEBUG_STATUS(base)));
		i_ret = 0;
	} else {
		BTIF_WARN_FUNC("unknown flag:%d\n", flag);
	}

	/*
	 * Fix: was "(!(stop && DMA_STOP_BIT))" — logical AND made any nonzero
	 * stop register read as "stoped"; a bitwise test of the stop bit is
	 * intended (mirrors "(enable & DMA_EN_BIT)" on the same line).
	 */
	BTIF_INFO_FUNC("tx dma %s\n",
		       (enable & DMA_EN_BIT) && (!(stop & DMA_STOP_BIT)) ?
		       "enabled" : "stoped");
	BTIF_INFO_FUNC("data in tx dma is %s sent by HW\n",
		       ((wpt == rpt) && (int_buf == 0)) ?
		       "completely" : "not completely");
	return i_ret;
}