/*
 * Let's power down on idle, but only if we are really
 * idle, because once we start down the path of
 * going idle we continue to do idle even if we get
 * a clock tick interrupt . .
 */
void omap_pm_idle(void)
{
	/* Mask of ARM_IDLECT1 bits that may be cleared for this idle pass;
	 * maintained elsewhere in the kernel, hence the extern. */
	extern __u32 arm_idlect1_mask;
	__u32 use_idlect1 = arm_idlect1_mask;
	int do_sleep = 0;

	/* Enter the idle path atomically: neither IRQ nor FIQ may observe
	 * the ARM_IDLECT1 reprogramming below half-done. */
	local_irq_disable();
	local_fiq_disable();
	if (need_resched()) {
		/* Work became runnable on the way here: back out in reverse
		 * order and let the scheduler run instead. */
		local_fiq_enable();
		local_irq_enable();
		return;
	}

#ifdef CONFIG_OMAP_MPU_TIMER
#warning Enable 32kHz OS timer in order to allow sleep states in idle
	/* Keep bit 9 set (presumably the MPU-timer clock domain, per the
	 * warning above) so only shallow idle happens -- TODO confirm. */
	use_idlect1 = use_idlect1 & ~(1 << 9);
#else
	/* "while" is used purely so "break" can bail out of the checks. */
	while (enable_dyn_sleep) {

#ifdef CONFIG_CBUS_TAHVO_USB
		extern int vbus_active;
		/* Clock requirements? */
		if (vbus_active)
			break;
#endif
		do_sleep = 1;
		break;
	}
#endif

#ifdef CONFIG_OMAP_DM_TIMER
	/* Let the dual-mode timer code veto idling of clocks it needs. */
	use_idlect1 = omap_dm_timer_modify_idlect_mask(use_idlect1);
#endif

	if (omap_dma_running())
		/* Bit 6 appears to gate a clock DMA depends on; keep it
		 * running while a transfer is active -- TODO confirm. */
		use_idlect1 &= ~(1 << 6);

	/* We should be able to remove the do_sleep variable and multiple
	 * tests above as soon as drivers, timer and DMA code have been fixed.
	 * Even the sleep block count should become obsolete. */
	if ((use_idlect1 != ~0) || !do_sleep) {
		/* Shallow idle: gate only the clocks nobody objected to,
		 * wait for an interrupt, then restore ARM_IDLECT1. */
		__u32 saved_idlect1 = omap_readl(ARM_IDLECT1);
		if (cpu_is_omap15xx())
			use_idlect1 &= OMAP1510_BIG_SLEEP_REQUEST;
		else
			use_idlect1 &= OMAP1610_IDLECT1_SLEEP_VAL;
		omap_writel(use_idlect1, ARM_IDLECT1);
		/* CP15 c7/c0/4: the ARM "wait for interrupt" operation. */
		__asm__ volatile ("mcr p15, 0, r0, c7, c0, 4");
		omap_writel(saved_idlect1, ARM_IDLECT1);
		local_fiq_enable();
		local_irq_enable();
		return;
	}
	/* NOTE(review): SOURCE is truncated here -- the deep-sleep branch and
	 * the function's closing brace are not visible in this chunk. */
/*
 * Decide whether the deepest cpuidle state may be entered.  Returns 0
 * when nothing blocks it.  BAIL_ON() is a project macro; it presumably
 * returns non-zero from this function when its condition holds --
 * TODO(review): confirm BAIL_ON semantics against its definition.
 */
static int omap3_idle_bm_check(void)
{
	u8 state;

	/* Check if any modules other than debug uart and gpios are active*/
	BAIL_ON(CM_FCLKEN1_CORE & CORE_FCLK_MASK);
	BAIL_ON(CM_FCLKEN_SGX & SGX_FCLK_MASK);
	BAIL_ON(CM_FCLKEN_CAM & CAM_FCLK_MASK);
	BAIL_ON(CM_FCLKEN_PER & PER_FCLK_MASK);
	BAIL_ON(CM_FCLKEN_USBHOST & USBHOST_FCLK_MASK);
	BAIL_ON(CM_FCLKEN3_CORE & CORE3_FCLK_MASK);

	/* To allow core retention during LPR scenario */
	BAIL_ON(!omap2_disp_lpr_is_enabled() && (CM_FCLKEN_DSS & DSS_FCLK_MASK));

	/* Work around an issue that popped up in ES3.1. For some reason the
	 * ICLKEN1_CORE bit 3 is set and AUTOIDLE1_CORE bit 3 is not. This is
	 * preventing the CORE from going to RET. If we detect this condition
	 * here we fix it...
	 * (Must run before the CORE1 ICLK check below, otherwise that check
	 * would bail on the very condition being repaired here.)
	 */
	if (CM_ICLKEN1_CORE & ~CM_AUTOIDLE1_CORE & (1<<3)) {
		CM_AUTOIDLE1_CORE |= (1<<3);
	}

	/* Check if any modules have ICLK bit enabled and interface clock
	 * autoidle disabled */
	BAIL_ON(CORE1_ICLK_VALID & (CM_ICLKEN1_CORE & ~CM_AUTOIDLE1_CORE));

	/* Check for secure modules which have only ICLK. Do not check for rng
	 * module. It has been ensured that if rng is active cpu idle will
	 * never be entered.  (~4 masks off bit 2, evidently the rng ICLK.) */
	BAIL_ON(CORE2_ICLK_VALID & CM_ICLKEN2_CORE & ~4);

	/* Enabling SGX ICLK will prevent CORE ret*/
	BAIL_ON(SGX_ICLK_VALID & (CM_ICLKEN_SGX));
	BAIL_ON(CORE3_ICLK_VALID & (CM_ICLKEN3_CORE & ~CM_AUTOIDLE3_CORE));
	BAIL_ON(USBHOST_ICLK_VALID & (CM_ICLKEN_USBHOST & ~CM_AUTOIDLE_USBHOST));
	BAIL_ON(DSS_ICLK_VALID & (CM_ICLKEN_DSS & ~CM_AUTOIDLE_DSS));
	BAIL_ON(CAM_ICLK_VALID & (CM_ICLKEN_CAM & ~CM_AUTOIDLE_CAM));
	BAIL_ON(PER_ICLK_VALID & (CM_ICLKEN_PER & ~CM_AUTOIDLE_PER));
	BAIL_ON(WKUP_ICLK_VALID & (CM_ICLKEN_WKUP & ~CM_AUTOIDLE_WKUP));

	/* Check if IVA power domain is ON */
	prcm_get_power_domain_state(DOM_IVA2, &state);
	BAIL_ON(state == PRCM_ON);

	/* Check if a DMA transfer is active */
	BAIL_ON(omap_dma_running());

	/* Check if debug UART is active */
	BAIL_ON(pre_uart_activity());

	/* All checks passed: emit a trace marker (the string suggests the
	 * "C5" state) and report that deep idle is allowed. */
	clock_bail_trace("--- C5 ---", 0, __LINE__);

	return 0;
}
/*
 * Idle-loop hook: gate whatever clocks are safe to gate, wait for an
 * interrupt, then restore the clock state.  A near-duplicate of
 * omap_pm_idle() elsewhere in this file.
 */
void omap1_pm_idle(void)
{
	/* Mask of ARM_IDLECT1 bits that may be cleared for this idle pass;
	 * maintained elsewhere in the kernel, hence the extern. */
	extern __u32 arm_idlect1_mask;
	__u32 use_idlect1 = arm_idlect1_mask;
	int do_sleep = 0;

	/* Block IRQ and FIQ so the register juggling below is atomic. */
	local_irq_disable();
	local_fiq_disable();
	if (need_resched()) {
		/* Runnable work appeared -- undo in reverse order and bail. */
		local_fiq_enable();
		local_irq_enable();
		return;
	}

#ifdef CONFIG_OMAP_MPU_TIMER
#warning Enable 32kHz OS timer in order to allow sleep states in idle
	/* Keep bit 9 set (presumably the MPU-timer clock domain, per the
	 * warning above) so only shallow idle happens -- TODO confirm. */
	use_idlect1 = use_idlect1 & ~(1 << 9);
#else
	/* "while" is used purely so "break" can bail out of the checks. */
	while (enable_dyn_sleep) {

#ifdef CONFIG_CBUS_TAHVO_USB
		extern int vbus_active;
		/* USB VBUS present: keep clocks, skip the sleep request. */
		if (vbus_active)
			break;
#endif
		do_sleep = 1;
		break;
	}
#endif

#ifdef CONFIG_OMAP_DM_TIMER
	/* Let the dual-mode timer code veto idling of clocks it needs. */
	use_idlect1 = omap_dm_timer_modify_idlect_mask(use_idlect1);
#endif

	if (omap_dma_running())
		/* Bit 6 appears to gate a clock DMA depends on; keep it
		 * running while a transfer is active -- TODO confirm. */
		use_idlect1 &= ~(1 << 6);

	if ((use_idlect1 != ~0) || !do_sleep) {
		/* Shallow idle: gate only the clocks nobody objected to,
		 * wait for an interrupt, then restore ARM_IDLECT1. */
		__u32 saved_idlect1 = omap_readl(ARM_IDLECT1);
		if (cpu_is_omap15xx())
			use_idlect1 &= OMAP1510_BIG_SLEEP_REQUEST;
		else
			use_idlect1 &= OMAP1610_IDLECT1_SLEEP_VAL;
		omap_writel(use_idlect1, ARM_IDLECT1);
		/* CP15 c7/c0/4: the ARM "wait for interrupt" operation. */
		__asm__ volatile ("mcr p15, 0, r0, c7, c0, 4");
		omap_writel(saved_idlect1, ARM_IDLECT1);
		local_fiq_enable();
		local_irq_enable();
		return;
	}
	/* NOTE(review): SOURCE is truncated here -- the deep-sleep branch and
	 * the function's closing brace are not visible in this chunk. */
/*
 * omap2_can_sleep - decide whether entering full sleep is allowed.
 *
 * Sleep is vetoed while any functional clock is still active, while the
 * oscillator clock has users besides ourselves, or while a DMA transfer
 * is in flight.  Returns 1 when sleeping is OK, 0 otherwise.
 */
static int omap2_can_sleep(void)
{
	int blocked = omap2_fclks_active() ||
		      (osc_ck->usecount > 1) ||
		      omap_dma_running();

	return blocked ? 0 : 1;
}
/*
 * omap2_can_sleep - decide whether entering full sleep is allowed.
 *
 * Returns 1 when sleeping is OK, 0 when any of the following vetoes it:
 * dynamic sleep disabled, active functional clocks, outstanding sleep
 * blocks, extra users of the oscillator clock, or a running DMA
 * transfer.  Short-circuit order matches the original guard chain.
 */
static int omap2_can_sleep(void)
{
	int blocked = !enable_dyn_sleep ||
		      omap2_fclks_active() ||
		      (atomic_read(&sleep_block) > 0) ||
		      (osc_ck->usecount > 1) ||
		      omap_dma_running();

	return blocked ? 0 : 1;
}
/*
 * Let's power down on idle, but only if we are really
 * idle, because once we start down the path of
 * going idle we continue to do idle even if we get
 * a clock tick interrupt . .
 */
void omap1_pm_idle(void)
{
	/* Mask of ARM_IDLECT1 bits that may be cleared for this idle pass;
	 * maintained elsewhere in the kernel, hence the extern. */
	extern __u32 arm_idlect1_mask;
	__u32 use_idlect1 = arm_idlect1_mask;

	/* Only FIQs are blocked here (unlike the older variants of this
	 * function, which also disabled IRQs). */
	local_fiq_disable();

#if defined(CONFIG_OMAP_MPU_TIMER) && !defined(CONFIG_OMAP_DM_TIMER)
	/* Keep bit 9 set (presumably the MPU-timer clock domain) so it is
	 * never gated while the MPU timer is the clocksource -- TODO confirm. */
	use_idlect1 = use_idlect1 & ~(1 << 9);
#endif

#ifdef CONFIG_OMAP_DM_TIMER
	/* Let the dual-mode timer code veto idling of clocks it needs. */
	use_idlect1 = omap_dm_timer_modify_idlect_mask(use_idlect1);
#endif

	if (omap_dma_running())
		/* Bit 6 appears to gate a clock DMA depends on; keep it
		 * running while a transfer is active -- TODO confirm. */
		use_idlect1 &= ~(1 << 6);

	/*
	 * We should be able to remove the do_sleep variable and multiple
	 * tests above as soon as drivers, timer and DMA code have been fixed.
	 * Even the sleep block count should become obsolete.
	 */
	if ((use_idlect1 != ~0) || !enable_dyn_sleep) {
		/* Shallow idle: gate only the clocks nobody objected to,
		 * wait for an interrupt, then restore ARM_IDLECT1. */
		__u32 saved_idlect1 = omap_readl(ARM_IDLECT1);
		if (cpu_is_omap15xx())
			use_idlect1 &= OMAP1510_BIG_SLEEP_REQUEST;
		else
			use_idlect1 &= OMAP1610_IDLECT1_SLEEP_VAL;
		omap_writel(use_idlect1, ARM_IDLECT1);
		/* CP15 c7/c0/4: the ARM "wait for interrupt" operation. */
		__asm__ volatile ("mcr p15, 0, r0, c7, c0, 4");
		omap_writel(saved_idlect1, ARM_IDLECT1);
		local_fiq_enable();
		return;
	}
	/* NOTE(review): SOURCE is truncated here -- the deep-sleep branch and
	 * the function's closing brace are not visible in this chunk. */
static int omap3_idle_bm_check(void) { u32 core_dep = 0; u8 state; do { /* Check if any modules other than debug uart and gpios are active*/ core_dep = (CM_FCLKEN1_CORE & CORE_FCLK_MASK) | (CM_FCLKEN_SGX & SGX_FCLK_MASK) | (CM_FCLKEN_CAM & CAM_FCLK_MASK) | (CM_FCLKEN_PER & PER_FCLK_MASK) | (CM_FCLKEN_USBHOST & USBHOST_FCLK_MASK) | (CM_FCLKEN3_CORE & CORE3_FCLK_MASK); /* To allow core retention during LPR scenario */ #ifdef CONFIG_OMAP_DISPLAY if (!lpr_enabled) core_dep |= (CM_FCLKEN_DSS & DSS_FCLK_MASK); #endif if (core_dep) break; /* Check if any modules have ICLK bit enabled and interface clock */ /* autoidle disabled */ core_dep |= (CORE1_ICLK_VALID & (CM_ICLKEN1_CORE & ~CM_AUTOIDLE1_CORE)); /* Check for secure modules which have only ICLK */ /* Do not check for rng module.It has been ensured that * if rng is active cpu idle will never be entered */ core_dep |= (CORE2_ICLK_VALID & CM_ICLKEN2_CORE & ~4); if (core_dep) break; /* Enabling SGX ICLK will prevent CORE ret*/ core_dep |= SGX_ICLK_VALID & (CM_ICLKEN_SGX); core_dep |= (CORE3_ICLK_VALID & (CM_ICLKEN3_CORE & ~CM_AUTOIDLE3_CORE)); core_dep |= (USBHOST_ICLK_VALID & (CM_ICLKEN_USBHOST & ~CM_AUTOIDLE_USBHOST)); core_dep |= (DSS_ICLK_VALID & (CM_ICLKEN_DSS & ~CM_AUTOIDLE_DSS)); core_dep |= (CAM_ICLK_VALID & (CM_ICLKEN_CAM & ~CM_AUTOIDLE_CAM)); core_dep |= (PER_ICLK_VALID & (CM_ICLKEN_PER & ~CM_AUTOIDLE_PER)); core_dep |= (WKUP_ICLK_VALID & (CM_ICLKEN_WKUP & ~CM_AUTOIDLE_WKUP)); if (core_dep) break; /* Check if IVA power domain is ON */ prcm_get_power_domain_state(DOM_IVA2, &state); if (state == PRCM_ON) { core_dep |= 1; break; } /* Check if a DMA transfer is active */ if (omap_dma_running()) { core_dep |= 1; break; } /* Check if debug UART is active */ if (!pre_uart_inactivity()) core_dep |= 1; } while (0); return core_dep; }