/**
 * tick_resume_oneshot - resume oneshot mode
 */
void tick_resume_oneshot(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
	clockevents_program_event(dev, ktime_get(), true);
}
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}
/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);
		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}
/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, ktime_get()))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
/**
 * tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz)
 */
void tick_setup_oneshot(struct clock_event_device *newdev,
			void (*handler)(struct clock_event_device *),
			ktime_t next_event)
{
	newdev->event_handler = handler;
	clockevents_set_state(newdev, CLOCK_EVT_STATE_ONESHOT);
	clockevents_program_event(newdev, next_event, true);
}
/**
 * tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz)
 */
void tick_setup_oneshot(struct clock_event_device *newdev,
			void (*handler)(struct clock_event_device *),
			ktime_t next_event)
{
	newdev->event_handler = handler;
	clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT);
	clockevents_program_event(newdev, next_event, ktime_get());
}
static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	return clockevents_program_event(bc, expires, force);
}
/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events with interrupts disabled! Returns 0 on success,
 * -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return 0;

	return clockevents_program_event(dev, dev->next_event, false);
}
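/*
 * A minimal usage sketch for clockevents_update_freq(): a driver whose
 * input clock rate just changed reconfigures its event device. The
 * surrounding context ('my_evt', 'my_clk_rate_changed') is hypothetical;
 * only clockevents_update_freq() itself comes from the code above. Per
 * the kerneldoc, this must run on the CPU that owns the device, with
 * interrupts disabled.
 */
#include <linux/clockchips.h>
#include <linux/printk.h>

static struct clock_event_device my_evt;	/* hypothetical device */

static void my_clk_rate_changed(u32 new_rate_hz)
{
	/* Recompute mult/shift and, in oneshot mode, re-arm next_event. */
	if (clockevents_update_freq(&my_evt, new_rate_hz))
		pr_warn("my_evt: next event was already in the past\n");
}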
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->state != CLOCK_EVT_STATE_ONESHOT)
		clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}
/**
 * tick_program_event internal worker function
 */
static int __tick_program_event(struct clock_event_device *dev,
				ktime_t expires, int force)
{
	ktime_t now = ktime_get();

	while (1) {
		int ret = clockevents_program_event(dev, expires, now);

		if (!ret || !force)
			return ret;
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, dev->min_delta_ns));
	}
}
/**
 * tick_program_event
 */
int tick_program_event(ktime_t expires, int force)
{
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	ktime_t now = ktime_get();

	while (1) {
		int ret = clockevents_program_event(dev, expires, now);

		if (!ret || !force)
			return ret;
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, dev->min_delta_ns));
	}
}
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	ktime_t now = ktime_get();
	int res;

	for (;;) {
		res = clockevents_program_event(bc, expires, now);
		if (!res || !force)
			return res;
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
	}
}
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);
		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}
/**
 * tick_program_event internal worker function
 */
int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
			   int force)
{
	ktime_t now = ktime_get();
	int i;

	for (i = 0;;) {
		int ret = clockevents_program_event(dev, expires, now);

		if (!ret || !force)
			return ret;

		dev->retries++;
		/*
		 * We tried 3 times to program the device with the given
		 * min_delta_ns. If that's not working then we increase it
		 * and emit a warning.
		 */
		if (++i > 2) {
			/* Increase the min. delta and try again */
			if (tick_increase_min_delta(dev)) {
				/*
				 * Get out of the loop if min_delta_ns
				 * hit the limit already. That's
				 * better than staying here forever.
				 *
				 * We clear next_event so we have a
				 * chance that the box survives.
				 */
				printk(KERN_WARNING
				       "CE: Reprogramming failure. Giving up\n");
				dev->next_event.tv64 = KTIME_MAX;
				return -ETIME;
			}
			i = 0;
		}

		now = ktime_get();
		expires = ktime_add_ns(now, dev->min_delta_ns);
	}
}
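/*
 * Sketch of the caller's view of tick_dev_program_event(): with force
 * set, the worker above keeps retrying with a growing min_delta_ns, so
 * a caller only has to handle the final -ETIME give-up. The helper name
 * and 'delta_ns' parameter are illustrative, not part of the original.
 */
#include <linux/clockchips.h>
#include <linux/ktime.h>
#include <linux/printk.h>

static void arm_tick_after(struct clock_event_device *dev, u64 delta_ns)
{
	ktime_t expires = ktime_add_ns(ktime_get(), delta_ns);

	/* force == 1: retry internally; only a hard failure gets out. */
	if (tick_dev_program_event(dev, expires, 1))
		pr_warn("tick: reprogramming failed even after retries\n");
}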
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
	/*
	 * The cpu might have transitioned to HIGHRES or NOHZ mode via
	 * update_process_times() -> run_local_timers() ->
	 * hrtimer_run_queues().
	 */
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif

	if (!clockevent_state_oneshot(dev))
		return;
	for (;;) {
		/*
		 * Setup the next period for devices, which do not have
		 * periodic mode:
		 */
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}
/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	dev->next_event.tv64 = KTIME_MAX;
	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_do_periodic_broadcast();
	}
}
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return clockevents_program_event(bc, expires, force);
}
static int omap3_enter_idle(struct cpuidle_device *dev,
			    struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx;
	u8 cur_per_state, cur_neon_state, pre_neon_state, pre_per_state;
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 fclken_core, iclken_core, fclken_per, iclken_per;
	u32 sdrcpwr_val, sdrc_power_register = 0x0;
	int wakeup_latency;
	int core_sleep_flg = 0;
	u32 per_ctx_saved = 0;
	int ret = -1;
#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	int idle_status = 0;
#endif

	local_irq_disable();
	local_fiq_disable();

	if (need_resched()) {
		local_irq_enable();
		local_fiq_enable();
		return 0;
	}

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	sw_latency_arr[swlat_arr_wrptr].sleep_start =
		omap_32k_sync_timer_read();
#endif
	PM_PREPWSTST_MPU = 0xFF;
	PM_PREPWSTST_CORE = 0xFF;
	PM_PREPWSTST_NEON = 0xFF;
	PM_PREPWSTST_PER = 0xFF;

	cx = cpuidle_get_statedata(state);
	target_state.mpu_state = cx->mpu_state;
	target_state.core_state = cx->core_state;

	/* Take a time marker for residency */
	getnstimeofday(&ts_preidle);

	if (cx->type == OMAP3_STATE_C0) {
		omap_sram_idle();
		goto return_sleep_time;
	}

	if (cx->type > OMAP3_STATE_C1)
		sched_clock_idle_sleep_event(); /* about to enter deep idle */

	correct_target_state();
	wakeup_latency = cx->wakeup_latency;
	if (target_state.core_state != cx->core_state) {
		/*
		 * Currently, this can happen only for core_off. Adjust
		 * wakeup latency to that of the core_cswr state. Hard
		 * coded for now and needs to be made more generic:
		 * omap3_power_states[4] is CSWR for core.
		 */
		wakeup_latency = omap3_power_states[4].wakeup_latency;
	}

	/* Reprogram next wake up tick to adjust for wake latency */
	if (wakeup_latency > 1000) {
		struct tick_device *d = tick_get_device(smp_processor_id());
		ktime_t adjust, next, now = ktime_get();

		if (ktime_to_ns(ktime_sub(d->evtdev->next_event, now)) >
		    (wakeup_latency * 1000 + NSEC_PER_MSEC)) {
			adjust = ktime_set(0, (wakeup_latency * 1000));
			next = ktime_sub(d->evtdev->next_event, adjust);
			clockevents_program_event(d->evtdev, next, now);
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

	prcm_get_power_domain_state(DOM_PER, &cur_per_state);
	prcm_get_power_domain_state(DOM_NEON, &cur_neon_state);

	fclken_core = CM_FCLKEN1_CORE;
	iclken_core = CM_ICLKEN1_CORE;
	fclken_per = CM_FCLKEN_PER;
	iclken_per = CM_ICLKEN_PER;

	/*
	 * If target state is core_off, save registers
	 * before changing anything.
	 */
	if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
		prcm_save_registers(&target_state);
		omap_uart_save_ctx(0);
		omap_uart_save_ctx(1);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

	/* Program MPU and NEON to target state */
	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
		if ((cur_neon_state == PRCM_ON) &&
		    (target_state.neon_state != PRCM_ON)) {
			if (target_state.neon_state == PRCM_OFF)
				omap3_save_neon_context();
#ifdef CONFIG_HW_SUP_TRANS
			/* Facilitating SWSUP RET, from HWSUP mode */
			prcm_set_clock_domain_state(DOM_NEON, PRCM_NO_AUTO,
						    PRCM_FALSE);
			prcm_set_power_domain_state(DOM_NEON, PRCM_ON,
						    PRCM_FORCE);
#endif
			prcm_force_power_domain_state(DOM_NEON,
						      target_state.neon_state);
		}
#ifdef CONFIG_MPU_OFF
		/* Populate scratchpad restore address */
		*(scratchpad_restore_addr) = restore_pointer_address;
#endif
		if (target_state.core_state > PRCM_CORE_CSWR_MEMRET) {
			ret = omap3_save_secure_ram_context(
						target_state.core_state);
			if (ret)
				printk(KERN_ERR "omap3_save_secure_ram_context "
				       "failed in idle %x\n", ret);
			if (core_off_notification != NULL)
				core_off_notification(PRCM_TRUE);
		}
		prcm_set_mpu_domain_state(target_state.mpu_state);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

	/* Program CORE and PER to target state */
	if (target_state.core_state > PRCM_CORE_ACTIVE) {
		/* Log core sleep attempt */
		core_sleep_flg = 1;

#ifdef CONFIG_OMAP_SMARTREFLEX
		disable_smartreflex(SR1_ID);
		disable_smartreflex(SR2_ID);
#endif
		/* Workaround for Silicon Errata 1.64 */
		if (is_sil_rev_equal_to(OMAP3430_REV_ES1_0)) {
			if (CM_CLKOUT_CTRL & 0x80)
				CM_CLKOUT_CTRL &= ~(0x80);
		}
		prcm_set_core_domain_state(target_state.core_state);

		/* Enable Autoidle for GPT1 explicitly - Errata 1.4 */
		CM_AUTOIDLE_WKUP |= 0x1;
		/* Disable UART-1,2 */
		CM_FCLKEN1_CORE &= ~0x6000;
		/* Disable HSUSB OTG ICLK explicitly */
		CM_ICLKEN1_CORE &= ~0x10;
		/* Enabling IO_PAD capabilities */
		PM_WKEN_WKUP |= 0x100;

		if (cur_per_state == PRCM_ON && cx->type >= OMAP3_STATE_C3 &&
		    !(CM_FCLKEN_PER & PER_FCLK_MASK)) {
			/* In ES3.1, enable IO daisy chain */
			if (is_sil_rev_greater_than(OMAP3430_REV_ES3_0)) {
				PM_WKEN_WKUP |= 0x10000;
				/* Wait for daisy chain to be ready */
				while ((PM_WKST_WKUP & 0x10000) == 0x0)
					;
				/* Clear the status */
				PM_WKST_WKUP &= ~0x10000;
			}
			omap3_save_per_context();
			prcm_set_power_domain_state(DOM_PER, PRCM_OFF,
						    PRCM_AUTO);
			per_ctx_saved = 1;
			CM_FCLKEN_PER = 0;
			CM_ICLKEN_PER = 0;
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

	if (target_state.core_state == PRCM_CORE_OFF) {
		if (!is_device_type_gp() &&
		    is_sil_rev_greater_than(OMAP3430_REV_ES2_1)) {
			/* ES3 series bug */
			sdrc_power_register = sdrc_read_reg(SDRC_POWER);
			sdrcpwr_val = sdrc_power_register &
				      ~(SDRC_PWR_AUTOCOUNT_MASK |
					SDRC_PWR_CLKCTRL_MASK);
			lock_scratchpad_sem();
			sdrcpwr_val |= 0x120;
			save_to_scratchpad(SCRATHPAD_SDRCPWR_OFFSET,
					   sdrcpwr_val);
			unlock_scratchpad_sem();
		}
	}

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	sw_latency_arr[swlat_arr_wrptr].sleep_end = omap_32k_sync_timer_read();
	idle_status++;
#endif
	omap_sram_idle();

	if (target_state.core_state == PRCM_CORE_OFF) {
		if (!is_device_type_gp() &&
		    is_sil_rev_greater_than(OMAP3430_REV_ES2_1))
			sdrc_write_reg(sdrc_power_register, SDRC_POWER);
	}

restore:
	/* In case of ES3.1, disable IO daisy chain */
	if (is_sil_rev_greater_than(OMAP3430_REV_ES3_0) && per_ctx_saved)
		PM_WKEN_WKUP &= ~(0x10000);

	/* Disabling IO_PAD capabilities */
	if (core_sleep_flg)
		PM_WKEN_WKUP &= ~(0x100);
	/* Disabling IO_PAD capabilities */
	PM_WKEN_WKUP &= ~(0x100);

#ifdef OMAP3_START_RNG
	/*
	 * Capture the PM_PREPWSTST_CORE to be used later
	 * for starting the RNG (Random Number Generator).
	 */
	prepwst_core_rng = PM_PREPWSTST_CORE;
#endif

	CM_FCLKEN1_CORE = fclken_core;
	CM_ICLKEN1_CORE = iclken_core;

	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
#ifdef CONFIG_MPU_OFF
		/*
		 * On ES 2.0, if the scratchpad is populated with a valid
		 * pointer, warm reset does not work, so populate the
		 * scratchpad restore address only in cpuidle and
		 * suspend calls.
		 */
		*(scratchpad_restore_addr) = 0x0;
#endif
		prcm_set_mpu_domain_state(PRCM_MPU_ACTIVE);
		if ((cur_neon_state == PRCM_ON) &&
		    (target_state.mpu_state > PRCM_MPU_INACTIVE)) {
			prcm_force_power_domain_state(DOM_NEON,
						      cur_neon_state);
			prcm_get_pre_power_domain_state(DOM_NEON,
							&pre_neon_state);
			if (pre_neon_state == PRCM_OFF)
				omap3_restore_neon_context();
#ifdef CONFIG_HW_SUP_TRANS
			prcm_set_power_domain_state(DOM_NEON, PRCM_ON,
						    PRCM_AUTO);
#endif
		}
	}

	/* Continue core restoration part, only if Core-Sleep is attempted */
	if ((target_state.core_state > PRCM_CORE_ACTIVE) && core_sleep_flg) {
		prcm_set_core_domain_state(PRCM_CORE_ACTIVE);
#ifdef CONFIG_OMAP_SMARTREFLEX
		enable_smartreflex(SR1_ID);
		enable_smartreflex(SR2_ID);
#endif
		if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_CORE1);
#endif
			prcm_restore_registers(&target_state);
			prcm_restore_core_context(target_state.core_state);
			omap3_restore_core_settings();
		}
		/*
		 * Errata 1.4:
		 * The timer device gets idled when we cut the timer ICLK,
		 * which happens when we try to put Core to RET.
		 * Wait Period = 2 timer interface clock cycles +
		 *		 1 timer functional clock cycle
		 * Interface clock = L4 clock. For the computation, L4
		 * clock is assumed at 50MHz (worst case).
		 * Functional clock = 32KHz
		 * Wait Period = 2*10^-6/50 + 1/32768 = 0.000030557 = 30.557usec
		 * Rounding off the delay value to a safer 50usec.
		 */
		omap_udelay(GPTIMER_WAIT_DELAY);
		CM_AUTOIDLE_WKUP &= ~(0x1);
		if (core_off_notification != NULL)
			core_off_notification(PRCM_FALSE);
	}

	if (cur_per_state == PRCM_ON) {
		CM_FCLKEN_PER = fclken_per;
		CM_ICLKEN_PER = iclken_per;
		prcm_get_pre_power_domain_state(DOM_PER, &pre_per_state);
		if (pre_per_state == PRCM_OFF && per_ctx_saved) {
			if (enable_debug)
				per_off++;
			omap3_restore_per_context();
			post_uart_inactivity();
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_PER);
#endif
		}
	}

	pr_debug("MPU state:%x, CORE state:%x\n",
		 PM_PREPWSTST_MPU, PM_PREPWSTST_CORE);
	store_prepwst();

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	if (cx->type > OMAP3_STATE_C1)
		sched_clock_idle_wakeup_event(timespec_to_ns(&ts_idle));

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	if (idle_status) {
		sw_latency_arr[swlat_arr_wrptr].wkup_end =
			omap_32k_sync_timer_read();
		sw_latency_arr[swlat_arr_wrptr].wkup_start =
			wakeup_start_32ksync;
		sw_latency_arr[swlat_arr_wrptr].cstate =
			((PM_PREPWSTST_MPU & 0x3) << 2) |
			(PM_PREPWSTST_CORE & 0x3) |
			(omap_readl(0x48306CB0) << 16);
		swlat_arr_wrptr++;
		if (swlat_arr_wrptr == SW_LATENCY_ARR_SIZE)
			swlat_arr_wrptr = 0;
	}
#endif
	local_irq_enable();
	local_fiq_enable();

#ifdef OMAP3_START_RNG
	if (!is_device_type_gp()) {
		/*
		 * Start the RNG after interrupts are enabled
		 * and only when CORE OFF was successful.
		 */
		if (!(prepwst_core_rng & 0x3)) {
			ret = omap3_start_rng();
			if (ret)
				printk(KERN_INFO "Failed to generate new "
				       "RN in idle %x\n", ret);
			prepwst_core_rng = 0xFF;
		}
	}
#endif
	return (u32)timespec_to_ns(&ts_idle) / 1000;
}
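/*
 * The "reprogram next wake up tick" step above distills to the pattern
 * below: if the next tick is further away than the idle state's wakeup
 * latency (plus a millisecond of slack), pull the event forward by that
 * latency so the timer still fires on time after the slow wakeup. This
 * is a restatement of the code above for clarity, not a new API; the
 * helper name is hypothetical, and the latency is in microseconds as in
 * the original.
 */
static void compensate_tick_for_wakeup(int wakeup_latency_us)
{
	struct tick_device *d = tick_get_device(smp_processor_id());
	ktime_t now = ktime_get();

	if (ktime_to_ns(ktime_sub(d->evtdev->next_event, now)) >
	    (wakeup_latency_us * 1000 + NSEC_PER_MSEC)) {
		ktime_t adjust = ktime_set(0, wakeup_latency_us * 1000);
		ktime_t next = ktime_sub(d->evtdev->next_event, adjust);

		clockevents_program_event(d->evtdev, next, now);
	}
}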
static int omap3_enter_idle(struct cpuidle_device *dev,
			    struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx;
	struct timespec ts_preidle;
	struct timespec ts_postidle;
	struct timespec ts_idle;

	/* Used for LPR mode DSS context save/restore. */
	u32 pm_wken_dss = 0;
	u32 pm_pwstctrl_dss = 0;
	u32 cm_clkstctrl_dss = 0;
	u32 cm_fclken_dss = 0;
	u32 cm_iclken_dss = 0;
	u32 cm_autoidle_dss = 0;

	u32 fclken_core;
	u32 iclken_core;
	u32 fclken_per;
	u32 iclken_per;
	int wakeup_latency;
	struct system_power_state target_state;
	struct system_power_state cur_state;
#ifdef CONFIG_HW_SUP_TRANS
	u32 sleepdep_per;
	u32 wakedep_per;
#endif /* #ifdef CONFIG_HW_SUP_TRANS */
	u32 sdrc_power_register = 0;
	int core_sleep_flg = 0;
	int got_console_lock = 0;

	/* Disable interrupts. */
	local_irq_disable();
	local_fiq_disable();

	/* If we need to reschedule, return immediately */
	if (need_resched()) {
		local_fiq_enable();
		local_irq_enable();
		return 0;
	}

	/* Reset previous power state registers. */
	clear_prepwstst();
	omap3_idle_setup_wkup_sources();

	/* Set up target state from state context provided by cpuidle. */
	cx = cpuidle_get_statedata(state);
	target_state.mpu_state = cx->mpu_state;
	target_state.core_state = cx->core_state;
	/* Avoid gcc warning. Will be set in adjust_target_states(). */
	target_state.neon_state = 0;

	/* Take a time marker for residency. */
	getnstimeofday(&ts_preidle);

	/* If the requested state is C0, we bail here... */
	if (cx->type == OMAP3_STATE_C1) {
		omap_sram_idle(target_state.mpu_state);
		goto return_sleep_time;
	}

	if (cx->type > OMAP3_STATE_C2)
		sched_clock_idle_sleep_event(); /* about to enter deep idle */

	/*
	 * Adjust PER and NEON domain target states as well as CORE domain
	 * target state depending on MPU/CORE setting, enable_off sysfs entry
	 * and PER timer status.
	 */
	adjust_target_states(&target_state);

	wakeup_latency = cx->wakeup_latency;
	/*
	 * NOTE:
	 * We will never get the condition below as we are not supporting
	 * CORE OFF right now. Keeping this code around for future reference.
	 */
	if (target_state.core_state != cx->core_state) {
		/*
		 * Currently, this can happen only for core_off. Adjust wakeup
		 * latency to that of the core_cswr state. Hard coded for now
		 * and needs to be made more generic: omap3_power_states[4]
		 * is CSWR for core.
		 */
		wakeup_latency = omap3_power_states[4].wakeup_latency;
	}

	/* Reprogram next wake up tick to adjust for wake latency */
	if (wakeup_latency > 1000) {
		struct tick_device *d = tick_get_device(smp_processor_id());
		ktime_t now = ktime_get();

		if (ktime_to_ns(ktime_sub(d->evtdev->next_event, now)) >
		    (wakeup_latency * 1000 + NSEC_PER_MSEC)) {
			ktime_t adjust = ktime_set(0, (wakeup_latency * 1000));
			ktime_t next = ktime_sub(d->evtdev->next_event, adjust);

			clockevents_program_event(d->evtdev, next, now);
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

	/* Remember the current power states and clock settings. */
	prcm_get_power_domain_state(DOM_PER, &cur_state.per_state);
	prcm_get_power_domain_state(DOM_CAM, &cur_state.cam_state);
	prcm_get_power_domain_state(DOM_SGX, &cur_state.sgx_state);
	prcm_get_power_domain_state(DOM_NEON, &cur_state.neon_state);
	fclken_core = CM_FCLKEN1_CORE;
	iclken_core = CM_ICLKEN1_CORE;
	fclken_per = CM_FCLKEN_PER;
	iclken_per = CM_ICLKEN_PER;
#ifdef CONFIG_HW_SUP_TRANS
	/* Facilitating SWSUP RET, from HWSUP mode */
	sleepdep_per = CM_SLEEPDEP_PER;
	wakedep_per = PM_WKDEP_PER;
#endif /* #ifdef CONFIG_HW_SUP_TRANS */

	/*
	 * If target state is core_off, save registers before changing
	 * anything.
	 */
	if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET)
		prcm_save_registers();

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

	/* Program MPU and NEON to target state */
	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
		if ((cur_state.neon_state == PRCM_ON) &&
		    (target_state.neon_state != PRCM_ON)) {
			if (target_state.neon_state == PRCM_OFF)
				omap3_save_neon_context();
			prcm_transition_domain_to(DOM_NEON,
						  target_state.neon_state);
		}
#ifdef _disabled_CONFIG_MPU_OFF
		/* Populate scratchpad restore address */
		scratchpad_set_restore_addr();
#endif
		/* TODO No support for OFF Mode yet
		if (target_state.core_state > PRCM_CORE_CSWR_MEMRET)
			omap3_save_secure_ram_context(target_state.core_state);
		*/
		prcm_set_mpu_domain_state(target_state.mpu_state);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

	/* Program CORE and PER to target state */
	if (target_state.core_state > PRCM_CORE_ACTIVE) {
		/* Log core sleep attempt */
		core_sleep_flg = 1;

		/* Lock the console to prevent potential access to UARTs. */
		if (0 == try_acquire_console_sem())
			got_console_lock = 1;

		/* Handle PER, CAM and SGX domains. */
		if ((cur_state.per_state == PRCM_ON) &&
		    (target_state.per_state != PRCM_ON)) {
			if (target_state.per_state == PRCM_OFF)
				omap3_save_per_context();
			prcm_transition_domain_to(DOM_PER,
						  target_state.per_state);
		}
		if (PRCM_ON == cur_state.cam_state)
			prcm_transition_domain_to(DOM_CAM, PRCM_RET);
		if (PRCM_ON == cur_state.sgx_state)
			prcm_transition_domain_to(DOM_SGX, PRCM_RET);

		disable_smartreflex(SR1_ID);
		disable_smartreflex(SR2_ID);

		prcm_set_core_domain_state(target_state.core_state);

		/* Enable Autoidle for GPT1 explicitly - Errata 1.4 */
		CM_AUTOIDLE_WKUP |= 0x1;
		/* Disable HSUSB OTG ICLK explicitly */
		CM_ICLKEN1_CORE &= ~0x10;
		/* Enabling GPT1 wake-up capabilities */
		PM_WKEN_WKUP |= 0x1;

		/*
		 * Errata 2.15:
		 * Configure UARTs to ForceIdle. Otherwise they can prevent
		 * CORE RET.
		 */
		omap24xx_uart_set_force_idle();
		CM_ICLKEN1_CORE &= ~(1 << 7); /* MAILBOXES */
		CM_ICLKEN1_CORE &= ~(1 << 6); /* OMAPCTRL */

		/* If we are in LPR mode we need to set up DSS accordingly. */
		if (omap2_disp_lpr_is_enabled()) {
			pm_wken_dss = PM_WKEN_DSS;
			pm_pwstctrl_dss = PM_PWSTCTRL_DSS;
			cm_clkstctrl_dss = CM_CLKSTCTRL_DSS;
			cm_fclken_dss = CM_FCLKEN_DSS;
			cm_iclken_dss = CM_ICLKEN_DSS;
			cm_autoidle_dss = CM_AUTOIDLE_DSS;

			PM_WKEN_DSS = 0x00000001;
			PM_PWSTCTRL_DSS = 0x00030107;
			CM_CLKSTCTRL_DSS = 0x00000003;
			CM_FCLKEN_DSS = 0x00000001;
			CM_ICLKEN_DSS = 0x00000001;
			CM_AUTOIDLE_DSS = 0x00000001;
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

#ifdef CONFIG_DISABLE_HFCLK
	PRM_CLKSRC_CTRL |= 0x18; /* set sysclk to stop */
#endif /* #ifdef CONFIG_DISABLE_HFCLK */

	DEBUG_STATE_CAPTURE();

	/*
	 * Errata 1.142:
	 * SDRC not sending auto-refresh when OMAP wakes up from OFF mode
	 */
	if (!is_device_type_gp() && is_sil_rev_equal_to(OMAP3430_REV_ES3_0)) {
		sdrc_power_register = SDRC_POWER_REG;
		SDRC_POWER_REG &= ~(SDRC_PWR_AUTOCOUNT_MASK |
				    SDRC_PWR_CLKCTRL_MASK);
		SDRC_POWER_REG |= 0x120;
		if (target_state.core_state == PRCM_CORE_OFF)
			save_scratchpad_contents();
	}

	omap_sram_idle(target_state.mpu_state);

	/*
	 * Errata 1.142:
	 * SDRC not sending auto-refresh when OMAP wakes up from OFF mode
	 */
	if (!is_device_type_gp() && is_sil_rev_equal_to(OMAP3430_REV_ES3_0))
		SDRC_POWER_REG = sdrc_power_register;

restore:
#ifdef CONFIG_DISABLE_HFCLK
	PRM_CLKSRC_CTRL &= ~0x18;
#endif /* #ifdef CONFIG_DISABLE_HFCLK */

	/* Disabling IO_PAD capabilities */
	PM_WKEN_WKUP &= ~(0x100);

	CM_FCLKEN1_CORE = fclken_core;
	CM_ICLKEN1_CORE = iclken_core;

	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
#ifdef _disabled_CONFIG_MPU_OFF
		/*
		 * On ES 2.0, if the scratchpad is populated with a valid
		 * pointer, warm reset does not work, so populate the
		 * scratchpad restore address only in cpuidle and suspend
		 * calls.
		 */
		scratchpad_clr_restore_addr();
#endif
		prcm_set_mpu_domain_state(PRCM_MPU_ACTIVE);
		if ((cur_state.neon_state == PRCM_ON) &&
		    (target_state.mpu_state > PRCM_MPU_INACTIVE)) {
			u8 pre_state;

			prcm_force_power_domain_state(DOM_NEON,
						      cur_state.neon_state);
			prcm_get_pre_power_domain_state(DOM_NEON, &pre_state);
			if (pre_state == PRCM_OFF)
				omap3_restore_neon_context();
#ifdef CONFIG_HW_SUP_TRANS
			prcm_set_power_domain_state(DOM_NEON, POWER_DOMAIN_ON,
						    PRCM_AUTO);
#endif
		}
	}

	/* Continue core restoration part, only if Core-Sleep is attempted */
	if ((target_state.core_state > PRCM_CORE_ACTIVE) && core_sleep_flg) {
		u8 pre_per_state;

		prcm_set_core_domain_state(PRCM_CORE_ACTIVE);
		omap24xx_uart_clr_force_idle();
		enable_smartreflex(SR1_ID);
		enable_smartreflex(SR2_ID);

		/* Turn PER back ON if it was ON before idle. */
		if (cur_state.per_state == PRCM_ON) {
			prcm_force_power_domain_state(DOM_PER,
						      cur_state.per_state);
			CM_ICLKEN_PER = iclken_per;
			CM_FCLKEN_PER = fclken_per;
			prcm_get_pre_power_domain_state(DOM_PER,
							&pre_per_state);
			if (pre_per_state == PRCM_OFF) {
				omap3_restore_per_context();
#ifdef CONFIG_OMAP34XX_OFFMODE
				context_restore_update(DOM_PER);
#endif
			}
#ifdef CONFIG_HW_SUP_TRANS
			/* Facilitating SWSUP RET, from HWSUP mode */
			CM_SLEEPDEP_PER = sleepdep_per;
			PM_WKDEP_PER = wakedep_per;
			prcm_set_power_domain_state(DOM_PER, PRCM_ON,
						    PRCM_AUTO);
#endif
		}

		/* Restore CAM and SGX. */
		if (PRCM_ON == cur_state.cam_state)
			prcm_transition_domain_to(DOM_CAM, PRCM_ON);
		if (PRCM_ON == cur_state.sgx_state)
			prcm_transition_domain_to(DOM_SGX, PRCM_ON);

		/* If we lost CORE context, restore it. */
		if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_CORE1);
#endif
			prcm_restore_registers();
			prcm_restore_core_context(target_state.core_state);
#ifdef CONFIG_CORE_OFF
			omap3_restore_core_settings();
#endif
		}

		/* Restore DSS settings */
		if (omap2_disp_lpr_is_enabled()) {
			PM_WKEN_DSS = pm_wken_dss;
			PM_PWSTCTRL_DSS = pm_pwstctrl_dss;
			CM_CLKSTCTRL_DSS = cm_clkstctrl_dss;
			CM_FCLKEN_DSS = cm_fclken_dss;
			CM_ICLKEN_DSS = cm_iclken_dss;
			CM_AUTOIDLE_DSS = cm_autoidle_dss;
		}

		/*
		 * At this point the CORE and PER domains are back. We can
		 * release the console if we have it.
		 */
		if (got_console_lock)
			release_console_sem();

#ifdef CONFIG_OMAP_32K_TIMER
		/*
		 * Errata 1.4:
		 * If a General Purpose Timer (GPTimer) is in posted mode
		 * (TSIRC.POSTED=1), due to internal resynchronizations, values
		 * read in TCRR, TCAR1 and TCAR2 registers right after the
		 * timer interface clock (L4) goes from stopped to active may
		 * not return the expected values. The most common event
		 * leading to this situation occurs upon wake up from idle.
		 *
		 * Software has to wait at least (2 timer interface clock
		 * cycles + 1 timer functional clock cycle) after L4 clock
		 * wakeup before reading TCRR, TCAR1 or TCAR2 registers for
		 * GPTimers in POSTED internal synchronization mode, and
		 * before reading the WCRR register of the Watchdog timers.
		 * The same workaround must be applied before reading CR and
		 * 32KSYNCNT_REV registers of the synctimer module.
		 *
		 * Wait Period = 2 timer interface clock cycles +
		 *		 1 timer functional clock cycle
		 * Interface clock = L4 clock (50MHz worst case).
		 * Functional clock = 32KHz
		 * Wait Period = 2*10^-6/50 + 1/32768 = 0.000030557 = 30.557us
		 * Rounding off the delay value to a safer 50us.
		 */
		udelay(GPTIMER_WAIT_DELAY);
#endif
		/* Disable autoidling of GPT1. */
		CM_AUTOIDLE_WKUP &= ~(0x1);
	}

	DPRINTK("MPU state:%x, CORE state:%x\n",
		PM_PREPWSTST_MPU, PM_PREPWSTST_CORE);

	/* Do wakeup event checks */
	post_uart_activity();

	/* Update stats for sysfs entries. */
	store_prepwst();

return_sleep_time:
	getnstimeofday(&ts_postidle);
#if defined(CONFIG_SYSFS) && defined(DEBUG_BAIL_STATS)
	ts_last_wake_up = ts_postidle;
#endif
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	if (cx->type > OMAP3_STATE_C2)
		sched_clock_idle_wakeup_event(timespec_to_ns(&ts_idle));

	DEBUG_STATE_PRINT(core_sleep_flg);

	local_irq_enable();
	local_fiq_enable();

	return (u32)timespec_to_ns(&ts_idle) / 1000;
}
/**
 * tick_program_event
 */
int tick_program_event(ktime_t expires, int force)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	return clockevents_program_event(dev, expires, force);
}
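/*
 * Usage sketch for tick_program_event(): arming this CPU's tick device
 * one millisecond out, as an hrtimer-style caller might. With force == 0
 * a non-zero return means the expiry was already in the past and the
 * caller must handle the event itself. The wrapper function is
 * hypothetical; only tick_program_event() comes from the code above.
 */
#include <linux/ktime.h>

static int arm_local_tick_1ms(void)
{
	ktime_t expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);

	/* Returns -ETIME if 'expires' has already passed (force == 0). */
	return tick_program_event(expires, 0);
}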