/*
 *	Routine:	cpu_idle_exit
 *	Function:	Exit from low-power idle: restore the active thread's
 *			pmap, run the idle-notify callback, re-arm or cancel
 *			the idle timer, and reload the idle context.
 */
void
cpu_idle_exit(boolean_t from_reset __unused)
{
	uint64_t	new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

#if KPC
	kpc_idle_exit();
#endif

	pmap_set_pmap(cpu_data_ptr->cpu_active_thread->map->pmap, current_thread());

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}
void
rtc_timer_start(void)
{
	/*
	 * Force a complete re-evaluation of timer deadlines.
	 */
	x86_lcpu()->rtcDeadline = EndOfAllTime;
	timer_resync_deadlines();
}
/*
 * Re-sync timer deadlines on the given CPU: directly if it is the current
 * CPU, otherwise via a power-management interrupt whose handler (registered
 * once, lazily) invokes timer_resync_deadlines() on the target.
 */
static void
pmReSyncDeadlines(int cpu)
{
	static boolean_t	registered = FALSE;

	if (!registered) {
		PM_interrupt_register(&timer_resync_deadlines);
		registered = TRUE;
	}

	if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num)
		timer_resync_deadlines();
	else
		cpu_PM_interrupt(cpu);
}
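/*
 * A minimal usage sketch (not from the source): broadcasting a deadline
 * re-evaluation to every logical CPU by routing each request through
 * pmReSyncDeadlines(), which resyncs locally or forwards via
 * cpu_PM_interrupt().  The helper name example_resync_all_cpus() is
 * hypothetical, and it assumes real_ncpus holds the active CPU count.
 */
static void
example_resync_all_cpus(void)
{
	int	cpu;

	for (cpu = 0; cpu < (int) real_ncpus; cpu++)
		pmReSyncDeadlines(cpu);	/* local resync or cross-CPU interrupt */
}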
/*
 * Set the clock deadline.
 */
void
timer_set_deadline(uint64_t deadline)
{
	rtclock_timer_t		*mytimer;
	spl_t			s;
	cpu_data_t		*pp;

	s = splclock();				/* no interruptions */
	pp = current_cpu_datap();

	mytimer = &pp->rtclock_timer;		/* Point to the timer itself */
	mytimer->deadline = deadline;		/* Set new expiration time */
	mytimer->when_set = mach_absolute_time();

	timer_resync_deadlines();

	splx(s);
}
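/*
 * A minimal usage sketch (assumption, not from the source): a caller that
 * wants a one-shot wakeup `interval_abs` ticks from now converts the
 * interval to an absolute deadline and hands it to timer_set_deadline(),
 * which reprograms the hardware through timer_resync_deadlines().
 * example_arm_oneshot() is a hypothetical helper name.
 */
static void
example_arm_oneshot(uint64_t interval_abs)
{
	uint64_t	deadline;

	clock_absolutetime_interval_to_deadline(interval_abs, &deadline);
	timer_set_deadline(deadline);
}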
/*
 *	Routine:	cpu_idle
 *	Function:	Idle loop; waits for the next interrupt in a
 *			low-power state, then exits through cpu_idle_exit().
 */
void
cpu_idle(void)
{
	cpu_data_t	*cpu_data_ptr = getCpuDatap();
	uint64_t	new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
		Idle_load_context();
	if (!SetIdlePop())
		Idle_load_context();
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug)
		arm_debug_set(NULL);
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop)
			SetIdlePop();
	}

#if KPC
	kpc_idle();
#endif

	platform_cache_idle_enter();
	cpu_idle_wfi((boolean_t) wfi_fast);
	platform_cache_idle_exit();

	ClearIdlePop(TRUE);

	cpu_idle_exit(FALSE);
}
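/*
 * A hedged sketch of a platform idle-notify callback matching the call
 * sites in cpu_idle() and cpu_idle_exit() above: on idle entry it requests
 * an idle-timer pop EXAMPLE_IDLE_TICKS from now; on exit it leaves the
 * returned tick count at zero so the idle timer is turned off.  The names
 * example_idle_notify and EXAMPLE_IDLE_TICKS are hypothetical, and the
 * signature is inferred from the processor_idle_t casts above.
 */
#define EXAMPLE_IDLE_TICKS	1000ULL

static void
example_idle_notify(cpu_id_t cpu_id __unused, boolean_t entering_idle,
    uint64_t *new_idle_timeout_ticks)
{
	if (entering_idle)
		*new_idle_timeout_ticks = EXAMPLE_IDLE_TICKS;	/* arm the idle timer */
	else
		*new_idle_timeout_ticks = 0x0ULL;		/* cancel on idle exit */
}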
void
timer_queue_expire_local(
	__unused void		*arg)
{
	rtclock_timer_t		*mytimer;
	uint64_t		abstime;
	cpu_data_t		*pp;

	pp = current_cpu_datap();

	mytimer = &pp->rtclock_timer;
	abstime = mach_absolute_time();

	mytimer->has_expired = TRUE;
	mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
	mytimer->has_expired = FALSE;
	mytimer->when_set = mach_absolute_time();

	timer_resync_deadlines();
}
void
timer_queue_expire_rescan(
	__unused void		*arg)
{
	rtclock_timer_t		*mytimer;
	uint64_t		abstime;
	cpu_data_t		*pp;

	assert(ml_get_interrupts_enabled() == FALSE);
	pp = current_cpu_datap();

	mytimer = &pp->rtclock_timer;
	abstime = mach_absolute_time();

	mytimer->has_expired = TRUE;
	mytimer->deadline = timer_queue_expire_with_options(&mytimer->queue, abstime, TRUE);
	mytimer->has_expired = FALSE;
	mytimer->when_set = mach_absolute_time();

	timer_resync_deadlines();
}
/*
 * Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
timer_intr(int		user_mode,
	   uint64_t	rip)
{
	uint64_t		abstime;
	rtclock_timer_t		*mytimer;
	cpu_data_t		*pp;
	int64_t			latency;
	uint64_t		pmdeadline;
	boolean_t		timer_processed = FALSE;

	pp = current_cpu_datap();

	SCHED_STATS_TIMER_POP(current_processor());

	abstime = mach_absolute_time();		/* Get the time now */

	/* has a pending clock timer expired? */
	mytimer = &pp->rtclock_timer;		/* Point to the event timer */

	if ((timer_processed = ((mytimer->deadline <= abstime) ||
		    (abstime >= (mytimer->queue.earliest_soft_deadline))))) {
		/*
		 * Log interrupt service latency (-ve value expected by tool)
		 * a non-PM event is expected next.
		 * The requested deadline may be earlier than when it was set
		 * - use MAX to avoid reporting bogus latencies.
		 */
		latency = (int64_t) (abstime - MAX(mytimer->deadline,
						   mytimer->when_set));
		/* Log zero timer latencies when opportunistically processing
		 * coalesced timers.
		 */
		if (latency < 0) {
			TCOAL_DEBUG(0xEEEE0000, abstime,
			    mytimer->queue.earliest_soft_deadline,
			    abstime - mytimer->queue.earliest_soft_deadline, 0, 0);
			latency = 0;
		}

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_TRAP_LATENCY | DBG_FUNC_NONE,
			-latency,
			((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
			user_mode, 0, 0);

		mytimer->has_expired = TRUE;	/* Remember that we popped */
		mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
		mytimer->has_expired = FALSE;

		/* Get the time again since we ran a bit */
		abstime = mach_absolute_time();
		mytimer->when_set = abstime;
	}

	/* is it time for power management state change? */
	if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_PM_DEADLINE | DBG_FUNC_START,
			0, 0, 0, 0, 0);
		pmCPUDeadline(pp);
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_PM_DEADLINE | DBG_FUNC_END,
			0, 0, 0, 0, 0);
		timer_processed = TRUE;
	}

	/* schedule our next deadline */
	x86_lcpu()->rtcDeadline = EndOfAllTime;
	timer_resync_deadlines();

	if (__improbable(timer_processed == FALSE))
		spurious_timers++;
}
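/*
 * A hedged sketch of the "set of generic callouts" alternative suggested by
 * the XXX comment above: each event source registers a callout that returns
 * its next deadline, and the interrupt handler iterates the set, taking the
 * minimum of the returned deadlines before programming the hardware.  All
 * names here (timer_callout_t, timer_callouts, NUM_TIMER_CALLOUTS,
 * example_timer_intr_iterate) are hypothetical.
 */
typedef uint64_t (*timer_callout_t)(uint64_t now);

#define NUM_TIMER_CALLOUTS	4
static timer_callout_t	timer_callouts[NUM_TIMER_CALLOUTS];

static uint64_t
example_timer_intr_iterate(uint64_t now)
{
	uint64_t	next = EndOfAllTime;
	int		i;

	for (i = 0; i < NUM_TIMER_CALLOUTS; i++) {
		if (timer_callouts[i] != NULL) {
			uint64_t d = timer_callouts[i](now);
			if (d < next)
				next = d;	/* track the earliest deadline */
		}
	}
	return next;	/* caller programs the hardware with this value */
}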