/*
 * Put a CPU into "safe" mode with respect to power.
 *
 * Some systems cannot operate at a continuous "normal" speed without
 * exceeding the thermal design envelope.  This routine is called per CPU
 * to place the CPUs into a "safe" operating mode.
 */
void
pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags)
{
    if (pmDispatch != NULL && pmDispatch->pmCPUSafeMode != NULL)
        pmDispatch->pmCPUSafeMode(lcpu, flags);
    else {
        /*
         * Do something reasonable if the KEXT isn't present.
         *
         * We only look at the PAUSE and RESUME flags.  The other flag(s)
         * will not make any sense without the KEXT, so just ignore them.
         *
         * We set the CPU's state to indicate that it's paused.  If this
         * is the CPU we're currently running on, then spin until the
         * state becomes non-paused.
         */
        if (flags & PM_SAFE_FL_PAUSE) {
            lcpu->state = LCPU_PAUSE;
            if (lcpu == x86_lcpu()) {
                while (lcpu->state == LCPU_PAUSE)
                    cpu_pause();
            }
        }

        /*
         * Clear the paused state for the specified CPU; that will
         * get it out of its spin loop.
         */
        if (flags & PM_SAFE_FL_RESUME) {
            lcpu->state = LCPU_RUN;
        }
    }
}
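/*
 * Illustrative sketch only (not part of the original source): one way a
 * caller might drive the PAUSE/RESUME fallback above.  The helper name
 * pm_safe_mode_example() is hypothetical.
 */
#if 0   /* example only */
static void
pm_safe_mode_example(x86_lcpu_t *target)
{
    /*
     * Mark the target CPU paused.  Note that if target were the CPU
     * we are running on, pmSafeMode() would spin here and not return
     * until some other CPU resumed it.
     */
    pmSafeMode(target, PM_SAFE_FL_PAUSE);

    /* ... perform work in the thermally constrained window ... */

    /* Set the target back to LCPU_RUN, releasing its spin loop. */
    pmSafeMode(target, PM_SAFE_FL_RESUME);
}
#endif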
void
rtc_timer_start(void)
{
    /*
     * Force a complete re-evaluation of timer deadlines.
     */
    x86_lcpu()->rtcDeadline = EndOfAllTime;
    timer_resync_deadlines();
}
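/*
 * (Added note, inferred from the surrounding code.)  EndOfAllTime acts
 * as an "unarmed" sentinel for the per-CPU rtcDeadline: with it in
 * place, the resync recomputes the soonest real deadline and re-arms
 * the hardware through setPop() rather than trusting any previously
 * recorded deadline.
 */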
uint64_t
setPop(uint64_t time)
{
    uint64_t now;
    uint64_t pop;

    /* 0 and EndOfAllTime are special cases for "clear the timer" */
    if (time == 0 || time == EndOfAllTime) {
        time = EndOfAllTime;
        now = 0;
        pop = rtc_timer->set(0, 0);
    } else {
        now = rtc_nanotime_read();      /* The time in nanoseconds */
        pop = rtc_timer->set(time, now);
    }

    /* Record requested and actual deadlines set */
    x86_lcpu()->rtcDeadline = time;
    x86_lcpu()->rtcPop = pop;

    return pop - now;
}
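/*
 * Illustrative sketch only (not part of the original source): how a
 * caller might interpret setPop()'s return value.  The helper name
 * set_pop_example() and the 1 ms figure are arbitrary.
 */
#if 0   /* example only */
static void
set_pop_example(void)
{
    uint64_t delta;

    /*
     * Arm the timer 1 ms from now.  The return value is the distance
     * from "now" to the deadline the hardware actually accepted,
     * which may differ from what was requested.
     */
    delta = setPop(rtc_nanotime_read() + NSEC_PER_MSEC);

    /*
     * Passing 0 (or EndOfAllTime) clears the timer; the recorded
     * rtcDeadline becomes EndOfAllTime.
     */
    (void) setPop(0);
}
#endif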
/*
 * Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier
 * must not set deadlines that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and
 * iterate over them.
 */
void
timer_intr(int user_mode, uint64_t rip)
{
    uint64_t        abstime;
    rtclock_timer_t *mytimer;
    cpu_data_t      *pp;
    int64_t         latency;
    uint64_t        pmdeadline;
    boolean_t       timer_processed = FALSE;

    pp = current_cpu_datap();

    SCHED_STATS_TIMER_POP(current_processor());

    abstime = mach_absolute_time();     /* Get the time now */

    /* Has a pending clock timer expired? */
    mytimer = &pp->rtclock_timer;       /* Point to the event timer */
    if ((timer_processed = ((mytimer->deadline <= abstime) ||
        (abstime >= (mytimer->queue.earliest_soft_deadline))))) {
        /*
         * Log interrupt service latency (a negative value is expected
         * by the tracing tool); a non-PM event is expected next.
         * The requested deadline may be earlier than when it was set,
         * so use MAX to avoid reporting bogus latencies.
         */
        latency = (int64_t) (abstime - MAX(mytimer->deadline,
                                           mytimer->when_set));
        /*
         * Log zero timer latencies when opportunistically processing
         * coalesced timers.
         */
        if (latency < 0) {
            TCOAL_DEBUG(0xEEEE0000, abstime,
                mytimer->queue.earliest_soft_deadline,
                abstime - mytimer->queue.earliest_soft_deadline, 0, 0);
            latency = 0;
        }

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            DECR_TRAP_LATENCY | DBG_FUNC_NONE,
            -latency,
            ((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
            user_mode, 0, 0);

        mytimer->has_expired = TRUE;    /* Remember that we popped */
        mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
        mytimer->has_expired = FALSE;

        /* Get the time again since we ran a bit */
        abstime = mach_absolute_time();
        mytimer->when_set = abstime;
    }

    /* Is it time for a power-management state change? */
    if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            DECR_PM_DEADLINE | DBG_FUNC_START,
            0, 0, 0, 0, 0);
        pmCPUDeadline(pp);
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            DECR_PM_DEADLINE | DBG_FUNC_END,
            0, 0, 0, 0, 0);
        timer_processed = TRUE;
    }

    /* Schedule our next deadline */
    x86_lcpu()->rtcDeadline = EndOfAllTime;
    timer_resync_deadlines();

    if (__improbable(timer_processed == FALSE))
        spurious_timers++;
}
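/*
 * Illustrative sketch only (not part of the original source): the "set
 * of generic callouts" shape suggested by the XXX comment above.  All
 * names here (timer_callout_t, timer_callouts, TIMER_NCALLOUTS,
 * timer_intr_alternative) are hypothetical.
 */
#if 0   /* example only */
#define TIMER_NCALLOUTS 2   /* e.g. event timer + PM deadline */

typedef struct {
    /* Services the event source; returns its next deadline. */
    uint64_t (*expire)(cpu_data_t *pp, uint64_t abstime);
} timer_callout_t;

static timer_callout_t timer_callouts[TIMER_NCALLOUTS];

static void
timer_intr_alternative(cpu_data_t *pp)
{
    uint64_t abstime;
    unsigned i;

    /*
     * Service each source in turn, re-reading the clock between
     * callouts so that time spent in one callout is visible when
     * deciding whether the next has expired.  This avoids the chain
     * ordering constraint noted above.
     */
    for (i = 0; i < TIMER_NCALLOUTS; i++) {
        abstime = mach_absolute_time();
        (void) timer_callouts[i].expire(pp, abstime);
    }
    timer_resync_deadlines();
}
#endif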