Example #1
0
/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
timer_resync_deadlines(void)
{
    uint64_t		deadline = EndOfAllTime;
    uint64_t		pmdeadline;
    rtclock_timer_t		*mytimer;
    spl_t			s = splclock();
    cpu_data_t		*pp;
    uint32_t		decr;

    pp = current_cpu_datap();
    if (!pp->cpu_running) {
        /*
         * There's really nothing to do if this processor is down,
         * but we must still undo the splclock() above before
         * bailing out, or we leak the raised interrupt level.
         */
        splx(s);
        return;
    }

    /*
     * If we have a clock timer set, pick that.
     */
    mytimer = &pp->rtclock_timer;
    if (!mytimer->has_expired &&
            0 < mytimer->deadline && mytimer->deadline < EndOfAllTime)
        deadline = mytimer->deadline;

    /*
     * If we have a power management deadline, see if that's earlier.
     */
    pmdeadline = pmCPUGetDeadline(pp);
    if (0 < pmdeadline && pmdeadline < deadline)
        deadline = pmdeadline;

    /*
     * Go and set the "pop" event.
     */
    decr = (uint32_t) setPop(deadline);

    /* Record non-PM deadline for latency tool */
    if (decr != 0 && deadline != pmdeadline) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  DECR_SET_DEADLINE | DBG_FUNC_NONE,
                                  decr, 2,
                                  deadline,
                                  mytimer->queue.count, 0);
    }
    splx(s);
}
Example #2
0
/*
 * timer_queue_migrate_cpu() is invoked by the Power-Management kext
 * when a logical processor enters a deep C-state with a distant
 * deadline, so that its timer queue can be handed off to another
 * processor.  That target should be the least idle (most busy)
 * processor -- currently the primary processor for the calling
 * thread's package.  Locking restrictions demand that the target
 * cpu be the boot cpu.
 */
uint32_t
timer_queue_migrate_cpu(int target_cpu)
{
    cpu_data_t	*local_cdp = current_cpu_datap();
    cpu_data_t	*target_cdp = cpu_datap(target_cpu);
    int		moved;

    assert(!ml_get_interrupts_enabled());
    assert(target_cpu != local_cdp->cpu_number);
    assert(target_cpu == master_cpu);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              DECR_TIMER_MIGRATE | DBG_FUNC_START,
                              target_cpu,
                              local_cdp->rtclock_timer.deadline,
                              (local_cdp->rtclock_timer.deadline >> 32),
                              0, 0);

    /*
     * Move timer requests from the local queue to the target processor's.
     * A return value of 0 indicates the first (i.e. earliest) local timer
     * is earlier than the target's earliest; since that would force a
     * resync, the move of this and all later requests is aborted.
     */
    moved = timer_queue_migrate(&local_cdp->rtclock_timer.queue,
                                &target_cdp->rtclock_timer.queue);

    /* If anything moved, the local deadline no longer applies. */
    if (moved > 0) {
        local_cdp->rtclock_timer.deadline = EndOfAllTime;
        setPop(EndOfAllTime);
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                              DECR_TIMER_MIGRATE | DBG_FUNC_END,
                              target_cpu, moved, 0, 0, 0);

    return moved;
}
Example #3
0
/*
 * Re-evaluate the outstanding deadline and, if it is due no later than
 * the currently scheduled pop, reprogram the "pop" event.
 *
 * Should be called at splclock (the level is raised/restored here anyway).
 */
void etimer_resync_deadlines(void)
{
    uint64_t deadline;
    rtclock_timer_t *mytimer;
    spl_t s = splclock();
    cpu_data_t *pp;

    pp = current_cpu_datap();
    deadline = EndOfAllTime;

    /*
     * If we have a clock timer set, pick that.
     */
    mytimer = &pp->rt_timer;
    if (!mytimer->has_expired && 0 < mytimer->deadline
        && mytimer->deadline < EndOfAllTime)
        deadline = mytimer->deadline;

    /*
     * Go and set the "pop" event, but only if the new deadline is not
     * later than the pop already programmed.
     */
    if (deadline > 0 && deadline <= pp->rtcPop) {
        int decr;
        uint64_t now;

        now = mach_absolute_time();
        decr = setPop(deadline);

        /*
         * If the deadline has already passed, the hardware was armed
         * for the minimum latency (decr) from now; record that as the
         * effective pop time instead of the stale deadline.
         */
        if (deadline < now) {
            pp->rtcPop = now + decr;
        } else {
            pp->rtcPop = deadline;
        }
    }

    splx(s);
}
Example #4
0
/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
etimer_resync_deadlines(void)
{
	uint64_t		next_pop = ~0ULL;	/* "no event" sentinel */
	rtclock_timer_t		*timer;
	spl_t			s = splclock();		/* No interruptions please */
	struct per_proc_info	*pp = getPerProc();

	/* A pending clock timer is our first candidate deadline. */
	timer = &pp->rtclock_timer;
	if (!timer->has_expired && timer->deadline > 0)
		next_pop = timer->deadline;

	/* A power-management event may be due sooner still. */
	if (pp->pms.pmsPop > 0 && pp->pms.pmsPop < next_pop)
		next_pop = pp->pms.pmsPop;

	/* Only reprogram if the deadline is no later than the pop
	 * already scheduled. */
	if (next_pop > 0 && next_pop <= pp->rtcPop) {
		uint64_t	now = mach_absolute_time();
		int		decr = setPop(next_pop);

		/* A deadline already in the past was armed for the
		 * minimum latency (decr) from now; record the effective
		 * pop time accordingly. */
		pp->rtcPop = (next_pop < now) ? (now + decr) : next_pop;

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE, decr, 2, 0, 0, 0);
	}
	splx(s);
}