Example #1
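/*
 * Return this CPU's saved interrupt register state if the given thread
 * is the current thread, an interrupt state exists, and that state is
 * not a first-level interrupt taken from user mode (i.e. it is genuine
 * kernel state); otherwise return NULL.
 */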
x86_saved_state_t *
find_kern_regs(thread_t thread)
{
	if (thread == current_thread() && 
		NULL != current_cpu_datap()->cpu_int_state &&
		!(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
		  current_cpu_datap()->cpu_interrupt_level == 1)) {

		return current_cpu_datap()->cpu_int_state;
	} else {
		return NULL;
	}
}
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t	func,
	uint32_t				time,
	uint32_t				units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CHUD,
			     CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
		(uint32_t) func, time, units, 0, 0);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
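A minimal call-site sketch (not from the source: the wrapper and callback names are hypothetical, and NSEC_PER_SEC is assumed to be a valid scale factor for the units argument, which is passed straight through to clock_interval_to_deadline()):

/* Sketch only: arm a one-shot on the current CPU, roughly one second
 * out; any callback already pending on this CPU is replaced. */
static void
arm_cpu_timer_sketch(chudxnu_cpu_timer_callback_func_t my_cb)
{
	kern_return_t kr;

	kr = chudxnu_cpu_timer_callback_enter(my_cb, 1, NSEC_PER_SEC);
	assert(kr == KERN_SUCCESS);
}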
Example #3
void cpu_powerstats(__unused void *arg) {
	cpu_data_t *cdp = current_cpu_datap();
	__unused int cnum = cdp->cpu_number;
	uint32_t cl = 0, ch = 0, mpl = 0, mph = 0, apl = 0, aph = 0;

	rdmsr_carefully(MSR_IA32_MPERF, &mpl, &mph);
	rdmsr_carefully(MSR_IA32_APERF, &apl, &aph);

	cdp->cpu_mperf = ((uint64_t)mph << 32) | mpl;
	cdp->cpu_aperf = ((uint64_t)aph << 32) | apl;

	uint64_t ctime = mach_absolute_time();
	cdp->cpu_rtime_total += ctime - cdp->cpu_ixtime;
	cdp->cpu_ixtime = ctime;

	rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch);
	cdp->cpu_c3res = ((uint64_t)ch << 32) | cl;

	rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY, &cl, &ch);
	cdp->cpu_c6res = ((uint64_t)ch << 32) | cl;

	rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch);
	cdp->cpu_c7res = ((uint64_t)ch << 32) | cl;

	if (diag_pmc_enabled) {
		uint64_t insns = read_pmc(FIXED_PMC0);
		uint64_t ucc = read_pmc(FIXED_PMC1);
		uint64_t urc = read_pmc(FIXED_PMC2);
		cdp->cpu_cur_insns = insns;
		cdp->cpu_cur_ucc = ucc;
		cdp->cpu_cur_urc = urc;
	}
}
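cpu_powerstats() samples only the CPU it runs on, so a caller has to fan it out; a hedged sketch of that wiring, assuming the mp_cpus_call() interface from osfmk/i386/mp.h:

/* Sketch: have every CPU snapshot its own MSRs and counters. */
static void
sample_all_cpus_sketch(void)
{
	mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_powerstats, NULL);
}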
Example #4
/* -----------------------------------------------------------------------------
   vmx_on()
	Enter VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_on(void)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	vmx_init();
	
	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region)
		panic("vmx_on: VMXON region not allocated");
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
	 */
	set_cr4(get_cr4() | CR4_VMXE);
	
	assert(vmx_is_cr0_valid(&cpu->specs));
	assert(vmx_is_cr4_valid(&cpu->specs));
	
	if ((result = __vmxon(&vmxon_region_paddr)) != VMX_SUCCEED) {
		panic("vmx_on: unexpected return %d from __vmxon()", result);
	}
}
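vmx_on() likewise only affects the executing CPU; one plausible way to enter VMX root operation everywhere at once, assuming the mp_rendezvous_no_intrs() primitive from osfmk/i386/mp.h:

/* Sketch: run vmx_on() on all CPUs with interrupts disabled. */
static void
vmx_on_all_cpus_sketch(void)
{
	mp_rendezvous_no_intrs((void (*)(void *))vmx_on, NULL);
}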
Example #5
static x86_lcpu_t *
pmGetMyLogicalCPU(void)
{
    cpu_data_t	*cpup	= current_cpu_datap();

    return(&cpup->lcpu);
}
Example #6
static x86_core_t *
pmGetMyCore(void)
{
    cpu_data_t	*cpup	= current_cpu_datap();

    return(cpup->lcpu.core);
}
Example #7
/**
 * etimer_intr
 *
 * Timer interrupt routine, called from the realtime clock interrupt
 * routine.
 */
void etimer_intr(__unused int inuser, __unused uint64_t iaddr)
{
    uint64_t abstime;
    rtclock_timer_t *mytimer;
    cpu_data_t *pp;

    pp = current_cpu_datap();

    SCHED_STATS_TIMER_POP(current_processor());

    abstime = mach_absolute_time(); /* Get the time now */

    /*
     * has a pending clock timer expired? 
     */
    mytimer = &pp->rt_timer;    /* Point to the event timer */
    if (mytimer->deadline <= abstime) {
        mytimer->has_expired = TRUE;    /* Remember that we popped */
        mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
        mytimer->has_expired = FALSE;
    }

    pp->rtcPop = EndOfAllTime;  /* any real deadline will be earlier */
    /*
     * schedule our next deadline 
     */

    etimer_resync_deadlines();
}
Example #8
static x86_die_t *
pmGetMyDie(void)
{
    cpu_data_t	*cpup	= current_cpu_datap();

    return(cpup->lcpu.die);
}
Example #9
/* snapshot current PMCs and update counters in the current thread */
static void
kpc_update_thread_counters( thread_t thread )
{
	uint32_t i;
	uint64_t *tmp = NULL;
	cpu_data_t *cpu = NULL;

	cpu = current_cpu_datap();

	/* 1. stash current PMCs into latest CPU block */
	kpc_get_cpu_counters( FALSE, kpc_thread_classes, 
	                      NULL, cpu->cpu_kpc_buf[1] );

	/* 2. apply delta to old thread */
	if( thread->kpc_buf )
		for( i = 0; i < kpc_thread_classes_count; i++ )
			thread->kpc_buf[i] += cpu->cpu_kpc_buf[1][i] - cpu->cpu_kpc_buf[0][i];

	/* schedule any necessary allocations */
	if( !current_thread()->kpc_buf )
	{
		current_thread()->kperf_flags |= T_KPC_ALLOC;
		act_set_kperf(current_thread());
	}

	/* 3. switch the PMC block pointers */
	tmp = cpu->cpu_kpc_buf[1];
	cpu->cpu_kpc_buf[1] = cpu->cpu_kpc_buf[0];
	cpu->cpu_kpc_buf[0] = tmp;
}
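For context, a sketch of the context-switch hook that would drive this update; the kpc_switch_context() name is assumed from the surrounding kpc code, not shown here:

/* Sketch: fold the outgoing thread's PMC delta in at context switch. */
void
kpc_switch_context(thread_t old, thread_t new __unused)
{
	kpc_update_thread_counters(old);
}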
Example #10
static x86_pkg_t *
pmGetMyPackage(void)
{
    cpu_data_t	*cpup	= current_cpu_datap();

    return(cpup->lcpu.package);
}
Example #11
/*
 * thread_fast_set_cthread_self: Sets the machine kernel thread ID of the
 * current thread to the given thread ID; fast version for 32-bit processes
 *
 * Parameters:    self                    Thread ID to set
 *                
 * Returns:        USER_CTHREAD           New %gs segment selector
 */
kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
	thread_t thread = current_thread();
	pcb_t pcb = thread->machine.pcb;
	struct real_descriptor desc = {
		.limit_low = 1,
		.limit_high = 0,
		.base_low = self & 0xffff,
		.base_med = (self >> 16) & 0xff,
		.base_high = (self >> 24) & 0xff,
		.access = ACC_P|ACC_PL_U|ACC_DATA_W,
		.granularity = SZ_32|SZ_G,
	};

	pcb->cthread_self = (uint64_t) self;	/* preserve old func too */

	/* assign descriptor */
	mp_disable_preemption();
	pcb->cthread_desc = desc;
	*ldt_desc_p(USER_CTHREAD) = desc;
	saved_state32(pcb->iss)->gs = USER_CTHREAD;
	mp_enable_preemption();

	return (USER_CTHREAD);
}
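Once the descriptor is installed and %gs holds USER_CTHREAD, 32-bit user code reaches its thread-specific data relative to %gs. An illustrative user-side sketch, assuming (as on Darwin) that TSD slot 0 holds the self pointer:

/* 32-bit user side (illustrative only): read TSD slot 0 via %gs. */
static inline void *
tsd_self32(void)
{
	void *self;
	__asm__ ("movl %%gs:0, %0" : "=r" (self));
	return self;
}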

/*
 * thread_fast_set_cthread_self64: Sets the machine kernel thread ID of the
 * current thread to the given thread ID; fast version for 64-bit processes 
 *
 * Parameters:    self                    Thread ID
 *                
 * Returns:        USER_CTHREAD           New %gs segment selector
 */
kern_return_t
thread_fast_set_cthread_self64(uint64_t self)
{
	pcb_t pcb = current_thread()->machine.pcb;
	cpu_data_t              *cdp;

	/* check for canonical address, set 0 otherwise  */
	if (!IS_USERADDR64_CANONICAL(self))
		self = 0ULL;

	pcb->cthread_self = self;
	mp_disable_preemption();
	cdp = current_cpu_datap();
#if defined(__x86_64__)
	if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) ||
	    (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE)))
		wrmsr64(MSR_IA32_KERNEL_GS_BASE, self);
#endif
	cdp->cpu_uber.cu_user_gs_base = self;
	mp_enable_preemption();
	return (USER_CTHREAD);
}
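The 64-bit path gets the same effect through MSR_IA32_KERNEL_GS_BASE, which swapgs installs as the user GS base on the return to user mode; the analogous illustrative sketch:

/* 64-bit user side (illustrative only): read TSD slot 0 via %gs. */
static inline void *
tsd_self64(void)
{
	void *self;
	__asm__ ("movq %%gs:0, %0" : "=r" (self));
	return self;
}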
Example #12
/**
 * timer_queue_cancel
 *
 * Remove a timer from the queue.
 */
void timer_queue_cancel(mpqueue_head_t * queue, uint64_t deadline,
                        uint64_t new_deadline)
{
    if (queue == &current_cpu_datap()->rt_timer.queue) {
        if (deadline < new_deadline) {
            etimer_set_deadline(new_deadline);
        }
    }
}
Example #13
/*
 * Called when the CPU is idle.  It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t		*my_cpu		= current_cpu_datap();

    if (my_cpu == NULL)
	goto out;

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    if (pmInitDone) {
	/*
	 * Handle the case where ml_set_maxbusdelay() or ml_set_maxintdelay()
	 * was called before the CPU PM kext was registered.  We apply the
	 * values here because idle is the first place where decisions based
	 * on them are made.
	 */
	if (earlyMaxBusDelay != DELAY_UNSET)
	    ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));

	if (earlyMaxIntDelay != DELAY_UNSET)
	    ml_set_maxintdelay(earlyMaxIntDelay);
    }

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->MachineIdle != NULL)
	(*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
	/*
	 * If no power management, re-enable interrupts and halt.
	 * This will keep the CPU from spinning through the scheduler
	 * and will allow at least some minimal power savings (but it
	 * may cause problems in some MP configurations w.r.t. the APIC
	 * stopping during a GV3 transition).
	 */
	pal_hlt();

	/* Once woken, re-disable interrupts. */
	pal_cli();
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    my_cpu->lcpu.state = LCPU_RUN;

    /*
     * Re-enable interrupts.
     */
  out:
    pal_sti();
}
Example #14
void
cpu_sleep(void)
{
	cpu_data_t	*cdp = current_cpu_datap();

	PE_cpu_machine_quiesce(cdp->cpu_id);

	cpu_thread_halt();
}
Example #15
/*
 * Called when the CPU is to be halted.  It will choose the best C-State
 * to be in.
 */
void
pmCPUHalt(uint32_t reason)
{
    cpu_data_t	*cpup	= current_cpu_datap();

    switch (reason) {
    case PM_HALT_DEBUG:
	cpup->lcpu.state = LCPU_PAUSE;
	pal_stop_cpu(FALSE);
	break;

    case PM_HALT_PANIC:
	cpup->lcpu.state = LCPU_PAUSE;
	pal_stop_cpu(TRUE);
	break;

    case PM_HALT_NORMAL:
    case PM_HALT_SLEEP:
    default:
        pal_cli();

	if (pmInitDone
	    && pmDispatch != NULL
	    && pmDispatch->pmCPUHalt != NULL) {
	    /*
	     * Halt the CPU (and put it in a low-power state).
	     */
	    (*pmDispatch->pmCPUHalt)();

	    /*
	     * We've exited halt, so get the CPU schedulable again.
	     * - by calling the fast init routine for a slave, or
	     * - by returning if we're the master processor.
	     */
	    if (cpup->cpu_number != master_cpu) {
		i386_init_slave_fast();
		panic("init_slave_fast returned");
	    }
	} else
	{
	    /*
	     * If no power management and a processor is taken off-line,
	     * then invalidate the cache and halt it (it will not be able
	     * to be brought back on-line without resetting the CPU).
	     */
	    __asm__ volatile ("wbinvd");
	    cpup->lcpu.state = LCPU_HALT;
	    pal_stop_cpu(FALSE);

	    panic("back from Halt");
	}

	break;
    }
}
Example #16
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* We can't get user state for kernel threads */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		/* this properly handles deciding whether or not the thread is 64 bit or not */
		return machine_thread_get_state(thread, flavor, tstate, count);
	} else {
		// i386 machine_thread_get_kern_state() differs from the PPC
		// version, which returns the previous save area - user or
		// kernel - rather than kernel state or NULL when no kernel
		// interrupt state is available.

		// The real purpose of this branch: the caller doesn't care
		// whether the state is user or kernel, it just wants the
		// thread state, so we must determine the proper one, kernel
		// or user, to return for the given thread.
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			// the above are conditions where we possibly can read the kernel
			// state. we still need to determine if this interrupt happened in
			// kernel or user context
			if(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
			   current_cpu_datap()->cpu_interrupt_level == 1) {
				// interrupt happened in user land
				return machine_thread_get_state(thread, flavor, tstate, count);
			} else {
				// kernel interrupt.
				return machine_thread_get_kern_state(thread, flavor, tstate, count);
			}
		} else {
			// get the user-mode thread state
			return machine_thread_get_state(thread, flavor, tstate, count);
		}
	}
}
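A hedged call-site sketch using the standard Mach x86 flavor constants (the wrapper name is hypothetical):

/* Sketch: fetch the current thread's user-mode register state. */
static kern_return_t
get_user_state_sketch(void)
{
	x86_thread_state_t	state;
	mach_msg_type_number_t	count = x86_THREAD_STATE_COUNT;

	return chudxnu_thread_get_state(current_thread(), x86_THREAD_STATE,
					(thread_state_t)&state, &count, TRUE);
}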
Example #17
/*
 * Called when a CPU is being restarted after being powered off (as in S3).
 */
void
pmCPUMarkRunning(cpu_data_t *cpu)
{
    cpu_data_t	*cpup	= current_cpu_datap();

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->markCPURunning != NULL)
	(*pmDispatch->markCPURunning)(&cpu->lcpu);
    else
	cpup->lcpu.state = LCPU_RUN;
}
Example #18
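/*
 * Track how many logical CPUs in this package are idle: increment the
 * count on idle entry, decrement it on exit, and count a package idle
 * exit when the departing CPU was the last one keeping the whole
 * package idle.
 */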
void machine_track_platform_idle(boolean_t entry) {
	cpu_data_t		*my_cpu		= current_cpu_datap();

	if (entry) {
		(void)__sync_fetch_and_add(&my_cpu->lcpu.package->num_idle, 1);
	}
	else {
		uint32_t nidle = __sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1);
		if (nidle == topoParms.nLThreadsPerPackage) {
			my_cpu->lcpu.package->package_idle_exits++;
		}
	}
}
Example #19
void
cpu_init(void)
{
	cpu_data_t	*cdp = current_cpu_datap();

	timer_call_queue_init(&cdp->rtclock_timer.queue);
	cdp->rtclock_timer.deadline = EndOfAllTime;

	cdp->cpu_type = cpuid_cputype();
	cdp->cpu_subtype = cpuid_cpusubtype();

	i386_activate_cpu();
}
Example #20
/*
 * Called when the CPU is to be halted.  It will choose the best C-State
 * to be in.
 */
void
pmCPUHalt(uint32_t reason)
{
    cpu_data_t	*cpup	= current_cpu_datap();

    switch (reason) {
    case PM_HALT_DEBUG:
	cpup->lcpu.state = LCPU_PAUSE;
	__asm__ volatile ("wbinvd; hlt");
	break;

    case PM_HALT_PANIC:
	cpup->lcpu.state = LCPU_PAUSE;
	__asm__ volatile ("cli; wbinvd; hlt");
	break;

    case PM_HALT_NORMAL:
    default:
	__asm__ volatile ("cli");

	if (pmInitDone
	    && pmDispatch != NULL
	    && pmDispatch->pmCPUHalt != NULL) {
	    /*
	     * Halt the CPU (and put it in a low-power state).
	     */
	    (*pmDispatch->pmCPUHalt)();

	    /*
	     * We've exited halt, so get the CPU schedulable again.
	     */
	    i386_init_slave_fast();

	    panic("init_slave_fast returned");
	} else {
	    /*
	     * If no power management and a processor is taken off-line,
	     * then invalidate the cache and halt it (it will not be able
	     * to be brought back on-line without resetting the CPU).
	     */
	    __asm__ volatile ("wbinvd");
	    cpup->lcpu.state = LCPU_HALT;
	    __asm__ volatile ("wbinvd; hlt");

	    panic("back from Halt");
	}
	break;
    }
}
Example #21
static void
pmReSyncDeadlines(int cpu)
{
    static boolean_t	registered	= FALSE;

    if (!registered) {
	PM_interrupt_register(&etimer_resync_deadlines);
	registered = TRUE;
    }

    if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num)
	etimer_resync_deadlines();
    else
	cpu_PM_interrupt(cpu);
}
Example #22
/**
 * etimer_set_deadline
 *
 * Set the clock deadline.
 */
void etimer_set_deadline(uint64_t deadline)
{
    rtclock_timer_t *mytimer;
    spl_t s;
    cpu_data_t *pp;

    s = splclock();             /* no interruptions */
    pp = current_cpu_datap();

    mytimer = &pp->rt_timer;    /* Point to the timer itself */
    mytimer->deadline = deadline;   /* Set the new expiration time */

    etimer_resync_deadlines();

    splx(s);
}
Example #23
/**
 * timer_queue_assign
 *
 * Assign a deadline and return the current processor's timer queue.
 */
mpqueue_head_t *timer_queue_assign(uint64_t deadline)
{
    cpu_data_t *cdp = current_cpu_datap();
    mpqueue_head_t *queue;

    if (cdp->cpu_running) {
        queue = &cdp->rt_timer.queue;
        if (deadline < cdp->rt_timer.deadline) {
            etimer_set_deadline(deadline);
        }
    } else {
        queue = &cpu_datap(master_cpu)->rt_timer.queue;
    }

    return (queue);
}
Example #24
void
cpu_machine_init(
	void)
{
	cpu_data_t	*cdp = current_cpu_datap();

	PE_cpu_machine_init(cdp->cpu_id, !cdp->cpu_boot_complete);
	cdp->cpu_boot_complete = TRUE;
	cdp->cpu_running = TRUE;
	ml_init_interrupt();

#if CONFIG_VMX
	/* for every CPU, get the VT specs */
	vmx_get_specs();
#endif
}
Example #25
void cpu_pmc_control(void *enablep) {
	boolean_t enable = *(boolean_t *)enablep;
	cpu_data_t	*cdp = current_cpu_datap();

	if (enable) {
		/* IA32_PERF_GLOBAL_CTRL (0x38F): enable fixed counters 0-2
		 * (bits 32-34) and programmable counters 0-3 (bits 0-3). */
		wrmsr64(0x38F, 0x70000000FULL);
		/* IA32_FIXED_CTR_CTRL (0x38D): count in rings 0 and 3 for
		 * each fixed counter. */
		wrmsr64(0x38D, 0x333);
		/* CR4.PCE: allow RDPMC from user mode. */
		set_cr4(get_cr4() | CR4_PCE);
	} else {
		wrmsr64(0x38F, 0);
		wrmsr64(0x38D, 0);
		set_cr4((get_cr4() & ~CR4_PCE));
	}
	cdp->cpu_fixed_pmcs_enabled = enable;
}
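cpu_pmc_control() also acts only on the local CPU; a hedged fan-out sketch (mp_cpus_call() usage assumed, as above):

/* Sketch: toggle the fixed-function PMCs on every CPU. */
static void
enable_all_pmcs_sketch(void)
{
	boolean_t enable = TRUE;

	/* SYNC keeps the stack argument live until every CPU has run. */
	mp_cpus_call(CPUMASK_ALL, SYNC, cpu_pmc_control, &enable);
}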
/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
	chudcpu_signal_request_t	*reqp;
	chudcpu_data_t			*chudinfop;

	chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

	mpdequeue_head(&(chudinfop->cpu_request_queue),
		       (queue_entry_t *) &reqp);
	while (reqp != NULL) {
		chudxnu_private_cpu_signal_handler(reqp->req_code);
		reqp->req_sync = 0;
		mpdequeue_head(&(chudinfop->cpu_request_queue),
			       (queue_entry_t *) &reqp);
	}
}
Example #27
/*
 * Re-evaluate the outstanding deadlines and select the most proximate.
 *
 * Should be called at splclock.
 */
void
timer_resync_deadlines(void)
{
    uint64_t		deadline = EndOfAllTime;
    uint64_t		pmdeadline;
    rtclock_timer_t		*mytimer;
    spl_t			s = splclock();
    cpu_data_t		*pp;
    uint32_t		decr;

    pp = current_cpu_datap();
    if (!pp->cpu_running) {
        /* There's really nothing to do if this processor is down */
        splx(s);
        return;
    }

    /*
     * If we have a clock timer set, pick that.
     */
    mytimer = &pp->rtclock_timer;
    if (!mytimer->has_expired &&
            0 < mytimer->deadline && mytimer->deadline < EndOfAllTime)
        deadline = mytimer->deadline;

    /*
     * If we have a power management deadline, see if that's earlier.
     */
    pmdeadline = pmCPUGetDeadline(pp);
    if (0 < pmdeadline && pmdeadline < deadline)
        deadline = pmdeadline;

    /*
     * Go and set the "pop" event.
     */
    decr = (uint32_t) setPop(deadline);

    /* Record non-PM deadline for latency tool */
    if (decr != 0 && deadline != pmdeadline) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  DECR_SET_DEADLINE | DBG_FUNC_NONE,
                                  decr, 2,
                                  deadline,
                                  mytimer->queue.count, 0);
    }
    splx(s);
}
Example #28
/**
 * cpu_init
 *
 * Initialize more core processor data for CPU #0 during initialization.
 */
void
cpu_init(void)
{
	cpu_data_t	*cdp = current_cpu_datap();

	timer_call_initialize_queue(&cdp->rt_timer.queue);
	cdp->rt_timer.deadline = EndOfAllTime;

	cdp->cpu_type = CPU_TYPE_ARM;
#if defined(_ARM_ARCH_7)
	cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7;
#elif defined(_ARM_ARCH_V6)
	cdp->cpu_subtype = CPU_SUBTYPE_ARM_V6;
#else
	cdp->cpu_subtype = CPU_SUBTYPE_ARM_ALL;
#endif
}
Example #29
void
thread_tell_urgency(int urgency,
    uint64_t rt_period,
    uint64_t rt_deadline,
    thread_t nthread)
{
	uint64_t	urgency_notification_time_start, delta;
	boolean_t	urgency_assert = (urgency_notification_assert_abstime_threshold != 0);
	assert(get_preemption_level() > 0 || ml_get_interrupts_enabled() == FALSE);
#if	DEBUG
	urgency_stats[cpu_number() % 64][urgency]++;
#endif
	if (!pmInitDone
	    || pmDispatch == NULL
	    || pmDispatch->pmThreadTellUrgency == NULL)
		return;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, 0, 0);

	if (__improbable((urgency_assert == TRUE)))
		urgency_notification_time_start = mach_absolute_time();

	current_cpu_datap()->cpu_nthread = nthread;
	pmDispatch->pmThreadTellUrgency(urgency, rt_period, rt_deadline);

	if (__improbable((urgency_assert == TRUE))) {
		delta = mach_absolute_time() - urgency_notification_time_start;

		if (__improbable(delta > urgency_notification_max_recorded)) {
			/* This is not synchronized, but it doesn't matter
			 * if we (rarely) miss an event, as it is statistically
			 * unlikely that it will never recur.
			 */
			urgency_notification_max_recorded = delta;

			if (__improbable((delta > urgency_notification_assert_abstime_threshold) && !machine_timeout_suspended()))
				panic("Urgency notification callout %p exceeded threshold, 0x%llx abstime units", pmDispatch->pmThreadTellUrgency, delta);
		}
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0);
}
Example #30
void
timer_queue_expire_local(
    __unused void			*arg)
{
    rtclock_timer_t		*mytimer;
    uint64_t			abstime;
    cpu_data_t			*pp;

    pp = current_cpu_datap();

    mytimer = &pp->rtclock_timer;
    abstime = mach_absolute_time();

    mytimer->has_expired = TRUE;
    mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
    mytimer->has_expired = FALSE;
    mytimer->when_set = mach_absolute_time();

    timer_resync_deadlines();
}