Example #1
void
timer_call_shutdown(
	processor_t			processor)
{
	timer_call_t		call;
	queue_t				queue, myqueue;

	assert(processor != current_processor());

	queue = &PROCESSOR_DATA(processor, timer_call_queue);
	myqueue = &PROCESSOR_DATA(current_processor(), timer_call_queue);

	simple_lock(&timer_call_lock);

	call = TC(queue_first(queue));

	while (!queue_end(queue, qe(call))) {
		_delayed_call_dequeue(call);

		_delayed_call_enqueue(myqueue, call);

		call = TC(queue_first(queue));
	}

	call = TC(queue_first(myqueue));

	if (!queue_end(myqueue, qe(call)))
		_set_delayed_call_timer(call);

	simple_unlock(&timer_call_lock);
}
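
The function above drains the dying processor's delayed-timer queue into the current processor's queue under the global timer_call_lock, then re-arms the hardware timer from the merged queue's head. Below is a minimal user-space sketch of that drain-and-migrate loop shape, using a hand-rolled circular doubly-linked list in place of XNU's <kern/queue.h> package; every name in it is illustrative, not the kernel's.

#include <stddef.h>

/* Illustrative stand-in for XNU's circular queue: the head is a
 * sentinel node, and an empty queue points at itself. */
struct qnode { struct qnode *next, *prev; };

static void q_init(struct qnode *head) { head->next = head->prev = head; }
static int  q_end(struct qnode *head, struct qnode *e) { return e == head; }

static void q_remove(struct qnode *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void q_insert_tail(struct qnode *head, struct qnode *e)
{
	e->prev = head->prev; e->next = head;
	head->prev->next = e; head->prev = e;
}

/* Mirror of the loop above: pop the head of `from` until it is empty,
 * appending each entry to `to` (the kernel re-sorts by deadline here
 * via _delayed_call_enqueue). */
static void q_drain(struct qnode *from, struct qnode *to)
{
	struct qnode *e = from->next;
	while (!q_end(from, e)) {
		q_remove(e);
		q_insert_tail(to, e);
		e = from->next;
	}
}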
Example #2
/*
 *	load_context:
 *
 *	Start the first thread on a processor.
 */
static void
load_context(
	thread_t		thread)
{
	processor_t		processor = current_processor();


#define load_context_kprintf(x...) /* kprintf("load_context: " x) */

	load_context_kprintf("calling machine_set_current_thread\n");
	machine_set_current_thread(thread);

	load_context_kprintf("calling processor_up\n");
	processor_up(processor);

	PMAP_ACTIVATE_KERNEL(processor->cpu_id);

	/*
	 * Acquire a stack if none attached.  The panic
	 * should never occur since the thread is expected
	 * to have a reserved stack.
	 */
	load_context_kprintf("thread %p, stack %lx, stackptr %lx\n", thread,
			     thread->kernel_stack, thread->machine.kstackptr);
	if (!thread->kernel_stack) {
		load_context_kprintf("calling stack_alloc_try\n");
		if (!stack_alloc_try(thread))
			panic("load_context");
	}

	/*
	 * The idle processor threads are not counted as
	 * running for load calculations.
	 */
	if (!(thread->state & TH_IDLE))
		sched_run_incr();

	processor->active_thread = thread;
	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;
	processor->deadline = UINT64_MAX;
	thread->last_processor = processor;

	processor->last_dispatch = mach_absolute_time();
	timer_start(&thread->system_timer, processor->last_dispatch);
	PROCESSOR_DATA(processor, thread_timer) = PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

	timer_start(&PROCESSOR_DATA(processor, system_state), processor->last_dispatch);
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

	PMAP_ACTIVATE_USER(thread, processor->cpu_id);

	load_context_kprintf("calling machine_load_context\n");
	machine_load_context(thread);
	/*NOTREACHED*/
}
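
The load_context_kprintf definition above is a small but handy pattern: a variadic macro (in the GNU named-variadic form the kernel uses) whose expansion is commented out, so the tracing call sites stay in the source but compile to nothing. A standalone sketch of the same trick; the guard macro name here is an assumption:

#include <stdio.h>

#define LOAD_CONTEXT_DEBUG 0	/* flip to 1 to re-enable tracing */

#if LOAD_CONTEXT_DEBUG
#define load_context_kprintf(x...) printf("load_context: " x)
#else
#define load_context_kprintf(x...) /* expands to nothing */
#endif

int main(void)
{
	load_context_kprintf("thread %p\n", (void *)0);	/* compiled away */
	return 0;
}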
Example #3
void
processor_data_init(
	processor_t		processor)
{
	(void)memset(&processor->processor_data, 0, sizeof (processor_data_t));

	timer_init(&PROCESSOR_DATA(processor, idle_state));
	timer_init(&PROCESSOR_DATA(processor, system_state));
	timer_init(&PROCESSOR_DATA(processor, user_state));

	PROCESSOR_DATA(processor, debugger_state).db_current_op = DBOP_NONE;
}
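
PROCESSOR_DATA(processor, member) appears throughout these examples; in XNU of this era it is, to a first approximation, plain sugar for reaching into the per-processor data block embedded in the processor structure. The sketch below illustrates only the accessor shape; the real processor_data_t (osfmk/kern/processor_data.h) carries timers, VM statistics, the stack cache, and more, and may differ in detail.

#include <stdint.h>
#include <string.h>

/* Illustrative per-processor data block, not the kernel's layout. */
typedef struct processor_data {
	uint64_t	idle_state;
	uint64_t	system_state;
	uint64_t	user_state;
} processor_data_t;

typedef struct processor {
	int			cpu_id;
	processor_data_t	processor_data;
} *processor_t;

/* The accessor is just a member access on the embedded struct. */
#define PROCESSOR_DATA(processor, member) \
	((processor)->processor_data.member)

static void processor_data_init_sketch(processor_t processor)
{
	memset(&processor->processor_data, 0, sizeof(processor_data_t));
	PROCESSOR_DATA(processor, idle_state) = 0;	/* timer_init() analogue */
}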
Example #4
void
stack_free_stack(
	vm_offset_t		stack)
{
	struct stack_cache	*cache;
	spl_t				s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
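
stack_free_stack prefers the per-processor cache, which needs no lock because at splsched only the owning CPU touches it, and spills onto the locked global free list only when the cache is full, maintaining a high-water mark. A hedged user-space analogue, with a mutex standing in for stack_lock() and an assumed cache bound:

#include <pthread.h>
#include <stddef.h>

#define STACK_CACHE_SIZE 2	/* assumed per-CPU cache bound */

struct stack_elt { struct stack_elt *next; };
struct cpu_cache { struct stack_elt *free; unsigned count; };

static struct stack_elt *global_free_list;
static unsigned global_free_count, global_free_hiwat;
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: push onto the local cache. Slow path: take the global
 * lock, push onto the shared list, and keep the high-water mark. */
static void cache_free(struct cpu_cache *cache, struct stack_elt *s)
{
	if (cache->count < STACK_CACHE_SIZE) {
		s->next = cache->free;
		cache->free = s;
		cache->count++;
	} else {
		pthread_mutex_lock(&global_lock);
		s->next = global_free_list;
		global_free_list = s;
		if (++global_free_count > global_free_hiwat)
			global_free_hiwat = global_free_count;
		pthread_mutex_unlock(&global_lock);
	}
}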
Example #5
/*
 *	Complete the shutdown and place the processor offline.
 *
 *	Called at splsched in the shutdown context.
 */
void
processor_offline(
	processor_t			processor)
{
	thread_t			new_thread, old_thread = processor->active_thread;

	new_thread = processor->idle_thread;
	processor->active_thread = new_thread;
	processor->current_pri = IDLEPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->deadline = UINT64_MAX;
	new_thread->last_processor = processor;

	processor->last_dispatch = mach_absolute_time();
	timer_stop(PROCESSOR_DATA(processor, thread_timer), processor->last_dispatch);

	machine_set_current_thread(new_thread);

	thread_dispatch(old_thread, new_thread);

	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

	cpu_sleep();
	panic("zombie processor");
	/*NOTREACHED*/
}
Example #6
boolean_t
timer_call_cancel(
	timer_call_t			call)
{
	boolean_t		result = TRUE;
	spl_t			s;

	s = splclock();
	simple_lock(&timer_call_lock);

	if (call->state == DELAYED) {
		queue_t			queue = &PROCESSOR_DATA(current_processor(), timer_call_queue);

		if (queue_first(queue) == qe(call)) {
			_delayed_call_dequeue(call);

			if (!queue_empty(queue))
				_set_delayed_call_timer((timer_call_t)queue_first(queue));
		}
		else
			_delayed_call_dequeue(call);
	}
	else
		result = FALSE;

	simple_unlock(&timer_call_lock);
	splx(s);

	return (result);
}
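
The interesting case in timer_call_cancel is when the cancelled call sat at the head of the queue: the hardware timer was armed for it, so the kernel must re-arm from the new first entry. A sketch of that head-tracking removal on a simple deadline-sorted singly-linked list; the struct is illustrative, not XNU's timer_call layout:

#include <stdint.h>
#include <stddef.h>

struct tcall {			/* illustrative timer entry */
	struct tcall *next;
	uint64_t deadline;
};

/* Remove `victim` from the sorted list. Returns nonzero when the head
 * changed, i.e. the case where timer_call_cancel() re-arms the
 * hardware timer from the new first entry. */
static int tcall_cancel(struct tcall **headp, struct tcall *victim)
{
	int was_head = (*headp == victim);

	for (struct tcall **pp = headp; *pp != NULL; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;
			victim->next = NULL;
			return was_head;
		}
	}
	return 0;		/* not queued: nothing to re-arm */
}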
Example #7
boolean_t
timer_call_enter1(
	timer_call_t			call,
	timer_call_param_t		param1,
	uint64_t				deadline)
{
	boolean_t		result = TRUE;
	queue_t			queue;
	spl_t			s;

	s = splclock();
	simple_lock(&timer_call_lock);

	if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else
		result = FALSE;

	call->param1	= param1;
	call->deadline	= deadline;

	queue = &PROCESSOR_DATA(current_processor(), timer_call_queue);

	_delayed_call_enqueue(queue, call);

	if (queue_first(queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&timer_call_lock);
	splx(s);

	return (result);
}
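
timer_call_enter1 relies on _delayed_call_enqueue keeping the queue sorted by deadline, then reprograms the hardware timer only if the new call landed at the head. A sketch of a deadline-sorted insert that reports exactly that condition, on the same illustrative entry type as the cancel sketch above:

#include <stdint.h>
#include <stddef.h>

struct tcall {			/* illustrative timer entry */
	struct tcall *next;
	uint64_t deadline;
};

/* Insert in deadline order; returns nonzero when `call` became the
 * new head, the case in which the caller re-arms the hardware timer. */
static int tcall_enqueue_sorted(struct tcall **headp, struct tcall *call)
{
	struct tcall **pp = headp;

	while (*pp != NULL && (*pp)->deadline <= call->deadline)
		pp = &(*pp)->next;
	call->next = *pp;
	*pp = call;
	return pp == headp;
}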
Example #8
/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t		thread)
{
	struct stack_cache	*cache;
	vm_offset_t			stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}
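
The allocation side mirrors the free path of stack_free_stack: consult the per-processor cache without locking, then the global list under stack_lock(), with an unlocked pre-check to skip the lock entirely when the list looks empty. A hedged user-space analogue; a NULL result here plays the role of the kernel falling back to thread->reserved_stack before returning FALSE:

#include <pthread.h>
#include <stddef.h>

struct stack_elt { struct stack_elt *next; };
struct cpu_cache { struct stack_elt *free; unsigned count; };

static struct stack_elt *global_free_list;
static unsigned global_free_count;
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

/* Non-blocking allocation attempt: local cache first, then the global
 * list. Returns NULL when both are empty. */
static struct stack_elt *cache_alloc_try(struct cpu_cache *cache)
{
	struct stack_elt *s = cache->free;

	if (s != NULL) {
		cache->free = s->next;
		cache->count--;
		return s;
	}
	if (global_free_list != NULL) {	/* racy pre-check, as above */
		pthread_mutex_lock(&global_lock);
		s = global_free_list;
		if (s != NULL) {	/* re-check under the lock */
			global_free_list = s->next;
			global_free_count--;
		}
		pthread_mutex_unlock(&global_lock);
	}
	return s;
}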
Example #9
/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context.
 * This performs a minimal thread_invoke() to the idle thread,
 * so it needs to be kept in sync with what thread_invoke() does.
 *
 * The onlining half of this is done in load_context().
 */
void
processor_offline(
	processor_t			processor)
{
	assert(processor == current_processor());
	assert(processor->active_thread == current_thread());

	thread_t old_thread = processor->active_thread;
	thread_t new_thread = processor->idle_thread;

	processor->active_thread = new_thread;
	processor->current_pri = IDLEPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->starting_pri = IDLEPRI;
	processor->current_sfi_class = SFI_CLASS_KERNEL;
	processor->deadline = UINT64_MAX;
	new_thread->last_processor = processor;

	uint64_t ctime = mach_absolute_time();

	processor->last_dispatch = ctime;
	old_thread->last_run_time = ctime;

	/* Update processor->thread_timer and ->kernel_timer to point to the new thread */
	thread_timer_event(ctime, &new_thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &new_thread->system_timer;

	timer_stop(PROCESSOR_DATA(processor, current_state), ctime);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
	                          old_thread->reason, (uintptr_t)thread_tid(new_thread),
	                          old_thread->sched_pri, new_thread->sched_pri, 0);

	machine_set_current_thread(new_thread);

	thread_dispatch(old_thread, new_thread);

	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

	cpu_sleep();
	panic("zombie processor");
	/*NOTREACHED*/
}
Example #10
int64_t dtrace_calc_thread_recent_vtime(thread_t thread)
{
	if (thread != THREAD_NULL) {
		processor_t             processor = current_processor();
		uint64_t 				abstime = mach_absolute_time();
		timer_t					timer;

		timer = PROCESSOR_DATA(processor, thread_timer);

		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
				(abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else
		return 0;
}
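
The return expression above combines three pieces: time already credited to the thread's system timer, time credited to its user timer, and the in-flight slice since the currently running timer's last timestamp. A small sketch of that arithmetic on a simplified accumulating timer; timer_grab() in the kernel additionally guards against torn reads of the 64-bit value, which this sketch omits:

#include <stdint.h>

struct acc_timer {		/* simplified; XNU's timer layout differs */
	uint64_t accumulated;	/* time already credited */
	uint64_t tstamp;	/* when this timer last (re)started */
};

/* Recent virtual time = credited system time + credited user time
 * + the running slice that has not been folded in yet. */
static uint64_t recent_vtime(const struct acc_timer *sys_timer,
                             const struct acc_timer *user_timer,
                             const struct acc_timer *running,
                             uint64_t now)
{
	return sys_timer->accumulated + user_timer->accumulated +
	    (now - running->tstamp);
}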
Example #11
static void
timer_call_interrupt(uint64_t timestamp)
{
	timer_call_t		call;
	queue_t				queue;

	simple_lock(&timer_call_lock);

	queue = &PROCESSOR_DATA(current_processor(), timer_call_queue);

	call = TC(queue_first(queue));

	while (!queue_end(queue, qe(call))) {
		if (call->deadline <= timestamp) {
			timer_call_func_t		func;
			timer_call_param_t		param0, param1;

			_delayed_call_dequeue(call);

			func = call->func;
			param0 = call->param0;
			param1 = call->param1;

			simple_unlock(&timer_call_lock);

			KERNEL_DEBUG_CONSTANT(
			    MACHDBG_CODE(DBG_MACH_EXCP_DECI, 2) | DBG_FUNC_START,
			    (unsigned int)func, (unsigned int)param0,
			    (unsigned int)param1, 0, 0);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
			DTRACE_TMR3(callout__start, timer_call_func_t, func,
			    timer_call_param_t, param0,
			    timer_call_param_t, param1);
#endif

			(*func)(param0, param1);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
			DTRACE_TMR3(callout__end, timer_call_func_t, func,
			    timer_call_param_t, param0,
			    timer_call_param_t, param1);
#endif

			KERNEL_DEBUG_CONSTANT(
			    MACHDBG_CODE(DBG_MACH_EXCP_DECI, 2) | DBG_FUNC_END,
			    (unsigned int)func, (unsigned int)param0,
			    (unsigned int)param1, 0, 0);

			simple_lock(&timer_call_lock);
		} else
			break;

		call = TC(queue_first(queue));
	}

	if (!queue_end(queue, qe(call)))
		_set_delayed_call_timer(call);

	simple_unlock(&timer_call_lock);
}
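
timer_call_interrupt shows the standard callout discipline: dequeue under the lock, copy out the function and parameters, drop the lock for the (potentially slow or re-entrant) callout, then retake the lock and re-read the queue head rather than trusting a stale cursor. A hedged user-space sketch of that loop shape, with a mutex standing in for timer_call_lock:

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

struct callout {		/* illustrative, not XNU's layout */
	struct callout *next;
	uint64_t deadline;
	void (*func)(void *);
	void *arg;
};

static pthread_mutex_t callout_lock = PTHREAD_MUTEX_INITIALIZER;

static void expire_callouts(struct callout **headp, uint64_t now)
{
	pthread_mutex_lock(&callout_lock);
	while (*headp != NULL && (*headp)->deadline <= now) {
		struct callout *c = *headp;
		void (*func)(void *) = c->func;
		void *arg = c->arg;

		*headp = c->next;	/* dequeue while still locked */

		pthread_mutex_unlock(&callout_lock);
		func(arg);		/* run unlocked: may block or re-arm */
		pthread_mutex_lock(&callout_lock);
		/* re-read *headp: the callout may have changed the queue */
	}
	pthread_mutex_unlock(&callout_lock);
}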
Example #12
uint64_t
timer_queue_expire_with_options(
	mpqueue_head_t		*queue,
	uint64_t		deadline,
	boolean_t		rescan)
{
	timer_call_t	call = NULL;
	uint32_t tc_iterations = 0;
	DBG("timer_queue_expire(%p,)\n", queue);

	uint64_t cur_deadline = deadline;
	timer_queue_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		/* Upon processing one or more timer calls, refresh the
		 * deadline to account for time elapsed in the callout
		 */
		if (++tc_iterations > 1)
			cur_deadline = mach_absolute_time();

		if (call == NULL)
			call = TIMER_CALL(queue_first(&queue->head));

		if (call->soft_deadline <= cur_deadline) {
			timer_call_func_t		func;
			timer_call_param_t		param0, param1;

			TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0);
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
				call,
				call->soft_deadline,
				CE(call)->deadline,
				CE(call)->entry_time, 0);

			/* Bit 0 of the "soft" deadline indicates that
			 * this particular timer call is rate-limited
			 * and hence shouldn't be processed before its
			 * hard deadline.
			 */
			if ((call->soft_deadline & 0x1) &&
			    (CE(call)->deadline > cur_deadline)) {
				if (rescan == FALSE)
					break;
			}

			if (!simple_lock_try(&call->lock)) {
				/* case (2b) lock inversion, dequeue and skip */
				timer_queue_expire_lock_skips++;
				timer_call_entry_dequeue_async(call);
				call = NULL;
				continue;
			}

			timer_call_entry_dequeue(call);

			func = CE(call)->func;
			param0 = CE(call)->param0;
			param1 = CE(call)->param1;

			simple_unlock(&call->lock);
			timer_queue_unlock(queue);

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_CALLOUT | DBG_FUNC_START,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);

#if CONFIG_DTRACE
			DTRACE_TMR7(callout__start, timer_call_func_t, func,
			    timer_call_param_t, param0, unsigned, call->flags,
			    0, (call->ttd >> 32),
			    (unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif
			/* Maintain time-to-deadline in per-processor data
			 * structure for thread wakeup deadline statistics.
			 */
			uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
			*ttdp = call->ttd;
			(*func)(param0, param1);
			*ttdp = 0;
#if CONFIG_DTRACE
			DTRACE_TMR4(callout__end, timer_call_func_t, func,
			    param0, param1, call);
#endif

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_CALLOUT | DBG_FUNC_END,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
			call = NULL;
			timer_queue_lock_spin(queue);
		} else {
			if (__probable(rescan == FALSE)) {
Example #13
/*
 * Called when the CPU is idle.  It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
	cpu_data_t		*my_cpu		= current_cpu_datap();
	__unused uint32_t	cnum = my_cpu->cpu_number;
	uint64_t		ctime, rtime, itime;
#if CST_DEMOTION_DEBUG
	processor_t		cproc = my_cpu->cpu_processor;
	uint64_t		cwakeups = PROCESSOR_DATA(cproc, wakeups_issued_total);
#endif /* CST_DEMOTION_DEBUG */
	uint64_t esdeadline, ehdeadline;
	boolean_t do_process_pending_timers = FALSE;

	ctime = mach_absolute_time();
	esdeadline = my_cpu->rtclock_timer.queue.earliest_soft_deadline;
	ehdeadline = my_cpu->rtclock_timer.deadline;
	/* Determine if pending timers exist */
	if ((ctime >= esdeadline) && (ctime < ehdeadline) &&
	    ((ehdeadline - ctime) < idle_entry_timer_processing_hdeadline_threshold)) {
		idle_pending_timers_processed++;
		do_process_pending_timers = TRUE;
		goto machine_idle_exit;
	} else {
		TCOAL_DEBUG(0xCCCC0000, ctime, my_cpu->rtclock_timer.queue.earliest_soft_deadline, my_cpu->rtclock_timer.deadline, idle_pending_timers_processed, 0);
	}
    
	my_cpu->lcpu.state = LCPU_IDLE;
	DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
	MARK_CPU_IDLE(cnum);

	rtime = ctime - my_cpu->cpu_ixtime;

	my_cpu->cpu_rtime_total += rtime;
	machine_classify_interval(rtime, &my_cpu->cpu_rtimes[0], &cpu_rtime_bins[0], CPU_RTIME_BINS);
#if CST_DEMOTION_DEBUG
	uint32_t cl = 0, ch = 0;
	uint64_t c3res, c6res, c7res;
	rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch);
	c3res = ((uint64_t)ch << 32) | cl;
	rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY, &cl, &ch);
	c6res = ((uint64_t)ch << 32) | cl;
	rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch);
	c7res = ((uint64_t)ch << 32) | cl;
#endif

	if (pmInitDone) {
		/*
		 * Handle case where ml_set_maxbusdelay() or ml_set_maxintdelay()
		 * were called prior to the CPU PM kext being registered.  We do
		 * this here because this is the first point at which the values
		 * will be used: idle is where the decisions based on them are made.
		 */
		if (earlyMaxBusDelay != DELAY_UNSET)
			ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));
		if (earlyMaxIntDelay != DELAY_UNSET)
			ml_set_maxintdelay(earlyMaxIntDelay);
	}

	if (pmInitDone
	    && pmDispatch != NULL
	    && pmDispatch->MachineIdle != NULL)
		(*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
	else {
		/*
		 * If no power management, re-enable interrupts and halt.
		 * This will keep the CPU from spinning through the scheduler
		 * and will allow at least some minimal power savings (but it
		 * may cause problems in some MP configurations w.r.t. the APIC
		 * stopping during a GV3 transition).
		 */
		pal_hlt();
		/* Once woken, re-disable interrupts. */
		pal_cli();
	}

	/*
	 * Mark the CPU as running again.
	 */
	MARK_CPU_ACTIVE(cnum);
	DBGLOG(cpu_handle, cnum, MP_UNIDLE);
	my_cpu->lcpu.state = LCPU_RUN;
	uint64_t ixtime = my_cpu->cpu_ixtime = mach_absolute_time();
	itime = ixtime - ctime;
	my_cpu->cpu_idle_exits++;
	my_cpu->cpu_itime_total += itime;
	machine_classify_interval(itime, &my_cpu->cpu_itimes[0], &cpu_itime_bins[0], CPU_ITIME_BINS);
#if CST_DEMOTION_DEBUG
	cl = ch = 0;
	rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch);
	c3res = (((uint64_t)ch << 32) | cl) - c3res;
	rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY, &cl, &ch);
	c6res = (((uint64_t)ch << 32) | cl) - c6res;
	rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch);
	c7res = (((uint64_t)ch << 32) | cl) - c7res;

	uint64_t ndelta = itime - tmrCvt(c3res + c6res + c7res, tscFCvtt2n);
	KERNEL_DEBUG_CONSTANT(0xcead0000, ndelta, itime, c7res, c6res, c3res);
	if ((itime > 1000000) && (ndelta > 250000))
		KERNEL_DEBUG_CONSTANT(0xceae0000, ndelta, itime, c7res, c6res, c3res);
#endif

	machine_idle_exit:
	/*
	 * Re-enable interrupts.
	 */

	pal_sti();

	if (do_process_pending_timers) {
		TCOAL_DEBUG(0xBBBB0000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, idle_pending_timers_processed, 0);

		/* Adjust to reflect that this isn't truly a package idle exit */
		__sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1);
		lapic_timer_swi(); /* Trigger software timer interrupt */
		__sync_fetch_and_add(&my_cpu->lcpu.package->num_idle, 1);

		TCOAL_DEBUG(0xBBBB0000 | DBG_FUNC_END, ctime, esdeadline, idle_pending_timers_processed, 0, 0);
	}
#if CST_DEMOTION_DEBUG
	uint64_t nwakeups = PROCESSOR_DATA(cproc, wakeups_issued_total);

	if ((nwakeups == cwakeups) && (topoParms.nLThreadsPerPackage == my_cpu->lcpu.package->num_idle)) {
		KERNEL_DEBUG_CONSTANT(0xceaa0000, cwakeups, 0, 0, 0, 0);
	}
#endif    
}
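
machine_classify_interval above buckets each run and idle interval into per-CPU histogram bins (cpu_rtimes/cpu_itimes) for later analysis. A plausible sketch of such a classifier; the bin count and boundaries here are assumptions, not the kernel's:

#include <stdint.h>

#define NBINS 4

/* Assumed inclusive upper bound per bin, in the interval's units. */
static const uint64_t bin_bounds[NBINS] = { 1000, 10000, 100000, UINT64_MAX };

static void classify_interval(uint64_t interval, uint64_t counts[NBINS])
{
	for (int i = 0; i < NBINS; i++) {
		if (interval <= bin_bounds[i]) {
			counts[i]++;
			return;
		}
	}
}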
Example #14
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor) 
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

	    return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;

	    if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

	    cpu_load_info = (processor_cpu_load_info_t) info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

	    *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
	    *host = &realhost;

	    return (KERN_SUCCESS);
	}

	default:
	    result = cpu_info(flavor, cpu_id, info, count);
	    if (result == KERN_SUCCESS)
			*host = &realhost;		   

	    return (result);
	}
}
Example #15
kern_return_t
host_statistics64(
	host_t				host,
	host_flavor_t			flavor,
	host_info64_t			info,
	mach_msg_type_number_t		*count)
{
	uint32_t	i;
	
	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);
	
	switch(flavor) {

		case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
		{
			register processor_t		processor;
			register vm_statistics64_t	stat;
			vm_statistics64_data_t		host_vm_stat;

			if (*count < HOST_VM_INFO64_COUNT)
				return (KERN_FAILURE);

			processor = processor_list;
			stat = &PROCESSOR_DATA(processor, vm_stat);
			host_vm_stat = *stat;

			if (processor_count > 1) {
				simple_lock(&processor_list_lock);

				while ((processor = processor->processor_list) != NULL) {
					stat = &PROCESSOR_DATA(processor, vm_stat);

					host_vm_stat.zero_fill_count +=	stat->zero_fill_count;
					host_vm_stat.reactivations += stat->reactivations;
					host_vm_stat.pageins += stat->pageins;
					host_vm_stat.pageouts += stat->pageouts;
					host_vm_stat.faults += stat->faults;
					host_vm_stat.cow_faults += stat->cow_faults;
					host_vm_stat.lookups += stat->lookups;
					host_vm_stat.hits += stat->hits;
				}

				simple_unlock(&processor_list_lock);
			}

			stat = (vm_statistics64_t) info;

			stat->free_count = vm_page_free_count + vm_page_speculative_count;
			stat->active_count = vm_page_active_count;

			if (vm_page_local_q) {
				for (i = 0; i < vm_page_local_q_count; i++) {
					struct vpl	*lq;
				
					lq = &vm_page_local_q[i].vpl_un.vpl;

					stat->active_count += lq->vpl_count;
				}
			}
			stat->inactive_count = vm_page_inactive_count;
#if CONFIG_EMBEDDED
			stat->wire_count = vm_page_wire_count;
#else
			stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
			stat->zero_fill_count = host_vm_stat.zero_fill_count;
			stat->reactivations = host_vm_stat.reactivations;
			stat->pageins = host_vm_stat.pageins;
			stat->pageouts = host_vm_stat.pageouts;
			stat->faults = host_vm_stat.faults;
			stat->cow_faults = host_vm_stat.cow_faults;
			stat->lookups = host_vm_stat.lookups;
			stat->hits = host_vm_stat.hits;
		
			/* rev1 added "purgable" info */
			stat->purgeable_count = vm_page_purgeable_count;
			stat->purges = vm_page_purged_count;
		
			/* rev2 added "speculative" info */
			stat->speculative_count = vm_page_speculative_count;

			*count = HOST_VM_INFO64_COUNT;	

			return(KERN_SUCCESS);
		}

		case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
		{
			vm_extmod_statistics_t		out_extmod_statistics;

			if (*count < HOST_EXTMOD_INFO64_COUNT)
				return (KERN_FAILURE);

			out_extmod_statistics = (vm_extmod_statistics_t) info;
			*out_extmod_statistics = host_extmod_statistics;

			*count = HOST_EXTMOD_INFO64_COUNT;	

			return(KERN_SUCCESS);
		}

		default: /* If we didn't recognize the flavor, send to host_statistics */
			return(host_statistics(host, flavor, (host_info_t) info, count)); 
	}
}
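
From user space on macOS, the HOST_VM_INFO64 path above is reached with host_statistics64() against mach_host_self(); no special privileges are required. A minimal caller (error handling trimmed):

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	vm_statistics64_data_t vm_stat;
	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;

	kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
	    (host_info64_t)&vm_stat, &count);
	if (kr != KERN_SUCCESS)
		return 1;

	/* Page counts are natural_t; the event counters are 64-bit. */
	printf("free %u active %u inactive %u wired %u\n",
	    vm_stat.free_count, vm_stat.active_count,
	    vm_stat.inactive_count, vm_stat.wire_count);
	printf("faults %llu pageins %llu pageouts %llu\n",
	    (unsigned long long)vm_stat.faults,
	    (unsigned long long)vm_stat.pageins,
	    (unsigned long long)vm_stat.pageouts);
	return 0;
}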
Example #16
kern_return_t
host_statistics(
	host_t					host,
	host_flavor_t			flavor,
	host_info_t				info,
	mach_msg_type_number_t	*count)
{
	uint32_t	i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);
	
	switch(flavor) {

	case HOST_LOAD_INFO:
	{
		host_load_info_t	load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t) info;

		bcopy((char *) avenrun,
			  (char *) load_info->avenrun, sizeof avenrun);
		bcopy((char *) mach_factor,
			  (char *) load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_VM_INFO:
	{
		register processor_t		processor;
		register vm_statistics64_t	stat;
		vm_statistics64_data_t		host_vm_stat;
		vm_statistics_t			stat32;
		mach_msg_type_number_t		original_count;
                
		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count +=	stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		stat32 = (vm_statistics_t) info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);
		
		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl	*lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us !
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return (KERN_SUCCESS);
	}
                
	case HOST_CPU_LOAD_INFO:
	{
		register processor_t	processor;
		host_cpu_load_info_t	cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

#define GET_TICKS_VALUE(processor, state, timer)			 \
MACRO_BEGIN								 \
	cpu_load_info->cpu_ticks[(state)] +=				 \
		(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, timer)) \
				/ hz_tick_interval);			 \
MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_data_t	idle_temp;
			timer_t		idle_state;

			GET_TICKS_VALUE(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_temp = *idle_state;

			if (PROCESSOR_DATA(processor, current_state) != idle_state ||
			    timer_grab(&idle_temp) != timer_grab(idle_state))
				GET_TICKS_VALUE(processor, CPU_STATE_IDLE, idle_state);
			else {
				timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);

				cpu_load_info->cpu_ticks[CPU_STATE_IDLE] +=
					(uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXPIRED_TASK_INFO:
	{
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		task_power_info_t tinfo = (task_power_info_t)info;

		tinfo->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
		tinfo->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo->total_user = dead_task_statistics.total_user_time;
		tinfo->total_system = dead_task_statistics.total_system_time;

		return (KERN_SUCCESS);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
}
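
The HOST_CPU_LOAD_INFO case above, which sums tick totals across all processors, is likewise callable from unprivileged user space:

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	host_cpu_load_info_data_t load;
	mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;

	if (host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO,
	    (host_info_t)&load, &count) != KERN_SUCCESS)
		return 1;

	printf("user=%u system=%u idle=%u nice=%u\n",
	    load.cpu_ticks[CPU_STATE_USER],
	    load.cpu_ticks[CPU_STATE_SYSTEM],
	    load.cpu_ticks[CPU_STATE_IDLE],
	    load.cpu_ticks[CPU_STATE_NICE]);
	return 0;
}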
Example #17
kern_return_t
processor_info(
	processor_t	processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor) 
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

	    return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_t		idle_state;
		uint64_t	idle_time_snapshot1, idle_time_snapshot2;
		uint64_t	idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each were last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
				timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
				   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))){
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;
				
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

	    *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
	    *host = &realhost;

	    return (KERN_SUCCESS);
	}

	default:
	    result = cpu_info(flavor, cpu_id, info, count);
	    if (result == KERN_SUCCESS)
			*host = &realhost;		   

	    return (result);
	}
}
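
The per-processor PROCESSOR_CPU_LOAD_INFO flavor is exposed to user space through host_processor_info(), which returns one record per CPU in a Mach-allocated array that the caller must release with vm_deallocate(). A minimal caller:

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	processor_info_array_t info;
	mach_msg_type_number_t info_count;
	natural_t cpu_count;

	kern_return_t kr = host_processor_info(mach_host_self(),
	    PROCESSOR_CPU_LOAD_INFO, &cpu_count, &info, &info_count);
	if (kr != KERN_SUCCESS)
		return 1;

	processor_cpu_load_info_t load = (processor_cpu_load_info_t)info;
	for (natural_t i = 0; i < cpu_count; i++)
		printf("cpu%u: user=%u system=%u idle=%u\n", i,
		    load[i].cpu_ticks[CPU_STATE_USER],
		    load[i].cpu_ticks[CPU_STATE_SYSTEM],
		    load[i].cpu_ticks[CPU_STATE_IDLE]);

	vm_deallocate(mach_task_self(), (vm_address_t)info,
	    info_count * sizeof(integer_t));
	return 0;
}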