Code example #1
File: processor.c Project: aglab2/darwin-xnu
/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t		pset,
	pset_node_t			node)
{
	if (pset != &pset0) {
		/* Scheduler state for pset0 initialized in sched_init() */
		SCHED(pset_init)(pset);
		SCHED(rt_init)(pset);
	}

	queue_init(&pset->active_queue);
	queue_init(&pset->idle_queue);
	queue_init(&pset->idle_secondary_queue);
	queue_init(&pset->unused_queue);
	pset->online_processor_count = 0;
	pset->active_processor_count = 0;
	pset->load_average = 0;
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset->cpu_bitmask = 0;
	pset->recommended_bitmask = ~0ULL;
	pset->pending_AST_cpu_mask = 0;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	pset->pending_deferred_AST_cpu_mask = 0;
#endif
	pset->pending_spill_cpu_mask = 0;
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
	pset->pset_cluster_type = PSET_SMP;
	pset->pset_cluster_id = 0;
}
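
Every example on this page reaches the scheduler through the SCHED() macro. In xnu this is an indirection through a scheduler dispatch table selected at boot; a minimal sketch of the pattern, abbreviated from the real sched_dispatch_table in osfmk/kern/sched_prim.h:

/* Abbreviated sketch of xnu's scheduler dispatch indirection; the real
 * table carries many more entries than shown here. */
struct sched_dispatch_table {
	void		(*pset_init)(processor_set_t pset);
	void		(*processor_init)(processor_t processor);
	boolean_t	(*processor_queue_empty)(processor_t processor);
	/* ... many further entries in the real table ... */
};

extern const struct sched_dispatch_table *sched_current_dispatch;

/* SCHED(f) resolves f against whichever scheduler was chosen at boot */
#define SCHED(f)	(sched_current_dispatch->f)
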
Code example #2
File: syscall_subr.c Project: JackieXie168/xnu
boolean_t
swtch_pri(
__unused	struct swtch_pri_args *args)
{
	register processor_t	myprocessor;
	boolean_t				result;

	disable_preemption();
	myprocessor = current_processor();
	if (SCHED(processor_queue_empty)(myprocessor) && rt_runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(thread_depress_time);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	return (result);
}
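
swtch_pri() above is the kernel side of a Mach trap; from user space it is typically reached through the swtch_pri() call declared in <mach/mach_traps.h>. A hypothetical user-level use, assuming that declaration:

#include <mach/mach_traps.h>	/* assumed to declare: boolean_t swtch_pri(int) */

/*
 * Hypothetical helper: yield the CPU with a brief priority depression.
 * swtch_pri() returns TRUE if other runnable threads remained.
 */
static void
yield_if_contended(void)
{
	if (!swtch_pri(0)) {
		/* nothing else was runnable; back off instead of spinning */
	}
}
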
Code example #3
File: host.c Project: DJHartley/xnu
kern_return_t
get_sched_statistics( 
		struct _processor_statistics_np *out, 
		uint32_t *count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	simple_lock(&processor_list_lock);
	
	if (*count < (processor_count + 2) * sizeof(struct _processor_statistics_np)) { /* One for RT, one for FS */
		simple_unlock(&processor_list_lock);
		return KERN_FAILURE;
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics *stats = &processor->processor_data.sched_stats;

		out->ps_cpuid 			= processor->cpu_id;
		out->ps_csw_count 		= stats->csw_count;
		out->ps_preempt_count 		= stats->preempt_count;
		out->ps_preempted_rt_count 	= stats->preempted_rt_count;
		out->ps_preempted_by_rt_count 	= stats->preempted_by_rt_count;
		out->ps_rt_sched_count		= stats->rt_sched_count;
		out->ps_interrupt_count 	= stats->interrupt_count;
		out->ps_ipi_count 		= stats->ipi_count;
		out->ps_timer_pop_count 	= stats->timer_pop_count;
		out->ps_runq_count_sum 		= SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions	= stats->idle_transitions;
		out->ps_quantum_timer_expirations	= stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t) (processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* And include RT Queue information */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
	out++;
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	/* And include Fair Share Queue information at the end */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-2);
	out->ps_runq_count_sum = SCHED(fairshare_runq_stats_count_sum)();
	*count += (uint32_t)sizeof(struct _processor_statistics_np);
	
	return KERN_SUCCESS;
}
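
Note that the count parameter is in bytes, both in and out: the caller must provide room for one record per processor plus the two trailing summary rows (realtime and fair-share). A hypothetical in-kernel caller sketch, where MAX_CPUS_ASSUMED is invented for illustration:

/* Hypothetical caller sketch; MAX_CPUS_ASSUMED is not a real xnu constant. */
#define MAX_CPUS_ASSUMED	64

static void
dump_sched_stats(void)
{
	static struct _processor_statistics_np stats[MAX_CPUS_ASSUMED + 2];
	uint32_t bytes = sizeof(stats);
	uint32_t i;

	if (get_sched_statistics(stats, &bytes) != KERN_SUCCESS)
		return;

	for (i = 0; i < bytes / sizeof(stats[0]); i++) {
		/* ps_cpuid is a CPU id, or -1 (realtime) / -2 (fair-share) */
		printf("cpu %d: csw %u preempt %u\n",
		       (int)stats[i].ps_cpuid,
		       (unsigned)stats[i].ps_csw_count,
		       (unsigned)stats[i].ps_preempt_count);
	}
}
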
Code example #4
File: locks.c Project: Apple-FOSS-Mirror/xnu
/*
 * lck_rw_clear_promotion: Undo priority promotions when the last RW
 * lock is released by a thread (if a promotion was active)
 */
void lck_rw_clear_promotion(thread_t thread)
{
	assert(thread->rwlock_count == 0);

	/* Cancel any promotions if the thread had actually blocked while holding a RW lock */
	spl_t s = splsched();

	thread_lock(thread);

	if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
		thread->sched_flags &= ~TH_SFLAG_RW_PROMOTED;

		if (thread->sched_flags & TH_SFLAG_PROMOTED) {
			/* Thread still has a mutex promotion */
		} else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
							      thread->sched_pri, DEPRESSPRI, 0, 0, 0);
			
			set_sched_pri(thread, DEPRESSPRI);
		} else {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
								  thread->sched_pri, thread->priority, 0, 0, 0);
			
			SCHED(compute_priority)(thread, FALSE);
		}
	}

	thread_unlock(thread);
	splx(s);
}
Code example #5
File: thread_act.c Project: Apple-FOSS-Mirror/xnu
/*
 * install_special_handler_locked:
 *
 *	Do the work of installing the special_handler.
 *
 *	Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
	thread_t				thread)
{
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act.  */
	for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
		continue;

	if (rh != &thread->special_handler.next)
		*rh = &thread->special_handler;

	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		SCHED(compute_priority)(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread())
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
	}
}
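
The for (rh = &thread->handlers; *rh; rh = &(*rh)->next) loop above is the classic C pointer-to-pointer walk for locating a list's tail slot without special-casing the empty list (pset_create in example #6 uses the same idiom). A self-contained sketch of the idiom:

#include <stddef.h>

struct node {
	struct node	*next;
};

/* Append n at the tail of *head; an empty list needs no special case,
 * because slot then points at head itself. */
static void
list_append(struct node **head, struct node *n)
{
	struct node **slot;

	for (slot = head; *slot != NULL; slot = &(*slot)->next)
		continue;

	n->next = NULL;
	*slot = n;
}
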
Code example #6
File: processor.c Project: aglab2/darwin-xnu
processor_set_t
pset_create(
	pset_node_t			node)
{
	/* some schedulers do not support multiple psets */
	if (SCHED(multiple_psets_enabled) == FALSE)
		return processor_pset(master_processor);

	processor_set_t		*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;

		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return (pset);
}
Code example #7
File: machine.c Project: wzw19890321/xnu-1
/*
 * Called with interrupts disabled.
 */
void
processor_doshutdown(
	processor_t			processor)
{
	thread_t			old_thread, self = current_thread();
	processor_t			prev;
	processor_set_t			pset;

	/*
	 *	Get onto the processor to shutdown
	 */
	prev = thread_bind(processor);
	thread_block(THREAD_CONTINUE_NULL);

	assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
	if (dtrace_cpu_state_changed_hook)
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
#endif

	ml_cpu_down();

#if HIBERNATION
	if (processor_avail_count < 2) {
		hibernate_vm_lock();
		hibernate_vm_unlock();
	}
#endif

	pset = processor->processor_set;
	pset_lock(pset);
	processor->state = PROCESSOR_OFF_LINE;
	--pset->online_processor_count;
	(void)hw_atomic_sub(&processor_avail_count, 1);
	commpage_update_active_cpus();
	SCHED(processor_queue_shutdown)(processor);
	/* pset lock dropped */

	/*
	 * Continue processor shutdown in shutdown context.
	 *
	 * We save the current context in machine_processor_shutdown in such a way
	 * that when this thread is next invoked it will return from here instead of
	 * from the machine_switch_context() in thread_invoke like a normal context switch.
	 *
	 * As such, 'old_thread' is neither the idle thread nor the current thread - it's whatever
	 * thread invoked back to this one. (Usually, it's another processor's idle thread.)
	 *
	 * TODO: Make this a real thread_run of the idle_thread, so we don't have to keep this in sync
	 * with thread_invoke.
	 */
	thread_bind(prev);
	old_thread = machine_processor_shutdown(self, processor_offline, processor);

	thread_dispatch(old_thread, self);
}
Code example #8
File: locks.c Project: Apple-FOSS-Mirror/xnu
/*
 * Routine: 	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t			*lck,
	thread_t			holder)
{
	thread_t		thread = current_thread();
	lck_mtx_t		*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	assert(mutex->lck_mtx_waiters > 0);
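	/* The sleep event is the address of the last 32-bit word of the lock
	 * structure itself, so no separate event object has to be stored. */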
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	if (thread->promotions > 0) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (	--thread->promotions == 0				&&
				(thread->sched_flags & TH_SFLAG_PROMOTED)		) {
			thread->sched_flags &= ~TH_SFLAG_PROMOTED;

			if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
				/* Thread still has a RW lock promotion */
			} else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						  thread->sched_pri, DEPRESSPRI, 0, lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
															DBG_FUNC_NONE,
							thread->sched_pri, thread->priority,
									0, lck, 0);
				}

				SCHED(compute_priority)(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
Code example #9
File: processor.c Project: aglab2/darwin-xnu
/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t			processor,
	int					cpu_id,
	processor_set_t		pset)
{
	spl_t		s;

	if (processor != master_processor) {
		/* Scheduler state for master_processor initialized in sched_init() */
		SCHED(processor_init)(processor);
	}

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor_state_update_idle(processor);
	processor->starting_pri = MINPRI;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->quantum_end = UINT64_MAX;
	processor->deadline = UINT64_MAX;
	processor->first_timeslice = FALSE;
	processor->processor_primary = processor; /* no SMT relationship known at this point */
	processor->processor_secondary = NULL;
	processor->is_SMT = FALSE;
	processor->is_recommended = (pset->recommended_bitmask & (1ULL << cpu_id)) ? TRUE : FALSE;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	s = splsched();
	pset_lock(pset);
	bit_set(pset->cpu_bitmask, cpu_id);
	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);
	splx(s);

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	assert(cpu_id < MAX_SCHED_CPUS);
	processor_array[cpu_id] = processor;
	simple_unlock(&processor_list_lock);
}
Code example #10
File: sched_traditional.c Project: wzw19890321/xnu-1
/*
 *	sched_traditional_processor_queue_shutdown:
 *
 *	Shutdown a processor run queue by
 *	re-dispatching non-bound threads.
 *
 *	Associated pset must be locked, and is
 *	returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
	processor_set_t         pset    = processor->processor_set;
	run_queue_t             rq      = runq_for_processor(processor);
	queue_t                 queue   = rq->queues + rq->highq;
	int                     pri     = rq->highq;
	int                     count   = rq->count;
	thread_t                next, thread;
	queue_head_t            tqueue;

	queue_init(&tqueue);

	while (count > 0) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				enqueue_tail(&tqueue, (queue_entry_t)thread);
			}
			count--;

			thread = next;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
Code example #11
File: syscall_subr.c Project: JackieXie168/xnu
static void
swtch_continue(void)
{
	register processor_t	myprocessor;
	boolean_t				result;

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}
Code example #12
File: thread_policy.c Project: JackieXie168/xnu
/*
 * Reset thread to default state in preparation for termination
 * Called with thread mutex locked
 *
 * Always called on current thread, so we don't need a run queue remove
 */
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	assert(thread == current_thread());

	s = splsched();
	thread_lock(thread);

	assert_thread_sched_count(thread);

	if (thread->sched_flags & TH_SFLAG_FAILSAFE)
		sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);

	assert_thread_sched_count(thread);

	if (thread->sched_flags & TH_SFLAG_THROTTLED)
		sched_set_thread_throttled(thread, FALSE);

	assert_thread_sched_count(thread);

	assert(thread->BG_COUNT == 0);

	/* At this point, the various demotions should be inactive */
	assert(!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK));
	assert(!(thread->sched_flags & TH_SFLAG_THROTTLED));
	assert(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK));

	/* Reset thread back to task-default basepri and mode  */
	sched_mode_t newmode = SCHED(initial_thread_sched_mode)(thread->task);

	sched_set_thread_mode(thread, newmode);

	thread->importance = 0;

	sched_set_thread_base_priority(thread, thread->task_priority);

	/* Prevent further changes to thread base priority or mode */
	thread->policy_reset = 1;

	assert(thread->BG_COUNT == 0);
	assert_thread_sched_count(thread);

	thread_unlock(thread);
	splx(s);
}
Code example #13
File: machine.c Project: JackieXie168/xnu
/*
 * Called with interrupts disabled.
 */
void
processor_doshutdown(
	processor_t			processor)
{
	thread_t			old_thread, self = current_thread();
	processor_t			prev;
	processor_set_t			pset;

	/*
	 *	Get onto the processor to shutdown
	 */
	prev = thread_bind(processor);
	thread_block(THREAD_CONTINUE_NULL);

	assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
	if (dtrace_cpu_state_changed_hook)
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
#endif

	ml_cpu_down();

#if HIBERNATION
	if (processor_avail_count < 2) {
		hibernate_vm_lock();
		hibernate_vm_unlock();
	}
#endif

	pset = processor->processor_set;
	pset_lock(pset);
	processor->state = PROCESSOR_OFF_LINE;
	--pset->online_processor_count;
	(void)hw_atomic_sub(&processor_avail_count, 1);
	commpage_update_active_cpus();
	SCHED(processor_queue_shutdown)(processor);
	/* pset lock dropped */

	/*
	 *	Continue processor shutdown in shutdown context.
	 */
	thread_bind(prev);
	old_thread = machine_processor_shutdown(self, processor_offline, processor);

	thread_dispatch(old_thread, self);
}
Code example #14
File: sched_traditional.c Project: wzw19890321/xnu-1
/*
 *	sched_traditional_choose_thread_from_runq:
 *
 *	Locate a thread to execute from the processor run queue
 *	and return it.  Only choose a thread with greater or equal
 *	priority.
 *
 *	Associated pset must be locked.  Returns THREAD_NULL
 *	on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
                                          processor_t     processor,
                                          run_queue_t     rq,
                                          int             priority)
{
	queue_t         queue   = rq->queues + rq->highq;
	int             pri     = rq->highq;
	int             count   = rq->count;
	thread_t        thread;

	while (count > 0 && pri >= priority) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor == PROCESSOR_NULL ||
			    thread->bound_processor == processor) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}
Code example #15
int prio_sched_destroy(struct sched_t* scheduler)
{
	int toReturn = 0, i;
	queue_t** schd_queue = SCHED_QUEUE(scheduler);

	for (i = 0; i < LOWEST_PRIO; ++i)
	{
		if (queue_size(schd_queue[i]) == 0)
		{
			queue_destroy(schd_queue[i]);
			schd_queue[i] = NULL;
		}
		else
		{
			toReturn = -1;
		}
	}

	free(SCHED(scheduler));
	free(scheduler);

	return toReturn;
}
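
Example #15 comes from a different project, and its SCHED/SCHED_QUEUE macros are not shown. Purely as an assumption for illustration, a layout consistent with the code above would be an opaque scheduler wrapping a private array of per-priority queues:

/* Hypothetical definitions matching the usage in prio_sched_destroy();
 * none of these names are confirmed by the original project. */
#define LOWEST_PRIO	32	/* assumed count of priority levels */

struct prio_sched_private {
	queue_t	*queues[LOWEST_PRIO];	/* one FIFO per priority level */
};

struct sched_t {
	void	*impl;	/* assumed opaque pointer to the private state */
};

#define SCHED(s)	((struct prio_sched_private *)(s)->impl)
#define SCHED_QUEUE(s)	(SCHED(s)->queues)

Under this reading, free(SCHED(scheduler)) releases the private queue array while free(scheduler) releases the wrapper, and a non-empty queue makes the function report failure (-1) while still freeing the rest.
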
Code example #16
File: thread.c Project: CptFrazz/xnu
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	thread_continue_t		continuation,
	int						options,
#define TH_OPTION_NONE		0x00
#define TH_OPTION_NOCRED	0x01
#define TH_OPTION_NOSUSP	0x02
	thread_t				*out_thread)
{
	thread_t				new_thread;
	static thread_t			first_thread = THREAD_NULL;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == THREAD_NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
	if (new_thread->uthread == NULL) {
		zfree(thread_zone, new_thread);
		return (KERN_RESOURCE_SHORTAGE);
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		void *ut = new_thread->uthread;

		new_thread->uthread = NULL;
		/* cred free may not be necessary */
		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
		uthread_cred_free(ut);
		uthread_zone_free(ut);
#endif  /* MACH_BSD */

		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	if (	!parent_task->active || parent_task->halting ||
			((options & TH_OPTION_NOSUSP) != 0 &&
			 	parent_task->suspend_count > 0)	||
			(parent_task->thread_count >= task_threadmax &&
				parent_task != kernel_task)		) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_internal(parent_task);

	if (new_thread->task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
		/*
		 * This task has a per-thread CPU limit; make sure this new thread
		 * gets its limit set too, before it gets out of the kernel.
		 */
		set_astledger(new_thread);
	}
	new_thread->t_threadledger = LEDGER_NULL;	/* per thread ledger is not inherited */
	new_thread->t_ledger = new_thread->task->ledger;
	if (new_thread->t_ledger)
		ledger_reference(new_thread->t_ledger);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;
	
	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
	/*
	 * If parent task has any reservations, they need to be propagated to this
	 * thread.
	 */
	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ? 
		THREAD_PMC_FLAG : 0U;
#endif

	/* Set the thread's scheduling parameters */
	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
	new_thread->sched_flags = 0;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
#if CONFIG_EMBEDDED 
	if (new_thread->priority < MAXPRI_THROTTLE) {
		new_thread->priority = MAXPRI_THROTTLE;
	}
#endif /* CONFIG_EMBEDDED */
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
#if CONFIG_EMBEDDED
	new_thread->saved_importance = new_thread->importance;
	/* apple ios daemon starts all threads in darwin background */
	if (parent_task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
		/* Cannot use generic routines here so apply darwin background directly */
		new_thread->policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
		/* set thread self backgrounding */
		new_thread->appliedstate.hw_bg = new_thread->policystate.hw_bg;
		/* priority will get recomputed suitably a bit later */
		new_thread->importance = INT_MIN;
		/* to avoid changes to many pri compute routines, set the effect of those here */
		new_thread->priority = MAXPRI_THROTTLE;
	}
#endif /* CONFIG_EMBEDDED */

#if defined(CONFIG_SCHED_TRADITIONAL)
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
#endif
	SCHED(compute_priority)(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
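
A negative priority argument means "inherit the task's default priority" (see the (priority < 0) branch above), and the TH_OPTION_* bits tune credential and suspend handling. A hypothetical wrapper in the spirit of xnu's thread_create() family (thread_create_internal is static, so a real caller would live in the same file):

/* Hypothetical convenience wrapper; the real xnu wrappers also arrange a
 * continuation and start the thread as separate steps. */
static kern_return_t
thread_create_default(task_t task, thread_t *out_thread)
{
	/* priority -1 => inherit parent_task->priority; no special options */
	return thread_create_internal(task, -1, THREAD_CONTINUE_NULL,
	                              TH_OPTION_NONE, out_thread);
}
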
Code example #17
File: thread.c Project: CptFrazz/xnu
kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int						state, flags;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
	    register thread_basic_info_t	basic_info;

	    if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

	    basic_info = (thread_basic_info_t) thread_info_out;

	    s = splsched();
	    thread_lock(thread);

	    /* fill in info */

	    thread_read_times(thread, &basic_info->user_time,
									&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (SCHED(can_update_priority)(thread))
			SCHED(update_priority)(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage = 0;
#if defined(CONFIG_SCHED_TRADITIONAL)
		if (sched_tick_interval) {
			basic_info->cpu_usage =	(integer_t)(((uint64_t)thread->cpu_usage
										* TH_USAGE_SCALE) /	sched_tick_interval);
			basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
		}
#endif
		
		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

	    flags = 0;
		if (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor->idle_thread == thread)
			flags |= TH_FLAGS_IDLE;

	    if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

	    state = 0;
	    if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
	    else if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
	    else if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
	    else if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
	    else if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

	    basic_info->run_state = state;
	    basic_info->flags = flags;

	    basic_info->suspend_count = thread->user_stop_count;

	    thread_unlock(thread);
	    splx(s);

	    *thread_info_count = THREAD_BASIC_INFO_COUNT;

	    return (KERN_SUCCESS);
	}
	else if (flavor == THREAD_IDENTIFIER_INFO) {
	    register thread_identifier_info_t	identifier_info;

	    if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

	    identifier_info = (thread_identifier_info_t) thread_info_out;

	    s = splsched();
	    thread_lock(thread);

	    identifier_info->thread_id = thread->thread_id;
	    identifier_info->thread_handle = thread->machine.cthread_self;
	    if(thread->task->bsd_info) {
	    	identifier_info->dispatch_qaddr =  identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
	    } else {
		    thread_unlock(thread);
		    splx(s);
		    return KERN_INVALID_ARGUMENT;
	    }

	    thread_unlock(thread);
	    splx(s);
	    return KERN_SUCCESS;
	}
	else if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

	    s = splsched();
		thread_lock(thread);

	    if (thread->sched_mode != TH_MODE_TIMESHARE) {
	    	thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
	    }

		ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority =	thread->max_priority;

		thread_unlock(thread);
	    splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);	
	}
	else if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}
	else if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;
		
		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

	    s = splsched();
		thread_lock(thread);

	    if (thread->sched_mode == TH_MODE_TIMESHARE) {
	    	thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
	    }

		rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
		
		rr_info->max_priority = thread->max_priority;
	    rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		thread_unlock(thread);
	    splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);	
	}

	return (KERN_INVALID_ARGUMENT);
}
Code example #18
File: host.c Project: DJHartley/xnu
kern_return_t
host_info(
	host_t					host,
	host_flavor_t			flavor,
	host_info_t				info,
	mach_msg_type_number_t	*count)
{

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);
	
	switch (flavor) {

	case HOST_BASIC_INFO:
	{
		register host_basic_info_t	basic_info;
		register int				master_id;

		/*
		 *	Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT)
			return (KERN_FAILURE);

		basic_info = (host_basic_info_t) info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->max_cpus = machine_info.max_cpus;
		basic_info->avail_cpus = processor_avail_count;
		master_id = master_processor->cpu_id;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
			basic_info->logical_cpu = machine_info.logical_cpu;
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_SCHED_INFO:
	{
		register host_sched_info_t	sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 *	Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT)
			return (KERN_FAILURE);

		sched_info = (host_sched_info_t) info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = 
			sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_RESOURCE_SIZES:
	{ 
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT)
			return (KERN_FAILURE);

		/* XXX Fail until ledgers are implemented */
		return (KERN_INVALID_ARGUMENT);
	}
                  
	case HOST_PRIORITY_INFO:
	{
		register host_priority_info_t	priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT)
			return (KERN_FAILURE);

		priority_info = (host_priority_info_t) info;

		priority_info->kernel_priority	= MINPRI_KERNEL;
		priority_info->system_priority	= MINPRI_KERNEL;
		priority_info->server_priority	= MINPRI_RESERVED;
		priority_info->user_priority	= BASEPRI_DEFAULT;
		priority_info->depress_priority	= DEPRESSPRI;
		priority_info->idle_priority	= IDLEPRI;
		priority_info->minimum_priority	= MINPRI_USER;
		priority_info->maximum_priority	= MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS:
	{
		*count = 0;
		return (KERN_SUCCESS);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
}