Code Example #1
kern_return_t
thread_policy_get(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	*count,
	boolean_t				*get_default)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t		timeshare = TRUE;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			if (	!(thread->sched_mode & TH_MODE_REALTIME)	&&
					!(thread->safe_mode & TH_MODE_REALTIME)			) {
				if (!(thread->sched_mode & TH_MODE_FAILSAFE))
					timeshare = (thread->sched_mode & TH_MODE_TIMESHARE) != 0;
				else
					timeshare = (thread->safe_mode & TH_MODE_TIMESHARE) != 0;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			info->timeshare = timeshare;
		}

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			if (	(thread->sched_mode & TH_MODE_REALTIME)	||
					(thread->safe_mode & TH_MODE_REALTIME)		) {
				info->period = thread->realtime.period;
				info->computation = thread->realtime.computation;
				info->constraint = thread->realtime.constraint;
				info->preemptible = thread->realtime.preemptible;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*get_default) {
			info->period = 0;
			info->computation = std_quantum / 2;
			info->constraint = std_quantum;
			info->preemptible = TRUE;
		}

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			info->importance = thread->importance;

			thread_unlock(thread);
			splx(s);
		}
		else
			info->importance = 0;

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t		info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (*count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t)policy_info;

		if (!(*get_default))
			info->affinity_tag = thread_affinity_get(thread);
		else
			info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}
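
The routine above sits behind the user-visible thread_policy_get() Mach call. As a hedged illustration (none of this comes from the listing itself, and the error handling and output are my own), a user-space program on a Mach-based system could query the extended policy of its current thread roughly like this:

#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_policy.h>

/* Minimal sketch: ask whether the calling thread is timeshare.
 * get_default = FALSE requests the thread's current setting; the kernel
 * flips it to TRUE when only the default policy applies. */
int
main(void)
{
	thread_extended_policy_data_t	info;
	mach_msg_type_number_t		count = THREAD_EXTENDED_POLICY_COUNT;
	boolean_t			get_default = FALSE;
	kern_return_t			kr;

	kr = thread_policy_get(mach_thread_self(), THREAD_EXTENDED_POLICY,
			(thread_policy_t)&info, &count, &get_default);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "thread_policy_get: %s\n", mach_error_string(kr));
		return 1;
	}
	printf("timeshare=%d (default=%d)\n", info.timeshare, get_default);
	return 0;
}
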
Code Example #2
kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;   
	kern_return_t		result;
	spl_t				s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return (result);
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
Code Example #3
kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int						state, flags;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
	    register thread_basic_info_t	basic_info;

	    if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

	    basic_info = (thread_basic_info_t) thread_info_out;

	    s = splsched();
	    thread_lock(thread);

	    /* fill in info */

	    thread_read_times(thread, &basic_info->user_time,
									&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage =	(integer_t)(((uint64_t)thread->cpu_usage
									* TH_USAGE_SCALE) /	sched_tick_interval);
		basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;

		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

	    flags = 0;
		if (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor->idle_thread == thread)
			flags |= TH_FLAGS_IDLE;

	    if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

	    state = 0;
	    if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
	    else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
	    else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
	    else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
	    else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

	    basic_info->run_state = state;
	    basic_info->flags = flags;

	    basic_info->suspend_count = thread->user_stop_count;

	    thread_unlock(thread);
	    splx(s);

	    *thread_info_count = THREAD_BASIC_INFO_COUNT;

	    return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_IDENTIFIER_INFO) {
	    register thread_identifier_info_t	identifier_info;

	    if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

	    identifier_info = (thread_identifier_info_t) thread_info_out;

	    s = splsched();
	    thread_lock(thread);

	    identifier_info->thread_id = thread->thread_id;
#if defined(__ppc__) || defined(__arm__)
	    identifier_info->thread_handle = thread->machine.cthread_self;
#else
	    identifier_info->thread_handle = thread->machine.pcb->cthread_self;
#endif
	    if(thread->task->bsd_info) {
	    	identifier_info->dispatch_qaddr =  identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
	    } else {
		    thread_unlock(thread);
		    splx(s);
		    return KERN_INVALID_ARGUMENT;
	    }

	    thread_unlock(thread);
	    splx(s);
	    return KERN_SUCCESS;
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

	    s = splsched();
		thread_lock(thread);

	    if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
	    	thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
	    }

		ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority =	thread->max_priority;

		thread_unlock(thread);
	    splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);	
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

	    s = splsched();
		thread_lock(thread);

	    if (thread->sched_mode & TH_MODE_TIMESHARE) {
	    	thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
	    }

		rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		rr_info->max_priority = thread->max_priority;
	    rr_info->quantum = std_quantum_us / 1000;

		thread_unlock(thread);
	    splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);	
	}

	return (KERN_INVALID_ARGUMENT);
}
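
thread_info_internal() backs the thread_info() call exported to user space. A minimal sketch of the THREAD_BASIC_INFO path as seen from a caller (the output formatting is my own; TH_USAGE_SCALE and the struct fields come from <mach/thread_info.h>):

#include <stdio.h>
#include <mach/mach.h>

/* Sketch: fetch THREAD_BASIC_INFO for the calling thread and print its
 * user/system time plus CPU usage scaled by TH_USAGE_SCALE. */
int
main(void)
{
	thread_basic_info_data_t	info;
	mach_msg_type_number_t		count = THREAD_BASIC_INFO_COUNT;
	kern_return_t			kr;

	kr = thread_info(mach_thread_self(), THREAD_BASIC_INFO,
			(thread_info_t)&info, &count);
	if (kr != KERN_SUCCESS)
		return 1;

	printf("user %d.%06ds  system %d.%06ds  cpu_usage %d/%d\n",
			info.user_time.seconds, info.user_time.microseconds,
			info.system_time.seconds, info.system_time.microseconds,
			info.cpu_usage, TH_USAGE_SCALE);
	return 0;
}
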
Code Example #4
File: ipc_mqueue.c  Project: Apple-FOSS-Mirror/xnu
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	register ipc_mqueue_t 	mqueue,
	register ipc_kmsg_t		kmsg)
{
	spl_t s;

	/*
	 *	While the msg queue	is locked, we have control of the
	 *  kmsg, so the ref in	it for the port is still good.
	 *
	 *	Check for a receiver for the message.
	 */
	s = splsched();
	imq_lock(mqueue);
	for (;;) {
		wait_queue_t waitq = &mqueue->imq_wait_queue;
		thread_t receiver;
		mach_msg_size_t msize;

		receiver = wait_queue_wakeup64_identity_locked(
							waitq,
							IPC_MQUEUE_RECEIVE,
							THREAD_AWAKENED,
							FALSE);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/* 
			 * no receivers; queue kmsg
			 */
			assert(mqueue->imq_msgcount > 0);
			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
			break;
		}
	
		/*
		 * If the receiver waited with a facility not directly
		 * related to Mach messaging, then it isn't prepared to get
		 * handed the message directly.  Just set it running, and
		 * go look for another thread that can.
		 */
		if (receiver->ith_state != MACH_RCV_IN_PROGRESS) {
				  thread_unlock(receiver);
				  continue;
		}

	
		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		msize =	ipc_kmsg_copyout_size(kmsg, receiver->map);
		if (receiver->ith_msize <
				(msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(receiver), receiver->ith_option))) {
			receiver->ith_msize = msize;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for the special
		 * too-large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {

			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
			thread_unlock(receiver);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue);
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_receiver_name = mqueue->imq_receiver_name;
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
	}

	imq_unlock(mqueue);
	splx(s);
	
	current_task()->messages_sent++;
	return;
}
Code Example #5
/*
 *	Routine:	semaphore_signal_internal
 *
 *		Signals the semaphore as direct.  
 *	Assumptions:
 *		Semaphore is locked.
 */
kern_return_t
semaphore_signal_internal(
	semaphore_t		semaphore,
	thread_t		thread,
	int				options)
{
	kern_return_t kr;
	spl_t  spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore->active) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = wait_queue_wakeup64_thread_locked(
			        	&semaphore->wait_queue,
					SEMAPHORE_EVENT,
					thread,
					THREAD_AWAKENED,
					TRUE);  /* unlock? */
		} else {
			semaphore_unlock(semaphore);
			kr = KERN_NOT_WAITING;
		}
		splx(spl_level);
		return kr;
	} 

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = wait_queue_wakeup64_all_locked(
					&semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED,
					TRUE);		/* unlock? */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST)
				semaphore->count++;
			semaphore_unlock(semaphore);
			kr = KERN_SUCCESS;
		}
		splx(spl_level);
		return kr;
	}
	
	if (semaphore->count < 0) {
		if (wait_queue_wakeup64_one_locked(
					&semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED,
					FALSE) == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else
			semaphore->count = 0;  /* all waiters gone */
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
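
semaphore_signal_internal() above is the common backend for the user-visible semaphore_signal(), semaphore_signal_all() and semaphore_signal_thread() calls. A hedged user-space sketch of the plain signal/wait pairing (the thread setup is ordinary pthreads; only the semaphore calls are Mach):

#include <pthread.h>
#include <stdio.h>
#include <mach/mach.h>
#include <mach/semaphore.h>

static semaphore_t sem;

/* The waiter blocks in semaphore_wait() until the main thread signals. */
static void *
waiter(void *arg)
{
	(void)arg;
	semaphore_wait(sem);		/* returns KERN_SUCCESS once signalled */
	puts("woken");
	return NULL;
}

int
main(void)
{
	pthread_t	t;

	/* The count starts at 0, so the waiter blocks until a signal arrives. */
	semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);

	pthread_create(&t, NULL, waiter, NULL);
	semaphore_signal(sem);		/* wakes one waiter, or preposts the count */
	pthread_join(t, NULL);

	semaphore_destroy(mach_task_self(), sem);
	return 0;
}
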
Code Example #6
File: sync_lock.c  Project: CptFrazz/xnu
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);		

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

 	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);
			
			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
Code Example #7
File: startup.c  Project: rohsaini/mkunity
/*
 * Now running in a thread.  Create the rest of the kernel threads
 * and the bootstrap task.
 */
void
start_kernel_threads(void)
{
	register int	i;

	/*
	 *	Create the idle threads and the other
	 *	service threads.
	 */
	for (i = 0; i < NCPUS; i++) {
	    if (machine_slot[i].is_cpu) {
		thread_t	th;
		spl_t	s;
		processor_t processor = cpu_to_processor(i);

		(void) thread_create_at(kernel_task, &th, idle_thread);
		s=splsched();
		thread_lock(th);
		thread_bind_locked(th, processor);
		processor->idle_thread = th;
		/*(void) thread_resume(th->top_act);*/
		th->state |= TH_RUN;
		thread_setrun( th, TRUE, TAIL_Q);
		thread_unlock( th );
		splx(s);
	    }
	}

	(void) kernel_thread(kernel_task, reaper_thread, (char *) 0);
#if	THREAD_SWAPPER
	(void) kernel_thread(kernel_task, swapin_thread, (char *) 0);
	(void) kernel_thread(kernel_task, swapout_thread, (char *) 0);
#endif	/* THREAD_SWAPPER */
#if	TASK_SWAPPER
	if (task_swap_on) {
		(void) kernel_thread(kernel_task, task_swapper, (char *) 0);
		(void) kernel_thread(kernel_task, task_swap_swapout_thread,
				     (char *) 0);
	}
#endif	/* TASK_SWAPPER */
	(void) kernel_thread(kernel_task, sched_thread, (char *) 0);
	(void) kernel_thread(kernel_task, timeout_thread, (char *) 0);
#if	NORMA_VM
	(void) kernel_thread(kernel_task, vm_object_thread, (char *) 0);
#endif	/* NORMA_VM */

	/*
	 *	Create the clock service.
	 */
	clock_service_create();

	/*
	 *	Create the device service.
	 */
	device_service_create();

	/*
	 *	Initialize distributed services, starting
	 *	with distributed IPC and progressing to any
	 *	services layered on top of that.
	 *
	 *	This stub exists even in non-NORMA systems.
	 */
	norma_bootstrap();

	/*
	 *	Initialize any testing services blocking the main kernel
	 *	thread so that the in-kernel tests run without interference
	 *	from other boot time activities. We will resume this thread
	 *	in kernel_test_thread(). 
	 */

#if	KERNEL_TEST

	/*
	 *	Initialize the lock that will be used to guard
	 *	variables that will be used in the test synchronization
	 *	scheme.
	 */ 
		simple_lock_init(&kernel_test_lock, ETAP_MISC_KERNEL_TEST);

#if	PARAGON860
	{
		char		*s;
		unsigned int	firstnode;

		/*
		 *	Only start up loopback tests on boot node.
		 */
		if ((s = (char *) getbootenv("BOOT_FIRST_NODE")) == 0)
			panic("startup");
		firstnode = atoi(s);
		(void) kernel_thread(kernel_task, kernel_test_thread,
				     (char * )(dipc_node_self() == 
						       (node_name) firstnode));
	}
#else	/* PARAGON860 */
	(void) kernel_thread(kernel_task, kernel_test_thread, (char *) 0);
#endif	/* PARAGON860 */
	{

		/*
		 *	The synchronization scheme uses a simple lock, two
		 *	booleans and the wakeup event. The wakeup event will
		 *	be posted by kernel_test_thread().
		 */
		spl_t s;
		s = splsched();
		simple_lock(&kernel_test_lock);
		while(!kernel_test_thread_sync_done){
			assert_wait((event_t) &start_kernel_threads, FALSE);
			start_kernel_threads_blocked = TRUE;
			simple_unlock(&kernel_test_lock);
			splx(s);
			thread_block((void (*)(void)) 0);
			s = splsched();
			simple_lock(&kernel_test_lock);
			start_kernel_threads_blocked = FALSE;
		}
		kernel_test_thread_sync_done = FALSE; /* Reset for next use */
		simple_unlock(&kernel_test_lock);
		splx(s);
	}


#endif	/* KERNEL_TEST */
	/*
	 *	Start the user bootstrap.
	 */
	bootstrap_create();

#if	XPR_DEBUG
	xprinit();		/* XXX */
#endif	/* XPR_DEBUG */

#if	NCPUS > 1
	/*
	 *	Create the shutdown thread.
	 */
	(void) kernel_thread(kernel_task, action_thread, (char *) 0);

	/*
	 *	Allow other CPUs to run.
	 *
	 * (this must be last, to allow bootstrap_create to fiddle with
	 *  its child thread before some cpu tries to run it)
	 */
	start_other_cpus();
#endif	/* NCPUS > 1 */

	/*
	 *	Become the pageout daemon.
	 */
	(void) spllo();
	vm_pageout();
	/*NOTREACHED*/
}
Code Example #8
File: locks.c  Project: MACasuba/MACasuba-Utils-git
/*
 * Routine: 	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t			*lck,
	thread_t			holder)
{
	thread_t		self = current_thread();
	lck_mtx_t		*mutex;
	integer_t		priority;
	spl_t			s = splsched();
#if	CONFIG_DTRACE
	uint64_t		sleep_start = 0;

	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
		sleep_start = mach_absolute_time();
	}
#endif

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	holder->sched_mode |= TH_MODE_PROMOTED;
	if (		mutex->lck_mtx_pri < priority	&&
				holder->sched_pri < priority		) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					holder->sched_pri, priority, (int)holder, (int)lck, 0);

		set_sched_pri(holder, priority);
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
#if	CONFIG_DTRACE
	/*
	 * Record the Dtrace lockstat probe for blocking, block time
	 * measured from when we were entered.
	 */
	if (sleep_start) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		}
	}
#endif
}
Code Example #9
File: task_swap.c  Project: rohsaini/mkunity
/*
 *	Process an AST_SWAPOUT.
 */
void
swapout_ast()
{
	spl_t		s;
	thread_act_t	act;
	thread_t	thread;

	act = current_act();

	/*
	 * Task is being swapped out.  First mark it as suspended
	 * and halted, then call thread_swapout_enqueue to put
	 * the thread on the queue for task_swap_swapout_threads
	 * to swap out the thread.
	 */
	/*
	 * Don't swap unswappable threads
	 */
	thread = act_lock_thread(act);
	s = splsched();
	if (thread)
		thread_lock(thread);
	if ((act->ast & AST_SWAPOUT) == 0) {
		/*
		 * Race with task_swapin. Abort swapout.
		 */
		task_swap_ast_aborted++;	/* not locked XXX */
		if (thread)
			thread_unlock(thread);
		splx(s);
		act_unlock_thread(act);
	} else if (act->swap_state == TH_SW_IN) {
		/*
		 * Mark swap_state as TH_SW_TASK_SWAPPING to avoid
		 * race with thread swapper, which will only
		 * swap thread if swap_state is TH_SW_IN.
		 * This way, the thread can only be swapped by
		 * the task swapping mechanism.
		 */
		act->swap_state |= TH_SW_TASK_SWAPPING;
		/* assert(act->suspend_count == 0); XXX ? */
		if (thread)
			thread_unlock(thread);
		if (act->suspend_count++ == 0)	/* inline thread_hold */
			install_special_handler(act);
		/* self->state |= TH_HALTED; */
		thread_ast_clear(act, AST_SWAPOUT);
		/*
		 * Initialize the swap_queue fields to allow an extra
		 * queue_remove() in task_swapin if we lose the race
		 * (task_swapin can be called before we complete
		 * thread_swapout_enqueue).
		 */
		queue_init((queue_t) &act->swap_queue);
		splx(s);
		act_unlock_thread(act);
		/* this must be called at normal interrupt level */
		thread_swapout_enqueue(act);
	} else {
		/* thread isn't swappable; continue running */
		assert(act->swap_state == TH_SW_UNSWAPPABLE);
		if (thread)
			thread_unlock(thread);
		thread_ast_clear(act, AST_SWAPOUT);
		splx(s);
		act_unlock_thread(act);
	}
}
Code Example #10
File: sync_sema.c  Project: JackieXie168/xnu
/*
 *	Routine:	semaphore_signal_internal
 *
 *		Signals the semaphore as direct.  
 *	Assumptions:
 *		Semaphore is locked.
 */
kern_return_t
semaphore_signal_internal(
	semaphore_t		semaphore,
	thread_t		thread,
	int				options)
{
	kern_return_t kr;
	spl_t  spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore->active) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = waitq_wakeup64_thread_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					thread,
					THREAD_AWAKENED,
					WAITQ_UNLOCK);
			/* waitq/semaphore is unlocked */
		} else {
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	} 

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = waitq_wakeup64_all_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED, NULL,
					WAITQ_ALL_PRIORITIES,
					WAITQ_UNLOCK);
			/* waitq / semaphore is unlocked */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST)
				semaphore->count++;
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}
	
	if (semaphore->count < 0) {
		kr = waitq_wakeup64_one_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED, NULL,
					WAITQ_ALL_PRIORITIES,
					WAITQ_KEEP_LOCKED);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			semaphore->count = 0;  /* all waiters gone */
		}
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
Code Example #11
File: machine.c  Project: JackieXie168/xnu
kern_return_t
processor_shutdown(
	processor_t			processor)
{
	processor_set_t		pset;
	spl_t				s;

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state == PROCESSOR_OFF_LINE) {
		/*
		 * Success if already shutdown.
		 */
		pset_unlock(pset);
		splx(s);

		return (KERN_SUCCESS);
	}

	if (processor->state == PROCESSOR_START) {
		/*
		 * Failure if currently being started.
		 */
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	/*
	 * If the processor is dispatching, let it finish.
	 */
	while (processor->state == PROCESSOR_DISPATCHING) {
		pset_unlock(pset);
		splx(s);
		delay(1);
		s = splsched();
		pset_lock(pset);
	}

	/*
	 * Success if already being shutdown.
	 */
	if (processor->state == PROCESSOR_SHUTDOWN) {
		pset_unlock(pset);
		splx(s);

		return (KERN_SUCCESS);
	}

	if (processor->state == PROCESSOR_IDLE)
		remqueue((queue_entry_t)processor);
	else
	if (processor->state == PROCESSOR_RUNNING)
		remqueue((queue_entry_t)processor);

	processor->state = PROCESSOR_SHUTDOWN;

	pset_unlock(pset);

	processor_doshutdown(processor);
	splx(s);

	cpu_exit_wait(processor->cpu_id);

	return (KERN_SUCCESS);
}
Code Example #12
File: mach_port.c  Project: OpenDarwin-CVS/SEDarwin
kern_return_t
mach_port_get_attributes(
	ipc_space_t		space,
	mach_port_name_t	name,
	int			flavor,
        mach_port_info_t	info,
        mach_msg_type_number_t	*count)
{
	ipc_port_t port;
	kern_return_t kr;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

        switch (flavor) {
        case MACH_PORT_LIMITS_INFO: {
                mach_port_limits_t *lp = (mach_port_limits_t *)info;

                if (*count < MACH_PORT_LIMITS_INFO_COUNT)
                        return KERN_FAILURE;

                if (!MACH_PORT_VALID(name)) {
			*count = 0;
			break;
		}
			
                kr = ipc_port_translate_receive(space, name, &port);
                if (kr != KERN_SUCCESS)
                        return kr;
                /* port is locked and active */

                lp->mpl_qlimit = port->ip_messages.imq_qlimit;
                *count = MACH_PORT_LIMITS_INFO_COUNT;
                ip_unlock(port);
                break;
        }

        case MACH_PORT_RECEIVE_STATUS: {
                mach_port_status_t *statusp = (mach_port_status_t *)info;
		spl_t s;
                
                if (*count < MACH_PORT_RECEIVE_STATUS_COUNT)
                        return KERN_FAILURE;
                
		if (!MACH_PORT_VALID(name))
			return KERN_INVALID_RIGHT;

                kr = ipc_port_translate_receive(space, name, &port);
                if (kr != KERN_SUCCESS)
                        return kr;
                /* port is locked and active */

		statusp->mps_pset = port->ip_pset_count;

		s = splsched();
		imq_lock(&port->ip_messages);
		statusp->mps_seqno = port->ip_messages.imq_seqno;
                statusp->mps_qlimit = port->ip_messages.imq_qlimit;
                statusp->mps_msgcount = port->ip_messages.imq_msgcount;
		imq_unlock(&port->ip_messages);
		splx(s);

                statusp->mps_mscount = port->ip_mscount;
                statusp->mps_sorights = port->ip_sorights;
                statusp->mps_srights = port->ip_srights > 0;
                statusp->mps_pdrequest = port->ip_pdrequest != IP_NULL;
                statusp->mps_nsrequest = port->ip_nsrequest != IP_NULL;
		statusp->mps_flags = 0;

                *count = MACH_PORT_RECEIVE_STATUS_COUNT;
                ip_unlock(port);
                break;
        }
	
	case MACH_PORT_DNREQUESTS_SIZE: {
		ipc_port_request_t	table;
		
                if (*count < MACH_PORT_DNREQUESTS_SIZE_COUNT)
                        return KERN_FAILURE;

		if (!MACH_PORT_VALID(name)) {
			*(int *)info = 0;
			break;
		}

                kr = ipc_port_translate_receive(space, name, &port);
                if (kr != KERN_SUCCESS)
                        return kr;
                /* port is locked and active */
		
		table = port->ip_dnrequests;
		if (table == IPR_NULL)
			*(int *)info = 0;
		else
			*(int *)info = table->ipr_size->its_size;
                *count = MACH_PORT_DNREQUESTS_SIZE_COUNT;
                ip_unlock(port);
		break;
	}

        default:
		return KERN_INVALID_ARGUMENT;
                /*NOTREACHED*/
        }                

	return KERN_SUCCESS;
}
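
For the MACH_PORT_LIMITS_INFO branch above, a caller usually allocates a receive right and asks for its queue limit. A minimal sketch, with error checking trimmed and the clean-up call chosen by me:

#include <stdio.h>
#include <mach/mach.h>

/* Sketch: read the queue limit of a freshly allocated receive right. */
int
main(void)
{
	mach_port_t		port;
	mach_port_limits_t	limits;
	mach_msg_type_number_t	count = MACH_PORT_LIMITS_INFO_COUNT;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);

	if (mach_port_get_attributes(mach_task_self(), port,
			MACH_PORT_LIMITS_INFO,
			(mach_port_info_t)&limits, &count) == KERN_SUCCESS)
		printf("qlimit=%u\n", limits.mpl_qlimit);

	/* Drop the receive right again. */
	mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
	return 0;
}
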
Code Example #13
/*
 *	thread_switch:
 *
 *	Context switch.  User may supply thread hint.
 *
 *	Fixed priority threads that call this get what they asked for
 *	even if that violates priority order.
 */
kern_return_t thread_switch(
	mach_port_t 		thread_name,
	int 			option,
	mach_msg_timeout_t 	option_time)
{
    thread_t			cur_thread = current_thread();
    processor_t			myprocessor;
    ipc_port_t			port;

    /*
     *	Process option.
     */
    switch (option) {
	case SWITCH_OPTION_NONE:
	    /*
	     *	Nothing to do.
	     */
	    break;

	case SWITCH_OPTION_DEPRESS:
	    /*
	     *	Depress priority for given time.
	     */
	    thread_depress_priority(cur_thread, option_time);
	    break;

	case SWITCH_OPTION_WAIT:
	    thread_will_wait_with_timeout(cur_thread, option_time);
	    break;

	default:
	    return(KERN_INVALID_ARGUMENT);
    }

#ifndef MIGRATING_THREADS /* XXX thread_run defunct */
    /*
     *	Check and act on thread hint if appropriate.
     */
    if ((thread_name != 0) &&
	(ipc_port_translate_send(cur_thread->task->itk_space,
				 thread_name, &port) == KERN_SUCCESS)) {
	    /* port is locked, but it might not be active */

	    /*
	     *	Get corresponding thread.
	     */
	    if (ip_active(port) && (ip_kotype(port) == IKOT_THREAD)) {
		thread_t thread;
		spl_t s;

		thread = (thread_t) port->ip_kobject;
		/*
		 *	Check if the thread is in the right pset. Then
		 *	pull it off its run queue.  If it
		 *	doesn't come, then it's not eligible.
		 */
		s = splsched();
		thread_lock(thread);
		if ((thread->processor_set == cur_thread->processor_set)
		    && (rem_runq(thread) != RUN_QUEUE_NULL)) {
			/*
			 *	Hah, got it!!
			 */
			thread_unlock(thread);
			(void) splx(s);
			ip_unlock(port);
			/* XXX thread might disappear on us now? */
#if	MACH_FIXPRI
			if (thread->policy == POLICY_FIXEDPRI) {
			    myprocessor = current_processor();
			    myprocessor->quantum = thread->sched_data;
			    myprocessor->first_quantum = TRUE;
			}
#endif	/* MACH_FIXPRI */
			counter(c_thread_switch_handoff++);
			thread_run(thread_switch_continue, thread);
			/*
			 *  Restore depressed priority
			 */
			if (cur_thread->depress_priority >= 0)
				(void) thread_depress_abort(cur_thread);

			return(KERN_SUCCESS);
		}
		thread_unlock(thread);
		(void) splx(s);
	    }
	    ip_unlock(port);
    }
#endif /* not MIGRATING_THREADS */

    /*
     *	No handoff hint supplied, or hint was wrong.  Call thread_block() in
     *	hopes of running something else.  If nothing else is runnable,
     *	thread_block will detect this.  WARNING: thread_switch with no
     *	option will not do anything useful if the thread calling it is the
     *	highest priority thread (can easily happen with a collection
     *	of timesharing threads).
     */
#if	NCPUS > 1
    myprocessor = current_processor();
    if (myprocessor->processor_set->runq.count > 0 ||
	myprocessor->runq.count > 0)
#endif	/* NCPUS > 1 */
    {
	counter(c_thread_switch_block++);
	thread_block(thread_switch_continue);
    }

    /*
     *  Restore depressed priority
     */
    if (cur_thread->depress_priority >= 0)
	(void) thread_depress_abort(cur_thread);
    return(KERN_SUCCESS);
}
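
The SWITCH_OPTION_DEPRESS case above is reachable from user space through the thread_switch() trap. A hedged sketch of a yield with a 1 ms priority depression; where the trap prototype is declared varies between systems, so it is repeated here to keep the example self-contained:

#include <mach/mach.h>
#include <mach/thread_switch.h>

/* User-side prototype of the Mach trap (explicit extern because the header
 * that declares it differs across systems). */
extern kern_return_t	thread_switch(mach_port_name_t thread_name,
				int option, mach_msg_timeout_t option_time);

/* Sketch: give up the CPU and depress priority for ~1 ms.  Passing
 * MACH_PORT_NULL means "no handoff hint", which is the fallback path
 * commented on at the end of the kernel routine above. */
int
main(void)
{
	thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 1 /* ms */);
	return 0;
}
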
Code Example #14
kern_return_t
thread_policy_set(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	if (thread->static_param) {
		thread_mtx_unlock(thread);

		return (KERN_SUCCESS);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t				timeshare = TRUE;

		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			integer_t	oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

			thread->sched_mode &= ~TH_MODE_REALTIME;

			if (timeshare && !oldmode) {
				thread->sched_mode |= TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_incr();
			}
			else
			if (!timeshare && oldmode) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}

			thread_recompute_priority(thread);
		}
		else {
			thread->safe_mode &= ~TH_MODE_REALTIME;

			if (timeshare)
				thread->safe_mode |= TH_MODE_TIMESHARE;
			else
				thread->safe_mode &= ~TH_MODE_TIMESHARE;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			if (thread->sched_mode & TH_MODE_TIMESHARE) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}
			thread->sched_mode |= TH_MODE_REALTIME;
			thread_recompute_priority(thread);
		}
		else {
			thread->safe_mode &= ~TH_MODE_TIMESHARE;
			thread->safe_mode |= TH_MODE_REALTIME;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}
	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}
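
The THREAD_TIME_CONSTRAINT_POLICY case above is what user code exercises when it asks for real-time scheduling. A hedged sketch of the usual caller-side setup, converting milliseconds into absolute-time units with mach_timebase_info() first; make_realtime() and the 10/3/5 ms figures are illustrative choices of mine, not values taken from the source:

#include <stdint.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach/thread_policy.h>

/* Sketch: request real-time scheduling for the calling thread.
 * period/computation/constraint are expressed in mach absolute-time units,
 * and the kernel requires computation <= constraint (see the checks above). */
static kern_return_t
make_realtime(uint32_t period_ms, uint32_t computation_ms, uint32_t constraint_ms)
{
	mach_timebase_info_data_t		tb;
	thread_time_constraint_policy_data_t	policy;
	uint64_t				abs_per_ms;

	mach_timebase_info(&tb);
	abs_per_ms = 1000000ULL * tb.denom / tb.numer;	/* abs units per millisecond */

	policy.period      = (uint32_t)(period_ms * abs_per_ms);
	policy.computation = (uint32_t)(computation_ms * abs_per_ms);
	policy.constraint  = (uint32_t)(constraint_ms * abs_per_ms);
	policy.preemptible = TRUE;

	return thread_policy_set(mach_thread_self(),
			THREAD_TIME_CONSTRAINT_POLICY,
			(thread_policy_t)&policy,
			THREAD_TIME_CONSTRAINT_POLICY_COUNT);
}

int
main(void)
{
	/* e.g. 3 ms of work every 10 ms, finished within 5 ms of becoming runnable */
	return make_realtime(10, 3, 5) == KERN_SUCCESS ? 0 : 1;
}
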
Code Example #15
File: vm_machdep.c  Project: MarginC/kame
void
cpu_exit(struct lwp *l)
{
	(void) splsched();
	switch_exit(l, lwp_exit2);
}
Code Example #16
File: task_swap.c  Project: rohsaini/mkunity
/*
 *	task_swapout:
 * 	A reference to the task must be held.
 *
 *	Start swapping out a task by sending an AST_SWAPOUT to each thread.
 *	When the threads reach a clean point, they queue themselves up on the
 *	swapout_thread_q to be swapped out by the task_swap_swapout_thread.
 *	The task can be swapped in at any point in this process.
 *
 *	A task will not be fully swapped out (i.e. its map residence count
 *	at zero) until all currently-swapped threads run and reach
 *	a clean point, at which time they will be swapped again,
 *	decrementing the swap_ast_waiting count on the task.
 *
 *	Locking: no locks held upon entry and exit.
 *		 Task_lock is held throughout this function.
 */
kern_return_t
task_swapout(task_t task)
{
	thread_act_t thr_act;
	thread_t thread;
	queue_head_t *list;
	int s;

	task_swapout_lock();
	task_lock(task);
	/*
	 * NOTE: look into turning these into assertions if they
	 * are invariants.
	 */
	if ((task->swap_state != TASK_SW_IN) || (!task->active)) {
		task_unlock(task);
		task_swapout_unlock();
		return(KERN_FAILURE);
	}
	if (task->swap_flags & TASK_SW_ELIGIBLE) {
		queue_remove(&eligible_tasks, task, task_t, swapped_tasks);
		task->swap_flags &= ~TASK_SW_ELIGIBLE;
	}
	task_swapout_unlock();

	/* set state to avoid races with task_swappable(FALSE) */
	task->swap_state = TASK_SW_GOING_OUT;
	task->swap_rss = pmap_resident_count(task->map->pmap);
	task_swaprss_out += task->swap_rss;
	task->swap_ast_waiting = task->thr_act_count;

	/*
	 * halt all threads in this task:
	 * We don't need the thread list lock for traversal.
	 */
	list = &task->thr_acts;
	thr_act = (thread_act_t) queue_first(list);
	while (!queue_end(list, (queue_entry_t) thr_act)) {
		boolean_t swappable;
		thread_act_t ract;

		thread = act_lock_thread(thr_act);
		s = splsched();
		if (!thread)
			swappable = (thr_act->swap_state != TH_SW_UNSWAPPABLE);
		else {
			thread_lock(thread);
			swappable = TRUE;
			for (ract = thread->top_act; ract; ract = ract->lower)
				if (ract->swap_state == TH_SW_UNSWAPPABLE) {
					swappable = FALSE;
					break;
				}
		}
		if (swappable)
			thread_ast_set(thr_act, AST_SWAPOUT);
		if (thread)
			thread_unlock(thread);
		splx(s);
		assert((thr_act->ast & AST_TERMINATE) == 0);
		act_unlock_thread(thr_act);
		thr_act = (thread_act_t) queue_next(&thr_act->thr_acts);
	}

	task->swap_stamp = sched_tick;
	task->swap_nswap++;
	assert((task->swap_flags&TASK_SW_WANT_IN) == 0);
	/* put task on the queue of swapped out tasks */
	task_swapper_lock();
#if	TASK_SW_DEBUG
	if (task_swap_debug && on_swapped_list(task)) {
		printf("task 0x%X already on list\n", task);
		Debugger("");
	}
#endif	/* TASK_SW_DEBUG */
	queue_enter(&swapped_tasks, task, task_t, swapped_tasks);
	tasks_swapped_out++;
	task_swapouts++;
	task_swapper_unlock();
	task_unlock(task);

	return(KERN_SUCCESS);
}
Code Example #17
File: data_device.c  Project: rohsaini/mkunity
/*
 * Manage next request event
 */
void
datadev_request(
    datadev_t ddp)
{
    kern_return_t rc;
    io_req_t ior;
    spl_t s;

    s = splsched();
    mutex_lock(&datadev_lock);
    if (ddp != (datadev_t)0) {
	/*
	 * Queue current request
	 */
	queue_enter(&datadev_wait, ddp, datadev_t, dd_chain);
    }

    /*
     * Try to start next request
     */
    if (queue_empty(&datadev_wait) || datadev_ior == (io_req_t)0) {
	/*
	 * No request or no pending read
	 */
	mutex_unlock(&datadev_lock);
	splx(s);
	return;
    }

    /*
     * Extract first waiting request
     */
    ddp = (datadev_t)queue_first(&datadev_wait);

    /*
     * Extract pending I/O request
     */
    ior = datadev_ior;
    datadev_ior = (io_req_t)0;

    /*
     * Allocate read memory
     */
    if (ior->io_count < ddp->dd_size) {
	/*
	 * Return size error for this request
	 */
	mutex_unlock(&datadev_lock);
	splx(s);
	ior->io_error = D_INVALID_SIZE;

    } else {
	/*
	 * Move waiting request from the waiting queue to the active one.
	 */
	queue_remove(&datadev_wait, ddp, datadev_t, dd_chain);
	queue_enter(&datadev_curr, ddp, datadev_t, dd_chain);
	mutex_unlock(&datadev_lock);
	splx(s);

	/*
	 * Activate the request
	 */
	bcopy(ddp->dd_name, ior->io_data, ddp->dd_size);
	ddp->dd_dev = ior->io_unit;
	ior->io_residual = ior->io_count - ddp->dd_size;
	ior->io_error = D_SUCCESS;
    }

    io_completed(ior, FALSE);
}
Code Example #18
File: task_swap.c  Project: rohsaini/mkunity
/*
 *	task_swap_swapout_thread: [exported]
 *
 *	Executes as a separate kernel thread.
 *	Its job is to swap out threads that have been halted by AST_SWAPOUT.
 */
void
task_swap_swapout_thread(void)
{
	thread_act_t thr_act;
	thread_t thread, nthread;
	task_t task;
	int s;

	thread_swappable(current_act(), FALSE);
	stack_privilege(current_thread());

	spllo();

	while (TRUE) {
		task_swapper_lock();
		while (! queue_empty(&swapout_thread_q)) {

			queue_remove_first(&swapout_thread_q, thr_act,
					   thread_act_t, swap_queue);
			/*
			 * If we're racing with task_swapin, we need
			 * to make it safe for it to do remque on the
			 * thread, so make its links point to itself.
			 * Allowing this ugliness is cheaper than 
			 * making task_swapin search the entire queue.
			 */
			act_lock(thr_act);
			queue_init((queue_t) &thr_act->swap_queue);
			act_unlock(thr_act);
			task_swapper_unlock();
			/*
			 * Wait for thread's RUN bit to be deasserted.
			 */
			thread = act_lock_thread(thr_act);
			if (thread == THREAD_NULL)
				act_unlock_thread(thr_act);
			else {
				boolean_t r;

				thread_reference(thread);
				thread_hold(thr_act);
				act_unlock_thread(thr_act);
				r = thread_stop_wait(thread);
				nthread = act_lock_thread(thr_act);
				thread_release(thr_act);
				thread_deallocate(thread);
				act_unlock_thread(thr_act);
				if (!r || nthread != thread) {
					task_swapper_lock();
					continue;
				}
			}
			task = thr_act->task;
			task_lock(task);
			/* 
			 * we can race with swapin, which would set the
			 * state to TASK_SW_IN. 
			 */
			if ((task->swap_state != TASK_SW_OUT) &&
			    (task->swap_state != TASK_SW_GOING_OUT)) {
				task_unlock(task);
				task_swapper_lock();
				TASK_STATS_INCR(task_sw_race_in_won);
				if (thread != THREAD_NULL)
					thread_unstop(thread);
				continue;
			}
			nthread = act_lock_thread(thr_act);
			if (nthread != thread || thr_act->active == FALSE) {
				act_unlock_thread(thr_act);
				task_unlock(task);
				task_swapper_lock();
				TASK_STATS_INCR(task_sw_act_inactive);
				if (thread != THREAD_NULL)
					thread_unstop(thread);
				continue;
			}
			s = splsched();
			if (thread != THREAD_NULL)
				thread_lock(thread);
			/* 
			 * Thread cannot have been swapped out yet because
			 * TH_SW_TASK_SWAPPING was set in AST.  If task_swapin
			 * beat us here, we either wouldn't have found it on
			 * the queue, or the task->swap_state would have
			 * changed.  The synchronization is on the
			 * task's swap_state and the task_lock.
			 * The thread can't be swapped in any other way
			 * because its task has been swapped.
			 */
			assert(thr_act->swap_state & TH_SW_TASK_SWAPPING);
			assert(thread == THREAD_NULL ||
			       !(thread->state & (TH_SWAPPED_OUT|TH_RUN)));
			assert((thr_act->swap_state & TH_SW_STATE) == TH_SW_IN);
			/* assert(thread->state & TH_HALTED); */
			/* this also clears TH_SW_TASK_SWAPPING flag */
			thr_act->swap_state = TH_SW_GOING_OUT;
			if (thread != THREAD_NULL) {
				if (thread->top_act == thr_act) {
					thread->state |= TH_SWAPPED_OUT;
					/*
					 * Once we unlock the task, things can happen
					 * to the thread, so make sure it's consistent
					 * for thread_swapout.
					 */
				}
				thread->ref_count++;
				thread_unlock(thread);
				thread_unstop(thread);
			}
			splx(s);
			act_locked_act_reference(thr_act);
			act_unlock_thread(thr_act);
			task_unlock(task);

			thread_swapout(thr_act);	/* do the work */

			if (thread != THREAD_NULL)
				thread_deallocate(thread);
			act_deallocate(thr_act);
			task_swapper_lock();
		}
		assert_wait((event_t)&swapout_thread_q, FALSE);
		task_swapper_unlock();
		thread_block((void (*)(void)) 0);
	}
}
Code Example #19
File: sync_lock.c  Project: CptFrazz/xnu
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;
	int	  wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}
	
	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					   wq,
					   LOCK_SET_HANDOFF,
					   THREAD_AWAKENED,
					   TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/* 
			 * The thread we are transferring to will try
			 * to take the lock on the ulock, and therefore
			 * will wait for us to complete the handoff even
			 * though we set the thread running.
			 */
			thread_unlock(thread);
			splx(s);
			
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 * OOPS.  The accepting thread must have been aborted
			 * and is racing back to clear the flag that says it is
			 * waiting for an accept.  It will clear the flag when we
			 * release the lock, so just fall through and wait for
			 * the next accepting thread (that's the way it is
			 * specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
			       LOCK_SET_HANDOFF,
			       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken-up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {


	case THREAD_AWAKENED:
		/*
		 * we take the ulock lock to synchronize with the
		 * thread that is accepting ownership.
		 */
		ulock_lock(ulock);
		assert(ulock->holder != current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}
Code Example #20
File: task_swap.c  Project: rohsaini/mkunity
kern_return_t
task_swapin(task_t task, boolean_t make_unswappable)
{
	register queue_head_t	*list;
	register thread_act_t	thr_act, next;
	thread_t		thread;
	int			s;
	boolean_t		swappable = TRUE;

	task_lock(task);
	switch (task->swap_state) {
	    case TASK_SW_OUT:
			{
			vm_map_t map = task->map;
			/*
			 * Task has made it all the way out, which means
			 * that vm_map_res_deallocate has been done; set 
			 * state to TASK_SW_COMING_IN, then bring map
			 * back in.  We could actually be racing with
			 * the thread_swapout_enqueue, which does the
			 * vm_map_res_deallocate, but that race is covered.
			 */
			task->swap_state = TASK_SW_COMING_IN;
			assert(task->swap_ast_waiting == 0);
			assert(map->res_count >= 0);
			task_unlock(task);
			mutex_lock(&map->s_lock);
			vm_map_res_reference(map);
			mutex_unlock(&map->s_lock);
			task_lock(task);
			assert(task->swap_state == TASK_SW_COMING_IN);
			}
			break;

	    case TASK_SW_GOING_OUT:
			/*
			 * Task isn't all the way out yet.  There is
			 * still at least one thread not swapped, and
			 * vm_map_res_deallocate has not been done.
			 */
			task->swap_state = TASK_SW_COMING_IN;
			assert(task->swap_ast_waiting > 0 ||
			       (task->swap_ast_waiting == 0 &&
				task->thr_act_count == 0));
			assert(task->map->res_count > 0);
			TASK_STATS_INCR(task_sw_race_going_out);
			break;
	    case TASK_SW_IN:
			assert(task->map->res_count > 0);
#if	TASK_SW_DEBUG
			task_swapper_lock();
			if (task_swap_debug && on_swapped_list(task)) {
				printf("task 0x%X on list, state is SW_IN\n",
					task);
				Debugger("");
			}
			task_swapper_unlock();
#endif	/* TASK_SW_DEBUG */
			TASK_STATS_INCR(task_sw_race_in);
			if (make_unswappable) {
				task->swap_state = TASK_SW_UNSWAPPABLE;
				task_unlock(task);
				task_swapout_ineligible(task);
			} else
				task_unlock(task);
			return(KERN_SUCCESS);
	    case TASK_SW_COMING_IN:
			/* 
			 * Raced with another task_swapin and lost;
			 * wait for other one to complete first
			 */
			assert(task->map->res_count >= 0);
			/*
			 * set MAKE_UNSWAPPABLE so that whoever is swapping
			 * the task in will make it unswappable, and return
			 */
			if (make_unswappable)
				task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE;
			task->swap_flags |= TASK_SW_WANT_IN;
			assert_wait((event_t)&task->swap_state, FALSE);
			task_unlock(task);
			thread_block((void (*)(void)) 0);
			TASK_STATS_INCR(task_sw_race_coming_in);
			return(KERN_SUCCESS);
	    case TASK_SW_UNSWAPPABLE:
			/* 
			 * This can happen, since task_terminate 
			 * unconditionally calls task_swapin.
			 */
			task_unlock(task);
			return(KERN_SUCCESS);
	    default:
			panic("task_swapin bad state");
			break;
	}
	if (make_unswappable)
		task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE;
	assert(task->swap_state == TASK_SW_COMING_IN);
	task_swapper_lock();
#if	TASK_SW_DEBUG
	if (task_swap_debug && !on_swapped_list(task)) {
		printf("task 0x%X not on list\n", task);
		Debugger("");
	}
#endif	/* TASK_SW_DEBUG */
	queue_remove(&swapped_tasks, task, task_t, swapped_tasks);
	tasks_swapped_out--;
	task_swapins++;
	task_swapper_unlock();

	/*
	 * Iterate through all threads for this task and 
	 * release them, as required.  They may not have been swapped
	 * out yet.  The task remains locked throughout.
	 */
	list = &task->thr_acts;
	thr_act = (thread_act_t) queue_first(list);
	while (!queue_end(list, (queue_entry_t) thr_act)) {
		boolean_t need_to_release;
		next = (thread_act_t) queue_next(&thr_act->thr_acts);
		/*
		 * Keep task_swapper_lock across thread handling
		 * to synchronize with task_swap_swapout_thread
		 */
		task_swapper_lock();
		thread = act_lock_thread(thr_act);
		s = splsched();
		if (thr_act->ast & AST_SWAPOUT) {
			/* thread hasn't gotten the AST yet, just clear it */
			thread_ast_clear(thr_act, AST_SWAPOUT);
			need_to_release = FALSE;
			TASK_STATS_INCR(task_sw_before_ast);
			splx(s);
			act_unlock_thread(thr_act);
		} else {
			/*
			 * If AST_SWAPOUT was cleared, then thread_hold,
			 * or equivalent was done.
			 */
			need_to_release = TRUE;
			/*
			 * Thread has hit AST, but it may not have
			 * been dequeued yet, so we need to check.
			 * NOTE: the thread may have been dequeued, but
			 * has not yet been swapped (the task_swapper_lock
			 * has been dropped, but the thread is not yet
			 * locked), and the TH_SW_TASK_SWAPPING flag may 
			 * not have been cleared.  In this case, we will do 
			 * an extra remque, which the task_swap_swapout_thread
			 * has made safe, and clear the flag, which is also
			 * checked by the t_s_s_t before doing the swapout.
			 */
			if (thread)
				thread_lock(thread);
			if (thr_act->swap_state & TH_SW_TASK_SWAPPING) {
				/* 
				 * hasn't yet been dequeued for swapout,
				 * so clear flags and dequeue it first.
				 */
				thr_act->swap_state &= ~TH_SW_TASK_SWAPPING;
				assert(thr_act->thread == THREAD_NULL || 
				       !(thr_act->thread->state &
					 TH_SWAPPED_OUT));
				queue_remove(&swapout_thread_q, thr_act,
					     thread_act_t, swap_queue);
				TASK_STATS_INCR(task_sw_before_swap);
			} else {
				TASK_STATS_INCR(task_sw_after_swap);
				/*
				 * It's possible that the thread was
				 * made unswappable before hitting the
				 * AST, in which case it's still running.
				 */
				if (thr_act->swap_state == TH_SW_UNSWAPPABLE) {
					need_to_release = FALSE;
					TASK_STATS_INCR(task_sw_unswappable);
				}
			}
			if (thread)
				thread_unlock(thread);
			splx(s);
			act_unlock_thread(thr_act);
		}
		task_swapper_unlock();

		/* 
		 * thread_release will swap in the thread if it's been
		 * swapped out.
		 */
		if (need_to_release) {
			act_lock_thread(thr_act);
			thread_release(thr_act);
			act_unlock_thread(thr_act);
		}
		thr_act = next;
	}

	if (task->swap_flags & TASK_SW_MAKE_UNSWAPPABLE) {
		task->swap_flags &= ~TASK_SW_MAKE_UNSWAPPABLE;
		task->swap_state = TASK_SW_UNSWAPPABLE;
		swappable = FALSE;
	} else {
		task->swap_state = TASK_SW_IN;
	}

	task_swaprss_in += pmap_resident_count(task->map->pmap);
	task_swap_total_time += sched_tick - task->swap_stamp;
	/* note when task came back in */
	task->swap_stamp = sched_tick;
	if (task->swap_flags & TASK_SW_WANT_IN) {
		task->swap_flags &= ~TASK_SW_WANT_IN;
		thread_wakeup((event_t)&task->swap_state);
	}
	assert((task->swap_flags & TASK_SW_ELIGIBLE) == 0);
	task_unlock(task);
#if	TASK_SW_DEBUG
	task_swapper_lock();
	if (task_swap_debug && on_swapped_list(task)) {
		printf("task 0x%X on list at end of swap in\n", task);
		Debugger("");
	}
	task_swapper_unlock();
#endif	/* TASK_SW_DEBUG */
	/*
	 * Make the task eligible to be swapped again
	 */
	if (swappable)
		task_swapout_eligible(task);
	return(KERN_SUCCESS);
}
Code Example #21
File: ipc_mqueue.c  Project: Apple-FOSS-Mirror/xnu
/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 *	Conditions:
 *		May block.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t	 port_mqueue,
	ipc_mqueue_t	 set_mqueue,
	wait_queue_link_t wql)
{
	wait_queue_t	 port_waitq = &port_mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t       kmsg, next;
	kern_return_t	 kr;
	spl_t		 s;

	kr = wait_queue_link_noalloc(port_waitq, set_waitq, wql);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
 * waitq.  Let's get them together.
	 */
	s = splsched();
	imq_lock(port_mqueue);
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	     kmsg != IKM_NULL;
	     kmsg = next) {
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		for (;;) {
			thread_t th;
			mach_msg_size_t msize;

			th = wait_queue_wakeup64_identity_locked(
						port_waitq,
						IPC_MQUEUE_RECEIVE,
						THREAD_AWAKENED,
						FALSE);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL)
				goto leave;

			/*
			 * If the receiver waited with a facility not directly
			 * related to Mach messaging, then it isn't prepared to get
			 * handed the message directly.  Just set it running, and
			 * go look for another thread that can.
			 */
			if (th->ith_state != MACH_RCV_IN_PROGRESS) {
				  thread_unlock(th);
				  continue;
			}

			/*
			 * Found a receiver.  See if it can handle the message
			 * correctly (the message is not too large for it, or it
			 * didn't ask to be told when a message is too large).
			 * If it can't handle the message, take it off the list,
			 * let it go back and figure things out, and move on to
			 * the next thread.
			 */
			msize = ipc_kmsg_copyout_size(kmsg, th->map);
			if (th->ith_msize <
					(msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(th), th->ith_option))) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = msize;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let it go without the message
					 */
					th->ith_receiver_name = port_mqueue->imq_receiver_name;
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so hand it over.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
			ipc_mqueue_release_msgcount(port_mqueue);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			break;  /* go to next message */
		}
			
	}
 leave:
	imq_unlock(port_mqueue);
	splx(s);
	return KERN_SUCCESS;
}
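
The linkage established above is what makes user-level port sets work: once a receive right is moved into a set, a receive on the set can be satisfied by messages queued on any member port. A minimal user-space sketch of that pattern, assuming only the public mach_port_allocate(), mach_port_move_member(), and mach_msg() calls (error handling trimmed):

#include <mach/mach.h>
#include <stdio.h>

/* Sketch: create a port and a port set, add the port to the set, then
 * receive on the set.  ipc_mqueue_add() is what links the two message
 * queues when mach_port_move_member() is processed by the kernel. */
int main(void)
{
	mach_port_t port, pset;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS) return 1;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &pset);
	if (kr != KERN_SUCCESS) return 1;

	/* Link the port's mqueue to the set's mqueue. */
	kr = mach_port_move_member(mach_task_self(), port, pset);
	if (kr != KERN_SUCCESS) return 1;

	/* Receive on the set; a message sent to `port` would wake this receiver.
	 * With nothing sent, the short timeout fires and MACH_RCV_TIMED_OUT
	 * is returned. */
	struct {
		mach_msg_header_t	header;
		char			body[128];	/* space for message + trailer */
	} buf;

	kr = mach_msg(&buf.header, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
		      sizeof(buf), pset, 100 /* ms */, MACH_PORT_NULL);
	printf("mach_msg on port set returned 0x%x\n", kr);
	return 0;
}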
Code example #22
File: task_swap.c  Project: rohsaini/mkunity
task_t
pick_outtask(void)
{
	register task_t		task;
	register task_t		target_task = TASK_NULL;
	unsigned long		task_rss;
	unsigned long		target_rss = 0;
	boolean_t		wired;
	boolean_t		active;
	int			nactive = 0;

	task_swapout_lock();
	if (queue_empty(&eligible_tasks)) {
		/* not likely to happen */
		task_swapout_unlock();
		return(TASK_NULL);
	}
	task = (task_t)queue_first(&eligible_tasks);
	while (!queue_end(&eligible_tasks, (queue_entry_t)task)) {
		int s;
		register thread_act_t thr_act;
		thread_t th;
		

		task_lock(task);
#if	MACH_RT
		/*
		 * Don't swap real-time tasks.
		 * XXX Should we enforce that or can we let really critical
		 * tasks use task_swappable() to make sure they never end up
		 * on the eligible list?
		 */
		if (task->policy & POLICYCLASS_FIXEDPRI) {
			goto tryagain;
		}
#endif	/* MACH_RT */
		if (!task->active) {
			TASK_STATS_INCR(inactive_task_count);
			goto tryagain;
		}
		if (task->res_act_count == 0) {
			TASK_STATS_INCR(empty_task_count);
			goto tryagain;
		}
		assert(!queue_empty(&task->thr_acts));
		thr_act = (thread_act_t)queue_first(&task->thr_acts);
		active = FALSE;
		th = act_lock_thread(thr_act);
		s = splsched();
		if (th != THREAD_NULL)
			thread_lock(th);
		if ((th == THREAD_NULL) ||
		    (th->state == TH_RUN) ||
		    (th->state & TH_WAIT)) {
			/*
			 * thread is "active": either runnable
			 * or sleeping.  Count it and examine
			 * it further below.
			 */
			nactive++;
			active = TRUE;
		}
		if (th != THREAD_NULL)
			thread_unlock(th);
		splx(s);
		act_unlock_thread(thr_act);
		if (active &&
		    (task->swap_state == TASK_SW_IN) &&
		    ((sched_tick - task->swap_stamp) > min_res_time)) {
			long rescount = pmap_resident_count(task->map->pmap);
			/*
			 * thread must be "active", task must be swapped
			 * in and resident for at least min_res_time
			 */
#if 0
/* DEBUG: Test round-robin strategy.  Picking the biggest task could cause extreme
 * unfairness to large interactive programs such as xterm.  Instead, pick the
 * first task that has any pages resident:
 */
			if (rescount > 1) {
				task->ref_count++;
				target_task = task;
				task_unlock(task);
				task_swapout_unlock();
				return(target_task);
			}
#else
			if (rescount > target_rss) {
				/*
				 * task is not swapped, and it has the
				 * largest rss seen so far.
				 */
				task->ref_count++;
				target_rss = rescount;
				assert(target_task != task);
				if (target_task != TASK_NULL)
					task_deallocate(target_task);
				target_task = task;
			}
#endif
		}
tryagain:
		task_unlock(task);
		task = (task_t)queue_next(&task->swapped_tasks);
	}
	task_swapout_unlock();
	/* only swap out if there are at least min_active_tasks */
	if (nactive < min_active_tasks) {
		if (target_task != TASK_NULL) {
			task_deallocate(target_task);
			target_task = TASK_NULL;
		}
	}
	return(target_task);
}
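
The selection policy above — skip real-time, inactive, and empty tasks, then among active tasks that have been swapped in and resident for at least min_res_time pick the one with the largest resident set, and only return a victim if enough tasks are active — can be illustrated outside the kernel. A simplified sketch with no locking or reference counting, using a hypothetical candidate_t in place of the task structure:

#include <stddef.h>
#include <stdbool.h>

/* Hypothetical stand-in for the task fields pick_outtask() consults. */
typedef struct {
	bool		active;		/* task is active and its first thread is runnable or waiting */
	bool		realtime;	/* POLICYCLASS_FIXEDPRI in the original */
	bool		swapped_in;	/* swap_state == TASK_SW_IN */
	unsigned long	resident_ticks;	/* sched_tick - swap_stamp */
	unsigned long	rss;		/* pmap_resident_count() */
} candidate_t;

/* Pick the eligible candidate with the largest RSS, but only if at least
 * min_active candidates are active (mirrors the min_active_tasks check). */
static const candidate_t *
pick_out_candidate(const candidate_t *c, size_t n,
		   unsigned long min_res_ticks, size_t min_active)
{
	const candidate_t *target = NULL;
	unsigned long target_rss = 0;
	size_t nactive = 0;

	for (size_t i = 0; i < n; i++) {
		if (c[i].realtime || !c[i].active)
			continue;
		nactive++;
		if (c[i].swapped_in &&
		    c[i].resident_ticks > min_res_ticks &&
		    c[i].rss > target_rss) {
			target_rss = c[i].rss;
			target = &c[i];
		}
	}
	return (nactive < min_active) ? NULL : target;
}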
Code example #23
File: ipc_mqueue.c  Project: Apple-FOSS-Mirror/xnu
wait_result_t
ipc_mqueue_receive_on_thread(
        ipc_mqueue_t            mqueue,
	mach_msg_option_t       option,
	mach_msg_size_t         max_size,
	mach_msg_timeout_t      rcv_timeout,
	int                     interruptible,
	thread_t                thread)
{
	ipc_kmsg_queue_t        kmsgs;
	wait_result_t           wresult;
	uint64_t		deadline;
	spl_t                   s;

	s = splsched();
	imq_lock(mqueue);
	
	if (imq_is_set(mqueue)) {
		queue_t q;

		q = &mqueue->imq_preposts;

		/*
		 * If we are waiting on a portset mqueue, we need to see if
		 * any of the member ports have work for us.  Ports that
		 * have (or recently had) messages will be linked in the
		 * prepost queue for the portset. By holding the portset's
		 * mqueue lock during the search, we tie up any attempts by
		 * mqueue_deliver or portset membership changes that may
		 * cross our path.
		 */
	search_set:
		while(!queue_empty(q)) {
			wait_queue_link_t wql;
			ipc_mqueue_t port_mq;

			queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
			assert(!wql_is_preposted(wql));

			/*
			 * This is a lock order violation, so we have to do it
			 * "softly," putting the link back on the prepost list
			 * if it fails (at the tail is fine since the order of
			 * handling messages from different sources in a set is
			 * not guaranteed and we'd like to skip to the next source
			 * if one is available).
			 */
			port_mq = (ipc_mqueue_t)wql->wql_queue;
			if (!imq_lock_try(port_mq)) {
				queue_enter(q, wql, wait_queue_link_t, wql_preposts);
				imq_unlock(mqueue);
				splx(s);
				mutex_pause(0);
				s = splsched();
				imq_lock(mqueue);
				goto search_set; /* start again at beginning - SMP */
			}

			/*
			 * If there are no messages on this queue, just skip it
			 * (we already removed the link from the set's prepost queue).
			 */
			kmsgs = &port_mq->imq_messages;
			if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
				imq_unlock(port_mq);
				continue;
			}

			/*
			 * There are messages, so reinsert the link back
			 * at the tail of the preposted queue (for fairness)
			 * while we still have the portset mqueue locked.
			 */
			queue_enter(q, wql, wait_queue_link_t, wql_preposts);
			imq_unlock(mqueue);

			/*
			 * Continue on to handling the message with just
			 * the port mqueue locked.
			 */
			ipc_mqueue_select_on_thread(port_mq, option, max_size, thread);
			imq_unlock(port_mq);
			splx(s);
			return THREAD_NOT_WAITING;
			
		}

	} else {

		/*
		 * Receive on a single port. Just try to get the messages.
		 */
	  	kmsgs = &mqueue->imq_messages;
		if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
			ipc_mqueue_select_on_thread(mqueue, option, max_size, thread);
			imq_unlock(mqueue);
			splx(s);
			return THREAD_NOT_WAITING;
		}
	}
	
	/*
	 * Looks like we'll have to block.  The mqueue we will
	 * block on (whether the set's or the local port's) is
	 * still locked.
	 */
	if (option & MACH_RCV_TIMEOUT) {
		if (rcv_timeout == 0) {
			imq_unlock(mqueue);
			splx(s);
			thread->ith_state = MACH_RCV_TIMED_OUT;
			return THREAD_NOT_WAITING;
		}
	}

	thread_lock(thread);
	thread->ith_state = MACH_RCV_IN_PROGRESS;
	thread->ith_option = option;
	thread->ith_msize = max_size;

	if (option & MACH_RCV_TIMEOUT)
		clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
	else
		deadline = 0;

	wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
						  IPC_MQUEUE_RECEIVE,
						  interruptible, 
						  TIMEOUT_URGENCY_USER_NORMAL,
						  deadline, 0,
						  thread);
	/* preposts should be detected above, not here */
	if (wresult == THREAD_AWAKENED)
		panic("ipc_mqueue_receive_on_thread: sleep walking");

	thread_unlock(thread);
	imq_unlock(mqueue);
	splx(s);
	return wresult;
}
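
From user space, the immediate-return branch (MACH_RCV_TIMEOUT with a zero timeout) is what makes non-blocking polling of a port possible. A minimal sketch using only the standard mach_msg() interface; with nothing queued it should come back with MACH_RCV_TIMED_OUT:

#include <mach/mach.h>
#include <stdio.h>

/* Poll a receive right without blocking: a zero timeout with MACH_RCV_TIMEOUT
 * takes the "rcv_timeout == 0" early return in ipc_mqueue_receive_on_thread(). */
static kern_return_t
poll_port(mach_port_t rcv_name)
{
	struct {
		mach_msg_header_t	header;
		char			body[128];	/* room for message + trailer */
	} buf;

	return mach_msg(&buf.header, MACH_RCV_MSG | MACH_RCV_TIMEOUT,
			0, sizeof(buf), rcv_name, 0 /* zero timeout: poll */,
			MACH_PORT_NULL);
}

int main(void)
{
	mach_port_t port;

	if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
			       &port) != KERN_SUCCESS)
		return 1;

	kern_return_t kr = poll_port(port);
	printf("poll returned 0x%x (MACH_RCV_TIMED_OUT is 0x%x)\n",
	       kr, MACH_RCV_TIMED_OUT);
	return 0;
}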
Code example #24
File: locks.c  Project: JackieXie168/xnu
/*
 * Routine: 	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t			*lck,
	thread_t			holder)
{
	thread_t		self = current_thread();
	lck_mtx_t		*mutex;
	__kdebug_only uintptr_t	trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
	__kdebug_only uintptr_t	trace_holder = VM_KERNEL_UNSLIDE_OR_PERM(holder);
	integer_t		priority;
	spl_t			s = splsched();
#if	CONFIG_DTRACE
	uint64_t		sleep_start = 0;

	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
		sleep_start = mach_absolute_time();
	}
#endif

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, trace_lck, trace_holder, 0, 0, 0);

	priority = self->sched_pri;
	if (priority < self->base_pri)
		priority = self->base_pri;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	/* Do not promote past promotion ceiling */
	priority = MIN(priority, MAXPRI_PROMOTE);

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	holder->sched_flags |= TH_SFLAG_PROMOTED;
	if (mutex->lck_mtx_pri < priority && holder->sched_pri < priority) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					holder->sched_pri, priority, trace_holder, trace_lck, 0);
		set_sched_pri(holder, priority);
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	assert_wait(LCK_MTX_EVENT(mutex), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
#if	CONFIG_DTRACE
	/*
	 * Record the DTrace lockstat probe for blocking; block time
	 * is measured from when this function was entered.
	 */
	if (sleep_start) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		}
	}
#endif
}
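
The wait/promotion path above only runs when a kernel mutex is contended. For context, here is a sketch of typical lck_mtx KPI usage that can reach it; the group/attribute and lock calls (lck_grp_alloc_init, lck_mtx_alloc_init, lck_mtx_lock/unlock) are assumed from <kern/locks.h>, and this is illustrative rather than a drop-in kernel extension:

#include <kern/locks.h>

/* Sketch of typical lck_mtx usage in a kernel subsystem.  When lck_mtx_lock()
 * finds the mutex held, the blocking path goes through lck_mtx_lock_wait()
 * above, which may promote the holder's scheduling priority. */
static lck_grp_t	*my_lck_grp;
static lck_mtx_t	*my_mtx;

static void
my_locks_init(void)
{
	my_lck_grp = lck_grp_alloc_init("com.example.mylocks", LCK_GRP_ATTR_NULL);
	my_mtx = lck_mtx_alloc_init(my_lck_grp, LCK_ATTR_NULL);
}

static void
my_critical_section(void)
{
	lck_mtx_lock(my_mtx);		/* may block in lck_mtx_lock_wait() */
	/* ... manipulate protected state ... */
	lck_mtx_unlock(my_mtx);		/* wakes a waiter; see lck_mtx_lock_acquire() below */
}

static void
my_locks_fini(void)
{
	lck_mtx_free(my_mtx, my_lck_grp);
	lck_grp_free(my_lck_grp);
}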
Code example #25
/*
 *	Routine:	semaphore_wait_internal
 *
 *		Decrements the semaphore count by one.  If the count is
 *		negative after the decrement, the calling thread blocks
 *		(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the signal semaphore.
 */
kern_return_t
semaphore_wait_internal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	mach_timespec_t		*wait_timep,
	void 			(*caller_cont)(kern_return_t))
{
	boolean_t			nonblocking;
	int					wait_result;
	spl_t				spl_level;
	kern_return_t		kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	/*
	 * Decide if we really have to wait.
	 */
	nonblocking = (wait_timep != (mach_timespec_t *)0) ?
		      (wait_timep->tv_sec == 0 && wait_timep->tv_nsec == 0) :
		      FALSE;

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (nonblocking) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		uint64_t	abstime;
		thread_t	self = current_thread();

		wait_semaphore->count = -1;  /* we don't keep an actual count */
		thread_lock(self);
		
		/*
		 * If it is a timed wait, calculate the wake up deadline.
		 */
		if (wait_timep != (mach_timespec_t *)0) {
			nanoseconds_to_absolutetime((uint64_t)wait_timep->tv_sec *
											NSEC_PER_SEC + wait_timep->tv_nsec, &abstime);
			clock_absolutetime_interval_to_deadline(abstime, &abstime);
		}
		else
			abstime = 0;

		(void)wait_queue_assert_wait64_locked(
					&wait_semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_ABORTSAFE, abstime,
					self);
		thread_unlock(self);
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
						      THREAD_NULL,
						      SEMAPHORE_SIGNAL_PREPOST);

		if (signal_kr == KERN_NOT_WAITING)
			signal_kr = KERN_SUCCESS;
		else if (signal_kr == KERN_TERMINATED) {
			/* 
			 * Uh oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			thread_t self = current_thread();

			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED)
				kr = KERN_TERMINATED;
		}
	}
	
	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING)
		return kr;

	/*
	 * Now, we can block.  If the caller supplied a continuation
 * pointer of its own for after the block, block with the
 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		thread_t self = current_thread();

		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;
		wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
	}
	else {
		wait_result = thread_block(THREAD_CONTINUE_NULL);
	}

	return (semaphore_convert_wait_result(wait_result));
}
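
semaphore_wait_internal() is reached from user space through semaphore_wait(), semaphore_timedwait(), and the *_signal variants (which supply the signal_semaphore argument). A minimal user-space sketch using the public Mach semaphore calls; the zero-timeout case exercises the nonblocking branch above:

#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <stdio.h>

int main(void)
{
	semaphore_t	sem;
	kern_return_t	kr;

	/* Count starts at 1, so the first wait succeeds without blocking. */
	kr = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 1);
	if (kr != KERN_SUCCESS) return 1;

	kr = semaphore_wait(sem);		/* count 1 -> 0, returns immediately */
	printf("first wait: 0x%x\n", kr);

	/* A zero timeout is the nonblocking case in semaphore_wait_internal();
	 * with the count exhausted it returns KERN_OPERATION_TIMED_OUT. */
	mach_timespec_t zero = { 0, 0 };
	kr = semaphore_timedwait(sem, zero);
	printf("timed wait: 0x%x (KERN_OPERATION_TIMED_OUT is 0x%x)\n",
	       kr, KERN_OPERATION_TIMED_OUT);

	semaphore_signal(sem);			/* would wake a blocked waiter */
	semaphore_destroy(mach_task_self(), sem);
	return 0;
}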
Code example #26
File: locks.c  Project: JackieXie168/xnu
/*
 * Routine: 	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t		*lck)
{
	thread_t		thread = current_thread();
	lck_mtx_t		*mutex;
	integer_t		priority;
	spl_t			s;
	__kdebug_only uintptr_t	trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters)
		priority = mutex->lck_mtx_pri;
	else {
		mutex->lck_mtx_pri = 0;
		priority = 0;
	}

	if (priority || thread->was_promoted_on_wakeup) {
		s = splsched();
		thread_lock(thread);

		if (priority) {
			thread->promotions++;
			thread->sched_flags |= TH_SFLAG_PROMOTED;
			if (thread->sched_pri < priority) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
							thread->sched_pri, priority, 0, trace_lck, 0);
				/* Do not promote past promotion ceiling */
				assert(priority <= MAXPRI_PROMOTE);
				set_sched_pri(thread, priority);
			}
		}
		if (thread->was_promoted_on_wakeup) {
			thread->was_promoted_on_wakeup = 0;
			if (thread->promotions == 0)
				lck_mtx_clear_promoted(thread, trace_lck);
		}

		thread_unlock(thread);
		splx(s);
	}

#if CONFIG_DTRACE
	if (lockstat_probemap[LS_LCK_MTX_LOCK_ACQUIRE] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_ACQUIRE]) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, lck, 0);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, lck, 0);
		}
	}
#endif	
	return (mutex->lck_mtx_waiters);
}
Code example #27
/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();
	task_t			task;
	spl_t			s;
	int threadcnt;

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);
	
	thread_mtx_unlock(thread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, wait for concurrent expirations
	 *	on other processors.
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_sched_call(thread, NULL);

	thread_unlock(thread);
	splx(s);

	thread_policy_reset(thread);

	task = thread->task;
	uthread_cleanup(task, thread->uthread, task->bsd_info);
	threadcnt = hw_atomic_sub(&task->active_thread_count, 1);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	uthread_cred_free(thread->uthread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		if (thread->reserved_stack != thread->kernel_stack)
			stack_free_stack(thread->reserved_stack);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}
Code example #28
File: thread_call.c  Project: Prajna/xnu
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t		group)
{
	thread_t		self = current_thread();

	(void) splsched();
	thread_call_lock_spin();

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;
	
		call->queue = NULL;

		_internal_call_release(call);

		thread_call_unlock();
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				func, param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
				  pl, func, param0, param1);
		}
		
		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0);
	
		thread_call_unlock();
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	thread_call_unlock();
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}
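
thread_call_thread() is the worker loop that drains the pending queue; clients never invoke it directly. A sketch of how work typically reaches that queue, assuming the thread_call KPI (thread_call_allocate, thread_call_enter1) from <kern/thread_call.h>; the names my_callout and my_schedule_work are illustrative:

#include <kern/thread_call.h>

/* Sketch: schedule a deferred callout.  thread_call_enter1() queues the call;
 * a worker running thread_call_thread() above dequeues it and invokes
 * my_callout(param0, param1) at spllo with no locks held. */
static thread_call_t	my_call;

static void
my_callout(thread_call_param_t param0, thread_call_param_t param1)
{
	(void)param0;
	(void)param1;
	/* do the deferred work; must return with preemption enabled */
}

static void
my_schedule_work(void *context)
{
	if (my_call == NULL)
		my_call = thread_call_allocate(my_callout, NULL /* param0 */);

	/* param1 is supplied per invocation. */
	(void) thread_call_enter1(my_call, context);
}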
Code example #29
File: ipc_sched.c  Project: vedangp/gnumach_lotto
	thread_t old,
	continuation_t continuation,
	thread_t new)
{
	spl_t	s;

	assert(current_thread() == old);

	/*
	 *	XXX Dubious things here:
	 *	I don't check the idle_count on the processor set.
	 *	No scheduling priority or policy checks.
	 *	I assume the new thread is interruptible.
	 */

	s = splsched();
	thread_lock(new);

	/*
	 *	The first thing we must do is check the state
	 *	of the threads, to ensure we can hand off.
	 *	This check uses current_processor()->processor_set,
	 *	which we can read without locking.
	 */

	if ((old->stack_privilege == current_stack()) ||
	    (new->state != (TH_WAIT|TH_SWAPPED)) ||
	     !check_processor_set(new) ||
	     !check_bound_processor(new)) {
		thread_unlock(new);
		(void) splx(s);
Code example #30
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken-up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);		

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

 	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);
			
			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
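
ulock_release_internal() is driven from user space by the historical Mach lock-set interface. A minimal sketch using lock_set_create(), lock_acquire(), and lock_release(), with the prototypes assumed to come from the Mach headers; recent kernels may reject lock-set calls entirely, so treat this as illustrative only:

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	lock_set_t	lset;
	kern_return_t	kr;

	kr = lock_set_create(mach_task_self(), &lset, 1, SYNC_POLICY_FIFO);
	if (kr != KERN_SUCCESS) {
		printf("lock_set_create: 0x%x (lock sets unsupported?)\n", kr);
		return 1;
	}

	kr = lock_acquire(lset, 0);	/* become the holder of ulock 0 */
	printf("lock_acquire: 0x%x\n", kr);

	kr = lock_release(lset, 0);	/* ulock_release_internal() runs here */
	printf("lock_release: 0x%x\n", kr);

	lock_set_destroy(mach_task_self(), lset);
	return 0;
}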