Code example #1
File: thread_swap.c  Project: LastAvenger/gnumach
/*
 *	swapin_thread: [exported]
 *
 *	This procedure executes as a kernel thread.  Threads that need to
 *	be swapped in are swapped in by this thread.
 */
void __attribute__((noreturn)) swapin_thread_continue(void)
{
	for (;;) {
		thread_t thread;
		spl_t s;

		s = splsched();
		swapper_lock();

		while ((thread = (thread_t) dequeue_head(&swapin_queue))
							!= THREAD_NULL) {
			kern_return_t kr;
			swapper_unlock();
			(void) splx(s);

			kr = thread_doswapin(thread);		/* may block */

			s = splsched();
			swapper_lock();

			if (kr != KERN_SUCCESS) {
				enqueue_head(&swapin_queue,
					     (queue_entry_t) thread);
				break;
			}
		}

		assert_wait((event_t) &swapin_queue, FALSE);
		swapper_unlock();
		(void) splx(s);
		counter(c_swapin_thread_block++);
		thread_block(swapin_thread_continue);
	}
}
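Every example on this page pushes onto a Mach-style circular doubly-linked queue. As a reference, here is a minimal user-space sketch of enqueue_head and dequeue_head; the layout follows the classic queue_entry convention (next/prev pointers, with the head itself on the circle), but it is an illustration, not the kernel's actual implementation.

#include <stddef.h>
#include <stdio.h>

typedef struct queue_entry {
    struct queue_entry *next;
    struct queue_entry *prev;
} queue_entry_t, queue_head_t;

static void queue_init(queue_head_t *q)
{
    q->next = q->prev = q;
}

/* Insert elt at the front of the queue. */
static void enqueue_head(queue_head_t *q, queue_entry_t *elt)
{
    elt->next = q->next;
    elt->prev = q;
    q->next->prev = elt;
    q->next = elt;
}

/* Remove and return the front element, or NULL if the queue is empty. */
static queue_entry_t *dequeue_head(queue_head_t *q)
{
    queue_entry_t *elt;

    if (q->next == q)
        return NULL;
    elt = q->next;
    q->next = elt->next;
    elt->next->prev = q;
    return elt;
}

int main(void)
{
    queue_head_t  q;
    queue_entry_t a, b;

    queue_init(&q);
    enqueue_head(&q, &a);
    enqueue_head(&q, &b);   /* b now precedes a: LIFO order */
    printf("%s\n", dequeue_head(&q) == &b ? "b first" : "a first");
    return 0;
}

Note that enqueue_head is LIFO with respect to dequeue_head, which is why the gnumach code above can push a thread whose swap-in failed back onto the front of swapin_queue and retry it first.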
Code example #2
static uintptr_t 
iopa_alloc(vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
	0xFFFFFFFFFFFFFFFF,
	0xAAAAAAAAAAAAAAAA,
	0x8888888888888888,
	0x8080808080808080,
	0x8000800080008000,
	0x8000000080000000,
	0x8000000000000000,
    };
    io_pagealloc_t * pa;
    uintptr_t        addr = 0;
    uint32_t         count;
    uint64_t         align;

    if (!bytes) bytes = 1;
    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];

    IOSimpleLockLock(gIOPageAllocLock);
    pa = (typeof(pa)) queue_first(&gIOPageAllocList);
    while (!queue_end(&gIOPageAllocList, &pa->link))
    {
	addr = iopa_allocinpage(pa, count, align);
	if (addr)
	{
	    gIOPageAllocBytes += bytes;
	    break;
	}
	pa = (typeof(pa)) queue_next(&pa->link);
    }
    IOSimpleLockUnlock(gIOPageAllocLock);
    if (!addr)
    {
        pa = iopa_allocpage();
	if (pa)
	{
	    addr = iopa_allocinpage(pa, count, align);
	    IOSimpleLockLock(gIOPageAllocLock);
	    if (pa->avail) enqueue_head(&gIOPageAllocList, &pa->link);
	    gIOPageAllocCount++;
	    if (addr) gIOPageAllocBytes += bytes;
	    IOSimpleLockUnlock(gIOPageAllocLock);
	}
    }

    if (addr)
    {
        assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    	IOStatisticsAlloc(kIOStatisticsMallocAligned, bytes);
#if IOALLOCDEBUG
	debug_iomalloc_size += bytes;
#endif
    }

    return (addr);
}
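The chunk math above rounds the request up to kIOPageAllocChunkBytes-sized chunks and indexes align_masks by the base-2 logarithm, rounded up, of the alignment expressed in chunks; each mask marks which chunk offsets inside a page satisfy that alignment. A sketch of a log2up with the semantics this usage implies (an assumption based on the call site; XNU's own helper may be implemented differently):

#include <stdint.h>

/* Smallest k with (1u << k) >= n, i.e. ceil(log2(n)); log2up(1) == 0.
 * Assumed semantics of the log2up used by iopa_alloc above. */
static uint32_t log2up(uint32_t n)
{
    uint32_t k = 0;
    while ((1u << k) < n)
        k++;
    return k;
}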
Code example #3
File: thread_call.c  Project: CptFrazz/xnu
/*
 *	_internal_call_release:
 *
 *	Release an internal callout entry which
 *	is no longer pending (or delayed).
 *
 * 	Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
    thread_call_t		call)
{
    if (    call >= internal_call_storage						&&
	   	    call < &internal_call_storage[INTERNAL_CALL_COUNT]		)
		enqueue_head(&thread_call_internal_queue, qe(call));
}
Code example #4
File: thread_call.c  Project: Prajna/xnu
/*
 *	_internal_call_release:
 *
 *	Release an internal callout entry which
 *	is no longer pending (or delayed).
 *
 * 	Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
    thread_call_t		call)
{
    if (    call >= internal_call_storage						&&
	   	    call < &internal_call_storage[internal_call_count]		)
		enqueue_head(&thread_call_internal_queue, qe(call));
}
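Examples #3 and #4 differ only in whether the pool size is a compile-time constant (INTERNAL_CALL_COUNT) or a variable (internal_call_count); both use the same pattern: a pointer-range test decides whether the entry came from the static pool, and if so enqueue_head pushes it onto a LIFO free list. A stripped-down sketch of that pattern, with hypothetical names (item_t, storage, free_list are illustrative, not XNU's):

#include <stddef.h>

#define ITEM_COUNT 16

typedef struct item { struct item *next; } item_t;

static item_t  storage[ITEM_COUNT];  /* static pool */
static item_t *free_list;            /* LIFO free list */

static void item_release(item_t *it)
{
    /* Recycle only entries that live inside the static pool;
     * anything else was allocated elsewhere and is not ours. */
    if (it >= storage && it < &storage[ITEM_COUNT]) {
        it->next = free_list;        /* push at head, like enqueue_head */
        free_list = it;
    }
}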
Code example #5
/*
 *	Routine:	semaphore_create
 *
 *	Creates a semaphore.
 *	The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
	task_t			task,
	semaphore_t		*new_semaphore,
	int				policy,
	int				value)
{
	semaphore_t		 s = SEMAPHORE_NULL;

	if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) {
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_INVALID_ARGUMENT;
	}

	s = (semaphore_t) zalloc (semaphore_zone);

	if (s == SEMAPHORE_NULL) {
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_RESOURCE_SHORTAGE; 
	}

	wait_queue_init(&s->wait_queue, policy); /* also inits lock */
	s->count = value;
	s->ref_count = 1;

	/*
	 *  Create and initialize the semaphore port
	 */
	s->port	= ipc_port_alloc_kernel();
	if (s->port == IP_NULL) {	
		/* This will deallocate the semaphore */	
		semaphore_dereference(s);
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_RESOURCE_SHORTAGE; 
	}

	ipc_kobject_set (s->port, (ipc_kobject_t) s, IKOT_SEMAPHORE);

	/*
	 *  Associate the new semaphore with the task by adding
	 *  the new semaphore to the task's semaphore list.
	 *
	 *  Associate the task with the new semaphore by having the
	 *  semaphore's task pointer point to the owning task's structure.
	 */
	task_lock(task);
	enqueue_head(&task->semaphore_list, (queue_entry_t) s);
	task->semaphores_owned++;
	s->owner = task;
	s->active = TRUE;
	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
Code example #6
File: sync_sema.c  Project: JackieXie168/xnu
/*
 *	Routine:	semaphore_create
 *
 *	Creates a semaphore.
 *	The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
	task_t			task,
	semaphore_t		*new_semaphore,
	int				policy,
	int				value)
{
	semaphore_t		 s = SEMAPHORE_NULL;
	kern_return_t		kret;

	*new_semaphore = SEMAPHORE_NULL;
	if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	s = (semaphore_t) zalloc (semaphore_zone);

	if (s == SEMAPHORE_NULL)
		return KERN_RESOURCE_SHORTAGE; 

	kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */
	if (kret != KERN_SUCCESS) {
		zfree(semaphore_zone, s);
		return kret;
	}

	/*
	 * Initialize the semaphore values.
	 */
	s->port	= IP_NULL;
	s->ref_count = 1;
	s->count = value;
	s->active = TRUE;
	s->owner = task;

	/*
	 *  Associate the new semaphore with the task by adding
	 *  the new semaphore to the task's semaphore list.
	 */
	task_lock(task);
	enqueue_head(&task->semaphore_list, (queue_entry_t) s);
	task->semaphores_owned++;
	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
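For context, semaphore_create is also the user-visible Mach call on macOS; a minimal caller looks like this, with the policy and value arguments mapping directly to the parameters validated above:

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
    semaphore_t   sem;
    kern_return_t kr;

    /* FIFO wakeup policy, initial count 0. */
    kr = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (kr != KERN_SUCCESS) {
        fprintf(stderr, "semaphore_create: %d\n", kr);
        return 1;
    }
    semaphore_signal(sem);   /* count 0 -> 1 */
    semaphore_wait(sem);     /* count 1 -> 0, does not block here */
    semaphore_destroy(mach_task_self(), sem);
    return 0;
}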
Code example #7
File: eventcount.c  Project: 0xffea/gnumach
/*
 * The scheduler is too messy for my old little brain
 */
void
simpler_thread_setrun(
	thread_t	th,
	boolean_t	may_preempt)
{
	register struct run_queue	*rq;
	register int			whichq;

	/*
	 *	XXX should replace queue with a boolean in this case.
	 */
	if (default_pset.idle_count > 0) {
		processor_t	processor;

		processor = (processor_t) queue_first(&default_pset.idle_queue);
		queue_remove(&default_pset.idle_queue, processor,
		processor_t, processor_queue);
		default_pset.idle_count--;
		processor->next_thread = th;
		processor->state = PROCESSOR_DISPATCHING;
		return;
	}
	rq = &(master_processor->runq);
	ast_on(cpu_number(), AST_BLOCK);

	whichq = (th)->sched_pri;
	simple_lock(&(rq)->lock);	/* lock the run queue */
	enqueue_head(&(rq)->runq[whichq], (queue_entry_t) (th));

	if (whichq < (rq)->low || (rq)->count == 0)
		 (rq)->low = whichq;	/* minimize */
	(rq)->count++;
#ifdef MIGRATING_THREADS
	(th)->shuttle.runq = (rq);
#else
	(th)->runq = (rq);
#endif
	simple_unlock(&(rq)->lock);

	/*
	 *	Turn off first_quantum to allow context switch.
	 */
	current_processor()->first_quantum = FALSE;
}
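The run-queue insert in example #7 keeps a 'low' hint so the dispatcher can start scanning at the best occupied priority. A self-contained sketch of that bookkeeping (the names and the singly-linked representation are simplified; the real run queue uses the circular queues shown earlier):

#define NRQS 32                  /* number of priority levels, illustrative */

struct rq_entry { struct rq_entry *next; };

struct run_queue {
    struct rq_entry *runq[NRQS]; /* one list per priority */
    int low;                     /* lowest occupied index, valid when count > 0 */
    int count;
};

/* Insert at the head of the list for 'pri', maintaining the hint
 * exactly as example #7 does: minimize 'low', bump 'count'. */
static void runq_insert_head(struct run_queue *rq, struct rq_entry *e, int pri)
{
    e->next = rq->runq[pri];
    rq->runq[pri] = e;
    if (pri < rq->low || rq->count == 0)
        rq->low = pri;           /* minimize */
    rq->count++;
}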
Code example #8
File: wait_queue.c  Project: OpenDarwin-CVS/SEDarwin
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}
	  
	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->vm_privilege)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
		thread->wait_event = event;
		thread->wait_queue = wq;
	}
	return(wait_result);
}
Code example #9
File: wait_queue.c  Project: Prajna/xnu
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;
	boolean_t realtime;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (event == NO_EVENT64 && wqs_is_preposted(wqs))
			return(THREAD_AWAKENED);
	}

	/*
	 * Realtime threads get priority for wait queue placements.
	 * This allows wait_queue_wakeup_one to prefer a waiting
	 * realtime thread, similar in principle to performing
	 * a wait_queue_wakeup_all and allowing scheduler prioritization
	 * to run the realtime thread, but without causing the
	 * lock contention of that scenario.
	 */
	realtime = (thread->sched_pri >= BASEPRI_REALTIME);

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (!wq->wq_fifo
			|| (thread->options & TH_OPT_VMPRIV)
			|| realtime)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			uint32_t flags;

			flags = realtime ? TIMER_CALL_CRITICAL : 0;

			if (!timer_call_enter(&thread->wait_timer, deadline, flags))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}
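Examples #8 and #9 show the same routine before and after XNU grew deadlines and a realtime preference; in both, the queue-placement decision boils down to a head insert for privileged or realtime waiters and a tail insert for ordinary FIFO waiters. A compact sketch of just that decision (struct waiter, struct waitq, and the singly-linked representation are illustrative, not XNU's types):

#include <stdbool.h>
#include <stddef.h>

#define BASEPRI_REALTIME 96     /* illustrative threshold */

struct waiter { struct waiter *next; };

struct waitq {
    struct waiter *head, *tail;
    bool fifo;                  /* honor FIFO ordering? (cf. wq_fifo) */
};

/* Head insert for privileged/realtime waiters, so a wakeup-one finds
 * them first; tail insert otherwise, as in example #9. */
static void waitq_place(struct waitq *q, struct waiter *w,
                        bool vm_privileged, int sched_pri)
{
    bool realtime = (sched_pri >= BASEPRI_REALTIME);

    if (!q->fifo || vm_privileged || realtime) {
        w->next = q->head;      /* front of the queue */
        q->head = w;
        if (q->tail == NULL)
            q->tail = w;
    } else {
        w->next = NULL;         /* back of the queue */
        if (q->tail != NULL)
            q->tail->next = w;
        else
            q->head = w;
        q->tail = w;
    }
}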