Example #1
File: thread_call.c Project: CptFrazz/xnu
void
thread_call_delayed_timer(
		timer_call_param_t		p0,
		__unused timer_call_param_t	p1
)
{
	thread_call_t			call;
	thread_call_group_t		group = p0;
	uint64_t				timestamp;

	thread_call_lock_spin();

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->tc_call.deadline <= timestamp) {
			_pending_call_enqueue(call, group);
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	thread_call_unlock();
}
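Note: every Mach/XNU example on this page walks a queue the same way: start at queue_first(), loop until queue_end() reports that the cursor has wrapped back around to the queue head, and advance with queue_next(). The snippet below is a minimal, self-contained user-space model of that idiom; it is only a sketch, with simplified stand-ins for the real definitions in osfmk/kern/queue.h, and struct item is hypothetical.

#include <stdio.h>

/* Simplified stand-ins for the Mach queue primitives used in these examples.
 * The queue is a circular doubly linked list whose head is a sentinel entry,
 * so "end" simply means "back at the head". */
struct queue_entry {
	struct queue_entry	*next;
	struct queue_entry	*prev;
};
typedef struct queue_entry	queue_head_t;
typedef struct queue_entry	queue_chain_t;
typedef struct queue_entry	*queue_t;
typedef struct queue_entry	*queue_entry_t;

#define queue_init(q)		((q)->next = (q)->prev = (q))
#define queue_first(q)		((q)->next)
#define queue_next(qc)		((qc)->next)
#define queue_end(q, qe)	((q) == (qe))
#define queue_empty(q)		queue_end((q), queue_first(q))

/* A hypothetical element type; the chain is the first member so the
 * cast from queue_entry_t back to the element is valid. */
struct item {
	queue_chain_t	link;
	int		value;
};

static void
enqueue_tail(queue_t que, queue_entry_t elt)
{
	elt->prev = que->prev;
	elt->next = que;
	que->prev->next = elt;
	que->prev = elt;
}

int
main(void)
{
	queue_head_t	head;
	struct item	a = { .value = 1 }, b = { .value = 2 };

	queue_init(&head);
	enqueue_tail(&head, &a.link);
	enqueue_tail(&head, &b.link);

	/* The same first / end / next walk the kernel examples perform. */
	for (queue_entry_t qe = queue_first(&head);
	     !queue_end(&head, qe);
	     qe = queue_next(qe)) {
		printf("%d\n", ((struct item *)qe)->value);
	}
	return 0;
}

Because the head is part of the same circular list, queue_end(q, qe) is nothing more than qe == q, and an empty queue is one whose first entry is the head itself (queue_empty()).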
Example #2
void
timer_call_shutdown(
	processor_t			processor)
{
	timer_call_t		call;
	queue_t				queue, myqueue;

	assert(processor != current_processor());

	queue = &PROCESSOR_DATA(processor, timer_call_queue);
	myqueue = &PROCESSOR_DATA(current_processor(), timer_call_queue);

	simple_lock(&timer_call_lock);

	call = TC(queue_first(queue));

	while (!queue_end(queue, qe(call))) {
		_delayed_call_dequeue(call);

		_delayed_call_enqueue(myqueue, call);

		call = TC(queue_first(queue));
	}

	call = TC(queue_first(myqueue));

	if (!queue_end(myqueue, qe(call)))
		_set_delayed_call_timer(call);

	simple_unlock(&timer_call_lock);
}
Example #3
File: thread_call.c Project: CptFrazz/xnu
/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
    thread_call_func_t		func,
    thread_call_param_t		param,
    boolean_t				unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (call->tc_call.func == func && call->tc_call.param0 == param) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->tc_call.func	= func;
		call->tc_call.param0	= param;
		call->tc_call.param1	= NULL;

		_pending_call_enqueue(call, group);
	}

	thread_call_unlock();
	splx(s);
}
Example #4
File: wait_queue.c Project: Prajna/xnu
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
		    (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}
Example #5
static void
run_queue_check(
                run_queue_t     rq,
                thread_t        thread)
{
	queue_t         q;
	queue_entry_t   qe;

	if (rq != thread->runq)
		panic("run_queue_check: thread runq");

	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
		panic("run_queue_check: thread sched_pri");

	q = &rq->queues[thread->sched_pri];
	qe = queue_first(q);
	while (!queue_end(q, qe)) {
		if (qe == (queue_entry_t)thread)
			return;

		qe = queue_next(qe);
	}

	panic("run_queue_check: end");
}
Example #6
File: thread_call.c Project: CptFrazz/xnu
/*
 *	_remove_from_delayed_queue:
 *
 *	Remove the first (or all) matching
 *	entries	from the delayed queue.
 *
 *	Returns	TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
    thread_call_func_t		func,
    thread_call_param_t		param0,
    boolean_t				remove_all)
{
	boolean_t			call_removed = FALSE;
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->tc_call.func == func	&&
				call->tc_call.param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else	
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
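Note: examples #6, #7, #8, #13, #14, #16 and #18 all capture queue_next() into a temporary before unlinking the current element, because once an entry has been dequeued its links no longer lead back into the queue being walked (example #1 avoids the issue by re-reading queue_first() after every move). A sketch of that remove-while-iterating pattern, reusing the toy queue stand-ins shown after Example #1 (remqueue_toy() and the value match are hypothetical, not the kernel's _call_dequeue()):

/* Unlink an entry from whatever queue it is on (simplified remqueue()). */
static void
remqueue_toy(queue_entry_t elt)
{
	elt->prev->next = elt->next;
	elt->next->prev = elt->prev;
	elt->next = elt->prev = elt;
}

/* Remove every item with a matching value, mirroring the shape of
 * _remove_from_delayed_queue(); returns how many entries were removed. */
static int
remove_all_matching(queue_t que, int value)
{
	int		removed = 0;
	queue_entry_t	qe = queue_first(que);

	while (!queue_end(que, qe)) {
		struct item	*it = (struct item *)qe;

		if (it->value == value) {
			queue_entry_t	next = queue_next(qe);	/* capture before unlinking */

			remqueue_toy(qe);
			removed++;
			qe = next;
		} else {
			qe = queue_next(qe);
		}
	}
	return (removed);
}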
Example #7
File: thread_call.c Project: Prajna/xnu
/*
 *	_remove_from_pending_queue:
 *
 *	Remove the first (or all) matching
 *	entries	from the pending queue.
 *
 *	Returns	TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
    thread_call_func_t		func,
    thread_call_param_t		param0,
    boolean_t				remove_all)
{
	boolean_t			call_removed = FALSE;
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
    
    call = TC(queue_first(&group->pending_queue));
    
    while (!queue_end(&group->pending_queue, qe(call))) {
    	if (	call->func == func			&&
				call->param0 == param0			) {
			thread_call_t	next = TC(queue_next(qe(call)));
		
			_call_dequeue(call, group);

			_internal_call_release(call);
	    
			call_removed = TRUE;
			if (!remove_all)
				break;
		
			call = next;
		}
		else	
			call = TC(queue_next(qe(call)));
    }
    
    return (call_removed);
}
Example #8
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {
			
			/*
		 * Otherwise, it's a thread.  If it is waiting on
		 * the event we are posting to this queue, pull
		 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
Example #9
static uintptr_t 
iopa_alloc(vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
	0xFFFFFFFFFFFFFFFF,
	0xAAAAAAAAAAAAAAAA,
	0x8888888888888888,
	0x8080808080808080,
	0x8000800080008000,
	0x8000000080000000,
	0x8000000000000000,
    };
    io_pagealloc_t * pa;
    uintptr_t        addr = 0;
    uint32_t         count;
    uint64_t         align;

    if (!bytes) bytes = 1;
    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];

    IOSimpleLockLock(gIOPageAllocLock);
    pa = (typeof(pa)) queue_first(&gIOPageAllocList);
    while (!queue_end(&gIOPageAllocList, &pa->link))
    {
	addr = iopa_allocinpage(pa, count, align);
	if (addr)
	{
	    gIOPageAllocBytes += bytes;
	    break;
	}
	pa = (typeof(pa)) queue_next(&pa->link);
    }
    IOSimpleLockUnlock(gIOPageAllocLock);
    if (!addr)
    {
        pa = iopa_allocpage();
	if (pa)
	{
	    addr = iopa_allocinpage(pa, count, align);
	    IOSimpleLockLock(gIOPageAllocLock);
	    if (pa->avail) enqueue_head(&gIOPageAllocList, &pa->link);
	    gIOPageAllocCount++;
	    if (addr) gIOPageAllocBytes += bytes;
	    IOSimpleLockUnlock(gIOPageAllocLock);
	}
    }

    if (addr)
    {
        assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    	IOStatisticsAlloc(kIOStatisticsMallocAligned, bytes);
#if IOALLOCDEBUG
	debug_iomalloc_size += bytes;
#endif
    }

    return (addr);
}
Example #10
File: http2.cpp Project: choury/sproxy
/* ping frames are always inserted at the very front */
void Http2Base::PushFrame(Http2_header *header){
    uint32_t id = HTTP2_ID(header->id);
    LOGD(DHTTP2, "push a frame [%d]:%d, size:%d, flags: %d\n", id, header->type, get24(header->length), header->flags);
    std::list<write_block>::iterator i;
    if((http2_flag & HTTP2_FLAG_INITED) == 0){
        i = queue_end();
        goto ret;
    }
    switch(header->type){
    case PING_TYPE:
        for(i = queue_head(); i!= queue_end() ; i++){
            if(i->offset){
                continue;
            }
            const Http2_header* check = (const Http2_header*)i->buff;
            if(check->type != PING_TYPE){
                break;
            }
        }
        break;
    case HEADERS_TYPE:{
        auto j = queue_end();
        do{
            i = j--;
            if(j == queue_head() || j->offset){
                break;
            }

            const Http2_header* check = (const Http2_header*)j->buff;
            if(check->type != DATA_TYPE)
                break;
            uint32_t jid = HTTP2_ID(check->id);
            if(jid == 0 || jid == id)
                break;
        }while(true);
        break;
    }
    default:
        i = queue_end();
        break;
    }
ret:
    size_t length = sizeof(Http2_header) + get24(header->length);
    assert(i == queue_end() || i == queue_head() || i->offset == 0);
    queue_insert(i, write_block{header, length, 0});
}
Example #11
File: wait_queue.c Project: Prajna/xnu
/*
 *	Routine:	wait_queue_link_internal
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that was provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 *		The wait_queue_link structure must already be properly typed
 */
static 
kern_return_t
wait_queue_link_internal(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
  		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
		     wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	assert(wql->wql_type == WAIT_QUEUE_LINK ||
	       wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

	wql->wql_queue = wq;
	wql_clear_prepost(wql);
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}	
Example #12
/*
 * XXX: wait for BSD to  fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t	thr_act;

	thr_act = (thread_act_t)queue_first(&task->threads);
	if (queue_end(&task->threads, (queue_entry_t)thr_act))
		thr_act = THR_ACT_NULL;
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}
Example #13
File: wait_queue.c Project: Prajna/xnu
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		boolean_t alloced;

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
		if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			if (alloced)
				enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}

	return(KERN_SUCCESS);
}	
Example #14
/*
 *	sched_traditional_processor_queue_shutdown:
 *
 *	Shutdown a processor run queue by
 *	re-dispatching non-bound threads.
 *
 *	Associated pset must be locked, and is
 *	returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
	processor_set_t         pset    = processor->processor_set;
	run_queue_t             rq      = runq_for_processor(processor);
	queue_t                 queue   = rq->queues + rq->highq;
	int                     pri     = rq->highq;
	int                     count   = rq->count;
	thread_t                next, thread;
	queue_head_t            tqueue;

	queue_init(&tqueue);

	while (count > 0) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				enqueue_tail(&tqueue, (queue_entry_t)thread);
			}
			count--;

			thread = next;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
Example #15
File: wait_queue.c Project: Prajna/xnu
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are freed for those
 *		links which were dynamically allocated.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			boolean_t alloced;

			alloced = (wql->wql_type == WAIT_QUEUE_LINK);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			if (alloced)
				enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty (links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}
	return(KERN_SUCCESS);
}	
Example #16
File: wait_queue.c Project: Prajna/xnu
/*
 *	Routine:	wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue((queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);
	
	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
								event,
								thread);
			}
			wqs_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
Example #17
File: thread_call.c Project: Prajna/xnu
/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
    thread_call_func_t		func,
    thread_call_param_t		param,
    boolean_t				unique_call)
{
    thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
    spl_t					s;
    
    s = splsched();
    thread_call_lock_spin();
    
    call = TC(queue_first(&group->pending_queue));
    
	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
    	if (	call->func == func			&&
				call->param0 == param			) {
			break;
		}
	
		call = TC(queue_next(qe(call)));
    }
    
    if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func			= func;
		call->param0		= param;
		call->param1		= NULL;
	
		_pending_call_enqueue(call, group);
		
		if (group->active_count == 0)
			thread_call_wake(group);
    }

    thread_call_unlock();
    splx(s);
}
Example #18
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}	
Example #19
static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
{
    struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
    struct usb_host_interface *intf_desc = intf->cur_altsetting;
    struct usb_endpoint_descriptor *ep_desc;
    struct data_queue *queue = rt2x00dev->tx;
    struct usb_endpoint_descriptor *tx_ep_desc = NULL;
    unsigned int i;

    /*
     * Walk through all available endpoints to search for "bulk in"
     * and "bulk out" endpoints. When we find such endpoints collect
     * the information we need from the descriptor and assign it
     * to the queue.
     */
    for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
        ep_desc = &intf_desc->endpoint[i].desc;

        if (usb_endpoint_is_bulk_in(ep_desc)) {
            rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
        } else if (usb_endpoint_is_bulk_out(ep_desc) &&
                   (queue != queue_end(rt2x00dev))) {
            rt2x00usb_assign_endpoint(queue, ep_desc);
            queue = queue_next(queue);

            tx_ep_desc = ep_desc;
        }
    }

    /*
     * At least 1 endpoint for RX and 1 endpoint for TX must be available.
     */
    if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
        rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
        return -EPIPE;
    }

    /*
     * It might be possible not all queues have a dedicated endpoint.
     * Loop through all TX queues and copy the endpoint information
     * which we have gathered from already assigned endpoints.
     */
    txall_queue_for_each(rt2x00dev, queue) {
        if (!queue->usb_endpoint)
            rt2x00usb_assign_endpoint(queue, tx_ep_desc);
    }

    return 0;
}
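Note: unlike the Mach examples, queue_end() here is the rt2x00 driver's own helper for a contiguous array of data_queue structures (a pointer one past the last TX queue), so the comparison above is a bounded array walk rather than a circular-list test. A minimal sketch of that end-pointer idiom; the struct and names below are hypothetical, not the rt2x00 definitions:

#include <stdio.h>

struct data_queue {
	int	qid;
};

#define QUEUE_COUNT	4

int
main(void)
{
	struct data_queue	queues[QUEUE_COUNT] = { {0}, {1}, {2}, {3} };
	struct data_queue	*queue = &queues[0];
	struct data_queue	*end   = &queues[QUEUE_COUNT];	/* one past the last */

	while (queue != end) {		/* plays the role of queue_end(rt2x00dev) */
		printf("assign endpoint to queue %d\n", queue->qid);
		queue++;		/* plays the role of queue_next(queue) */
	}
	return 0;
}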
Example #20
kern_return_t check_actforsig(task_t task, thread_act_t thact, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;
	int		found = 0;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {
		if (inc != thact) {
			ninc = (thread_act_t)queue_next(&inc->task_threads);
			continue;
		}
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			found = 1;
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		/* ninc = (thread_act_t)queue_next(&inc->thr_acts); */
		break;
	}
out:
	if (found) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (found)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}
Example #21
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are freed.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	kern_return_t kret;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty (links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}	
Example #22
File: thread_call.c Project: Prajna/xnu
void
thread_call_delayed_timer(
	timer_call_param_t				p0,
	__unused timer_call_param_t		p1
)
{
    thread_call_t			call;
	thread_call_group_t		group = p0;
	boolean_t				new_pending = FALSE;
	uint64_t				timestamp;

	thread_call_lock_spin();

	timestamp = mach_absolute_time();
    
    call = TC(queue_first(&group->delayed_queue));
    
    while (!queue_end(&group->delayed_queue, qe(call))) {
    	if (call->deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;
	    
		call = TC(queue_first(&group->delayed_queue));
    }

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

    if (new_pending && group->active_count == 0)
		thread_call_wake(group);

    thread_call_unlock();
}
Example #23
void
task_act_iterate_wth_args(
	task_t task,
	void (*func_callback)(thread_act_t, void *),
	void *func_arg)
{
	thread_act_t inc, ninc;

	task_lock(task);
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {
		ninc = (thread_act_t)queue_next(&inc->task_threads);
		(void) (*func_callback)(inc, func_arg);
	}
	task_unlock(task);
}
Example #24
File: msg.c Project: AndrewD/prex
/*
 * Dequeue thread from specified queue.
 * The highest-priority thread will be chosen.
 */
static thread_t
msg_dequeue(queue_t head)
{
	queue_t q;
	thread_t th, top;

	q = queue_first(head);
	top = queue_entry(q, struct thread, ipc_link);
	while (!queue_end(head, q)) {
		th = queue_entry(q, struct thread, ipc_link);
		if (th->prio < top->prio)
			top = th;
		q = queue_next(q);
	}
	queue_remove(&top->ipc_link);
	return top;
}
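Note: in Prex a numerically smaller prio means a higher priority, which is why the loop above keeps the entry with the smallest prio value while walking to queue_end(). The same pick-the-best-while-walking shape, reusing the toy queue stand-ins shown after Example #1 (struct toy_thread and its fields are hypothetical):

/* The chain is the first member so the plain cast below stands in for
 * the queue_entry()/container_of style lookup used by msg_dequeue(). */
struct toy_thread {
	queue_chain_t	ipc_link;
	int		prio;		/* smaller value == higher priority */
};

/* Caller guarantees the queue is not empty, as msg_dequeue() assumes. */
static struct toy_thread *
pick_highest_prio(queue_t head)
{
	queue_entry_t		qe = queue_first(head);
	struct toy_thread	*top = (struct toy_thread *)qe;

	while (!queue_end(head, qe)) {
		struct toy_thread	*t = (struct toy_thread *)qe;

		if (t->prio < top->prio)
			top = t;
		qe = queue_next(qe);
	}
	return (top);
}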
Example #25
File: wait_queue.c Project: Prajna/xnu
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue must be a member of the set queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

		   	wql = (wait_queue_link_t)wq_element;
			
			if (wql->wql_setqueue == wq_set) {
				boolean_t alloced;

				alloced = (wql->wql_type == WAIT_QUEUE_LINK);
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				if (alloced)
					zfree(_wait_queue_link_zone, wql);
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}	
Example #26
kern_return_t get_signalact(task_t task, thread_act_t *thact, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		ninc = (thread_act_t)queue_next(&inc->task_threads);
	}
out:
	if (thact)
		*thact = thr_act;
	if (thr_act) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (thr_act)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}
Example #27
File: chario.c Project: 0xffea/gnumach
/*
 * Port-death routine to clean up reply messages.
 */
boolean_t
tty_queue_clean(
	queue_t		q,
	ipc_port_t	port,
	boolean_t	(*routine)(io_req_t) )
{
	register io_req_t	ior;

	ior = (io_req_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)ior)) {
	    if (ior->io_reply_port == port) {
		remqueue(q, (queue_entry_t)ior);
		ior->io_done = routine;
		iodone(ior);
		return TRUE;
	    }
	    ior = ior->io_next;
	}
	return FALSE;
}
Example #28
/*
 *	sched_traditional_choose_thread_from_runq:
 *
 *	Locate a thread to execute from the processor run queue
 *	and return it.  Only choose a thread with greater or equal
 *	priority.
 *
 *	Associated pset must be locked.  Returns THREAD_NULL
 *	on failure.
 */
static thread_t
sched_traditional_choose_thread_from_runq(
                                          processor_t     processor,
                                          run_queue_t     rq,
                                          int             priority)
{
	queue_t         queue   = rq->queues + rq->highq;
	int             pri     = rq->highq;
	int             count   = rq->count;
	thread_t        thread;

	while (count > 0 && pri >= priority) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor == PROCESSOR_NULL ||
			    thread->bound_processor == processor) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}
Example #29
/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are not freed, nor
 *		returned. It is the caller's responsibility to track and free
 *		them.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	kern_return_t kret;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}	
Example #30
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue must be a member of the set queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
		   	wql = (wait_queue_link_t)wq_element;
			
			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}