Example #1
/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
        lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;
 
	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
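The routine above is normally driven from a condition-wait loop. Below is a minimal, hypothetical caller sketch (the lock, flag, and function names are invented; LCK_SLEEP_DEFAULT simply re-acquires the lock in whatever mode lck_rw_done() reported, as the code above shows):

static lck_rw_t		my_lock;	/* assumed initialized with lck_rw_init() elsewhere */
static boolean_t	my_flag;

void
my_wait_for_flag(void)
{
	lck_rw_lock_shared(&my_lock);
	while (!my_flag) {
		/* drops my_lock, blocks on &my_flag, re-takes the lock in its prior mode */
		(void) lck_rw_sleep(&my_lock, LCK_SLEEP_DEFAULT,
				    (event_t) &my_flag, THREAD_UNINT);
	}
	lck_rw_done(&my_lock);
}

void
my_set_flag(void)
{
	lck_rw_lock_exclusive(&my_lock);
	my_flag = TRUE;
	thread_wakeup((event_t) &my_flag);	/* wakes threads blocked on &my_flag */
	lck_rw_done(&my_lock);
}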
Example #2
/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
        lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
 
	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
Example #3
/*
 *	swapin_thread: [exported]
 *
 *	This procedure executes as a kernel thread.  Threads that need to
 *	be swapped in are swapped in by this thread.
 */
void __attribute__((noreturn)) swapin_thread_continue(void)
{
	for (;;) {
		thread_t thread;
		spl_t s;

		s = splsched();
		swapper_lock();

		while ((thread = (thread_t) dequeue_head(&swapin_queue))
							!= THREAD_NULL) {
			kern_return_t kr;
			swapper_unlock();
			(void) splx(s);

			kr = thread_doswapin(thread);		/* may block */

			s = splsched();
			swapper_lock();

			if (kr != KERN_SUCCESS) {
				enqueue_head(&swapin_queue,
					     (queue_entry_t) thread);
				break;
			}
		}

		assert_wait((event_t) &swapin_queue, FALSE);
		swapper_unlock();
		(void) splx(s);
		counter(c_swapin_thread_block++);
		thread_block(swapin_thread_continue);
	}
}
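For context, a hedged sketch of the producer side (not reproduced from the source; the locking discipline is inferred from the daemon above): a thread that needs to be swapped in is queued under the swapper lock and the daemon is woken on the same event it waits on.

void
swapin_request_sketch(thread_t thread)
{
	spl_t s;

	s = splsched();
	swapper_lock();
	enqueue_tail(&swapin_queue, (queue_entry_t) thread);
	swapper_unlock();
	(void) splx(s);

	thread_wakeup((event_t) &swapin_queue);	/* pairs with the assert_wait above */
}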
Example #4
/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
        lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
 
	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
Example #5
void
cyclic_remove(cyclic_id_t cyclic)
{
	wrap_thread_call_t *wrapTC = (wrap_thread_call_t *)cyclic;

	ASSERT(cyclic != CYCLIC_NONE);

	while (!thread_call_cancel(wrapTC->TChdl)) {
		int ret = assert_wait(wrapTC, THREAD_UNINT);
		ASSERT(ret == THREAD_WAITING);

		wrapTC->when.cyt_interval = WAKEUP_REAPER;

		ret = thread_block(THREAD_CONTINUE_NULL);
		ASSERT(ret == THREAD_AWAKENED);
	}

	if (thread_call_free(wrapTC->TChdl))
		_FREE(wrapTC, M_TEMP);
	else {
		/* Gut this cyclic and move on ... */
		wrapTC->hdlr.cyh_func = noop_cyh_func;
		wrapTC->when.cyt_interval = NEARLY_FOREVER;
	}
}
Example #6
/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	thread_t				thread)
{
	spl_t		s;

	thread_mtx_lock(thread);

	s = splsched();
	thread_lock(thread);
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
			thread_mtx_unlock(thread);
			thread_block((thread_continue_t)special_handler_continue);
			/*NOTREACHED*/
		}
	}
	else {
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
Example #7
nw_result mk_multicast_drop(nw_ep local_ep, nw_address_1 rem_addr_1,
			    nw_address_2 rem_addr_2, nw_ep remote_ep) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else {
      rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->drop))
	      (local_ep, rem_addr_1, rem_addr_2, remote_ep);
      if (rc == NW_SYNCH) {
	hect[local_ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	current_thread()->nw_ep_waited = NULL;
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);
      }
    }
  }
  nw_unlock();
  return rc;
}
Example #8
/*
 * Wait for all requested invocations of a thread call prior to now
 * to finish.  Can only be invoked on thread calls whose storage we manage.  
 * Just waits for the finish count to catch up to the submit count we find
 * at the beginning of our wait.
 */
static void
thread_call_wait_locked(thread_call_t call)
{
	uint64_t submit_count;
	wait_result_t res;

	assert(call->tc_flags & THREAD_CALL_ALLOC);

	submit_count = call->tc_submit_count;

	while (call->tc_finish_count < submit_count) {
		call->tc_flags |= THREAD_CALL_WAIT;

		res = assert_wait(call, THREAD_UNINT);
		if (res != THREAD_WAITING) {
			panic("Unable to assert wait?");
		}

		thread_call_unlock();
		(void) spllo();

		res = thread_block(NULL);
		if (res != THREAD_AWAKENED) {
			panic("Awoken with %d?", res);
		}
	
		(void) splsched();
		thread_call_lock_spin();
	}
}
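As the comment notes, the waiter only tracks submit/finish counts; the wakeup has to come from the completion path. A hedged sketch of that side, using only the fields and flags visible above (not reproduced from the source):

static void
thread_call_finish_sketch(thread_call_t call)
{
	/* assumed to run with the thread_call lock held at splsched, like the waiter */
	call->tc_finish_count++;

	if (call->tc_flags & THREAD_CALL_WAIT) {
		call->tc_flags &= ~THREAD_CALL_WAIT;
		thread_wakeup((event_t) call);	/* pairs with assert_wait(call, THREAD_UNINT) */
	}
}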
Example #9
nw_result mk_connection_close(nw_ep ep) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else {
      rc = (*devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->close)
	      (ep);
      if (rc == NW_SYNCH) {
	hect[ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	current_thread()->nw_ep_waited = NULL;
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);
      }
    }
  }
  nw_unlock();
  return rc;
}
Example #10
/*
 *	thread_stack_daemon:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);

		stack_alloc(thread);
		
		(void)splsched();
		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}
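The daemon only drains thread_stack_queue; a hedged sketch of the enqueue side follows (names and the splsched assumption are inferred from the code above, not reproduced from the source):

void
thread_stack_enqueue_sketch(thread_t thread)
{
	/* assumed to be called at splsched, matching the daemon's splsched()/spllo() pairing */
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t) thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t) &thread_stack_queue);	/* pairs with the assert_wait above */
}
Example #11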
bool DldIODataQueue::waitForData()
{
    
    assert( preemption_enabled() );
    
    bool wait;
    
    IOLockLock( this->lock );
    {// start of the lock
        
        //
        // wait if the queue is empty
        //
        wait = ( this->dataQueue->head == this->dataQueue->tail );

        if( wait && THREAD_WAITING != assert_wait( this->dataQueue, THREAD_UNINT ) )
            wait = false;
        
    }// end of the lock
    IOLockUnlock( this->lock );
    
    if( wait )
        thread_block( THREAD_CONTINUE_NULL );
    
    return true;
}
Example #12
/* virtual */ void IOWorkLoop::threadMain()
{
restartThread:
    do {
	if ( !runEventSources() )
	    goto exitThread;

	IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
        if ( !ISSETP(&fFlags, kLoopTerminate) && !workToDo) {
	    assert_wait((void *) &workToDo, false);
	    IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
	    thread_continue_t cptr = NULL;
	    if (!reserved || !(kPreciousStack & reserved->options))
		cptr = OSMemberFunctionCast(
			thread_continue_t, this, &IOWorkLoop::threadMain);
	    thread_block_parameter(cptr, this);
	    goto restartThread;
	    /* NOTREACHED */
	}

	// At this point we either have work to do or we need
	// to commit suicide.  But no matter what, clear the
	// simple lock and restore the interrupt state.
	IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
    } while(workToDo);

exitThread:
	thread_t thread = workThread;
    workThread = 0;	// Say we don't have a loop and free ourselves
    free();

	thread_deallocate(thread);
    (void) thread_terminate(thread);
}
Example #13
void
action_thread(void)
{
	register processor_t	processor;
	spl_t			s;

	thread_swappable(current_act(), FALSE);

	while (TRUE) {
		s = splsched();
		simple_lock(&action_lock);
		while ( !queue_empty(&action_queue)) {
			processor = (processor_t) queue_first(&action_queue);
			queue_remove(&action_queue, processor, processor_t,
				     processor_queue);
			simple_unlock(&action_lock);
			splx(s);

			processor_doaction(processor);

			s = splsched();
			simple_lock(&action_lock);
		}

		assert_wait((event_t) &action_queue, FALSE);
		simple_unlock(&action_lock);
		splx(s);
		counter(c_action_thread_block++);
		thread_block((void (*)(void)) 0);
	}
}
Example #14
/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
        lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;
	thread_t		thread = current_thread();

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		/*
		 * Although we are dropping the RW lock, the intent in most cases
		 * is that this thread remains as an observer, since it may hold
		 * some secondary resource, but must yield to avoid deadlock. In
		 * this situation, make sure that the thread is boosted to the
		 * RW lock ceiling while blocked, so that it can re-acquire the
		 * RW lock at that priority.
		 */
		thread->rwlock_count++;
	}

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
			/* sched_flags checked without lock, but will be rechecked while clearing */

			/* Only if the caller wanted the lck_rw_t returned unlocked should we drop to 0 */
			assert(lck_sleep_action & LCK_SLEEP_UNLOCK);

			lck_rw_clear_promotion(thread);
		}
	}

	return res;
}
Example #15
/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
        lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	thread_t		thread = current_thread();
 
	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     VM_KERNEL_UNSLIDE_OR_PERM(lck), (int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		/*
		 * We overload the RW lock promotion to give us a priority ceiling
		 * during the time that this thread is asleep, so that when it
		 * is re-awakened (and not yet contending on the mutex), it is
		 * runnable at a reasonably high priority.
		 */
		thread->rwlock_count++;
	}

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if ((lck_sleep_action & LCK_SLEEP_SPIN))
				lck_mtx_lock_spin(lck);
			else if ((lck_sleep_action & LCK_SLEEP_SPIN_ALWAYS))
				lck_mtx_lock_spin_always(lck);
			else
				lck_mtx_lock(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
			/* sched_flags checked without lock, but will be rechecked while clearing */
			lck_rw_clear_promotion(thread);
		}
	}

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
Example #16
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/* 
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
Example #17
/*
 * If a copy is currently being created, wait for it.
 */
xmm_obj_t
svm_get_stable_copy(
	xmm_obj_t	mobj)
{
	assert(xmm_obj_lock_held(mobj));
	while (MOBJ->copy_in_progress) {
		MOBJ->copy_wanted = TRUE;
		assert_wait(svm_copy_event(mobj), FALSE);
		xmm_obj_unlock(mobj);
		thread_block((void (*)(void)) 0);
		xmm_obj_lock(mobj);
	}
	return MOBJ->copy;
}
Example #18
/*
 * thread_stop_freeze
 *	Block the thread in the kernel and freeze the processor set.
 * return value:
 *	TRUE - the thread has blocked interruptibly, is stopped, and
 *		the processor set assignment is frozen
 *	FALSE - the thread is no longer in the processor set, so it
 *		isn't stopped, and the processor set assignment
 *		is released.
 */
int
thread_stop_freeze( thread_t thread, processor_set_t pset )
{
	thread_act_t	thr_act;
	spl_t	s;

	/*
	 * hold it, and wait for it to stop.
	 */
	thr_act = thread_lock_act(thread);
	thread_hold(thr_act);
	act_unlock_thread(thr_act);

	thread_stop(thread);

	s = splsched();
	wake_lock(thread);
        while( thread->state & (TH_RUN|TH_UNINT) ) {
                thread->wake_active = TRUE;
                assert_wait((event_t)&thread->wake_active, FALSE);
                wake_unlock(thread);
                splx(s);
                thread_block( (void (*)(void)) 0 );
                (void) splsched();
                wake_lock(thread);
        }

	/*
	 * Now, the thread has blocked uninterruptibly; freeze the 
	 * assignment and make sure it's still part of the processor set.
	 */
	wake_unlock(thread);
	thread_freeze(thread);
	thread_lock(thread);

	/*
	 * if the processor set has changed, release the freeze and
	 * then unstop it.
	 */
	if( thread->processor_set != pset ) {
		thread_unlock(thread);
		splx(s);
		thread_unfreeze(thread);
		thread_unstop(thread);
		return FALSE;
	}
	thread_unlock(thread);
	splx(s);
	return TRUE;
}
Example #19
nw_result mk_connection_open_internal(nw_ep local_ep, nw_address_1 rem_addr_1,
               		      nw_address_2 rem_addr_2, nw_ep remote_ep) {
  nw_result rc;
  
  rc = (*devct[NW_DEVICE(rem_addr_1)].entry->open) (local_ep,
						    rem_addr_1, rem_addr_2,
						    remote_ep);
  if (rc == NW_SYNCH) {
    hect[local_ep].sig_waiter = current_thread();
    assert_wait(0, TRUE);
    simple_unlock(&nw_simple_lock);
    thread_block((void (*)()) 0);
  }
  return rc;
}
Example #20
static void
timer_call_remove_cyclic(cyclic_id_t cyclic)
{
	wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;

	while (!timer_call_cancel(&(wrapTC->call))) {
		int ret = assert_wait(wrapTC, THREAD_UNINT);
		ASSERT(ret == THREAD_WAITING);

		wrapTC->when.cyt_interval = WAKEUP_REAPER;

		ret = thread_block(THREAD_CONTINUE_NULL);
		ASSERT(ret == THREAD_AWAKENED);
	}
}
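The cancel loop above parks on wrapTC after setting cyt_interval to WAKEUP_REAPER, so the wakeup must come from the timer side. A hedged sketch of that handler (field names beyond those shown above, such as hdlr.cyh_func, are assumptions):

static void
timer_call_fire_sketch(wrap_timer_call_t *wrapTC)
{
	/* run the wrapped cyclic handler */
	(*(wrapTC->hdlr.cyh_func))(wrapTC->hdlr.cyh_arg);

	if (wrapTC->when.cyt_interval == WAKEUP_REAPER) {
		/* a remover is blocked in timer_call_remove_cyclic(); wake it */
		thread_wakeup((event_t) wrapTC);
		return;
	}

	/* otherwise the timer call would be re-armed for the next period (omitted) */
}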
Example #21
/* thread for calling back to a compressor's memory allocator
 * Needed for Digital UNIX since it's VM can't handle requests
 * for large amounts of memory without blocking.  The thread
 * provides a context in which we can call a memory allocator
 * that may block.
 */
static void
ppp_comp_alloc(comp_state_t *cp)
{
    int len, cmd;
    unsigned char *compressor_options;
    thread_t thread;
    void *(*comp_allocator)();


#if defined(MAJOR_VERSION) && (MAJOR_VERSION <= 2)

    /* In 2.x and earlier the argument gets passed
     * in the thread structure itself.  Yuck.
     */
    thread = current_thread();
    cp = thread->reply_port;
    thread->reply_port = PORT_NULL;

#endif

    for (;;) {
	assert_wait((vm_offset_t)&cp->memreq.thread_status, TRUE);
	thread_block();

	if (thread_should_halt(current_thread()))
	    thread_halt_self();
	cmd = cp->memreq.cmd;
	compressor_options = &cp->memreq.comp_opts[0];
	len = compressor_options[1];
	if (cmd == PPPIO_XCOMP) {
	    cp->memreq.returned_mem = cp->xcomp->comp_alloc(compressor_options, len);
	    if (!cp->memreq.returned_mem) {
		cp->memreq.thread_status = ENOSR;
	    } else {
		cp->memreq.thread_status = 0;
	    }
	} else {
	    cp->memreq.returned_mem = cp->rcomp->decomp_alloc(compressor_options, len);
	    if (!cp->memreq.returned_mem) {
	        cp->memreq.thread_status = ENOSR;
	    } else {
		cp->memreq.thread_status = 0;
	    }
	}
    }
}
Example #22
void
afs_osi_Sleep(void *event)
{
    struct afs_event *evp;
    int seq;

    evp = afs_getevent(event);
    seq = evp->seq;
    while (seq == evp->seq) {
	AFS_ASSERT_GLOCK();
	assert_wait((vm_offset_t) (&evp->cond), 0);
	AFS_GUNLOCK();
	thread_block();
	AFS_GLOCK();
    }
    relevent(evp);
}
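The sleep loop exits only when evp->seq changes, so the wakeup side must bump the sequence number and post the condition event. A hedged sketch of that counterpart (modeled on the fields used above; the real afs_osi_Wakeup may differ in details such as reference counting):

void
afs_osi_Wakeup_sketch(void *event)
{
    struct afs_event *evp;

    evp = afs_getevent(event);
    evp->seq++;				/* invalidates the snapshot taken in afs_osi_Sleep */
    thread_wakeup((event_t) &evp->cond);	/* pairs with the assert_wait above */
    relevent(evp);
}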
Example #23
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;
		task_unlock(task);

		mutex_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		mutex_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
Example #24
/*
 * User-trappable
 */
kern_return_t evc_wait(natural_t ev_id)
{
	spl_t		s;
	kern_return_t	ret;
	evc_t		ev;

	if ((ev_id >= MAX_EVCS) ||
	    ((ev = all_eventcounters[ev_id]) == 0) ||
	    (ev->ev_id != ev_id) || (ev->sanity != ev))
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	simple_lock(&ev->lock);
		/*
		 * The values assumed by the "count" field are
		 * as follows:
		 *	0	At initialization time, and with no
		 *		waiting thread means no events pending;
		 *		with waiting thread means the event
		 *		was signalled and the thread not yet resumed
		 *	-1	no events, there must be a waiting thread
		 *	N>0	no waiting thread means N pending,
		 *		with waiting thread N-1 pending.
		 *	
		 */
		if (ev->count > 0) {
			ev->count--;
			ret = KERN_SUCCESS;
		} else {
			if (ev->waiting_thread == THREAD_NULL) {
				ev->count--;
				ev->waiting_thread = current_thread();
				assert_wait((event_t) 0, TRUE);	/* ifnot race */
				simple_unlock(&ev->lock);
				thread_block(evc_continue);
				return KERN_SUCCESS;
			}
			ret = KERN_NO_SPACE; /* XX */
		}
	simple_unlock(&ev->lock);
	splx(s);
	return ret;
}
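The signal side is the missing half of the count convention documented above. A hedged sketch (not reproduced from the source; the wake step is shown with the classic Mach clear_wait(), which the real code may not use verbatim):

void
evc_signal_sketch(evc_t ev)
{
	thread_t	waiter;
	spl_t		s;

	s = splsched();
	simple_lock(&ev->lock);
	if (ev->count++ < 0) {
		/* count was -1: exactly one thread is blocked in evc_wait() */
		waiter = ev->waiting_thread;
		ev->waiting_thread = THREAD_NULL;
		clear_wait(waiter, THREAD_AWAKENED, FALSE);	/* resume it */
	}
	simple_unlock(&ev->lock);
	splx(s);
}
Example #25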
IOReturn IOFWSyncer::wait(bool autoRelease)
{
    IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock);

    if (threadMustStop) {
	assert_wait((void *) &threadMustStop, false);
    	IOSimpleLockUnlockEnableInterrupt(guardLock, is);
        thread_block(THREAD_CONTINUE_NULL);
    }
    else
        IOSimpleLockUnlockEnableInterrupt(guardLock, is);

    IOReturn result = fResult;	// Pick up before auto deleting!

    if(autoRelease)
	release();

    return result;
}
Example #26
/*
 * If xmm_object is being terminated, wait for it.
 */
void
xmm_object_wait(
	ipc_port_t	memory_object)
{
	while (! IP_WAS_REMOTE(memory_object) &&
	       ((int)memory_object->ip_norma_xmm_object | 1) ==
	       ((int)memory_object | 1)) {
		/*
		 * Use the low bit to flag someone is waiting
		 */
		memory_object->ip_norma_xmm_object = (ipc_port_t)
			((int)memory_object->ip_norma_xmm_object | 1);

		assert_wait((event_t) &memory_object->ip_norma_xmm_object, FALSE);
		ip_unlock(memory_object);
		thread_block((void (*)(void)) 0);
		ip_lock(memory_object);
	}
}
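The low bit set above marks that someone is waiting, so whatever finally clears ip_norma_xmm_object has to check it and post the event. A hedged sketch of that release path (invented name; not taken from the source):

void
xmm_object_release_sketch(
	ipc_port_t	memory_object)
{
	boolean_t	waiters;

	/* caller is assumed to hold ip_lock(memory_object) */
	waiters = ((int) memory_object->ip_norma_xmm_object & 1) != 0;
	memory_object->ip_norma_xmm_object = IP_NULL;

	if (waiters)
		thread_wakeup((event_t) &memory_object->ip_norma_xmm_object);
}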
Example #27
void
lck_rw_lock_shared_gen(lck_rw_t	*lck)
{
	int		i;
	wait_result_t      res;
    
	lck_rw_ilk_lock(lck);

	while ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
           ((lck->lck_rw_shared_count == 0) || (lck->lck_rw_priv_excl))) {
		i = lock_wait_time[1];
        
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
                     (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, i, 0);
        
		if (i != 0) {
			lck_rw_ilk_unlock(lck);
			while (--i != 0 &&
			       (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
			       ((lck->lck_rw_shared_count == 0) || (lck->lck_rw_priv_excl)))
				continue;
			lck_rw_ilk_lock(lck);
		}
        
		if ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
		    ((lck->lck_rw_shared_count == 0) || (lck->lck_rw_priv_excl))) {
			lck->lck_rw_waiting = TRUE;
			res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_rw_ilk_unlock(lck);
				res = thread_block(THREAD_CONTINUE_NULL);
				lck_rw_ilk_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
                     (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, res, 0);
	}
    
	lck->lck_rw_shared_count++;
    
	lck_rw_ilk_unlock(lck);
}
Example #28
static boolean_t
proc_init_wqptr_or_wait(struct proc *p) {
	proc_lock(p);

	if (p->p_wqptr == NULL){
		p->p_wqptr = WQPTR_IS_INITING_VALUE;
		proc_unlock(p);

		return TRUE;
	} else if (p->p_wqptr == WQPTR_IS_INITING_VALUE){
		assert_wait(&p->p_wqptr, THREAD_UNINT);
		proc_unlock(p);
		thread_block(THREAD_CONTINUE_NULL);

		return FALSE;
	} else {
		proc_unlock(p);

		return FALSE;
	}
}
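Callers use the TRUE/FALSE result to decide who initializes and who retries. A hedged caller sketch (the initializer's publish-and-wake step is an assumption, shown here with thread_wakeup() on the same address the waiters assert on):

static void
proc_setup_wqptr_sketch(struct proc *p, void *newwq)
{
	while (p->p_wqptr == NULL) {
		if (proc_init_wqptr_or_wait(p) == TRUE) {
			/* we won the race: publish the pointer and wake any waiters */
			proc_lock(p);
			p->p_wqptr = newwq;
			proc_unlock(p);
			thread_wakeup(&p->p_wqptr);	/* pairs with assert_wait(&p->p_wqptr, ...) */
			break;
		}
		/* FALSE: either already initialized or we slept; re-check the pointer */
	}
}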
Example #29
/*
 * Finds a virtual address in the reserved range to map the vm_page.
 * It may block waiting for free entries in the reserved range only
 * if can_block is TRUE.
 */
vm_offset_t kkt_find_mapping(vm_page_t m, vm_offset_t offset, boolean_t can_block)
{
  unsigned int ind_range;
  vm_offset_t virt_addr;

  if (kkt_virt_free_index == 0) {
    if (can_block) {
      simple_lock(&kkt_virt_lock);
      while (kkt_virt_free_index == 0) {
	simple_unlock(&kkt_virt_lock);
	assert_wait((event_t) kkt_virt_status, FALSE);
	thread_block((void (*)(void)) 0);
	simple_lock(&kkt_virt_lock);
      }
      simple_unlock(&kkt_virt_lock);
    }
    else {
      /* XXX we cannot block because of locks held */
      panic("kkt_find_mapping: virtual range full and cannot block\n");
    }
  }

  virt_addr = (vm_offset_t) 0;

  simple_lock(&kkt_virt_lock);
  for (ind_range = 0; ind_range < KKT_VIRT_SIZE; ind_range++) {
    if (kkt_virt_status[ind_range] == FREE_VADDR) {
      kkt_virt_status[ind_range] = MAP_VADDR;
      kkt_virt_vmp[ind_range] = m;
      kkt_virt_free_index--;
      virt_addr = kkt_virt_start_vaddr + (ind_range * PAGE_SIZE);
      break;
    }
  }
  simple_unlock(&kkt_virt_lock);

  assert(virt_addr);

  return(virt_addr + offset);
}
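The waiter above blocks on kkt_virt_status whenever the range is exhausted, so the release path has to mark the slot free and post that event. A hedged sketch (invented name; not from the source):

void kkt_free_mapping_sketch(vm_offset_t virt_addr)
{
  unsigned int ind_range;

  ind_range = (unsigned int) ((trunc_page(virt_addr) - kkt_virt_start_vaddr) / PAGE_SIZE);

  simple_lock(&kkt_virt_lock);
  kkt_virt_status[ind_range] = FREE_VADDR;
  kkt_virt_vmp[ind_range] = VM_PAGE_NULL;
  kkt_virt_free_index++;
  simple_unlock(&kkt_virt_lock);

  thread_wakeup((event_t) kkt_virt_status);	/* pairs with the assert_wait above */
}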
Example #30
nw_result mk_connection_accept(nw_ep ep, nw_buffer_t msg,
			       nw_ep_t new_epp) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else if ((char *) msg < pv->buf_start ||
	       (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
	       !msg->buf_used ||
	       (char *) msg + msg->buf_length > pv->buf_end) {
      rc = NW_BAD_BUFFER;
    } else if (new_epp != NULL &&
	       (invalid_user_access(current_task()->map, (vm_offset_t) new_epp,
				   (vm_offset_t) new_epp + sizeof(nw_ep) - 1,
				   VM_PROT_READ | VM_PROT_WRITE) ||
		(*new_epp != 0 && *new_epp != ep))) {
      rc = NW_INVALID_ARGUMENT;
    } else {
      rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->accept))
	      (ep, msg, new_epp);
      if (rc == NW_SYNCH) {
	hect[ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	current_thread()->nw_ep_waited = NULL;
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);
      }
    }
  }
  nw_unlock();
  return rc;
}