Example 1
/*
 * Close the tty.
 * Tty must be locked (at spltty).
 * Modem control, if present (t_mctl), is shut down on close.
 */
void ttyclose(
	register struct tty *tp)
{
	register io_req_t	ior;

	/*
	 * Flush the read and write queues.  Signal
	 * the open queue so that those waiting for open
	 * to complete will see that the tty is closed.
	 */
	while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_read)) != 0) {
	    ior->io_done = tty_close_read_reply;
	    iodone(ior);
	}
	while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_write)) != 0) {
	    ior->io_done = tty_close_write_reply;
	    iodone(ior);
	}
	while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_open)) != 0) {
	    ior->io_done = tty_close_open_reply;
	    iodone(ior);
	}

	/* Close down modem */
	if (tp->t_mctl) {
		(*tp->t_mctl)(tp, TM_BRK|TM_RTS, DMBIC);
		if ((tp->t_state&(TS_HUPCLS|TS_WOPEN)) || (tp->t_state&TS_ISOPEN)==0)
			(*tp->t_mctl)(tp, TM_HUP, DMSET);
	}

	/* only save buffering bit, and carrier */
	tp->t_state = tp->t_state & (TS_MIN|TS_CARR_ON);
}
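
A note on the primitive itself: every Mach-derived example in this collection drains work with dequeue_head. A minimal sketch of that helper for Mach's circular doubly linked queues follows; it illustrates the assumed contract (first entry out, NULL when empty) and is not the kernel's original.

#include <stddef.h>

typedef struct queue_entry {
	struct queue_entry *next;	/* next entry, or back to the head */
	struct queue_entry *prev;	/* previous entry */
} queue_entry, *queue_entry_t, *queue_t;

/* Remove and return the first entry, or NULL when the header points
 * back at itself (the empty circular queue). */
static queue_entry_t dequeue_head(queue_t que)
{
	queue_entry_t elt;

	if (que->next == que)
		return NULL;

	elt = que->next;
	que->next = elt->next;
	elt->next->prev = que;
	elt->next = elt->prev = NULL;
	return elt;
}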
Example 2
static unsigned int fq_codel_drop(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;

	/* Queue is full! Find the fat flow and drop packet from it.
	 * This might sound expensive, but with 1024 flows, we scan
 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}
	flow = &q->flows[idx];
	skb = dequeue_head(flow);
	len = qdisc_pkt_len(skb);
	q->backlogs[idx] -= len;
	sch->q.qlen--;
	qdisc_qstats_drop(sch);
	qdisc_qstats_backlog_dec(sch, skb);	/* must run before the skb is freed */
	kfree_skb(skb);
	flow->dropped++;
	return idx;
}
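
Note that the fq_codel snippets use a different dequeue_head from the Mach one: a flow-local helper over a singly linked list of skbs. A sketch of its assumed shape, with stub types standing in for the kernel's (illustrative only; recent kernels clear the link with skb_mark_not_on_list() instead of a bare NULL store):

struct sk_buff {
	struct sk_buff *next;	/* stub; the real struct is far larger */
};

struct fq_codel_flow {
	struct sk_buff *head;
	struct sk_buff *tail;
};

/* Detach and return the first packet of the flow.  Callers above test
 * flow->head (or know the backlog is non-zero) before calling. */
static struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}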
Example 3
/*
 *	swapin_thread_continue: [exported]
 *
 *	This procedure executes as a kernel thread.  Threads that need to
 *	be swapped in are swapped in by this thread.
 */
void __attribute__((noreturn)) swapin_thread_continue(void)
{
	for (;;) {
		thread_t thread;
		spl_t s;

		s = splsched();
		swapper_lock();

		while ((thread = (thread_t) dequeue_head(&swapin_queue))
							!= THREAD_NULL) {
			kern_return_t kr;
			swapper_unlock();
			(void) splx(s);

			kr = thread_doswapin(thread);		/* may block */

			s = splsched();
			swapper_lock();

			if (kr != KERN_SUCCESS) {
				enqueue_head(&swapin_queue,
					     (queue_entry_t) thread);
				break;
			}
		}

		assert_wait((event_t) &swapin_queue, FALSE);
		swapper_unlock();
		(void) splx(s);
		counter(c_swapin_thread_block++);
		thread_block(swapin_thread_continue);
	}
}
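
Examples 3, 4, 8, 13, and 18 all share one daemon shape: hold the queue lock only while dequeuing, drop it around work that may block, and park on the queue's event once it runs dry. A user-space rendering of that shape with pthreads, as an illustration only (the kernel versions also juggle spl levels and continuations):

#include <pthread.h>
#include <stddef.h>

struct work {
	struct work *next;
	void (*fn)(struct work *);	/* may block */
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cv = PTHREAD_COND_INITIALIZER;
static struct work *q_head;

static void *daemon_loop(void *arg)
{
	(void) arg;

	pthread_mutex_lock(&q_lock);
	for (;;) {
		struct work *w;

		while ((w = q_head) != NULL) {
			q_head = w->next;		/* dequeue_head */
			pthread_mutex_unlock(&q_lock);
			w->fn(w);			/* lock not held here */
			pthread_mutex_lock(&q_lock);
		}
		/* assert_wait + thread_block, pthread style */
		pthread_cond_wait(&q_cv, &q_lock);
	}
}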
Example 4
/*
 *	thread_stack_daemon:
 *
 *	Perform stack allocation as required due to
 *	invoke failures.
 */
static void
thread_stack_daemon(void)
{
	thread_t		thread;

	simple_lock(&thread_stack_lock);

	while ((thread = (thread_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);

		stack_alloc(thread);
		
		(void)splsched();
		thread_lock(thread);
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
		thread_unlock(thread);
		(void)spllo();

		simple_lock(&thread_stack_lock);
	}

	assert_wait((event_t)&thread_stack_queue, THREAD_UNINT);
	simple_unlock(&thread_stack_lock);

	thread_block((thread_continue_t)thread_stack_daemon);
	/*NOTREACHED*/
}
Example 5
bool
InstanceDataSampleList::dequeue(const DataSampleElement* stale)
{
  if (head_ == 0) {
    return false;
  }

  if (stale == head_) {
    DataSampleElement* tmp;
    return dequeue_head(tmp);
  }

  if (stale == tail_) {
    tail_ = tail_->previous_instance_sample_;
    tail_->next_instance_sample_ = 0;

  } else {
    stale->previous_instance_sample_->next_instance_sample_ =
      stale->next_instance_sample_;
    stale->next_instance_sample_->previous_instance_sample_ =
      stale->previous_instance_sample_;
  }

  // Unlinking mutates the element itself; cast away the caller's const view.
  DataSampleElement* elem = const_cast<DataSampleElement*>(stale);
  elem->next_instance_sample_ = elem->previous_instance_sample_ = 0;
  --size_;
  return true;
}
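
The three unlink cases the method distinguishes (head, tail, middle) are standard doubly linked deletion. The same logic in isolation, with plain C names (illustrative sketch):

struct node {
	struct node *prev, *next;
};

/* Unlink elt from a NULL-terminated doubly linked list with external
 * head/tail pointers, covering the head, tail, and middle cases. */
static void unlink_node(struct node **head, struct node **tail,
			struct node *elt)
{
	if (elt->prev != NULL)
		elt->prev->next = elt->next;
	else
		*head = elt->next;	/* elt was the head */

	if (elt->next != NULL)
		elt->next->prev = elt->prev;
	else
		*tail = elt->prev;	/* elt was the tail */

	elt->prev = elt->next = NULL;
}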
Example 6
/*
 * Retry delayed IO operations for TTY.
 * TTY containing queue must be locked (at spltty).
 */
void tty_queue_completion(
	register queue_t	qh)
{
	register io_req_t	ior;

	while ((ior = (io_req_t)dequeue_head(qh)) != 0) {
	    iodone(ior);
	}
}
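
A plausible call site, given Example 1's delayed-request queues: once the device can make progress again, retry everything that was parked. The function name here is hypothetical; only the field names follow the examples above.

/* Hypothetical caller: when output drains, retry delayed writes. */
void tty_output_resumed(struct tty *tp)
{
	tty_queue_completion(&tp->t_delayed_write);
}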
Example 7
/*
 *	sched_traditional_processor_queue_shutdown:
 *
 *	Shutdown a processor run queue by
 *	re-dispatching non-bound threads.
 *
 *	Associated pset must be locked, and is
 *	returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
	processor_set_t         pset    = processor->processor_set;
	run_queue_t             rq      = runq_for_processor(processor);
	queue_t                 queue   = rq->queues + rq->highq;
	int                     pri     = rq->highq;
	int                     count   = rq->count;
	thread_t                next, thread;
	queue_head_t            tqueue;

	queue_init(&tqueue);

	while (count > 0) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				enqueue_tail(&tqueue, (queue_entry_t)thread);
			}
			count--;

			thread = next;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
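
The temporary tqueue is the load-bearing trick here: non-bound threads are collected while the pset lock is held, and thread_setrun, which takes other locks, runs only after pset_unlock. The same two-phase drain in miniature (hypothetical types; Example 7 moves entries selectively rather than all at once):

struct node {
	struct node *next;
};

/* Phase 1, container lock held: detach the list in O(1).
 * Phase 2, after unlock: walk the private list and dispatch. */
static struct node *detach_all(struct node **headp)
{
	struct node *all = *headp;

	*headp = NULL;
	return all;
}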
Example 8
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/* 
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
Example 9
/*
 *	_internal_call_allocate:
 *
 *	Allocate an internal callout entry.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
    thread_call_t		call;
    
    if (queue_empty(&thread_call_internal_queue))
    	panic("_internal_call_allocate");
	
    call = TC(dequeue_head(&thread_call_internal_queue));
    
    return (call);
}
Example 10
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		sch->q.qlen--;
	}
	return skb;
}
Example 11
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}
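
Examples 10 and 11 are the same hook at two vintages of the codel code: the later one receives an opaque void *ctx so codel_dequeue() need not know the qdisc type. The callback's assumed shape is below; later kernels name it codel_skb_dequeue_t, though treat the exact spelling here as an assumption.

struct sk_buff;
struct codel_vars;

typedef struct sk_buff *(*codel_skb_dequeue_t)(struct codel_vars *vars,
					       void *ctx);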
Example 12
bool
SendStateDataSampleList::dequeue(const DataSampleElement* stale)
{
  if (head_ == 0) {
    return false;
  }

  // Same as dequeue from head.
  if (stale == head_) {
    DataSampleElement* tmp = head_;
    return dequeue_head(tmp);
  }

  // Search from head_->next_send_sample_.
  DataSampleElement* toRemove = 0;
  for (DataSampleElement* item = head_->next_send_sample_;
       item != 0 && toRemove == 0;
       item = item->next_send_sample_) {
    if (item == stale) {
      toRemove = item;
    }
  }

  if (toRemove) {
    --size_;
    // Remove from the previous element.
    toRemove->previous_send_sample_->next_send_sample_ = toRemove->next_send_sample_ ;

    // Remove from the next element.
    if (toRemove->next_send_sample_ != 0) {
      // Remove from the inside of the list.
      toRemove->next_send_sample_->previous_send_sample_ = toRemove->previous_send_sample_ ;

    } else {
      toRemove->previous_send_sample_->next_send_sample_ = 0;
      // Remove from the tail of the list.
      tail_ = toRemove->previous_send_sample_ ;
    }

    toRemove->next_send_sample_ = 0;
    toRemove->previous_send_sample_ = 0;
  }

  return toRemove != 0;
}
Example 13
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;
		task_unlock(task);

		mutex_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		mutex_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
Example 14
static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}
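
The do/while exit condition encodes the batching policy: stop once at least half the fat flow's backlog (threshold) has been removed, or after max_packets drops, whichever comes first. The same arithmetic in isolation (illustrative helper, not kernel code; pkt_len must hold at least max_packets entries):

static unsigned int drops_needed(unsigned int backlog,
				 const unsigned int *pkt_len,
				 unsigned int max_packets)
{
	unsigned int threshold = backlog >> 1;	/* half the fat flow */
	unsigned int len = 0, i = 0;

	do {
		len += pkt_len[i];
	} while (++i < max_packets && len < threshold);

	return i;	/* number of packets the loop above would drop */
}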
Example 15
static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		while (flow->head) {
			struct sk_buff *skb = dequeue_head(flow);

			qdisc_qstats_backlog_dec(sch, skb);
			kfree_skb(skb);
		}

		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
}
Example 16
bool
WriterDataSampleList::dequeue(const DataSampleElement* stale)
{
  if (head_ == 0) {
    return false;
  }

  if (stale == head_) {
    DataSampleElement* head = head_;
    return dequeue_head(head);
  }

  // Search from head_->next_writer_sample_.
  bool found = false;

  for (DataSampleElement* item = head_->next_writer_sample_ ;
       item != 0 ;
       item = item->next_writer_sample_) {
    if (item == stale) {
      found = true;
      break;
    }
  }

  if (found) {
    // Adjust list size.
    --size_;

    //
    // Remove from the previous element.
    //
    if (stale->previous_writer_sample_ != 0) {
      // Remove from inside of the list.
      stale->previous_writer_sample_->next_writer_sample_ = stale->next_writer_sample_ ;

    } else {
      // Remove from the head of the list.
      head_ = stale->next_writer_sample_ ;

      if (head_ != 0) {
        head_->previous_writer_sample_ = 0;
      }
    }

    //
    // Remove from the next element.
    //
    if (stale->next_writer_sample_ != 0) {
      // Remove the inside of the list.
      stale->next_writer_sample_->previous_writer_sample_ = stale->previous_writer_sample_ ;

    } else {
      // Remove from the tail of the list.
      tail_ = stale->previous_writer_sample_ ;
    }

    // Unlinking mutates the element itself; cast away the caller's const view.
    DataSampleElement* elem = const_cast<DataSampleElement*>(stale);
    elem->next_writer_sample_ = 0;
    elem->previous_writer_sample_ = 0;
  }

  return found;
}
Example 17
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t		group)
{
	thread_t		self = current_thread();

	(void) splsched();
	thread_call_lock_spin();

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;
	
		call->queue = NULL;

		_internal_call_release(call);

		thread_call_unlock();
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				func, param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
				  pl, func, param0, param1);
		}
		
		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0);
	
		thread_call_unlock();
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	thread_call_unlock();
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}
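
Both thread_call loops (this one and Example 19) preserve the same ordering: detach the entry and snapshot func/param under the lock, release the entry, and only then invoke it, so the callout is free to re-enqueue itself. That invariant in miniature (hypothetical types):

struct callout {
	struct callout *next;
	void (*func)(void *);
	void *param;
};

/* Called with the queue lock held: detach the head and snapshot its
 * fields before the entry is released or reused. */
static struct callout *grab_next(struct callout **pending,
				 void (**func)(void *), void **param)
{
	struct callout *c = *pending;

	if (c != NULL) {
		*pending = c->next;
		c->next = NULL;
		*func = c->func;
		*param = c->param;
	}
	return c;
}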
Example 18
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t	self, thread;
	task_t		task;

	self = current_thread();
	self->options |= TH_OPT_SYSTEM_CRITICAL;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		if (thread->precise_user_kernel_time) {
			task->total_system_time += timer_grab(&thread->system_timer);
		} else {
			task->total_user_time += timer_grab(&thread->system_timer);
		}

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		task->syscalls_unix += thread->syscalls_unix;
		task->syscalls_mach += thread->syscalls_mach;

		task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
		task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/* 
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	self->options &= ~TH_OPT_SYSTEM_CRITICAL;
	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
Example 19
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
		thread_call_group_t		group,
		wait_result_t			wres)
{
	thread_t	self = current_thread();
	boolean_t	canwait;

	/*
	 * A wakeup with THREAD_INTERRUPTED indicates that 
	 * we should terminate.
	 */
	if (wres == THREAD_INTERRUPTED) {
		thread_terminate(self);

		/* NOTREACHED */
		panic("thread_terminate() returned?");
	}

	(void)disable_ints_and_lock();

	thread_sched_call(self, group->sched_call);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->tc_call.func;
		param0 = call->tc_call.param0;
		param1 = call->tc_call.param1;

		call->tc_call.queue = NULL;

		_internal_call_release(call);

		/*
		 * Can only do wakeups for thread calls whose storage
		 * we control.
		 */
		if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
			canwait = TRUE;
			call->tc_refs++;	/* Delay free until we're done */
		} else
			canwait = FALSE;

		enable_ints_and_unlock();

		KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
					pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
		}

		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) disable_ints_and_lock();
		
		if (canwait) {
			/* Frees if so desired */
			thread_call_finish(call);
		}
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group_isparallel(group)) {
		/*
		 * For new style of thread group, thread always blocks. 
		 * If we have more than the target number of threads,
		 * and this is the first to block, and it isn't active 
		 * already, set a timer for deallocating a thread if we 
		 * continue to have a surplus.
		 */
		group->idle_count++;

		if (group->idle_count == 1) {
			group->idle_timestamp = mach_absolute_time();
		}   

		if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
				((group->active_count + group->idle_count) > group->target_thread_count)) {
			group->flags |= TCG_DEALLOC_ACTIVE;
			thread_call_start_deallocate_timer(group);
		}   

		/* Wait for more work (or termination) */
		wres = wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTIBLE, 0); 
		if (wres != THREAD_WAITING) {
			panic("kcall worker unable to assert wait?");
		}   

		enable_ints_and_unlock();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
	} else {
		if (group->idle_count < group->target_thread_count) {
			group->idle_count++;

			wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */

			enable_ints_and_unlock();

			thread_block_parameter((thread_continue_t)thread_call_thread, group);
			/* NOTREACHED */
		}
	}

	enable_ints_and_unlock();

	thread_terminate(self);
	/* NOTREACHED */
}