int
_pthread_getschedparam(pthread_t pthread, int *policy, 
	struct sched_param *param)
{
	struct pthread *curthread = _get_curthread();
	int ret, tmp;

	if ((param == NULL) || (policy == NULL))
		/* Return an invalid argument error: */
		ret = EINVAL;
	else if (pthread == curthread) {
		/*
		 * Avoid searching the thread list when it is the current
		 * thread.
		 */
		THR_SCHED_LOCK(curthread, curthread);
		param->sched_priority =
		    THR_BASE_PRIORITY(pthread->base_priority);
		tmp = pthread->attr.sched_policy;
		THR_SCHED_UNLOCK(curthread, curthread);
		*policy = tmp;
		ret = 0;
	}
	/* Find the thread in the list of active threads. */
	else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
	    == 0) {
		THR_SCHED_LOCK(curthread, pthread);
		param->sched_priority =
		    THR_BASE_PRIORITY(pthread->base_priority);
		tmp = pthread->attr.sched_policy;
		THR_SCHED_UNLOCK(curthread, pthread);
		_thr_ref_delete(curthread, pthread);
		*policy = tmp;
	}
	return (ret);
}
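/*
 * A minimal caller-side sketch (not part of the library source) showing the
 * public interface the function above implements: querying the scheduling
 * policy and priority of the calling thread.  The helper name below is
 * hypothetical.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void
show_my_schedparam(void)
{
	struct sched_param param;
	int policy;

	/* pthread_self() takes the fast path that avoids the thread list. */
	if (pthread_getschedparam(pthread_self(), &policy, &param) == 0)
		printf("policy %d, priority %d\n", policy,
		    param.sched_priority);
}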
Example #2
void
_pthread_exit(void *status)
{
	struct pthread *curthread = _get_curthread();
	kse_critical_t crit;
	struct kse *curkse;

	/* Check if this thread is already in the process of exiting: */
	if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
		char msg[128];
		snprintf(msg, sizeof(msg), "Thread %p has called "
		    "pthread_exit() from a destructor. POSIX 1003.1 "
		    "1996 s16.2.5.2 does not allow this!", curthread);
		PANIC(msg);
	}

	/*
	 * Flag this thread as exiting.  Threads should now be prevented
	 * from joining to this thread.
	 */
	THR_SCHED_LOCK(curthread, curthread);
	curthread->flags |= THR_FLAGS_EXITING;
	THR_SCHED_UNLOCK(curthread, curthread);
	
	/*
	 * To avoid losing signals, handle any that have already been
	 * delivered to us.  The EXITING flag is now set, so no new
	 * signals should be delivered to us.
	 * XXX this is not enough if a signal was delivered just before
	 * the thread called sigprocmask() and masked it!  In that case
	 * we might have to re-post the signal with kill() if it targets
	 * the process (rather than a specific thread).  The kernel has
	 * the same lost-signal problem: a signal may be delivered to a
	 * thread that is about to call sigprocmask() or thr_exit()!
	 */
	if (curthread->check_pending)
		_thr_sig_check_pending(curthread);
	/* Save the return value: */
	curthread->ret = status;
	while (curthread->cleanup != NULL) {
		_pthread_cleanup_pop(1);
	}
	if (curthread->attr.cleanup_attr != NULL) {
		curthread->attr.cleanup_attr(curthread->attr.arg_attr);
	}
	/* Check if there is thread specific data: */
	if (curthread->specific != NULL) {
		/* Run the thread-specific data destructors: */
		_thread_cleanupspecific();
	}
	if (!_kse_isthreaded())
		exit(0);
	crit = _kse_critical_enter();
	curkse = _get_curkse();
	KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
	/* _thread_active_threads is protected by _thread_list_lock. */
	_thread_active_threads--;
	if ((_thread_scope_system <= 0 && _thread_active_threads == 1) ||
	    (_thread_scope_system > 0 && _thread_active_threads == 0)) {
		KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
		_kse_critical_leave(crit);
		exit(0);
		/* Not reached. */
	}
	KSE_LOCK_RELEASE(curkse, &_thread_list_lock);

	/* This thread will never be re-scheduled. */
	KSE_LOCK(curkse);
	THR_SET_STATE(curthread, PS_DEAD);
	_thr_sched_switch_unlocked(curthread);

	/* This point should never be reached. */
	PANIC("Dead thread has resumed");
}
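/*
 * A minimal caller-side sketch (not part of the library source): a start
 * routine that terminates itself with pthread_exit(), making its result
 * available to a joiner.  The helper name is hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

static void *
worker(void *arg)
{
	int *result = malloc(sizeof(int));

	if (result != NULL)
		*result = 42;
	/* Runs cleanup handlers and TSD destructors before terminating. */
	pthread_exit(result);
}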
Example #3
int
_pthread_cancel(pthread_t pthread)
{
	struct pthread *curthread = _get_curthread();
	struct pthread *joinee = NULL;
	struct kse_mailbox *kmbx = NULL;
	int ret;

	if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) {
		/*
		 * Take the thread's lock while we change the cancel flags.
		 */
		THR_THREAD_LOCK(curthread, pthread);
		THR_SCHED_LOCK(curthread, pthread);
		if (pthread->flags & THR_FLAGS_EXITING) {
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_THREAD_UNLOCK(curthread, pthread);
			_thr_ref_delete(curthread, pthread);
			return (ESRCH);
		}
		if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) ||
		    (((pthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
		    ((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0)))
			/* Just mark it for cancellation: */
			pthread->cancelflags |= THR_CANCELLING;
		else {
			/*
			 * Check if we need to kick it back into the
			 * run queue:
			 */
			switch (pthread->state) {
			case PS_RUNNING:
				/* No need to resume: */
				pthread->cancelflags |= THR_CANCELLING;
				break;

			case PS_LOCKWAIT:
				/*
				 * These can't be removed from the queue.
				 * Just mark it as cancelling and tell it
				 * to yield once it leaves the critical
				 * region.
				 */
				pthread->cancelflags |= THR_CANCELLING;
				pthread->critical_yield = 1;
				break;

			case PS_SLEEP_WAIT:
			case PS_SIGSUSPEND:
			case PS_SIGWAIT:
				/* Interrupt and resume: */
				pthread->interrupted = 1;
				pthread->cancelflags |= THR_CANCELLING;
				kmbx = _thr_setrunnable_unlocked(pthread);
				break;

			case PS_JOIN:
				/* Disconnect the thread from the joinee: */
				joinee = pthread->join_status.thread;
				pthread->join_status.thread = NULL;
				pthread->cancelflags |= THR_CANCELLING;
				kmbx = _thr_setrunnable_unlocked(pthread);
				if ((joinee != NULL) &&
				    (pthread->kseg == joinee->kseg)) {
					/* Remove the joiner from the joinee. */
					joinee->joiner = NULL;
					joinee = NULL;
				}
				break;

			case PS_SUSPENDED:
			case PS_MUTEX_WAIT:
			case PS_COND_WAIT:
				/*
				 * Threads in these states may be in queues.
				 * In order to preserve queue integrity, the
				 * cancelled thread must remove itself from the
				 * queue.  Mark the thread as interrupted and
				 * needing cancellation, and set the state to
				 * running.  When the thread resumes, it will
				 * remove itself from the queue and call the
				 * cancellation completion routine.
				 */
				pthread->interrupted = 1;
				pthread->cancelflags |= THR_CANCEL_NEEDED;
				kmbx = _thr_setrunnable_unlocked(pthread);
				pthread->continuation =
					_thr_finish_cancellation;
				break;

			case PS_DEAD:
			case PS_DEADLOCK:
			case PS_STATE_MAX:
				/* Ignore - only here to silence -Wall: */
				break;
			}
			if ((pthread->cancelflags & THR_AT_CANCEL_POINT) &&
			    (pthread->blocked != 0 ||
			     pthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
				kse_thr_interrupt(&pthread->tcb->tcb_tmbx,
					KSE_INTR_INTERRUPT, 0);
		}

		/*
		 * Release the thread's lock and remove the
		 * reference:
		 */
		THR_SCHED_UNLOCK(curthread, pthread);
		THR_THREAD_UNLOCK(curthread, pthread);
		_thr_ref_delete(curthread, pthread);
		if (kmbx != NULL)
			kse_wakeup(kmbx);

		if ((joinee != NULL) &&
		    (_thr_ref_add(curthread, joinee, /* include dead */1) == 0)) {
			/* Remove the joiner from the joinee. */
			THR_SCHED_LOCK(curthread, joinee);
			joinee->joiner = NULL;
			THR_SCHED_UNLOCK(curthread, joinee);
			_thr_ref_delete(curthread, joinee);
		}
	}
	return (ret);
}
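/*
 * A minimal caller-side sketch (not part of the library source): cancelling
 * a thread that is blocked at a cancellation point and then reaping it.
 * The helper names are hypothetical.
 */
#include <pthread.h>
#include <unistd.h>

static void *
sleeper(void *arg)
{
	/* sleep() is a cancellation point; a deferred cancel fires here. */
	for (;;)
		sleep(1);
}

static void
cancel_example(void)
{
	pthread_t tid;
	void *status;

	if (pthread_create(&tid, NULL, sleeper, NULL) == 0) {
		pthread_cancel(tid);
		pthread_join(tid, &status);
		/* status is PTHREAD_CANCELED after a successful cancel. */
	}
}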
Example #4
int
_pthread_join(pthread_t pthread, void **thread_return)
{
	struct pthread *curthread = _get_curthread();
	void *tmp;
	kse_critical_t crit;
	int ret = 0;
 
	_thr_cancel_enter(curthread);

	/* Check if the caller has specified an invalid thread: */
	if (pthread == NULL || pthread->magic != THR_MAGIC) {
		/* Invalid thread: */
		_thr_cancel_leave(curthread, 1);
		return (EINVAL);
	}

	/* Check if the caller has specified itself: */
	if (pthread == curthread) {
		/* Avoid a deadlock condition: */
		_thr_cancel_leave(curthread, 1);
		return (EDEADLK);
	}

	/*
	 * Find the thread in the list of active threads or in the
	 * list of dead threads:
	 */
	if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) {
		/* Return an error: */
		_thr_cancel_leave(curthread, 1);
		return (ESRCH);
	}

	THR_SCHED_LOCK(curthread, pthread);
	/* Check if this thread has been detached: */
	if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
		THR_SCHED_UNLOCK(curthread, pthread);
		/* Remove the reference and return an error: */
		_thr_ref_delete(curthread, pthread);
		ret = EINVAL;
	} else {
		/* Lock the target thread while checking its state. */
		if (pthread->state == PS_DEAD) {
			/* Return the thread's return value: */
			tmp = pthread->ret;

			/* Detach the thread. */
			pthread->attr.flags |= PTHREAD_DETACHED;

			/* Unlock the thread. */
			THR_SCHED_UNLOCK(curthread, pthread);

			/*
			 * Remove the thread from the list of active
			 * threads and add it to the GC list.
			 */
			crit = _kse_critical_enter();
			KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
			THR_LIST_REMOVE(pthread);
			THR_GCLIST_ADD(pthread);
			KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
			_kse_critical_leave(crit);

			/* Remove the reference. */
			_thr_ref_delete(curthread, pthread);
			if (thread_return != NULL)
				*thread_return = tmp;
		}
		else if (pthread->joiner != NULL) {
			/* Unlock the thread and remove the reference. */
			THR_SCHED_UNLOCK(curthread, pthread);
			_thr_ref_delete(curthread, pthread);

			/* Multiple joiners are not supported. */
			ret = ENOTSUP;
		}
		else {
			/* Set the running thread to be the joiner: */
			pthread->joiner = curthread;

			/* Keep track of which thread we're joining to: */
			curthread->join_status.thread = pthread;

			/* Unlock the thread and remove the reference. */
			THR_SCHED_UNLOCK(curthread, pthread);
			_thr_ref_delete(curthread, pthread);

			THR_SCHED_LOCK(curthread, curthread);
			while (curthread->join_status.thread == pthread) {
				THR_SET_STATE(curthread, PS_JOIN);
				THR_SCHED_UNLOCK(curthread, curthread);
				/* Schedule the next thread: */
				_thr_sched_switch(curthread);
				THR_SCHED_LOCK(curthread, curthread);
			}
			THR_SCHED_UNLOCK(curthread, curthread);

			if ((curthread->cancelflags & THR_CANCELLING) &&
			   !(curthread->cancelflags & PTHREAD_CANCEL_DISABLE)) {
				if (_thr_ref_add(curthread, pthread, 1) == 0) {
					THR_SCHED_LOCK(curthread, pthread);
					pthread->joiner = NULL;
					THR_SCHED_UNLOCK(curthread, pthread);
					_thr_ref_delete(curthread, pthread);
				}
				_pthread_exit(PTHREAD_CANCELED);
			}

			/*
			 * The thread return value and error are set by the
			 * thread we're joining to when it exits or detaches:
			 */
			ret = curthread->join_status.error;
			if ((ret == 0) && (thread_return != NULL))
				*thread_return = curthread->join_status.ret;
		}
	}
	_thr_cancel_leave(curthread, 1);

	/* Return the completion status: */
	return (ret);
}
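/*
 * A minimal caller-side sketch (not part of the library source): joining a
 * thread and retrieving the value it returned.  The helper names are
 * hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static void *
adder(void *arg)
{
	long n = (long)arg;

	return ((void *)(n + 1));
}

static void
join_example(void)
{
	pthread_t tid;
	void *result;

	if (pthread_create(&tid, NULL, adder, (void *)41) == 0 &&
	    pthread_join(tid, &result) == 0)
		printf("result: %ld\n", (long)result);
}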
Example #5
int
_pthread_setschedparam(pthread_t pthread, int policy, 
	const struct sched_param *param)
{
	struct pthread *curthread = _get_curthread();
	int	in_syncq;
	int	in_readyq = 0;
	int	old_prio;
	int	ret = 0;

	if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR)) {
		/* Return an invalid argument error: */
		ret = EINVAL;
	} else if ((param->sched_priority < THR_MIN_PRIORITY) ||
	    (param->sched_priority > THR_MAX_PRIORITY)) {
		/* Return an unsupported value error. */
		ret = ENOTSUP;

	/* Find the thread in the list of active threads: */
	} else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
	    == 0) {
		/*
		 * Lock the thread's scheduling queue while we change
		 * its priority:
		 */
		THR_SCHED_LOCK(curthread, pthread);
		if ((pthread->state == PS_DEAD) ||
		    (pthread->state == PS_DEADLOCK) ||
		    ((pthread->flags & THR_FLAGS_EXITING) != 0)) {
			THR_SCHED_UNLOCK(curthread, pthread);
			_thr_ref_delete(curthread, pthread);
			return (ESRCH);
		}
		in_syncq = pthread->sflags & THR_FLAGS_IN_SYNCQ;

		/* Set the scheduling policy: */
		pthread->attr.sched_policy = policy;

		if (param->sched_priority ==
		    THR_BASE_PRIORITY(pthread->base_priority))
			/*
			 * There is nothing to do; unlock the thread's
			 * scheduling queue.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * Remove the thread from its current priority
			 * queue before any adjustments are made to its
			 * active priority:
			 */
			old_prio = pthread->active_priority;
			if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0) {
				in_readyq = 1;
				THR_RUNQ_REMOVE(pthread);
			}

			/* Set the thread base priority: */
			pthread->base_priority &=
			    (THR_SIGNAL_PRIORITY | THR_RT_PRIORITY);
			pthread->base_priority |= param->sched_priority;

			/* Recalculate the active priority: */
			pthread->active_priority = MAX(pthread->base_priority,
			    pthread->inherited_priority);

			if (in_readyq) {
				if ((pthread->priority_mutex_count > 0) &&
				    (old_prio > pthread->active_priority)) {
					/*
					 * POSIX states that if the priority is
					 * being lowered, the thread must be
					 * inserted at the head of the queue for
					 * its priority if it owns any priority
					 * protection or inheritance mutexes.
					 */
					THR_RUNQ_INSERT_HEAD(pthread);
				}
				else
					THR_RUNQ_INSERT_TAIL(pthread);
			}

			/* Unlock the thread's scheduling queue: */
			THR_SCHED_UNLOCK(curthread, pthread);

			/*
			 * Check for any mutex priority adjustments.  This
			 * includes checking for a priority mutex on which
			 * this thread is waiting.
			 */
			_mutex_notify_priochange(curthread, pthread, in_syncq);
		}
		_thr_ref_delete(curthread, pthread);
	}
	return (ret);
}
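/*
 * A minimal caller-side sketch (not part of the library source): raising a
 * thread's priority under SCHED_RR through the interface implemented above.
 * The helper name is hypothetical.
 */
#include <pthread.h>
#include <sched.h>

static int
boost_thread(pthread_t tid)
{
	struct sched_param param;
	int policy, ret;

	if ((ret = pthread_getschedparam(tid, &policy, &param)) != 0)
		return (ret);
	/* The new priority must stay within the policy's valid range. */
	param.sched_priority++;
	return (pthread_setschedparam(tid, SCHED_RR, &param));
}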
Example #6
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	struct kse_mailbox *kmbx;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_broadcast: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
       /*
        * If the condition variable is statically initialized, perform dynamic
        * initialization.
        */
	else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Enter a loop to bring all threads off the
			 * condition queue:
			 */
			while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				pthread->sigbackout = NULL;
				if ((pthread->kseg == curthread->kseg) &&
				    (pthread->active_priority >
				    curthread->active_priority))
					curthread->critical_yield = 1;
				kmbx = _thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
				if (kmbx != NULL)
					kse_wakeup(kmbx);
			}

			/* There are no more waiting threads: */
			(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}
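/*
 * A minimal caller-side sketch (not part of the library source): using
 * pthread_cond_broadcast() to release every waiter once a shared flag has
 * been set under the associated mutex.  Names are hypothetical.
 */
#include <pthread.h>

static pthread_mutex_t ready_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ready_cv = PTHREAD_COND_INITIALIZER;
static int ready = 0;

static void
announce_ready(void)
{
	pthread_mutex_lock(&ready_lock);
	ready = 1;
	/* Wake all threads blocked in pthread_cond_wait() on ready_cv. */
	pthread_cond_broadcast(&ready_cv);
	pthread_mutex_unlock(&ready_lock);
}

static void
wait_for_ready(void)
{
	pthread_mutex_lock(&ready_lock);
	while (!ready)
		pthread_cond_wait(&ready_cv, &ready_lock);
	pthread_mutex_unlock(&ready_lock);
}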
Example #7
int
_pthread_cond_signal(pthread_cond_t * cond)
{
	struct pthread	*curthread = _get_curthread();
	struct pthread	*pthread;
	struct kse_mailbox *kmbx;
	int		rval = 0;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_signal: locklevel is not zero!");
	if (cond == NULL)
		rval = EINVAL;
       /*
        * If the condition variable is statically initialized, perform dynamic
        * initialization.
        */
	else if (*cond != NULL || (rval = _pthread_cond_init(cond, NULL)) == 0) {
		/* Lock the condition variable structure: */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* Increment the sequence number: */
			(*cond)->c_seqno++;

			/*
			 * Wakeups have to be done with the CV lock held;
			 * otherwise there is a race condition where the
			 * thread can timeout, run on another KSE, and enter
			 * another blocking state (including blocking on a CV).
			 */
			if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
			    != NULL) {
				THR_SCHED_LOCK(curthread, pthread);
				cond_queue_remove(*cond, pthread);
				pthread->sigbackout = NULL;
				if ((pthread->kseg == curthread->kseg) &&
				    (pthread->active_priority >
				    curthread->active_priority))
					curthread->critical_yield = 1;
				kmbx = _thr_setrunnable_unlocked(pthread);
				THR_SCHED_UNLOCK(curthread, pthread);
				if (kmbx != NULL)
					kse_wakeup(kmbx);
			}
			/* Check for no more waiters: */
			if (TAILQ_EMPTY(&(*cond)->c_queue))
				(*cond)->c_mutex = NULL;
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		/* Unlock the condition variable structure: */
		THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
	}

	/* Return the completion status: */
	return (rval);
}
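/*
 * A minimal caller-side sketch (not part of the library source): the usual
 * hand-off where pthread_cond_signal() wakes one consumer waiting for an
 * item to appear.  Names are hypothetical.
 */
#include <pthread.h>

static pthread_mutex_t item_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t item_cv = PTHREAD_COND_INITIALIZER;
static int item_available = 0;

static void
produce(void)
{
	pthread_mutex_lock(&item_lock);
	item_available = 1;
	/* Only one waiter needs to run, so signal rather than broadcast. */
	pthread_cond_signal(&item_cv);
	pthread_mutex_unlock(&item_lock);
}

static void
consume(void)
{
	pthread_mutex_lock(&item_lock);
	while (!item_available)
		pthread_cond_wait(&item_cv, &item_lock);
	item_available = 0;
	pthread_mutex_unlock(&item_lock);
}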
Example #8
int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
		       const struct timespec * abstime)
{
	struct pthread	*curthread = _get_curthread();
	int	rval = 0;
	int	done = 0;
	int	mutex_locked = 1;
	int	seqno;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_timedwait: locklevel is not zero!");

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);
	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	if (*cond == NULL && (rval = _pthread_cond_init(cond, NULL)) != 0)
		return (rval);

	if (!_kse_isthreaded())
		_kse_setthreaded(1);

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */

	/* Lock the condition variable structure: */
	THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
	seqno = (*cond)->c_seqno;
	do {
		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Return invalid argument error: */
				rval = EINVAL;
			} else {
				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Unlock the mutex: */
				if (mutex_locked &&
				   ((rval = _mutex_cv_unlock(mutex)) != 0)) {
					/*
					 * Cannot unlock the mutex; remove the
					 * running thread from the condition
					 * variable queue: 
					 */
					cond_queue_remove(*cond, curthread);
				} else {
					/* Remember the mutex: */
					(*cond)->c_mutex = *mutex;

					/*
					 * Don't unlock the mutex the next
					 * time through the loop (if the
					 * thread has to be requeued after
					 * handling a signal).
					 */
					mutex_locked = 0;

					/*
					 * This thread is active and is in a
					 * critical region (holding the cv
					 * lock); we should be able to safely
					 * set the state.
					 */
					THR_SCHED_LOCK(curthread, curthread);
					/* Set the wakeup time: */
					curthread->wakeup_time.tv_sec =
					    abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
					    abstime->tv_nsec;
					THR_SET_STATE(curthread, PS_COND_WAIT);

					/* Remember the CV: */
					curthread->data.cond = *cond;
					curthread->sigbackout = cond_wait_backout;
					THR_SCHED_UNLOCK(curthread, curthread);

					/* Unlock the CV structure: */
					THR_LOCK_RELEASE(curthread,
					    &(*cond)->c_lock);

					/* Schedule the next thread: */
					_thr_sched_switch(curthread);

					/*
					 * XXX - This really isn't a good check
					 * since there can be more than one
					 * thread waiting on the CV.  Signals
					 * sent to threads waiting on mutexes
					 * or CVs should really be deferred
					 * until the threads are no longer
					 * waiting, but POSIX says that signals
					 * should be sent "as soon as possible".
					 */
					done = (seqno != (*cond)->c_seqno);
					if (done && !THR_IN_CONDQ(curthread)) {
						/*
						 * The thread is dequeued, so
						 * it is safe to clear these.
						 */
						curthread->data.cond = NULL;
						curthread->sigbackout = NULL;
						check_continuation(curthread,
						    NULL, mutex);
						return (_mutex_cv_lock(mutex));
					}

					/* Relock the CV structure: */
					THR_LOCK_ACQUIRE(curthread,
					    &(*cond)->c_lock);

					/*
					 * Clear these after taking the lock to
					 * prevent a race condition where a
					 * signal can arrive before dequeueing
					 * the thread.
					 */
					curthread->data.cond = NULL;
					curthread->sigbackout = NULL;

					done = (seqno != (*cond)->c_seqno);

					if (THR_IN_CONDQ(curthread)) {
						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_EMPTY(&(*cond)->c_queue))
							(*cond)->c_mutex = NULL;
					}

					if (curthread->timeout != 0) {
						/* The wait timedout. */
						rval = ETIMEDOUT;
					}
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		check_continuation(curthread, *cond,
		    mutex_locked ? NULL : mutex);
	} while ((done == 0) && (rval == 0));

	/* Unlock the condition variable structure: */
	THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

	if (mutex_locked == 0)
		_mutex_cv_lock(mutex);

	/* Return the completion status: */
	return (rval);
}
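/*
 * A minimal caller-side sketch (not part of the library source): waiting on
 * a condition with an absolute deadline, which is what
 * pthread_cond_timedwait() expects.  Names are hypothetical.
 */
#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t flag_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flag_cv = PTHREAD_COND_INITIALIZER;
static int flag = 0;

static int
wait_up_to_five_seconds(void)
{
	struct timespec deadline;
	int ret = 0;

	/* Build an absolute wall-clock deadline, not a relative interval. */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;

	pthread_mutex_lock(&flag_lock);
	while (!flag && ret != ETIMEDOUT)
		ret = pthread_cond_timedwait(&flag_cv, &flag_lock, &deadline);
	pthread_mutex_unlock(&flag_lock);
	return (ret);
}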
Example #9
/*
 * Some notes on new thread creation and first-time initialization
 * to enable multi-threading.
 *
 * There are basically two things that need to be done.
 *
 *   1) The internal library variables must be initialized.
 *   2) Upcalls need to be enabled to allow multiple threads
 *      to be run.
 *
 * The first may be done as a result of other pthread functions
 * being called.  When _thr_initial is null, _libpthread_init is
 * called to initialize the internal variables; this also creates
 * or sets the initial thread.  It'd be nice to automatically
 * have _libpthread_init called on program execution so we don't
 * have to have checks throughout the library.
 *
 * The second part is only triggered by the creation of the first
 * thread (other than the initial/main thread).  If the thread
 * being created is a scope system thread, then a new KSE/KSEG
 * pair needs to be allocated.  Also, if upcalls haven't been
 * enabled on the initial thread's KSE, they must be now that
 * there is more than one thread; this could be delayed until
 * the initial KSEG has more than one thread.
 */
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct kse *kse = NULL;
	struct kse_group *kseg = NULL;
	kse_critical_t crit;
	int ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	/*
	 * Turn on threaded mode; if this fails there is no point
	 * in doing any further work.
	 */
	if (_kse_isthreaded() == 0 && _kse_setthreaded(1)) {
		return (EAGAIN);
	}
	curthread = _get_curthread();

	/*
	 * Allocate memory for the thread structure.
	 * Some functions use malloc, so don't put it
	 * in a critical region.
	 */
	if ((new_thread = _thr_alloc(curthread)) == NULL) {
		/* Insufficient memory to create a thread: */
		ret = EAGAIN;
	} else {
		/* Check if default thread attributes are required: */
		if (attr == NULL || *attr == NULL)
			/* Use the default thread attributes: */
			new_thread->attr = _pthread_attr_default;
		else {
			new_thread->attr = *(*attr);
			if ((*attr)->sched_inherit == PTHREAD_INHERIT_SCHED) {
				/* Inherit the scheduling contention scope. */
				if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
					new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
				else
					new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
				/*
				 * The scheduling policy and parameters are
				 * inherited in the code below.
				 */
			}
		}
		if (_thread_scope_system > 0)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else if ((_thread_scope_system < 0)
		    && (thread != &_thr_sig_daemon))
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
		if (create_stack(&new_thread->attr) != 0) {
			/* Insufficient memory to create a stack: */
			ret = EAGAIN;
			_thr_free(curthread, new_thread);
		}
		else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
		    (((kse = _kse_alloc(curthread, 1)) == NULL)
		    || ((kseg = _kseg_alloc(curthread)) == NULL))) {
			/* Insufficient memory to create a new KSE/KSEG: */
			ret = EAGAIN;
			if (kse != NULL) {
				kse->k_kcb->kcb_kmbx.km_flags |= KMF_DONE;
				_kse_free(curthread, kse);
			}
			free_stack(&new_thread->attr);
			_thr_free(curthread, new_thread);
		}
		else {
			if (kseg != NULL) {
				/* Add the KSE to the KSEG's list of KSEs. */
				TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_kgqe);
				kseg->kg_ksecount = 1;
				kse->k_kseg = kseg;
				kse->k_schedq = &kseg->kg_schedq;
			}
			/*
			 * Write a magic value to the thread structure
			 * to help identify valid ones:
			 */
			new_thread->magic = THR_MAGIC;

			new_thread->slice_usec = -1;
			new_thread->start_routine = start_routine;
			new_thread->arg = arg;
			new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
			    PTHREAD_CANCEL_DEFERRED;

			/* No thread is wanting to join to this one: */
			new_thread->joiner = NULL;

			/*
			 * Initialize the machine context.
			 * Enter a critical region to get consistent context.
			 */
			crit = _kse_critical_enter();
			THR_GETCONTEXT(&new_thread->tcb->tcb_tmbx.tm_context);
			/* Initialize the thread for signals: */
			new_thread->sigmask = curthread->sigmask;
			_kse_critical_leave(crit);

			new_thread->tcb->tcb_tmbx.tm_udata = new_thread;
			new_thread->tcb->tcb_tmbx.tm_context.uc_sigmask =
			    new_thread->sigmask;
			new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size =
			    new_thread->attr.stacksize_attr;
			new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp =
			    new_thread->attr.stackaddr_attr;
			makecontext(&new_thread->tcb->tcb_tmbx.tm_context,
			    (void (*)(void))thread_start, 3, new_thread,
			    start_routine, arg);
			/*
			 * Check if this thread is to inherit the scheduling
			 * attributes from its parent:
			 */
			if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
				/*
				 * Copy the scheduling attributes.
				 * Lock the scheduling lock to get consistent
				 * scheduling parameters.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				new_thread->base_priority =
				    curthread->base_priority &
				    ~THR_SIGNAL_PRIORITY;
				new_thread->attr.prio =
				    curthread->base_priority &
				    ~THR_SIGNAL_PRIORITY;
				new_thread->attr.sched_policy =
				    curthread->attr.sched_policy;
				THR_SCHED_UNLOCK(curthread, curthread);
			} else {
				/*
				 * Use just the thread priority, leaving the
				 * other scheduling attributes as their
				 * default values:
				 */
				new_thread->base_priority =
				    new_thread->attr.prio;
			}
			new_thread->active_priority = new_thread->base_priority;
			new_thread->inherited_priority = 0;

			/* Initialize the mutex queue: */
			TAILQ_INIT(&new_thread->mutexq);

			/* Initialize hooks in the thread structure: */
			new_thread->specific = NULL;
			new_thread->specific_data_count = 0;
			new_thread->cleanup = NULL;
			new_thread->flags = 0;
			new_thread->tlflags = 0;
			new_thread->sigbackout = NULL;
			new_thread->continuation = NULL;
			new_thread->wakeup_time.tv_sec = -1;
			new_thread->lock_switch = 0;
			sigemptyset(&new_thread->sigpend);
			new_thread->check_pending = 0;
			new_thread->locklevel = 0;
			new_thread->rdlock_count = 0;
			new_thread->sigstk.ss_sp = 0;
			new_thread->sigstk.ss_size = 0;
			new_thread->sigstk.ss_flags = SS_DISABLE;
			new_thread->oldsigmask = NULL;

			if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
				new_thread->state = PS_SUSPENDED;
				new_thread->flags = THR_FLAGS_SUSPENDED;
			}
			else
				new_thread->state = PS_RUNNING;

			/*
			 * System scope threads have their own kse and
			 * kseg.  Process scope threads are all hung
			 * off the main process kseg.
			 */
			if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
				new_thread->kseg = _kse_initial->k_kseg;
				new_thread->kse = _kse_initial;
			}
			else {
				kse->k_curthread = NULL;
				kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
				new_thread->kse = kse;
				new_thread->kseg = kse->k_kseg;
				kse->k_kcb->kcb_kmbx.km_udata = kse;
				kse->k_kcb->kcb_kmbx.km_curthread = NULL;
			}

			/*
			 * Schedule the new thread starting a new KSEG/KSE
			 * pair if necessary.
			 */
			ret = _thr_schedule_add(curthread, new_thread);
			if (ret != 0)
				free_thread(curthread, new_thread);
			else {
				/* Return a pointer to the thread structure: */
				(*thread) = new_thread;
			}
		}
	}

	/* Return the status: */
	return (ret);
}
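/*
 * A minimal caller-side sketch (not part of the library source): creating a
 * system-scope thread with an explicit stack size, exercising the attribute
 * handling in the function above.  The helper names are hypothetical.
 */
#include <pthread.h>

static void *
start(void *arg)
{
	return (arg);
}

static int
create_example(pthread_t *tid)
{
	pthread_attr_t attr;
	int ret;

	pthread_attr_init(&attr);
	pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
	pthread_attr_setstacksize(&attr, 1024 * 1024);
	ret = pthread_create(tid, &attr, start, NULL);
	pthread_attr_destroy(&attr);
	return (ret);
}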