/*
 * Suspend the calling thread, temporarily replacing its signal mask with
 * *set, until a signal is delivered.  Per POSIX, this function can only
 * return -1: errno is EINTR on a signal wakeup, EINVAL for a NULL set.
 * System-scope threads defer to the kernel syscall directly.
 */
int
_sigsuspend(const sigset_t *set)
{
	struct pthread *curthread = _get_curthread();
	sigset_t oldmask, newmask, tempset;
	int ret = -1;	/* sigsuspend always fails; only errno varies. */

	/* System-scope threads are handled entirely by the kernel. */
	if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
		return (__sys_sigsuspend(set));

	/* Check if a new signal set was provided by the caller: */
	if (set != NULL) {
		newmask = *set;
		/* Strip the signals that can never be masked. */
		SIG_CANTMASK(newmask);

		THR_LOCK_SWITCH(curthread);

		/*
		 * Save current sigmask.  oldsigmask points at our stack
		 * copy; presumably the signal-delivery code restores the
		 * mask from it and clears the pointer — TODO confirm
		 * against the dispatch path.
		 */
		oldmask = curthread->sigmask;
		curthread->oldsigmask = &oldmask;

		/* Change the caller's mask: */
		curthread->sigmask = newmask;

		/*
		 * tempset = signals already pending that the new mask
		 * does NOT block.  If none, we must actually sleep.
		 */
		tempset = curthread->sigpend;
		SIGSETNAND(tempset, newmask);
		if (SIGISEMPTY(tempset)) {
			THR_SET_STATE(curthread, PS_SIGSUSPEND);
			/* Wait for a signal (releases the switch lock): */
			_thr_sched_switch_unlocked(curthread);
		} else {
			/* A deliverable signal is already pending. */
			curthread->check_pending = 1;
			THR_UNLOCK_SWITCH(curthread);
			/* check pending signal I can handle: */
			_thr_sig_check_pending(curthread);
		}

		/*
		 * If we woke because of cancellation, the signal code never
		 * consumed oldsigmask, so detach it from our stack frame
		 * before returning; otherwise it must already be cleared.
		 */
		if ((curthread->cancelflags & THR_CANCELLING) != 0)
			curthread->oldsigmask = NULL;
		else {
			THR_ASSERT(curthread->oldsigmask == NULL,
			    "oldsigmask is not cleared");
		}

		/* Always return an interrupted error: */
		errno = EINTR;
	} else {
		/* Return an invalid argument error: */
		errno = EINVAL;
	}

	/* Return the completion status: */
	return (ret);
}
/*
 * Terminate the calling thread: run cleanup handlers and TSD destructors,
 * publish the return value for joiners, and hand the thread to the
 * scheduler as PS_DEAD.  Never returns.  Calls exit(0) when this is the
 * last "real" thread of the process.
 */
void
_pthread_exit(void *status)
{
	struct pthread *curthread = _get_curthread();
	kse_critical_t crit;
	struct kse *curkse;

	/*
	 * Check if this thread is already in the process of exiting:
	 * re-entry means a TSD destructor called pthread_exit(), which
	 * POSIX forbids — abort loudly rather than loop forever.
	 */
	if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
		char msg[128];
		snprintf(msg, sizeof(msg), "Thread %p has called "
		    "pthread_exit() from a destructor. POSIX 1003.1 "
		    "1996 s16.2.5.2 does not allow this!", curthread);
		PANIC(msg);
	}

	/*
	 * Flag this thread as exiting.  Threads should now be prevented
	 * from joining to this thread.
	 */
	THR_SCHED_LOCK(curthread, curthread);
	curthread->flags |= THR_FLAGS_EXITING;
	THR_SCHED_UNLOCK(curthread, curthread);

	/*
	 * To avoid signal-lost problem, if signals had already been
	 * delivered to us, handle it. we have already set EXITING flag
	 * so no new signals should be delivered to us.
	 * XXX this is not enough if signal was delivered just before
	 * thread called sigprocmask and masked it! in this case, we
	 * might have to re-post the signal by kill() if the signal
	 * is targeting process (not for a specified thread).
	 * Kernel has same signal-lost problem, a signal may be delivered
	 * to a thread which is on the way to call sigprocmask or thr_exit()!
	 */
	if (curthread->check_pending)
		_thr_sig_check_pending(curthread);

	/* Save the return value for any joiner: */
	curthread->ret = status;

	/* Pop and run every pthread_cleanup_push() handler. */
	while (curthread->cleanup != NULL) {
		_pthread_cleanup_pop(1);
	}
	if (curthread->attr.cleanup_attr != NULL) {
		curthread->attr.cleanup_attr(curthread->attr.arg_attr);
	}

	/* Check if there is thread specific data: */
	if (curthread->specific != NULL) {
		/* Run the thread-specific data destructors: */
		_thread_cleanupspecific();
	}

	/* Single-threaded process: a thread exit is a process exit. */
	if (!_kse_isthreaded())
		exit(0);

	crit = _kse_critical_enter();
	curkse = _get_curkse();
	KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); /* Use thread_list_lock */
	_thread_active_threads--;
	/*
	 * If this was the last application thread (the thresholds differ
	 * depending on whether system-scope threads are in use), exit the
	 * whole process.
	 */
	if ((_thread_scope_system <= 0 && _thread_active_threads == 1) ||
	    (_thread_scope_system > 0 && _thread_active_threads == 0)) {
		KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
		_kse_critical_leave(crit);
		exit(0);
		/* Never reach! */
	}
	KSE_LOCK_RELEASE(curkse, &_thread_list_lock);

	/* This thread will never be re-scheduled. */
	KSE_LOCK(curkse);
	THR_SET_STATE(curthread, PS_DEAD);
	_thr_sched_switch_unlocked(curthread);
	/* Never reach! */

	/* This point should not be reached. */
	PANIC("Dead thread has resumed");
}
/*
 * Common backend for the sigwait()/sigtimedwait() family: wait for one of
 * the signals in *set, with an optional timeout.  Returns the signal
 * number on success, or -1 with errno set (EINTR on interruption, EAGAIN
 * on timeout).  When info is non-NULL the siginfo of the accepted signal
 * is copied out.  Assumes set is non-NULL — callers must validate.
 */
static int
lib_sigtimedwait(const sigset_t *set, siginfo_t *info,
    const struct timespec *timeout)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;
	int i;
	struct sigwait_data waitdata;
	sigset_t waitset;
	kse_critical_t crit;
	siginfo_t siginfo;

	/* System-scope threads wait directly in the kernel. */
	if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
		if (info == NULL)
			info = &siginfo;	/* kernel needs somewhere to write */
		return (__sys_sigtimedwait(set, info, timeout));
	}

	/*
	 * Initialize the set of signals that will be waited on:
	 */
	waitset = *set;

	/* These signals can't be waited on. */
	SIGDELSET(waitset, SIGKILL);
	SIGDELSET(waitset, SIGSTOP);

	/*
	 * POSIX says that the _application_ must explicitly install
	 * a dummy handler for signals that are SIG_IGN in order
	 * to sigwait on them. Note that SIG_IGN signals are left in
	 * the mask because a subsequent sigaction could enable an
	 * ignored signal.
	 */
	crit = _kse_critical_enter();
	KSE_SCHED_LOCK(curthread->kse, curthread->kseg);

	/*
	 * Fast path: a wanted signal is already pending on this thread;
	 * consume it without blocking.  siginfo[] is indexed from 0,
	 * signal numbers from 1.
	 */
	for (i = 1; i <= _SIG_MAXSIG; ++i) {
		if (SIGISMEMBER(waitset, i) &&
		    SIGISMEMBER(curthread->sigpend, i)) {
			SIGDELSET(curthread->sigpend, i);
			siginfo = curthread->siginfo[i - 1];
			KSE_SCHED_UNLOCK(curthread->kse, curthread->kseg);
			_kse_critical_leave(crit);
			ret = i;
			goto OUT;
		}
	}

	/* Slow path: block in PS_SIGWAIT until delivery or timeout. */
	curthread->timeout = 0;
	curthread->interrupted = 0;
	_thr_set_timeout(timeout);
	/* Wait for a signal: */
	siginfo.si_signo = 0;	/* signal delivery fills this in */
	waitdata.waitset = &waitset;
	waitdata.siginfo = &siginfo;
	curthread->data.sigwait = &waitdata;
	THR_SET_STATE(curthread, PS_SIGWAIT);
	_thr_sched_switch_unlocked(curthread);

	/*
	 * Return the signal number to the caller:
	 */
	if (siginfo.si_signo > 0) {
		ret = siginfo.si_signo;
	} else {
		if (curthread->interrupted)
			errno = EINTR;
		else if (curthread->timeout)
			errno = EAGAIN;
		ret = -1;
	}
	curthread->timeout = 0;
	curthread->interrupted = 0;

	/*
	 * Probably unnecessary, but since it's in a union struct
	 * we don't know how it could be used in the future.
	 */
	curthread->data.sigwait = NULL;

OUT:
	if (ret > 0 && info != NULL)
		*info = siginfo;

	return (ret);
}
/*
 * Wait for the target thread to terminate and optionally collect its
 * return value.  Returns 0 on success, or EINVAL (bad/detached thread),
 * EDEADLK (self-join), ESRCH (no such thread), ENOTSUP (already has a
 * joiner).  This is a cancellation point: a pending cancel while blocked
 * in the join loop calls _pthread_exit(PTHREAD_CANCELED).
 */
int
_pthread_join(pthread_t pthread, void **thread_return)
{
	struct pthread *curthread = _get_curthread();
	void *tmp;
	kse_critical_t crit;
	int ret = 0;

	_thr_cancel_enter(curthread);

	/* Check if the caller has specified an invalid thread: */
	if (pthread == NULL || pthread->magic != THR_MAGIC) {
		/* Invalid thread: */
		_thr_cancel_leave(curthread, 1);
		return (EINVAL);
	}

	/* Check if the caller has specified itself: */
	if (pthread == curthread) {
		/* Avoid a deadlock condition: */
		_thr_cancel_leave(curthread, 1);
		return (EDEADLK);
	}

	/*
	 * Find the thread in the list of active threads or in the
	 * list of dead threads (takes a reference that pins it):
	 */
	if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) {
		/* Return an error: */
		_thr_cancel_leave(curthread, 1);
		return (ESRCH);
	}

	THR_SCHED_LOCK(curthread, pthread);
	/* Check if this thread has been detached: */
	if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
		THR_SCHED_UNLOCK(curthread, pthread);
		/* Remove the reference and return an error: */
		_thr_ref_delete(curthread, pthread);
		ret = EINVAL;
	} else {
		/* Lock the target thread while checking its state. */
		if (pthread->state == PS_DEAD) {
			/* Already dead: reap it without blocking. */
			/* Return the thread's return value: */
			tmp = pthread->ret;

			/* Detach the thread so nobody else can join it. */
			pthread->attr.flags |= PTHREAD_DETACHED;

			/* Unlock the thread. */
			THR_SCHED_UNLOCK(curthread, pthread);

			/*
			 * Remove the thread from the list of active
			 * threads and add it to the GC list.
			 */
			crit = _kse_critical_enter();
			KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
			THR_LIST_REMOVE(pthread);
			THR_GCLIST_ADD(pthread);
			KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
			_kse_critical_leave(crit);

			/* Remove the reference. */
			_thr_ref_delete(curthread, pthread);
			if (thread_return != NULL)
				*thread_return = tmp;
		} else if (pthread->joiner != NULL) {
			/* Unlock the thread and remove the reference. */
			THR_SCHED_UNLOCK(curthread, pthread);
			_thr_ref_delete(curthread, pthread);

			/* Multiple joiners are not supported. */
			ret = ENOTSUP;
		} else {
			/* Set the running thread to be the joiner: */
			pthread->joiner = curthread;

			/* Keep track of which thread we're joining to: */
			curthread->join_status.thread = pthread;

			/* Unlock the thread and remove the reference. */
			THR_SCHED_UNLOCK(curthread, pthread);
			_thr_ref_delete(curthread, pthread);

			/*
			 * Block in PS_JOIN until the target (or a detach)
			 * clears join_status.thread.  The loop guards
			 * against spurious wakeups (e.g. signals).
			 */
			THR_SCHED_LOCK(curthread, curthread);
			while (curthread->join_status.thread == pthread) {
				THR_SET_STATE(curthread, PS_JOIN);
				THR_SCHED_UNLOCK(curthread, curthread);
				/* Schedule the next thread: */
				_thr_sched_switch(curthread);
				THR_SCHED_LOCK(curthread, curthread);
			}
			THR_SCHED_UNLOCK(curthread, curthread);

			/*
			 * If we were cancelled while waiting, unhook
			 * ourselves from the target (if it still exists)
			 * and honour the cancel.
			 */
			if ((curthread->cancelflags & THR_CANCELLING) &&
			    !(curthread->cancelflags & PTHREAD_CANCEL_DISABLE)) {
				if (_thr_ref_add(curthread, pthread, 1) == 0) {
					THR_SCHED_LOCK(curthread, pthread);
					pthread->joiner = NULL;
					THR_SCHED_UNLOCK(curthread, pthread);
					_thr_ref_delete(curthread, pthread);
				}
				_pthread_exit(PTHREAD_CANCELED);
			}

			/*
			 * The thread return value and error are set by the
			 * thread we're joining to when it exits or detaches:
			 */
			ret = curthread->join_status.error;
			if ((ret == 0) && (thread_return != NULL))
				*thread_return = curthread->join_status.ret;
		}
	}
	_thr_cancel_leave(curthread, 1);

	/* Return the completion status: */
	return (ret);
}
int _nanosleep(const struct timespec *time_to_sleep, struct timespec *time_remaining) { struct pthread *curthread = _get_curthread(); int ret = 0; struct timespec ts, ts1; struct timespec remaining_time; struct timespec wakeup_time; /* Check if the time to sleep is legal: */ if ((time_to_sleep == NULL) || (time_to_sleep->tv_sec < 0) || (time_to_sleep->tv_nsec < 0) || (time_to_sleep->tv_nsec >= 1000000000)) { /* Return an EINVAL error : */ errno = EINVAL; ret = -1; } else { if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) return (__sys_nanosleep(time_to_sleep, time_remaining)); KSE_GET_TOD(curthread->kse, &ts); /* Calculate the time for the current thread to wake up: */ TIMESPEC_ADD(&wakeup_time, &ts, time_to_sleep); THR_LOCK_SWITCH(curthread); curthread->interrupted = 0; curthread->wakeup_time = wakeup_time; THR_SET_STATE(curthread, PS_SLEEP_WAIT); /* Reschedule the current thread to sleep: */ _thr_sched_switch_unlocked(curthread); /* Calculate the remaining time to sleep: */ KSE_GET_TOD(curthread->kse, &ts1); remaining_time.tv_sec = time_to_sleep->tv_sec + ts.tv_sec - ts1.tv_sec; remaining_time.tv_nsec = time_to_sleep->tv_nsec + ts.tv_nsec - ts1.tv_nsec; /* Check if the nanosecond field has underflowed: */ if (remaining_time.tv_nsec < 0) { /* Handle the underflow: */ remaining_time.tv_sec -= 1; remaining_time.tv_nsec += 1000000000; } /* Check if the nanosecond field has overflowed: */ else if (remaining_time.tv_nsec >= 1000000000) { /* Handle the overflow: */ remaining_time.tv_sec += 1; remaining_time.tv_nsec -= 1000000000; } /* Check if the sleep was longer than the required time: */ if (remaining_time.tv_sec < 0) { /* Reset the time left: */ remaining_time.tv_sec = 0; remaining_time.tv_nsec = 0; } /* Check if the time remaining is to be returned: */ if (time_remaining != NULL) { /* Return the actual time slept: */ time_remaining->tv_sec = remaining_time.tv_sec; time_remaining->tv_nsec = remaining_time.tv_nsec; } /* Check if the sleep was interrupted: */ if 
(curthread->interrupted) { /* Return an EINTR error : */ errno = EINTR; ret = -1; } } return (ret); }
/*
 * Wait on a condition variable until signalled/broadcast or until the
 * absolute time *abstime passes.  The mutex is released while waiting
 * and reacquired before returning.  Returns 0 on wakeup, ETIMEDOUT on
 * timeout, or EINVAL for a bad abstime, bad CV type, or a mutex that
 * doesn't match the one already bound to the CV.
 */
int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
    const struct timespec * abstime)
{
	struct pthread *curthread = _get_curthread();
	int rval = 0;
	int done = 0;
	int mutex_locked = 1;	/* mutex is held on entry; dropped once. */
	int seqno;

	THR_ASSERT(curthread->locklevel == 0,
	    "cv_timedwait: locklevel is not zero!");

	/* Validate the absolute timeout. */
	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	/*
	 * If the condition variable is statically initialized, perform dynamic
	 * initialization.
	 */
	if (*cond == NULL && (rval = _pthread_cond_init(cond, NULL)) != 0)
		return (rval);

	if (!_kse_isthreaded())
		_kse_setthreaded(1);

	/*
	 * Enter a loop waiting for a condition signal or broadcast
	 * to wake up this thread.  A loop is needed in case the waiting
	 * thread is interrupted by a signal to execute a signal handler.
	 * It is not (currently) possible to remain in the waiting queue
	 * while running a handler.  Instead, the thread is interrupted
	 * and backed out of the waiting queue prior to executing the
	 * signal handler.
	 */

	/* Lock the condition variable structure: */
	THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
	/* A change in c_seqno later means we were signalled/broadcast. */
	seqno = (*cond)->c_seqno;
	do {
		/*
		 * If the condvar was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*cond)->c_queue);
			(*cond)->c_flags |= COND_FLAGS_INITED;
		}

		/* Process according to condition variable type: */
		switch ((*cond)->c_type) {
		/* Fast condition variable: */
		case COND_TYPE_FAST:
			/* The CV may only be used with one mutex at a time. */
			if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
			    ((*cond)->c_mutex != *mutex))) {
				/* Return invalid argument error: */
				rval = EINVAL;
			} else {
				/* Reset the timeout and interrupted flags: */
				curthread->timeout = 0;
				curthread->interrupted = 0;

				/*
				 * Queue the running thread for the condition
				 * variable:
				 */
				cond_queue_enq(*cond, curthread);

				/* Unlock the mutex (first pass only): */
				if (mutex_locked &&
				    ((rval = _mutex_cv_unlock(mutex)) != 0)) {
					/*
					 * Cannot unlock the mutex; remove the
					 * running thread from the condition
					 * variable queue:
					 */
					cond_queue_remove(*cond, curthread);
				} else {
					/* Remember the mutex: */
					(*cond)->c_mutex = *mutex;

					/*
					 * Don't unlock the mutex the next
					 * time through the loop (if the
					 * thread has to be requeued after
					 * handling a signal).
					 */
					mutex_locked = 0;

					/*
					 * This thread is active and is in a
					 * critical region (holding the cv
					 * lock); we should be able to safely
					 * set the state.
					 */
					THR_SCHED_LOCK(curthread, curthread);
					/* Set the wakeup time: */
					curthread->wakeup_time.tv_sec =
					    abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
					    abstime->tv_nsec;
					THR_SET_STATE(curthread, PS_COND_WAIT);

					/* Remember the CV: */
					curthread->data.cond = *cond;
					curthread->sigbackout = cond_wait_backout;
					THR_SCHED_UNLOCK(curthread, curthread);

					/* Unlock the CV structure: */
					THR_LOCK_RELEASE(curthread,
					    &(*cond)->c_lock);

					/* Schedule the next thread: */
					_thr_sched_switch(curthread);

					/*
					 * XXX - This really isn't a good check
					 * since there can be more than one
					 * thread waiting on the CV.  Signals
					 * sent to threads waiting on mutexes
					 * or CVs should really be deferred
					 * until the threads are no longer
					 * waiting, but POSIX says that signals
					 * should be sent "as soon as possible".
					 */
					done = (seqno != (*cond)->c_seqno);
					if (done && !THR_IN_CONDQ(curthread)) {
						/*
						 * The thread is dequeued, so
						 * it is safe to clear these.
						 */
						curthread->data.cond = NULL;
						curthread->sigbackout = NULL;
						check_continuation(curthread,
						    NULL, mutex);
						/* Reacquire before return. */
						return (_mutex_cv_lock(mutex));
					}

					/* Relock the CV structure: */
					THR_LOCK_ACQUIRE(curthread,
					    &(*cond)->c_lock);

					/*
					 * Clear these after taking the lock to
					 * prevent a race condition where a
					 * signal can arrive before dequeueing
					 * the thread.
					 */
					curthread->data.cond = NULL;
					curthread->sigbackout = NULL;
					done = (seqno != (*cond)->c_seqno);

					if (THR_IN_CONDQ(curthread)) {
						/* Still queued: back out. */
						cond_queue_remove(*cond,
						    curthread);

						/* Check for no more waiters: */
						if (TAILQ_EMPTY(&(*cond)->c_queue))
							(*cond)->c_mutex = NULL;
					}

					if (curthread->timeout != 0) {
						/* The wait timedout. */
						rval = ETIMEDOUT;
					}
				}
			}
			break;

		/* Trap invalid condition variable types: */
		default:
			/* Return an invalid argument error: */
			rval = EINVAL;
			break;
		}

		check_continuation(curthread, *cond,
		    mutex_locked ? NULL : mutex);
	} while ((done == 0) && (rval == 0));

	/* Unlock the condition variable structure: */
	THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

	/* Reacquire the mutex if we dropped it above. */
	if (mutex_locked == 0)
		_mutex_cv_lock(mutex);

	/* Return the completion status: */
	return (rval);
}