/*
 * sem_wait(3) implementation.
 *
 * After validating the semaphore handle, one of two paths is taken:
 *  - Kernel-backed semaphore ((*sem)->syssem != 0): block in ksem_wait()
 *    inside a cancellation region.  Cancellation is acted on only when the
 *    call failed (retval != 0), so a semaphore that was successfully
 *    acquired is never lost to a cancellation request.
 *  - Process-local semaphore: test for a pending cancel first, then, under
 *    (*sem)->lock, wait on the gtzero condition variable while count <= 0.
 *    nwaiters is raised around the wait and a cleanup handler
 *    (decrease_nwaiters) is pushed so the waiter count is restored if the
 *    thread is canceled inside _pthread_cond_wait().
 *
 * Returns 0 on success (local path), ksem_wait()'s result on the kernel
 * path, or -1 when the handle fails validation.
 */
int _sem_wait(sem_t *sem) { struct pthread *curthread; int retval; if (sem_check_validity(sem) != 0) return (-1); curthread = _get_curthread(); if ((*sem)->syssem != 0) { _thr_cancel_enter(curthread); retval = ksem_wait((*sem)->semid); _thr_cancel_leave(curthread, retval != 0); } else { _pthread_testcancel(); _pthread_mutex_lock(&(*sem)->lock); while ((*sem)->count <= 0) { (*sem)->nwaiters++; THR_CLEANUP_PUSH(curthread, decrease_nwaiters, sem); _pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock); THR_CLEANUP_POP(curthread, 0); (*sem)->nwaiters--; } (*sem)->count--; _pthread_mutex_unlock(&(*sem)->lock); retval = 0; } return (retval); }
/*
 * Cancellation-point wrapper around _pthread_cond_wait(): the calling
 * thread may be canceled while blocked on the condition variable.
 */
int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = _pthread_cond_wait(cond, mutex);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper for sigwaitinfo(2); delegates to the common
 * lib_sigtimedwait() helper with no timeout.
 */
int
__sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = lib_sigtimedwait(set, info, NULL);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper around _pthread_cond_timedwait(): the calling
 * thread may be canceled while blocked waiting for the condition variable
 * or the absolute timeout.
 */
int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = _pthread_cond_timedwait(cond, mutex, abstime);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper for sigsuspend(2).
 */
int
__sigsuspend(const sigset_t * set)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = _sigsuspend(set);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper for read(2).
 */
ssize_t
__read(int fd, void *buf, size_t nbytes)
{
	struct pthread *self;
	ssize_t n;

	self = _get_curthread();
	_thr_cancel_enter(self);
	n = __sys_read(fd, buf, nbytes);
	_thr_cancel_leave(self, 1);
	return (n);
}
/*
 * Cancellation-point wrapper for wait4(2).
 */
pid_t
__wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
{
	struct pthread *self;
	pid_t child;

	self = _get_curthread();
	_thr_cancel_enter(self);
	child = _wait4(pid, istat, options, rusage);
	_thr_cancel_leave(self, 1);
	return (child);
}
/*
 * Cancellation-point wrapper for close(2).
 */
int
__close(int fd)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = __sys_close(fd);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper for tcdrain(3).
 */
int
_tcdrain(int fd)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = __tcdrain(fd);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper for pause(3).
 */
int
_pause(void)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = __pause();
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper for readv(2).
 */
ssize_t
__readv(int fd, const struct iovec *iov, int iovcnt)
{
	struct pthread *self;
	ssize_t n;

	self = _get_curthread();
	_thr_cancel_enter(self);
	n = __sys_readv(fd, iov, iovcnt);
	_thr_cancel_leave(self, 1);
	return (n);
}
/*
 * Cancellation-point wrapper for waitpid(2).
 */
pid_t
_waitpid(pid_t wpid, int *status, int options)
{
	struct pthread *self;
	pid_t child;

	self = _get_curthread();
	_thr_cancel_enter(self);
	child = __waitpid(wpid, status, options);
	_thr_cancel_leave(self, 1);
	return (child);
}
/*
 * Cancellation-point wrapper for sigtimedwait(2); delegates to the common
 * lib_sigtimedwait() helper.
 */
int
__sigtimedwait(const sigset_t *set, siginfo_t *info,
    const struct timespec * timeout)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = lib_sigtimedwait(set, info, timeout);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper for poll(2).
 */
int
__poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = __sys_poll(fds, nfds, timeout);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation behavior:
 * If thread is canceled, file is not created.
 */
int
___creat(const char *path, mode_t mode)
{
	struct pthread *self;
	int fd;

	self = _get_curthread();
	_thr_cancel_enter(self);
	fd = __creat(path, mode);
	/* Act on cancellation only on failure so an open fd is not leaked. */
	_thr_cancel_leave(self, fd == -1);
	return (fd);
}
/*
 * Cancellation behavior:
 * If the thread is canceled, connection is not made.
 */
int
__connect(int fd, const struct sockaddr *name, socklen_t namelen)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = __sys_connect(fd, name, namelen);
	/* Act on cancellation only on failure: a made connection is kept. */
	_thr_cancel_leave(self, rv == -1);
	return (rv);
}
/*
 * Cancellation-point wrapper for wait(2).
 */
pid_t
_wait(int *istat)
{
	struct pthread *self;
	pid_t child;

	self = _get_curthread();
	_thr_cancel_enter(self);
	child = __wait(istat);
	_thr_cancel_leave(self, 1);
	return (child);
}
/*
 * Cancellation-point wrapper for usleep(3).
 *
 * Enters a cancellation region around the underlying __usleep() call so a
 * pending cancellation request can act on the sleeping thread.
 *
 * Returns whatever __usleep() returns (0 on success, -1 with errno set on
 * failure).
 *
 * Fix: the local result was declared `unsigned int` even though both
 * __usleep() and this function traffic in `int`; returning a stored -1
 * relied on an implementation-defined unsigned-to-int conversion.  Declare
 * it `int` to match the return type, consistent with the other wrappers.
 */
int
_usleep(useconds_t useconds)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	_thr_cancel_enter(curthread);
	ret = __usleep(useconds);
	_thr_cancel_leave(curthread, 1);
	return (ret);
}
/*
 * Cancellation-point wrapper for aio_suspend(2).
 */
int
__aio_suspend(const struct aiocb * const iocbs[], int niocb,
    const struct timespec *timeout)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = __sys_aio_suspend(iocbs, niocb, timeout);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper for nanosleep(2).
 */
int
__nanosleep(const struct timespec *time_to_sleep,
    struct timespec *time_remaining)
{
	struct pthread *self;
	int rv;

	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = _nanosleep(time_to_sleep, time_remaining);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Cancellation-point wrapper for accept(2).  Cancellation is acted on only
 * when the call failed, so an accepted connection is never dropped by a
 * cancellation request.
 */
int
__accept(int s, struct sockaddr *addr, socklen_t *addrlen)
{
	struct pthread *self = _get_curthread();
	int newfd;

	_thr_cancel_enter(self);
	newfd = __sys_accept(s, addr, addrlen);
	_thr_cancel_leave(self, newfd == -1);
	return (newfd);
}
/*
 * Cancellation-point wrapper for creat(2).
 */
int
___creat(const char *path, mode_t mode)
{
	struct pthread *self = _get_curthread();
	int fd;

	_thr_cancel_enter(self);
	fd = __creat(path, mode);
	/*
	 * To avoid possible file handle leak,
	 * only check cancellation point if it is failure
	 */
	_thr_cancel_leave(self, (fd == -1));
	return (fd);
}
/*
 * Cancellation-point wrapper for sigwait(3).  lib_sigtimedwait() reports
 * the received signal number when positive; convert that to the sigwait
 * contract of returning 0 with *sig set, or an errno value on failure.
 */
int
__sigwait(const sigset_t *set, int *sig)
{
	struct pthread *self = _get_curthread();
	int rv;

	_thr_cancel_enter(self);
	rv = lib_sigtimedwait(set, NULL, NULL);
	if (rv > 0) {
		*sig = rv;
		rv = 0;
	} else
		rv = errno;
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * NOTE(review): this fragment is truncated -- the visible text ends inside
 * the `else` branch; the non-F_SETLKW path and the closing of the function
 * are outside this view, so the code below is left byte-identical.
 *
 * Visible behavior: for F_OSETLKW/F_SETLKW (the only fcntl command that is
 * a cancellation point per the comment) the blocking lock request is made
 * inside a cancellation region, and cancellation is acted on only when the
 * call failed (ret == -1), so an acquired lock is not lost to cancellation.
 * The argument is fetched with va_arg as a void pointer.
 */
/* * Cancellation behavior: * According to specification, only F_SETLKW is a cancellation point. * Thread is only canceled at start, or canceled if the system call * is failure, this means the function does not generate side effect * if it is canceled. */ int __fcntl(int fd, int cmd,...) { struct pthread *curthread = _get_curthread(); int ret; va_list ap; va_start(ap, cmd); if (cmd == F_OSETLKW || cmd == F_SETLKW) { _thr_cancel_enter(curthread); #ifdef SYSCALL_COMPAT ret = __fcntl_compat(fd, cmd, va_arg(ap, void *)); #else ret = __sys_fcntl(fd, cmd, va_arg(ap, void *)); #endif _thr_cancel_leave(curthread, ret == -1); } else {
/*
 * Cancellation-point wrapper for select(2).  A call with no descriptors
 * but a timeout is just a sleep, so it is forwarded to _nanosleep()
 * instead of entering the kernel select path.
 */
int
__select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
    struct timeval *timeout)
{
	struct pthread *self = _get_curthread();
	struct timespec sleeptime;
	int rv;

	if (numfds == 0 && timeout != NULL) {
		TIMEVAL_TO_TIMESPEC(timeout, &sleeptime);
		rv = _nanosleep(&sleeptime, NULL);
	} else {
		_thr_cancel_enter(self);
		rv = __sys_select(numfds, readfds, writefds, exceptfds,
		    timeout);
		_thr_cancel_leave(self, 1);
	}
	return (rv);
}
/*
 * Cancellation-point wrapper for msync(2).
 */
int
__msync(void *addr, size_t len, int flags)
{
	struct pthread *self;
	int rv;

	/*
	 * XXX Without a way to learn the file descriptor backing this
	 * memory (and lock it for write) there is little useful to do here;
	 * the wrapper exists mainly to provide the cancellation point the
	 * standard requires.  sigh.
	 */
	self = _get_curthread();
	_thr_cancel_enter(self);
	rv = __sys_msync(addr, len, flags);
	_thr_cancel_leave(self, 1);
	return (rv);
}
/*
 * Kernel-assisted condition wait.
 *
 * Mechanics (see also the behavior comment at the head of the function):
 *  - The mutex is detached with _mutex_cv_detach(), saving the recursion
 *    count, before blocking in _thr_ucond_wait() on the cv's kernel-waiter
 *    word with CVWAIT_ABSTIME|CVWAIT_CLOCKID.
 *  - When `cancel` is set the wait runs inside a deferred cancellation
 *    region (_thr_cancel_enter2(curthread, 0) / _thr_cancel_leave(...,0)),
 *    so cancellation is not acted upon at the leave itself.
 *  - Afterwards the mutex is re-acquired (_mutex_cv_lock) on the
 *    success/EINTR/ETIMEDOUT paths, or re-attached (_mutex_cv_attach) on
 *    other errors where the kernel is known not to have unlocked it; only
 *    after the mutex is held again is _thr_testcancel() run, which keeps a
 *    pthread_cond_signal() wakeup from being lost to cancellation.
 *  - EINTR is mapped to 0; a relock/reattach failure (error2) takes
 *    precedence over the wait error in the return value.
 */
/* * Cancellation behaivor: * Thread may be canceled at start, if thread is canceled, it means it * did not get a wakeup from pthread_cond_signal(), otherwise, it is * not canceled. * Thread cancellation never cause wakeup from pthread_cond_signal() * to be lost. */ static int cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp, const struct timespec *abstime, int cancel) { struct pthread *curthread = _get_curthread(); int recurse; int error, error2 = 0; error = _mutex_cv_detach(mp, &recurse); if (error != 0) return (error); if (cancel) { _thr_cancel_enter2(curthread, 0); error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters, (struct umutex *)&mp->m_lock, abstime, CVWAIT_ABSTIME|CVWAIT_CLOCKID); _thr_cancel_leave(curthread, 0); } else { error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters, (struct umutex *)&mp->m_lock, abstime, CVWAIT_ABSTIME|CVWAIT_CLOCKID); } /* * Note that PP mutex and ROBUST mutex may return * interesting error codes. */ if (error == 0) { error2 = _mutex_cv_lock(mp, recurse); } else if (error == EINTR || error == ETIMEDOUT) { error2 = _mutex_cv_lock(mp, recurse); if (error2 == 0 && cancel) _thr_testcancel(curthread); if (error == EINTR) error = 0; } else { /* We know that it didn't unlock the mutex. */ error2 = _mutex_cv_attach(mp, recurse); if (error2 == 0 && cancel) _thr_testcancel(curthread); } return (error2 != 0 ? error2 : error); }
/*
 * Common body for the join family (plain and timed join share this code;
 * abstime == NULL means wait forever).
 *
 * Protocol:
 *  - Reject NULL targets (EINVAL) and self-join (EDEADLK).
 *  - Under THREAD_LIST_LOCK, verify the target exists (else ESRCH), is not
 *    detached (ESRCH), and has no joiner yet (ENOTSUP -- multiple joiners
 *    are unsupported); then publish `curthread` as the joiner.
 *  - A cleanup handler (backout_join) is pushed so the joiner field is
 *    undone if the joining thread is canceled, and the umtx wait runs
 *    inside a cancellation region (this _thr_cancel_enter variant returns
 *    the previous state, restored by _thr_cancel_leave).
 *  - The wait loop re-samples pthread->state and sleeps on it with
 *    _thr_umtx_wait until PS_DEAD, recomputing the remaining relative
 *    timeout from CLOCK_REALTIME each pass; a negative remainder means
 *    ETIMEDOUT.
 *  - On timeout the joiner field is cleared and ETIMEDOUT returned.
 *    Otherwise the dead thread's return value is captured, the thread is
 *    marked detached and put on the GC list, and *thread_return is filled
 *    in when requested.
 */
static int join_common(pthread_t pthread, void **thread_return, const struct timespec *abstime) { struct pthread *curthread = tls_get_curthread(); struct timespec ts, ts2, *tsp; void *tmp; long state; int oldcancel; int ret = 0; if (pthread == NULL) return (EINVAL); if (pthread == curthread) return (EDEADLK); THREAD_LIST_LOCK(curthread); if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0) { ret = ESRCH; } else if ((pthread->tlflags & TLFLAGS_DETACHED) != 0) { ret = ESRCH; } else if (pthread->joiner != NULL) { /* Multiple joiners are not supported. */ ret = ENOTSUP; } if (ret) { THREAD_LIST_UNLOCK(curthread); return (ret); } /* Set the running thread to be the joiner: */ pthread->joiner = curthread; THREAD_LIST_UNLOCK(curthread); THR_CLEANUP_PUSH(curthread, backout_join, pthread); oldcancel = _thr_cancel_enter(curthread); while ((state = pthread->state) != PS_DEAD) { if (abstime != NULL) { clock_gettime(CLOCK_REALTIME, &ts); TIMESPEC_SUB(&ts2, abstime, &ts); if (ts2.tv_sec < 0) { ret = ETIMEDOUT; break; } tsp = &ts2; } else tsp = NULL; ret = _thr_umtx_wait(&pthread->state, state, tsp, CLOCK_REALTIME); if (ret == ETIMEDOUT) break; } _thr_cancel_leave(curthread, oldcancel); THR_CLEANUP_POP(curthread, 0); if (ret == ETIMEDOUT) { THREAD_LIST_LOCK(curthread); pthread->joiner = NULL; THREAD_LIST_UNLOCK(curthread); } else { ret = 0; tmp = pthread->ret; THREAD_LIST_LOCK(curthread); pthread->tlflags |= TLFLAGS_DETACHED; pthread->joiner = NULL; THR_GCLIST_ADD(pthread); THREAD_LIST_UNLOCK(curthread); if (thread_return != NULL) *thread_return = tmp; } return (ret); }
/*
 * Public wrapper: leave a cancellation region for the calling thread,
 * optionally acting on a pending cancellation request.
 */
void
_pthread_cancel_leave(int maycancel)
{
	struct pthread *self;

	self = _get_curthread();
	_thr_cancel_leave(self, maycancel);
}
/*
 * Userland (sleepqueue-based) condition wait.
 *
 * Protocol:
 *  - PANICs if the thread is already queued on a wait channel; tests for a
 *    pending cancel up front when `cancel` is set.
 *  - Under the cv's sleepqueue lock: sets __has_user_waiters (before the
 *    mutex is released, so pthread_cond_signal() can check it locklessly),
 *    marks will_sleep, unlocks the mutex saving its recursion count, and
 *    enqueues the thread.
 *  - Each loop pass clears the wakeup word, drops the sleepqueue lock, and
 *    sleeps in _thr_sleep(); when `cancel` is set the sleep runs inside a
 *    deferred cancellation region (_thr_cancel_enter2(curthread, 0)), so
 *    cancellation is only acted on explicitly below.
 *  - After re-locking the queue: wchan == NULL means the thread was
 *    signalled and removed -- return success.  If a cancel is pending
 *    (SHOULD_CANCEL) the thread removes itself, re-locks the mutex, and
 *    exits with PTHREAD_CANCELED (unless in a critical section, which
 *    "should not happen" and returns 0).  On ETIMEDOUT it removes itself
 *    and returns the error; __has_user_waiters is refreshed from
 *    _sleepq_remove() in both removal paths.
 *  - On the normal exits the mutex is re-acquired with the saved recursion
 *    count before returning the sleep error (0 or ETIMEDOUT).
 */
static int cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp, const struct timespec *abstime, int cancel) { struct pthread *curthread = _get_curthread(); struct sleepqueue *sq; int recurse; int error; if (curthread->wchan != NULL) PANIC("thread was already on queue."); if (cancel) _thr_testcancel(curthread); _sleepq_lock(cvp); /* * set __has_user_waiters before unlocking mutex, this allows * us to check it without locking in pthread_cond_signal(). */ cvp->__has_user_waiters = 1; curthread->will_sleep = 1; (void)_mutex_cv_unlock(mp, &recurse); curthread->mutex_obj = mp; _sleepq_add(cvp, curthread); for(;;) { _thr_clear_wake(curthread); _sleepq_unlock(cvp); if (cancel) { _thr_cancel_enter2(curthread, 0); error = _thr_sleep(curthread, cvp->__clock_id, abstime); _thr_cancel_leave(curthread, 0); } else { error = _thr_sleep(curthread, cvp->__clock_id, abstime); } _sleepq_lock(cvp); if (curthread->wchan == NULL) { error = 0; break; } else if (cancel && SHOULD_CANCEL(curthread)) { sq = _sleepq_lookup(cvp); cvp->__has_user_waiters = _sleepq_remove(sq, curthread); _sleepq_unlock(cvp); curthread->mutex_obj = NULL; _mutex_cv_lock(mp, recurse); if (!THR_IN_CRITICAL(curthread)) _pthread_exit(PTHREAD_CANCELED); else /* this should not happen */ return (0); } else if (error == ETIMEDOUT) { sq = _sleepq_lookup(cvp); cvp->__has_user_waiters = _sleepq_remove(sq, curthread); break; } } _sleepq_unlock(cvp); curthread->mutex_obj = NULL; _mutex_cv_lock(mp, recurse); return (error); }