/* Unblock all threads that are blocked on condition variable COND.  */
int
__pthread_cond_broadcast (pthread_cond_t *cond)
{
  struct __pthread *wakeup;

  __pthread_spin_lock (&cond->__lock);

  __pthread_dequeuing_iterate (cond->__queue, wakeup)
    __pthread_wakeup (wakeup);

  cond->__queue = NULL;
  __pthread_spin_unlock (&cond->__lock);

  return 0;
}
/* Unblock at least one of the threads that are blocked on condition
   variable COND.  */
int
__pthread_cond_signal (pthread_cond_t *cond)
{
  struct __pthread *wakeup;

  __pthread_spin_lock (&cond->__lock);
  wakeup = cond->__queue;
  if (wakeup)
    __pthread_dequeue (wakeup);
  __pthread_spin_unlock (&cond->__lock);

  if (wakeup)
    __pthread_wakeup (wakeup);

  return 0;
}
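/* Illustrative sketch, not part of the library: the public
   pthread_cond_signal / pthread_cond_broadcast calls backed by the two
   functions above are normally used together with a mutex-protected
   predicate, as below.  The identifiers queue_lock, queue_nonempty,
   item_count, producer_put and consumer_get are hypothetical.  Waiters
   recheck the predicate in a loop because a broadcast wakes every
   queued thread at once.  */
#include <pthread.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_nonempty = PTHREAD_COND_INITIALIZER;
static int item_count;

static void
producer_put (void)
{
  pthread_mutex_lock (&queue_lock);
  item_count++;
  /* Waking one thread is enough for a single item; broadcast is for
     when every waiter must reevaluate its predicate.  */
  pthread_cond_signal (&queue_nonempty);
  pthread_mutex_unlock (&queue_lock);
}

static void
consumer_get (void)
{
  pthread_mutex_lock (&queue_lock);
  while (item_count == 0)
    pthread_cond_wait (&queue_nonempty, &queue_lock);
  item_count--;
  pthread_mutex_unlock (&queue_lock);
}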
error_t
__pthread_sigstate (struct __pthread *thread, int how,
                    const sigset_t *set, sigset_t *oset, int clear_pending)
{
  error_t err = 0;
  struct hurd_sigstate *ss;

  ss = _hurd_thread_sigstate (thread->kernel_thread);
  assert (ss);

  __pthread_spin_lock (&ss->lock);

  if (oset)
    *oset = ss->blocked;

  if (set)
    switch (how)
      {
      case SIG_BLOCK:
        ss->blocked |= *set;
        break;

      case SIG_SETMASK:
        ss->blocked = *set;
        break;

      case SIG_UNBLOCK:
        ss->blocked &= ~*set;
        break;

      default:
        err = EINVAL;
        break;
      }

  if (! err && clear_pending)
    __sigemptyset (&ss->pending);

  __pthread_spin_unlock (&ss->lock);

  return err;
}
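/* STRUCT CANCEL_CTX is not defined in this excerpt.  Judging only from
   how cancel_hook and __pthread_cond_timedwait_internal below use it, a
   minimal definition would look like the following sketch; the actual
   declaration in the source may carry additional fields.  It records
   which blocked thread to wake and which condition variable that thread
   is queued on.  */
struct cancel_ctx
{
  struct __pthread *wakeup;   /* The blocked thread to unblock.  */
  pthread_cond_t *cond;       /* The condition variable it waits on.  */
};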
static void
cancel_hook (void *arg)
{
  struct cancel_ctx *ctx = arg;
  struct __pthread *wakeup = ctx->wakeup;
  pthread_cond_t *cond = ctx->cond;
  int unblock;

  __pthread_spin_lock (&cond->__lock);
  /* The thread only needs to be awoken if it's blocking or about to
     block.  If it was already unblocked, it's not queued any more.  */
  unblock = wakeup->prevp != NULL;
  if (unblock)
    __pthread_dequeue (wakeup);
  __pthread_spin_unlock (&cond->__lock);

  if (unblock)
    __pthread_wakeup (wakeup);
}
/* Lock MUTEX, return EBUSY if we can't get it.  */
int
__pthread_mutex_trylock (struct __pthread_mutex *mutex)
{
  int err;
  struct __pthread *self;

  __pthread_spin_lock (&mutex->__lock);
  if (__pthread_spin_trylock (&mutex->__held) == 0)
    /* Acquired the lock.  */
    {
#ifndef NDEBUG
      self = _pthread_self ();
      if (self)
        /* The main thread may take a lock before the library is fully
           initialized, in particular, before the main thread has a
           TCB.  */
        {
          assert (! mutex->owner);
          mutex->owner = _pthread_self ();
        }
#endif

      if (mutex->attr)
        switch (mutex->attr->mutex_type)
          {
          case PTHREAD_MUTEX_NORMAL:
            break;

          case PTHREAD_MUTEX_RECURSIVE:
            mutex->locks = 1;
            /* Fall through to record the owner.  */
          case PTHREAD_MUTEX_ERRORCHECK:
            mutex->owner = _pthread_self ();
            break;

          default:
            LOSE;
          }

      __pthread_spin_unlock (&mutex->__lock);
      return 0;
    }

  err = EBUSY;

  if (mutex->attr)
    {
      self = _pthread_self ();
      switch (mutex->attr->mutex_type)
        {
        case PTHREAD_MUTEX_NORMAL:
          break;

        case PTHREAD_MUTEX_ERRORCHECK:
          /* We could check whether MUTEX->OWNER is SELF; however, POSIX
             does not permit pthread_mutex_trylock to return EDEADLK
             instead of EBUSY, only pthread_mutex_lock.  */
          break;

        case PTHREAD_MUTEX_RECURSIVE:
          if (mutex->owner == self)
            {
              mutex->locks++;
              err = 0;
            }
          break;

        default:
          LOSE;
        }
    }

  __pthread_spin_unlock (&mutex->__lock);
  return err;
}
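/* Illustrative sketch, not part of the library: typical use of the
   pthread_mutex_trylock entry point implemented above.  EBUSY is the
   ordinary "somebody else holds it" result; with a
   PTHREAD_MUTEX_RECURSIVE mutex the owning thread instead gets 0 back
   and the lock count grows.  The identifiers stats_lock and
   update_stats_if_idle are hypothetical.  */
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

static int
update_stats_if_idle (void)
{
  int err = pthread_mutex_trylock (&stats_lock);
  if (err == EBUSY)
    return 0;                   /* Contended; skip this round.  */
  if (err != 0)
    return -1;                  /* Unexpected error.  */

  /* ... update the shared statistics here ... */

  pthread_mutex_unlock (&stats_lock);
  return 1;
}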
/* Acquire the rwlock *RWLOCK for reading, blocking until *ABSTIME if it
   is already held by a writer.  As a GNU extension, if ABSTIME is NULL
   then wait forever.  */
int
__pthread_rwlock_timedrdlock_internal (struct __pthread_rwlock *rwlock,
                                       const struct timespec *abstime)
{
  error_t err;
  int drain;
  struct __pthread *self;

  __pthread_spin_lock (&rwlock->__lock);
  if (__pthread_spin_trylock (&rwlock->__held) == 0)
    /* Successfully acquired the lock.  */
    {
      assert (rwlock->__readerqueue == 0);
      assert (rwlock->__writerqueue == 0);
      assert (rwlock->__readers == 0);

      rwlock->__readers = 1;
      __pthread_spin_unlock (&rwlock->__lock);

      return 0;
    }
  else
    /* The lock is held; is it held by a reader?  */
    if (rwlock->__readers > 0)
      /* Just add ourselves to the number of readers.  */
      {
        assert (rwlock->__readerqueue == 0);

        rwlock->__readers++;
        __pthread_spin_unlock (&rwlock->__lock);
        return 0;
      }

  /* The lock is busy.  */

  /* Better be blocked by a writer.  */
  assert (rwlock->__readers == 0);

  if (abstime != NULL && (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000))
    {
      /* Release the spin lock before bailing out, otherwise it would be
         left held forever.  */
      __pthread_spin_unlock (&rwlock->__lock);
      return EINVAL;
    }

  self = _pthread_self ();

  /* Add ourselves to the queue.  */
  __pthread_enqueue (&rwlock->__readerqueue, self);
  __pthread_spin_unlock (&rwlock->__lock);

  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, CLOCK_REALTIME);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_lock (&rwlock->__lock);
  if (self->prevp == NULL)
    /* Another thread removed us from the queue, which means a wakeup
       message has been sent.  It was either consumed while we were
       blocking, or queued after we timed out and before we acquired the
       rwlock lock, in which case the message queue must be drained.  */
    drain = err ? 1 : 0;
  else
    {
      /* We're still in the queue.  No one attempted to wake us up,
         i.e. we timed out.  */
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&rwlock->__lock);

  if (drain)
    __pthread_block (self);

  if (err)
    {
      assert (err == ETIMEDOUT);
      return err;
    }

  /* The reader count has already been incremented by whoever woke us
     up.  */
  assert (rwlock->__readers > 0);

  return 0;
}
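/* Illustrative sketch, not part of the library: how a caller typically
   reaches the internal function above through pthread_rwlock_timedrdlock.
   ABSTIME is an absolute CLOCK_REALTIME deadline, matching the
   __pthread_timedblock call above.  The identifiers table_lock,
   read_with_timeout and TIMEOUT_SECONDS are hypothetical.  */
#include <errno.h>
#include <pthread.h>
#include <time.h>

#define TIMEOUT_SECONDS 2

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static int
read_with_timeout (void)
{
  struct timespec deadline;

  clock_gettime (CLOCK_REALTIME, &deadline);
  deadline.tv_sec += TIMEOUT_SECONDS;

  int err = pthread_rwlock_timedrdlock (&table_lock, &deadline);
  if (err == ETIMEDOUT)
    return 0;                   /* A writer held the lock too long.  */
  if (err != 0)
    return -1;

  /* ... read the shared table here ... */

  pthread_rwlock_unlock (&table_lock);
  return 1;
}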
/* Block on condition variable COND until ABSTIME.  As a GNU extension,
   if ABSTIME is NULL, then wait forever.  MUTEX should be held by the
   calling thread.  On return, MUTEX will be held by the calling
   thread.  */
int
__pthread_cond_timedwait_internal (pthread_cond_t *cond,
                                   pthread_mutex_t *mutex,
                                   const struct timespec *abstime)
{
  error_t err;
  int cancelled, oldtype, drain;
  clockid_t clock_id = __pthread_default_condattr.__clock;

  if (abstime && (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000))
    return EINVAL;

  struct __pthread *self = _pthread_self ();
  struct cancel_ctx ctx;
  ctx.wakeup = self;
  ctx.cond = cond;

  /* Test for a pending cancellation request, switch to deferred mode for
     safer resource handling, and prepare the hook to call in case we're
     cancelled while blocking.  Once CANCEL_LOCK is released, the
     cancellation hook can be called by another thread at any time.
     Whatever happens, this function must exit with MUTEX locked.

     This function contains inline implementations of pthread_testcancel
     and pthread_setcanceltype to reduce locking overhead.  */
  __pthread_mutex_lock (&self->cancel_lock);
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;

  if (!cancelled)
    {
      self->cancel_hook = cancel_hook;
      self->cancel_hook_arg = &ctx;
      oldtype = self->cancel_type;

      if (oldtype != PTHREAD_CANCEL_DEFERRED)
        self->cancel_type = PTHREAD_CANCEL_DEFERRED;

      /* Add ourselves to the list of waiters.  This is done while setting
         the cancellation hook to simplify the cancellation procedure, i.e.
         if the thread is queued, it can be cancelled; otherwise it is
         already unblocked, progressing on the return path.  */
      __pthread_spin_lock (&cond->__lock);
      __pthread_enqueue (&cond->__queue, self);
      if (cond->__attr != NULL)
        clock_id = cond->__attr->__clock;
      __pthread_spin_unlock (&cond->__lock);
    }
  __pthread_mutex_unlock (&self->cancel_lock);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  /* Release MUTEX before blocking.  */
  __pthread_mutex_unlock (mutex);

  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, clock_id);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_lock (&cond->__lock);
  if (self->prevp == NULL)
    {
      /* Another thread removed us from the list of waiters, which means
         a wakeup message has been sent.  It was either consumed while we
         were blocking, or queued after we timed out and before we
         acquired the condition lock, in which case the message queue
         must be drained.  */
      if (!err)
        drain = 0;
      else
        {
          assert (err == ETIMEDOUT);
          drain = 1;
        }
    }
  else
    {
      /* We're still in the list of waiters.  No one attempted to wake us
         up, i.e. we timed out.  */
      assert (err == ETIMEDOUT);
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&cond->__lock);

  if (drain)
    __pthread_block (self);

  /* We're almost done.  Remove the unblock hook, restore the previous
     cancellation type, and check for a pending cancellation request.  */
  __pthread_mutex_lock (&self->cancel_lock);
  self->cancel_hook = NULL;
  self->cancel_hook_arg = NULL;
  self->cancel_type = oldtype;
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;
  __pthread_mutex_unlock (&self->cancel_lock);

  /* Reacquire MUTEX before returning/cancelling.  */
  __pthread_mutex_lock (mutex);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  return err;
}
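/* Illustrative sketch, not part of the library: the usual calling
   pattern for the timed wait implemented above.  The predicate is
   rechecked on every wakeup, ETIMEDOUT is only acted upon once the
   predicate is known to still be false, and the mutex is held again
   whenever the wait returns, as the comment above states.  The
   identifiers work_lock, work_ready, work_available and wait_for_work
   are hypothetical.  */
#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_ready = PTHREAD_COND_INITIALIZER;
static int work_available;

static int
wait_for_work (const struct timespec *deadline)
{
  int err = 0;

  pthread_mutex_lock (&work_lock);
  while (!work_available && err != ETIMEDOUT)
    err = pthread_cond_timedwait (&work_ready, &work_lock, deadline);

  int got_work = work_available;
  if (got_work)
    work_available = 0;
  pthread_mutex_unlock (&work_lock);

  return got_work;
}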