/* Lock the spin lock object LOCK.  If the lock is held by another
   thread, spin until it becomes available.  */
int
_pthread_spin_lock (__pthread_spinlock_t *lock)
{
  int tries;
#ifdef USE_L4
  /* Start with a small timeout of 2 microseconds, then back off
     exponentially.  */
  l4_time_t backoff;

  backoff = l4_time_period (2);
#else
# warning Do not know how to sleep on this platform.
#endif

  for (;;)
    {
      /* Spin for a bounded number of attempts before going to sleep.  */
      tries = __pthread_spin_count;
      while (tries-- > 0)
        if (__pthread_spin_trylock (lock) == 0)
          return 0;

#ifdef USE_L4
      /* Still contended: yield the CPU, doubling the sleep interval
         each round but never exceeding the maximum period.  */
      l4_sleep (backoff);
      backoff = l4_time_mul2 (backoff);
      if (backoff == L4_NEVER)
        backoff = L4_TIME_PERIOD_MAX;
#endif
    }
}
/* Lock the spin lock object LOCK.  If the lock is held by another
   thread, spin until it becomes available.  */
int
_pthread_spin_lock (__pthread_spinlock_t *lock)
{
  /* Sleep interval between rounds of spinning; starts at 2
     microseconds and doubles after every unsuccessful round.  */
  l4_time_t backoff;
  int attempt;

  backoff = l4_time_period (2);

  for (;;)
    {
      /* Make a bounded number of lock attempts before sleeping.  */
      for (attempt = __pthread_spin_count; attempt > 0; attempt--)
        if (__pthread_spin_trylock (lock) == 0)
          return 0;

      /* Still contended: yield the CPU and back off exponentially,
         capping the interval at the maximum representable period.  */
      l4_sleep (backoff);
      backoff = l4_time_mul2 (backoff);
      if (backoff == L4_NEVER)
        backoff = L4_TIME_PERIOD_MAX;
    }
}
/* Lock MUTEX, return EBUSY if we can't get it.  On success the calling
   thread owns MUTEX; for recursive mutexes a lock count is maintained.
   Never blocks: if the mutex is held, returns immediately (EBUSY, or 0
   for a recursive mutex already owned by the caller).  */
int
__pthread_mutex_trylock (struct __pthread_mutex *mutex)
{
  int err;
  struct __pthread *self;

  /* MUTEX->__lock protects the mutex's bookkeeping fields;
     MUTEX->__held is the actual lock state.  */
  __pthread_spin_lock (&mutex->__lock);
  if (__pthread_spin_trylock (&mutex->__held) == 0)
    /* Acquired the lock.  */
    {
#ifndef NDEBUG
      self = _pthread_self ();
      if (self)
	/* The main thread may take a lock before the library is fully
	   initialized, in particular, before the main thread has a TCB.  */
	{
	  assert (! mutex->owner);
	  mutex->owner = _pthread_self ();
	}
#endif
      if (mutex->attr)
	switch (mutex->attr->mutex_type)
	  {
	  case PTHREAD_MUTEX_NORMAL:
	    break;
	  case PTHREAD_MUTEX_RECURSIVE:
	    mutex->locks = 1;
	    /* Fall through: a recursive mutex also records its owner,
	       just like an error-checking one.  */
	  case PTHREAD_MUTEX_ERRORCHECK:
	    mutex->owner = _pthread_self ();
	    break;
	  default:
	    LOSE;
	  }
      __pthread_spin_unlock (&mutex->__lock);
      return 0;
    }

  /* The mutex is already held.  */
  err = EBUSY;
  if (mutex->attr)
    {
      self = _pthread_self ();
      switch (mutex->attr->mutex_type)
	{
	case PTHREAD_MUTEX_NORMAL:
	  break;
	case PTHREAD_MUTEX_ERRORCHECK:
	  /* We could check if MUTEX->OWNER is SELF, however, POSIX does
	     not permit pthread_mutex_trylock to return EDEADLK instead of
	     EBUSY, only pthread_mutex_lock.  */
	  break;
	case PTHREAD_MUTEX_RECURSIVE:
	  /* Re-acquisition by the owner succeeds and bumps the lock
	     count; pthread_mutex_unlock must be called once per lock.  */
	  if (mutex->owner == self)
	    {
	      mutex->locks ++;
	      err = 0;
	    }
	  break;
	default:
	  LOSE;
	}
    }
  __pthread_spin_unlock (&mutex->__lock);
  return err;
}
/* Acquire the rwlock *RWLOCK for reading blocking until *ABSTIME if it is already held. As a GNU extension, if TIMESPEC is NULL then wait forever. */ int __pthread_rwlock_timedrdlock_internal (struct __pthread_rwlock *rwlock, const struct timespec *abstime) { error_t err; int drain; struct __pthread *self; __pthread_spin_lock (&rwlock->__lock); if (__pthread_spin_trylock (&rwlock->__held) == 0) /* Successfully acquired the lock. */ { assert (rwlock->__readerqueue == 0); assert (rwlock->__writerqueue == 0); assert (rwlock->__readers == 0); rwlock->__readers = 1; __pthread_spin_unlock (&rwlock->__lock); return 0; } else /* Lock is held, but is held by a reader? */ if (rwlock->__readers > 0) /* Just add ourself to number of readers. */ { assert (rwlock->__readerqueue == 0); rwlock->__readers++; __pthread_spin_unlock (&rwlock->__lock); return 0; } /* The lock is busy. */ /* Better be blocked by a writer. */ assert (rwlock->__readers == 0); if (abstime != NULL && (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)) return EINVAL; self = _pthread_self (); /* Add ourself to the queue. */ __pthread_enqueue (&rwlock->__readerqueue, self); __pthread_spin_unlock (&rwlock->__lock); /* Block the thread. */ if (abstime != NULL) err = __pthread_timedblock (self, abstime, CLOCK_REALTIME); else { err = 0; __pthread_block (self); } __pthread_spin_lock (&rwlock->__lock); if (self->prevp == NULL) /* Another thread removed us from the queue, which means a wakeup message has been sent. It was either consumed while we were blocking, or queued after we timed out and before we acquired the rwlock lock, in which case the message queue must be drained. */ drain = err ? 1 : 0; else { /* We're still in the queue. Noone attempted to wake us up, i.e. we timed out. 
*/ __pthread_dequeue (self); drain = 0; } __pthread_spin_unlock (&rwlock->__lock); if (drain) __pthread_block (self); if (err) { assert (err == ETIMEDOUT); return err; } /* The reader count has already been increment by whoever woke us up. */ assert (rwlock->__readers > 0); return 0; }