int __pthread_mutex_trylock (pthread_mutex_t *mtxp) { struct __pthread *self; int ret; switch (MTX_TYPE (mtxp)) { case PT_MTX_NORMAL: ret = lll_trylock (&mtxp->__lock); if (ret) ret = EBUSY; break; case PT_MTX_RECURSIVE: self = _pthread_self (); if (mtx_owned_p (mtxp, self, mtxp->__flags)) { if (__glibc_unlikely (mtxp->__cnt + 1 == 0)) return EAGAIN; ++mtxp->__cnt; ret = 0; } else if ((ret = lll_trylock (&mtxp->__lock)) == 0) { mtx_set_owner (mtxp, self, mtxp->__flags); mtxp->__cnt = 1; } else ret = EBUSY; break; case PT_MTX_ERRORCHECK: self = _pthread_self (); if ((ret = lll_trylock (&mtxp->__lock)) == 0) mtx_set_owner (mtxp, self, mtxp->__flags); else ret = EBUSY; break; case PT_MTX_NORMAL | PTHREAD_MUTEX_ROBUST: case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST: case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: self = _pthread_self (); ROBUST_LOCK (self, mtxp, __lll_robust_trylock); break; default: ret = EINVAL; break; } return ret; }
int pthread_mutex_timedlock (pthread_mutex_t *mtxp, const struct timespec *tsp) { struct pthread *self = PTHREAD_SELF; int ret, flags = mtxp->__flags & GSYNC_SHARED; switch (MTX_TYPE (mtxp)) { case PTHREAD_MUTEX_NORMAL: ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags); break; case PTHREAD_MUTEX_RECURSIVE: if (mtx_owned_p (mtxp, self, flags)) { if (__glibc_unlikely (mtxp->__cnt + 1 == 0)) return (EAGAIN); ++mtxp->__cnt; ret = 0; } else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags)) == 0) { mtx_set_owner (mtxp, self, flags); mtxp->__cnt = 1; } break; case PTHREAD_MUTEX_ERRORCHECK: if (mtxp->__owner_id == self->id) return (EDEADLK); else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags)) == 0) mtx_set_owner (mtxp, self, flags); break; case PTHREAD_MUTEX_NORMAL | PTHREAD_MUTEX_ROBUST: case PTHREAD_MUTEX_RECURSIVE | PTHREAD_MUTEX_ROBUST: case PTHREAD_MUTEX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: ROBUST_LOCK (self, mtxp, lll_robust_abstimed_lock, tsp, flags); break; default: ret = EINVAL; break; } return (ret); }
/* Try to acquire MTXP without blocking.  Returns 0 on success (or on
   a recursive relock by the owner), EAGAIN if a recursive mutex's
   count would overflow, EDEADLK for an error-checking mutex already
   held by the caller, and EINVAL for an unknown type.
   NOTE(review): in the normal case the lll_trylock result is returned
   as-is, with no remapping to EBUSY — presumably this port's
   lll_trylock already yields the POSIX error code on contention;
   confirm against its lowlevellock definitions.  */
int pthread_mutex_trylock (pthread_mutex_t *mtxp)
{
  struct pthread *self = PTHREAD_SELF;
  int ret;

  switch (MTX_TYPE (mtxp))
    {
    case PTHREAD_MUTEX_NORMAL:
      /* The low-level result is used directly as the return value.  */
      ret = lll_trylock (&mtxp->__lock);
      break;

    case PTHREAD_MUTEX_RECURSIVE:
      if (mtx_owned_p (mtxp, self, mtxp->__flags))
        {
          /* Relock by the current owner: bump the recursion count,
             refusing to let it wrap around to zero.  */
          if (__glibc_unlikely (mtxp->__cnt + 1 == 0))
            return (EAGAIN);

          ++mtxp->__cnt;
          ret = 0;
        }
      else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
        {
          /* First acquisition: record ownership and start the count.  */
          mtx_set_owner (mtxp, self, mtxp->__flags);
          mtxp->__cnt = 1;
        }
      break;

    case PTHREAD_MUTEX_ERRORCHECK:
      /* Ownership is qualified with the mutex flags so that
         process-shared mutexes are checked correctly.  */
      if (mtx_owned_p (mtxp, self, mtxp->__flags))
        ret = EDEADLK;
      else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
        mtx_set_owner (mtxp, self, mtxp->__flags);
      break;

    case PTHREAD_MUTEX_NORMAL | PTHREAD_MUTEX_ROBUST:
    case PTHREAD_MUTEX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
    case PTHREAD_MUTEX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
      /* Robust variants: ROBUST_LOCK (a macro defined elsewhere)
         presumably performs the owner-death bookkeeping and produces
         the result itself.  */
      ROBUST_LOCK (self, mtxp, lll_robust_trylock);
      break;

    default:
      ret = EINVAL;
      break;
    }

  return (ret);
}
int pthread_mutex_lock (pthread_mutex_t *mtxp) { struct pthread *self = PTHREAD_SELF; int flags = mtxp->__flags & GSYNC_SHARED; int ret = 0; switch (MTX_TYPE (mtxp)) { case PTHREAD_MUTEX_NORMAL: lll_lock (&mtxp->__lock, flags); break; case PTHREAD_MUTEX_RECURSIVE: if (mtx_owned_p (mtxp, self, flags)) { if (__glibc_unlikely (mtxp->__cnt + 1 == 0)) return (EAGAIN); ++mtxp->__cnt; return (ret); } lll_lock (&mtxp->__lock, flags); mtx_set_owner (mtxp, self, flags); mtxp->__cnt = 1; break; case PTHREAD_MUTEX_ERRORCHECK: if (mtx_owned_p (mtxp, self, flags)) return (EDEADLK); lll_lock (&mtxp->__lock, flags); mtx_set_owner (mtxp, self, flags); break; case PTHREAD_MUTEX_NORMAL | PTHREAD_MUTEX_ROBUST: case PTHREAD_MUTEX_RECURSIVE | PTHREAD_MUTEX_ROBUST: case PTHREAD_MUTEX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: ROBUST_LOCK (self, mtxp, lll_robust_lock, flags); break; default: ret = EINVAL; break; } return (ret); }
int __pthread_mutex_transfer_np (pthread_mutex_t *mtxp, pthread_t th) { struct __pthread *self = _pthread_self (); struct __pthread *pt = __pthread_getid (th); if (pt == NULL) return ESRCH; else if (pt == self) return 0; int ret = 0; int flags = mtxp->__flags & GSYNC_SHARED; switch (MTX_TYPE (mtxp)) { case PT_MTX_NORMAL: break; case PT_MTX_RECURSIVE: case PT_MTX_ERRORCHECK: if (!mtx_owned_p (mtxp, self, flags)) ret = EPERM; else mtx_set_owner (mtxp, pt, flags); break; case PT_MTX_NORMAL | PTHREAD_MUTEX_ROBUST: case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST: case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: /* Note that this can be used to transfer an inconsistent * mutex as well. The new owner will still have the same * flags as the original. */ if (mtxp->__owner_id != self->thread || (int) (mtxp->__lock & LLL_OWNER_MASK) != __getpid ()) ret = EPERM; else mtxp->__owner_id = pt->thread; break; default: ret = EINVAL; } return ret; }