int futex_wait(int *lock_word, int oldval, long sec, unsigned long usec)
{
    struct timespec timeout;
    int t;

    if (sec < 0) {
        t = sys_futex(lock_word, futex_wait_op(), oldval, 0);
    } else {
        timeout.tv_sec = sec;
        timeout.tv_nsec = usec * 1000;
        t = sys_futex(lock_word, futex_wait_op(), oldval, &timeout);
    }
    if (t == 0)
        return 0;
    else if (errno == ETIMEDOUT)
        return 1;
    else if (errno == EINTR)
        return 2;
    else /* EWOULDBLOCK and others, need to check the lock */
        return -1;
}
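/*
 * Every snippet in this section calls some sys_futex() helper that libc does
 * not provide.  A minimal sketch of such a wrapper, assuming Linux and the
 * raw syscall(2) interface: the kernel ABI takes six arguments; snippets
 * that call a four-argument form use their own local variant, and snippets
 * that test rc == -ETIMEDOUT assume a raw wrapper that returns the negative
 * errno instead of setting errno.
 */
#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long sys_futex(void *uaddr, int op, int val,
                      const struct timespec *timeout,
                      void *uaddr2, int val3)
{
    /* Returns 0 or a positive count on success, -1 with errno on error. */
    return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}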
/* Returns -1 on fail, 0 on wakeup, 1 on pass, 2 on didn't sleep */
int __futex_down_slow(struct futex *futx, int val, struct timespec *rel)
{
    if (sys_futex(&futx->count, FUTEX_WAIT, val, rel) == 0) {
        /* <= in case someone else decremented it */
        if (futx->count <= FUTEX_PASSED) {
            futx->count = -1;
            return 1;
        }
        return 0;
    }
    /* EWOULDBLOCK just means value changed before we slept: loop */
    if (errno == EWOULDBLOCK)
        return 2;
    return -1;
}
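/*
 * For context, the slow path above is only entered after a failed atomic
 * decrement on the fast path.  A hypothetical caller is sketched below; the
 * name futex_down and the retry policy are assumptions modeled on the
 * return-code comment above, not code from this section.
 */
static inline int futex_down(struct futex *futx)
{
    for (;;) {
        /* Try to grab the futex: a 1 -> 0 transition is an uncontended
         * acquire; anything else means contention. */
        int val = __atomic_sub_fetch(&futx->count, 1, __ATOMIC_ACQUIRE);
        if (val == 0)
            return 0;

        switch (__futex_down_slow(futx, val, NULL)) {
        case 1:  return 0;  /* pass: lock handed directly to us */
        case 0:  break;     /* wakeup: retry the decrement */
        case 2:  break;     /* didn't sleep, value changed: retry */
        default: return -1; /* fail */
        }
    }
}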
/*
 * Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
    /* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
    if (unlikely(tsk->robust_list)) {
        exit_robust_list(tsk);
        tsk->robust_list = NULL;
    }
#ifdef CONFIG_COMPAT
    if (unlikely(tsk->compat_robust_list)) {
        compat_exit_robust_list(tsk);
        tsk->compat_robust_list = NULL;
    }
#endif
    if (unlikely(!list_empty(&tsk->pi_state_list)))
        exit_pi_state_list(tsk);
#endif

    uprobe_free_utask(tsk);

    /* Get rid of any cached register state */
    deactivate_mm(tsk, mm);

    if (tsk->vfork_done)
        complete_vfork_done(tsk);

    /*
     * If we're exiting normally, clear a user-space tid field if
     * requested.  We leave this alone when dying by signal, to leave
     * the value intact in a core dump, and to save the unnecessary
     * trouble, say, a killed vfork parent shouldn't touch this mm.
     * Userland only wants this done for a sys_exit.
     */
    if (tsk->clear_child_tid) {
        if (!(tsk->flags & PF_SIGNALED) &&
            atomic_read(&mm->mm_users) > 1) {
            /*
             * We don't check the error code - if userspace has
             * not set up a proper pointer then tough luck.
             */
            put_user(0, tsk->clear_child_tid);
            sys_futex(tsk->clear_child_tid, FUTEX_WAKE, 1, NULL, NULL, 0);
        }
        tsk->clear_child_tid = NULL;
    }
}
/*
 * Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
    struct completion *vfork_done = tsk->vfork_done;

    /* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
    if (unlikely(tsk->robust_list))
        exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
    if (unlikely(tsk->compat_robust_list))
        compat_exit_robust_list(tsk);
#endif
#endif

    /* Get rid of any cached register state */
    deactivate_mm(tsk, mm);

    /* notify parent sleeping on vfork() */
    if (vfork_done) {
        tsk->vfork_done = NULL;
        complete(vfork_done);
    }

    /*
     * If we're exiting normally, clear a user-space tid field if
     * requested.  We leave this alone when dying by signal, to leave
     * the value intact in a core dump, and to save the unnecessary
     * trouble otherwise.  Userland only wants this done for a sys_exit.
     */
    if (tsk->clear_child_tid
        && !(tsk->flags & PF_SIGNALED)
        && atomic_read(&mm->mm_users) > 1) {
        u32 __user *tidptr = tsk->clear_child_tid;
        tsk->clear_child_tid = NULL;

        /*
         * We don't check the error code - if userspace has
         * not set up a proper pointer then tough luck.
         */
        put_user(0, tidptr);
        sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
}
/*
 * Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(void)
{
    struct task_struct *tsk = current;
    struct completion *vfork_done = tsk->vfork_done;

    /* notify parent sleeping on vfork() */
    if (vfork_done) {
        tsk->vfork_done = NULL;
        complete(vfork_done);
    }
    if (tsk->clear_child_tid) {
        u32 *tidptr = tsk->clear_child_tid;
        tsk->clear_child_tid = NULL;

        /*
         * We don't check the error code - if userspace has
         * not set up a proper pointer then tough luck.
         */
        put_user(0, tidptr);
        sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
}
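/*
 * The mm_release() variants above all implement the same userspace contract:
 * on thread exit the kernel zeroes the CLONE_CHILD_CLEARTID word and does a
 * FUTEX_WAKE on it, which is what lets a joiner sleep until the thread is
 * really gone.  A minimal userspace sketch of that contract, assuming
 * glibc's clone() wrapper; this is an illustration, not glibc's actual
 * pthread_join implementation.
 */
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t child_tid = 1;    /* nonzero until the thread has exited */

static int thread_fn(void *arg)
{
    (void)arg;
    return 0;   /* thread exit triggers mm_release()'s clear + FUTEX_WAKE */
}

int main(void)
{
    enum { STACK_SZ = 64 * 1024 };
    char *stack = malloc(STACK_SZ);
    if (!stack)
        return 1;

    /* CLONE_CHILD_SETTID stores the tid in child_tid at start;
     * CLONE_CHILD_CLEARTID asks the kernel to zero and wake it on exit. */
    if (clone(thread_fn, stack + STACK_SZ,
              CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
              CLONE_THREAD | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID,
              NULL, NULL, NULL, &child_tid) == -1)
        return 1;

    /* Join: sleep while the word is nonzero; FUTEX_WAIT revalidates the
     * value, so a wake or any change ends the loop. */
    for (;;) {
        int val = __atomic_load_n(&child_tid, __ATOMIC_ACQUIRE);
        if (val == 0)
            break;  /* kernel cleared it in mm_release() */
        syscall(SYS_futex, &child_tid, FUTEX_WAIT, val, NULL, NULL, 0);
    }
    printf("child exited\n");
    return 0;
}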
int fair_futex_lock(fair_futex_t *lock)
{
    uint32_t ticket;
    uint32_t old_futex;
    int pause_cnt;

    /*
     * Possibly wrap: if we have more than 64K lockers waiting, the ticket
     * value will wrap and two lockers will simultaneously be granted the
     * lock.
     */
    ticket = __atomic_fetch_add(&lock->fairlock.fair_lock_waiter, 1,
        __ATOMIC_SEQ_CST);
retry:
    __sync_synchronize();
    old_futex = lock->futex;
    if (old_futex == (uint32_t)ticket / SPIN_CONTROL) {
        /* Our turn is close: spin until the owner ticket reaches us. */
        while (ticket != lock->fairlock.fair_lock_owner)
            ;
    } else {
        /* Far from our turn: sleep until the futex word changes. */
        sys_futex((void *)&lock->futex, FUTEX_WAIT, old_futex, 0, 0, 0);
        goto retry;
    }

    /*
     * Applications depend on a barrier here so that operations holding the
     * lock see consistent data.
     */
    __sync_synchronize();

    return ticket;
}
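/*
 * The lock above is only usable with a release that advances the owner
 * ticket and pokes the futex word.  A plausible counterpart is sketched
 * below; fair_futex_unlock is an assumption, not code from the source, and
 * it presumes lock->futex overlays the owner/waiter pair so that bumping
 * the owner changes the value sleepers are waiting on.
 */
#include <limits.h>

void fair_futex_unlock(fair_futex_t *lock)
{
    /* Order the critical section before publishing the new owner. */
    __sync_synchronize();
    __atomic_fetch_add(&lock->fairlock.fair_lock_owner, 1, __ATOMIC_SEQ_CST);

    /* Wake everyone: FUTEX_WAKE cannot pick the holder of the next ticket,
     * so all sleepers recheck and all but one go back to sleep. */
    sys_futex((void *)&lock->futex, FUTEX_WAKE, INT_MAX, 0, 0, 0);
}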
/*
 * Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
    struct completion *vfork_done = tsk->vfork_done;

    /* Get rid of any cached register state */
    deactivate_mm(tsk, mm);

    /* notify parent sleeping on vfork() */
    if (vfork_done) {
        tsk->vfork_done = NULL;
        complete(vfork_done);
    }
    if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
        u32 __user *tidptr = tsk->clear_child_tid;
        tsk->clear_child_tid = NULL;

        /*
         * We don't check the error code - if userspace has
         * not set up a proper pointer then tough luck.
         */
        put_user(0, tidptr);
        sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
}
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fAutoResume, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
        &&  pThis->cNestings > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

#ifdef RTSEMMUTEX_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies)
    {
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorRec, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif

    /*
     * Convert timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End     = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout   = &ts;
    }

    /*
     * Lock the mutex.
     * Optimize for the uncontended case (makes 1-2 ns difference).
     */
    if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&pThis->iState, 1, 0)))
    {
        for (;;)
        {
            int32_t iOld = ASMAtomicXchgS32(&pThis->iState, 2);

            /*
             * Was the lock released in the meantime? This is unlikely (but possible).
             */
            if (RT_UNLIKELY(iOld == 0))
                break;

            /*
             * Go to sleep.
             */
            if (pTimeout && ( pTimeout->tv_sec || pTimeout->tv_nsec ))
            {
#ifdef RTSEMMUTEX_STRICT
                int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
                if (RT_FAILURE(rc9))
                    return rc9;
#else
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
            }

            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 2, pTimeout, NULL, 0);

            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMMUTEX_MAGIC))
                return VERR_SEM_DESTROYED;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }

            if (rc == 0)
                /* we'll leave the loop now unless another thread is faster */;
            else if (rc == -EWOULDBLOCK)
                /* retry with new value. */;
            else if (rc == -EINTR)
            {
                if (!fAutoResume)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }

            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64End - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                {
                    rc = VERR_TIMEOUT;
                    break;
                }
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
        }

        /*
         * When leaving this loop, iState is set to 2.  This means that we gained the
         * lock and there are _possibly_ some waiters.  We don't know exactly as another
         * thread might have entered this loop at nearly the same time.  Therefore we
         * will call futex_wakeup once too often (if _no_ other thread entered this
         * loop).  The key problem is the simple futex_wait test for x != y
         * (iState != 2 in our case).
         */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNestings, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
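/*
 * The 0/1/2 protocol above (0 = free, 1 = taken, 2 = taken with possible
 * waiters) relies on a release path that wakes a sleeper whenever state 2
 * was observed.  A minimal sketch of that counterpart, modeled on the
 * comment about the extra wakeup; this is not IPRT's actual
 * RTSemMutexRelease.
 */
static void rtSemMutexReleaseSketch(struct RTSEMMUTEXINTERNAL *pThis)
{
    /* Drop the lock; if someone parked the state at 2, wake one waiter.
     * A spurious wake when nobody sleeps is harmless by design. */
    if (ASMAtomicXchgS32(&pThis->iState, 0) == 2)
        sys_futex(&pThis->iState, FUTEX_WAKE, 1, NULL, NULL, 0);
}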
/*
 * Get a file descriptor for asynchronous futex notification; the given
 * signal is delivered when the futex is woken.  (FUTEX_FD was removed from
 * Linux in 2.6.26 because of inherent races.)
 */
int futex_await(struct futex *futx, int signal)
{
    return sys_futex(&futx->count, FUTEX_FD, signal, NULL);
}
/* Release: mark the futex free, commit the store, then wake one waiter. */
int __futex_up_slow(struct futex *futx)
{
    futx->count = 1;
    __futex_commit();
    return sys_futex(&futx->count, FUTEX_WAKE, 1, NULL);
}
int futex_wake(int *lock_word, int n)
{
    return sys_futex(lock_word, futex_wake_op(), n, 0);
}
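/*
 * Together, futex_wait() and futex_wake() above are enough for the textbook
 * two-state futex lock.  A minimal sketch, assuming GCC atomic builtins;
 * tiny_lock/tiny_unlock are illustrations, not part of the wrappers above.
 */
static void tiny_lock(int *lock_word)
{
    int expected = 0;
    while (!__atomic_compare_exchange_n(lock_word, &expected, 1, false,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
        /* Word is 1 (held): sleep until it changes; sec < 0 requests an
         * indefinite wait.  The EWOULDBLOCK check inside futex_wait closes
         * the race between the failed CAS and going to sleep. */
        futex_wait(lock_word, 1, -1, 0);
        expected = 0;
    }
}

static void tiny_unlock(int *lock_word)
{
    __atomic_store_n(lock_word, 0, __ATOMIC_RELEASE);
    futex_wake(lock_word, 1);   /* wake at most one sleeper */
}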
DECLINLINE(int) rtSemEventLnxMultiWait(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, uint64_t uTimeout,
                                       PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);

    /*
     * Quickly check whether it's signaled.
     */
    int32_t iCur = ASMAtomicUoReadS32(&pThis->iState);
    Assert(iCur == 0 || iCur == -1 || iCur == 1);
    if (iCur == -1)
        return VINF_SUCCESS;

    /*
     * Check and convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64Deadline = 0; /* shut up gcc */
    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
    {
        /* If the timeout is zero, then we're done. */
        if (!uTimeout)
            return VERR_TIMEOUT;

        /* Convert it to a deadline + interval timespec. */
        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
            uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                     ? uTimeout * UINT32_C(1000000)
                     : UINT64_MAX;
        if (uTimeout != UINT64_MAX) /* unofficial way of indicating an indefinite wait */
        {
            if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
                u64Deadline = RTTimeSystemNanoTS() + uTimeout;
            else
            {
                uint64_t u64Now = RTTimeSystemNanoTS();
                if (uTimeout <= u64Now)
                    return VERR_TIMEOUT;
                u64Deadline = uTimeout;
                uTimeout   -= u64Now;
            }
            if (   sizeof(ts.tv_sec) >= sizeof(uint64_t)
                || uTimeout <= UINT64_C(1000000000) * UINT32_MAX)
            {
                ts.tv_nsec = uTimeout % UINT32_C(1000000000);
                ts.tv_sec  = uTimeout / UINT32_C(1000000000);
                pTimeout   = &ts;
            }
        }
    }

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENTMULTI_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (unsigned i = 0;; i++)
    {
        /*
         * Start waiting.  We only account for there being or having been
         * threads waiting on the semaphore to keep things simple.
         */
        iCur = ASMAtomicUoReadS32(&pThis->iState);
        Assert(iCur == 0 || iCur == -1 || iCur == 1);
        if (    iCur == 1
            ||  ASMAtomicCmpXchgS32(&pThis->iState, 1, 0))
        {
            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64Deadline - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                    return VERR_TIMEOUT;
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
#ifdef RTSEMEVENTMULTI_STRICT
            if (pThis->fEverHadSignallers)
            {
                int rc9 = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                                uTimeout / UINT32_C(1000000), RTTHREADSTATE_EVENT_MULTI, true);
                if (RT_FAILURE(rc9))
                    return rc9;
            }
#endif
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 1, pTimeout, NULL, 0);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
                return VERR_SEM_DESTROYED;
            if (rc == 0)
                return VINF_SUCCESS;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
                /** @todo something is broken here. shows up every now and again in the ata
                 *        code.  Should try to run the timeout against RTTimeMilliTS to
                 *        check that it's doing the right thing... */
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == -EWOULDBLOCK)
                /* retry, the value changed. */;
            else if (rc == -EINTR)
            {
                if (fFlags & RTSEMWAIT_FLAGS_NORESUME)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }
        }
        else if (iCur == -1)
            return VINF_SUCCESS;
    }
}
static int rtSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies, bool fAutoResume)
{
    PCRTLOCKVALSRCPOS pSrcPos = NULL;

    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Quickly check whether it's signaled.
     */
    /** @todo this isn't fair if someone is already waiting on it.  They should
     *        have the first go at it!
     *        (ASMAtomicReadS32(&pThis->cWaiters) == 0 || !cMillies) && ... */
    if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
        return VINF_SUCCESS;

    /*
     * Convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        if (!cMillies)
            return VERR_TIMEOUT;
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End     = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout   = &ts;
    }

    ASMAtomicIncS32(&pThis->cWaiters);

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENT_STRICT
    RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef RTSEMEVENT_STRICT
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       cMillies, RTTHREADSTATE_EVENT, true);
            if (RT_FAILURE(rc))
                break;
        }
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
        long lrc = sys_futex(&pThis->fSignalled, FUTEX_WAIT, 0, pTimeout, NULL, 0);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
        if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        if (RT_LIKELY(lrc == 0 || lrc == -EWOULDBLOCK))
        {
            /* successful wakeup or fSignalled > 0 in the meantime */
            if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
                break;
        }
        else if (lrc == -ETIMEDOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
        else if (lrc == -EINTR)
        {
            if (!fAutoResume)
            {
                rc = VERR_INTERRUPTED;
                break;
            }
        }
        else
        {
            /* this shouldn't happen! */
            AssertMsgFailed(("rc=%ld errno=%d\n", lrc, errno));
            rc = RTErrConvertFromErrno(lrc);
            break;
        }

        /* adjust the relative timeout */
        if (pTimeout)
        {
            int64_t i64Diff = u64End - RTTimeSystemNanoTS();
            if (i64Diff < 1000)
            {
                rc = VERR_TIMEOUT;
                break;
            }
            ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
            ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
        }
    }

    ASMAtomicDecS32(&pThis->cWaiters);
    return rc;
}
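/*
 * Both IPRT wait loops above expect a signalling side that flips the state
 * word and wakes sleepers through the same futex.  A minimal sketch for the
 * auto-reset event, using the field names from the wait code above; this is
 * an assumption for illustration, not IPRT's actual RTSemEventSignal.
 */
static int rtSemEventSignalSketch(struct RTSEMEVENTINTERNAL *pThis)
{
    /* Mark the event signalled; a waiter's 1 -> 0 CAS consumes it. */
    ASMAtomicWriteU32(&pThis->fSignalled, 1);
    if (ASMAtomicReadS32(&pThis->cWaiters) > 0)
        sys_futex(&pThis->fSignalled, FUTEX_WAKE, 1, NULL, NULL, 0);
    return VINF_SUCCESS;
}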
inline
int futex_wait( std::atomic< std::int32_t > * addr, std::int32_t x) {
    return 0 <= sys_futex( static_cast< void * >( addr), FUTEX_WAIT_PRIVATE, x)
        ? 0
        : -1;
}
inline
int futex_wake( std::atomic< std::int32_t > * addr) {
    return 0 <= sys_futex( static_cast< void * >( addr), FUTEX_WAKE_PRIVATE, 1)
        ? 0
        : -1;
}
void *SharedMemory::lock()
{
    if (rptr == MAP_FAILED)
        return 0;

    /* Fast path: 0 -> 1 means we took the free lock. */
    int c = cmpxchg(&SH_MUTEX(rptr), 0, 1);
    if (!c)
        return (char *)rptr + sizeof(SegmentHeader);

    /* Contended: mark the lock as having waiters (state 2) and sleep
     * until the holder releases it. */
    if (c == 1)
        c = xchg(&SH_MUTEX(rptr), 2);
    while (c) {
        sys_futex(&SH_MUTEX(rptr), FUTEX_WAIT, 2, NULL, NULL, 0);
        c = xchg(&SH_MUTEX(rptr), 2);
    }
    return (char *)rptr + sizeof(SegmentHeader);
}
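/*
 * This is the acquire half of the classic three-state futex mutex from
 * Drepper's "Futexes Are Tricky"; it only stays correct with a matching
 * release.  A sketch of that counterpart, assuming the same cmpxchg/xchg
 * helpers and SH_MUTEX macro; an unlock method is implied but not shown in
 * the source.
 */
void SharedMemory::unlock()
{
    if (rptr == MAP_FAILED)
        return;

    /* Drop the lock; if the old state was 2 there may be sleepers, so
     * wake one of them to take over. */
    if (xchg(&SH_MUTEX(rptr), 0) == 2)
        sys_futex(&SH_MUTEX(rptr), FUTEX_WAKE, 1, NULL, NULL, 0);
}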