/*
 * Acquire an internal PAL lock, retrying until the underlying mutex
 * lock succeeds.  _DkMutexLock() can fail transiently (e.g. when the
 * wait is interrupted by a signal), so keep calling it until it
 * returns non-negative.
 *
 * Always returns 0.
 */
int _DkInternalLock (PAL_LOCK* lock)
{
    int rv;
    do {
        rv = _DkMutexLock(lock);
    } while (rv < 0); /* retry on interruption by signals */
    return 0;
}
/*
 * Lock a mutex with a timeout.
 *
 * 'timeout' is in microseconds: -1 blocks indefinitely (delegates to
 * _DkMutexLock); 0 is a non-blocking attempt; any other value bounds
 * each futex sleep.
 *
 * Returns 0 on success, -PAL_ERROR_TRYAGAIN when timeout == 0 and the
 * lock is contended, or a negative PAL error translated from the futex
 * syscall's errno (including its timeout error).
 *
 * Locking protocol (inferred from the atomics -- confirm against the
 * mutex init code): mut->value == 1 means unlocked.
 * atomic_dec_and_test() decrements and returns nonzero iff the result
 * is 0, i.e. we took the lock.  Failed attempts drive the value
 * negative, which the FUTEX_WAIT on the freshly-read value tolerates.
 */
int _DkMutexLockTimeout (struct mutex_handle * mut, int timeout)
{
    int i, c = 0;

    /* timeout == -1: block forever via the untimed lock path. */
    if (timeout == -1)
        return -_DkMutexLock(mut);

    struct atomic_int * m = &mut->value;

    /* Spin and try to take lock */
    for (i = 0 ; i < MUTEX_SPINLOCK_TIMES ; i++) {
        c = atomic_dec_and_test(m);
        if (c)
            goto success;
        cpu_relax();
    }

    /* The lock is now contended */

    int ret;

    if (timeout == 0) {
        /* Non-blocking attempt: report contention without sleeping. */
        ret = c ? 0 : -PAL_ERROR_TRYAGAIN;
        goto out;
    }

    while (!c) {
        int val = atomic_read(m);
        /* Lock appears free again: retry the decrement immediately
         * instead of sleeping on a stale value. */
        if (val == 1)
            goto again;

        /* Convert the microsecond timeout into a timespec for FUTEX_WAIT.
         * NOTE(review): the full timeout is re-armed on every loop
         * iteration, so EINTR/spurious wakeups can extend the total wait
         * well beyond 'timeout' -- confirm whether callers rely on a
         * strict overall bound. */
        struct timespec waittime;
        long sec = timeout / 1000000;
        long microsec = timeout - (sec * 1000000);
        waittime.tv_sec = sec;
        waittime.tv_nsec = microsec * 1000;

        /* Sleep only if the futex word still holds 'val'. */
        ret = INLINE_SYSCALL(futex, 6, m, FUTEX_WAIT, val, &waittime, NULL, 0);

        /* EWOULDBLOCK: the word changed before we slept; EINTR: a signal
         * woke us.  Both just mean "retry".  Any other error (including
         * the futex timeout) is translated and returned to the caller. */
        if (IS_ERR(ret) &&
            ERRNO(ret) != EWOULDBLOCK &&
            ERRNO(ret) != EINTR) {
            ret = unix_to_pal_error(ERRNO(ret));
            goto out;
        }

#ifdef DEBUG_MUTEX
        if (IS_ERR(ret))
            printf("mutex held by thread %d\n", mut->owner);
#endif

    again:
        /* Upon wakeup, we still need to check whether the mutex is
         * unlocked or someone else took it: if atomic_dec_and_test()
         * brought the value to 0 (c != 0), we own the lock and exit the
         * loop; otherwise we sleep again through another futex call. */
        c = atomic_dec_and_test(m);
    }

success:
#ifdef DEBUG_MUTEX
    /* Record the owning thread id for debug diagnostics only. */
    mut->owner = INLINE_SYSCALL(gettid, 0);
#endif
    ret = 0;
out:
    return ret;
}