int __pthread_once(pthread_once_t *control, void (*init)(void))
{
	/* Return immediately if init finished before, but ensure that
	 * effects of the init routine are visible to the caller. */
	if (*control == 2) {
		a_barrier();
		return 0;
	}

	/* Try to enter initializing state. Four possibilities:
	 *  0 - we're the first or the other cancelled; run init
	 *  1 - another thread is running init; wait
	 *  2 - another thread finished running init; just return
	 *  3 - another thread is running init, waiters present; wait */

	for (;;) switch (a_cas(control, 0, 1)) {
	case 0:
		pthread_cleanup_push(undo, control);
		init();
		pthread_cleanup_pop(0);

		if (a_swap(control, 2) == 3)
			__wake(control, -1, 1);
		return 0;
	case 1:
		/* If this fails, so will __wait. */
		a_cas(control, 1, 3);
	case 3:
		__wait(control, 0, 3, 1);
		continue;
	case 2:
		return 0;
	}
}
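/* Usage sketch (not part of the implementation above): pthread_once
 * guarantees that init_once runs exactly once no matter how many
 * threads race to call get_shared_state. The names init_once and
 * get_shared_state are hypothetical. */
#include <pthread.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;
static int shared_state;

static void init_once(void)
{
	shared_state = 42;	/* expensive one-time setup would go here */
}

int get_shared_state(void)
{
	pthread_once(&once, init_once);	/* safe to call from any thread */
	return shared_state;
}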
int __pthread_once_full(pthread_once_t *control, void (*init)(void))
{
	/* Try to enter initializing state. Four possibilities:
	 *  0 - we're the first or the other cancelled; run init
	 *  1 - another thread is running init; wait
	 *  2 - another thread finished running init; just return
	 *  3 - another thread is running init, waiters present; wait */

	for (;;) switch (a_cas(control, 0, 1)) {
	case 0:
		pthread_cleanup_push(undo, control);
		init();
		pthread_cleanup_pop(0);

		if (a_swap(control, 2) == 3)
			__wake(control, -1, 1);
		return 0;
	case 1:
		/* If this fails, so will __wait. */
		a_cas(control, 1, 3);
	case 3:
		__wait(control, 0, 3, 1);
		continue;
	case 2:
		return 0;
	}
}
/* Simple futex lock: 0 = unlocked, 1 = locked, 2 = locked with waiters. */
static inline void lock(volatile int *l)
{
	/* Fast path: try to take the lock uncontended (0 -> 1). */
	if (a_cas(l, 0, 1)) {
		/* Mark the lock contended (1 -> 2), then wait until it can
		 * be acquired in the contended state (0 -> 2). */
		a_cas(l, 1, 2);
		do __wait(l, 0, 2, 1);
		while (a_cas(l, 0, 2));
	}
}
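/* Hedged sketch of the matching unlock for the 0/1/2 protocol above,
 * modeled on musl's internal __unlock: swap in 0 to release, and issue
 * a futex wake only if the old value was 2 (locked with waiters). */
static inline void unlock(volatile int *l)
{
	if (a_swap(l, 0) == 2)
		__wake(l, 1, 1);
}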
static int pshared_barrier_wait(pthread_barrier_t *b)
{
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	if (++b->_b_count == limit) {
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	__vm_lock_impl(+1);

	/* Ensure all threads have a vm lock before proceeding */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Perform a recursive unlock suitable for self-sync'd destruction */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

	__vm_unlock_impl();

	return ret;
}
int __pthread_once(pthread_once_t *control, void (*init)(void))
{
	static int waiters;

	/* Return immediately if init finished before, but ensure that
	 * effects of the init routine are visible to the caller. */
	if (*control == 2) {
		a_barrier();
		return 0;
	}

	/* Try to enter initializing state. Three possibilities:
	 *  0 - we're the first or the other cancelled; run init
	 *  1 - another thread is running init; wait
	 *  2 - another thread finished running init; just return */

	for (;;) switch (a_cas(control, 0, 1)) {
	case 0:
		pthread_cleanup_push(undo, control);
		init();
		pthread_cleanup_pop(0);

		a_store(control, 2);
		if (waiters) __wake(control, -1, 1);
		return 0;
	case 1:
		__wait(control, &waiters, 1, 1);
		continue;
	case 2:
		return 0;
	}
}
static int __pthread_detach(pthread_t t)
{
	/* If the cas fails, detach state is either already-detached
	 * or exiting/exited, and pthread_join will trap or cleanup. */
	if (a_cas(&t->detach_state, DT_JOINABLE, DT_DYNAMIC) != DT_JOINABLE)
		return __pthread_join(t, 0);
	return 0;
}
/* vmlock[0] counts holders; a request whose sign is opposite to the
 * current count waits until the count changes. */
void __vm_lock(int inc)
{
	for (;;) {
		int v = vmlock[0];
		if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);
		else if (a_cas(vmlock, v, v+inc)==v) break;
	}
}
int __lockfile(FILE *f)
{
	int owner, tid = __pthread_self()->tid;
	if (f->lock == tid)
		return 0;
	while ((owner = a_cas(&f->lock, 0, tid)))
		__wait(&f->lock, &f->waiters, owner, 1);
	return 1;
}
int pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *at)
{
	int r, t;

	if (m->_m_type == PTHREAD_MUTEX_NORMAL
	    && !a_cas(&m->_m_lock, 0, EBUSY))
		return 0;

	while ((r=pthread_mutex_trylock(m)) == EBUSY) {
		if (!(r=m->_m_lock) || (r&0x40000000)) continue;
		if ((m->_m_type&3) == PTHREAD_MUTEX_ERRORCHECK
		    && (r&0x1fffffff) == pthread_self()->tid)
			return EDEADLK;

		a_inc(&m->_m_waiters);
		t = r | 0x80000000;
		a_cas(&m->_m_lock, r, t);
		r = __timedwait(&m->_m_lock, t, CLOCK_REALTIME, at, 0, 0, 0);
		a_dec(&m->_m_waiters);
		if (r && r != EINTR) break;
	}
	return r;
}
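/* Caller-side sketch (not musl code): pthread_mutex_timedlock takes an
 * absolute CLOCK_REALTIME deadline, so callers typically build it from
 * clock_gettime plus a relative timeout. lock_with_timeout_ms is a
 * hypothetical helper name. */
#include <pthread.h>
#include <time.h>

int lock_with_timeout_ms(pthread_mutex_t *m, long ms)
{
	struct timespec at;
	clock_gettime(CLOCK_REALTIME, &at);
	at.tv_sec += ms / 1000;
	at.tv_nsec += (ms % 1000) * 1000000L;
	if (at.tv_nsec >= 1000000000L) {
		at.tv_sec++;
		at.tv_nsec -= 1000000000L;
	}
	return pthread_mutex_timedlock(m, &at);	/* 0, ETIMEDOUT, EDEADLK, ... */
}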
int pthread_mutex_trylock(pthread_mutex_t *m)
{
	int tid;
	int own;
	pthread_t self;

	if (m->_m_type == PTHREAD_MUTEX_NORMAL)
		return a_swap(&m->_m_lock, EBUSY);

	self = pthread_self();
	tid = self->tid | 0x80000000;

	if (m->_m_type >= 4) {
		if (!self->robust_list.off)
			__syscall(SYS_set_robust_list,
				&self->robust_list, 3*sizeof(long));
		self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
		self->robust_list.pending = &m->_m_next;
	}

	if (m->_m_lock == tid && (m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE) {
		if ((unsigned)m->_m_count >= INT_MAX) return EAGAIN;
		m->_m_count++;
		return 0;
	}

	own = m->_m_lock;
	if ((own && !(own & 0x40000000)) || a_cas(&m->_m_lock, own, tid)!=own)
		return EBUSY;

	m->_m_count = 1;

	if (m->_m_type < 4) return 0;

	if (m->_m_type >= 8) {
		m->_m_lock = 0;
		return ENOTRECOVERABLE;
	}
	m->_m_next = self->robust_list.head;
	m->_m_prev = &self->robust_list.head;
	if (self->robust_list.head)
		self->robust_list.head[-1] = &m->_m_next;
	self->robust_list.head = &m->_m_next;
	self->robust_list.pending = 0;

	if (own) {
		m->_m_type += 8;
		return EOWNERDEAD;
	}

	return 0;
}
int sem_post(sem_t *sem)
{
	int val, waiters;
	do {
		val = sem->__val[0];
		waiters = sem->__val[1];
		if (val == SEM_VALUE_MAX) {
			errno = EOVERFLOW;
			return -1;
		}
	} while (a_cas(sem->__val, val, val+1+(val<0)) != val);
	if (val<0 || waiters) __wake(sem->__val, 1, 0);
	return 0;
}
int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
{
	int old, own;
	int type = m->_m_type & 15;
	pthread_t self = __pthread_self();
	int tid = self->tid;

	old = m->_m_lock;
	own = old & 0x7fffffff;
	if (own == tid && (type&3) == PTHREAD_MUTEX_RECURSIVE) {
		if ((unsigned)m->_m_count >= INT_MAX) return EAGAIN;
		m->_m_count++;
		return 0;
	}
	if (own == 0x40000000) return ENOTRECOVERABLE;

	if (m->_m_type & 128) {
		if (!self->robust_list.off) {
			self->robust_list.off = (char*)&m->_m_lock - (char*)&m->_m_next;
			__syscall(SYS_set_robust_list, &self->robust_list,
				3*sizeof(long));
		}
		if (m->_m_waiters) tid |= 0x80000000;
		self->robust_list.pending = &m->_m_next;
	}

	if ((own && (!(own & 0x40000000) || !(type & 4)))
	    || a_cas(&m->_m_lock, old, tid) != old) {
		self->robust_list.pending = 0;
		return EBUSY;
	}

	volatile void *next = self->robust_list.head;
	m->_m_next = next;
	m->_m_prev = &self->robust_list.head;
	if (next != &self->robust_list.head) *(volatile void *volatile *)
		((char *)next - sizeof(void *)) = &m->_m_next;
	self->robust_list.head = &m->_m_next;
	self->robust_list.pending = 0;

	if (own) {
		m->_m_count = 0;
		m->_m_type |= 8;
		return EOWNERDEAD;
	}

	return 0;
}
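/* Caller-side sketch showing how the EOWNERDEAD return above surfaces
 * through the public API (not musl code): for a mutex initialized with
 * pthread_mutexattr_setrobust(..., PTHREAD_MUTEX_ROBUST), a new owner
 * that inherits the lock from a dead thread must call
 * pthread_mutex_consistent() before unlocking, or later lockers get
 * ENOTRECOVERABLE. lock_robust is a hypothetical helper. */
#include <errno.h>
#include <pthread.h>

int lock_robust(pthread_mutex_t *m)
{
	int r = pthread_mutex_lock(m);
	if (r == EOWNERDEAD) {
		/* Repair any state the dead owner left behind, then mark
		 * the mutex consistent so it remains usable. */
		pthread_mutex_consistent(m);
		r = 0;
	}
	return r;
}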
int ftrylockfile(FILE *f)
{
	int tid = pthread_self()->tid;
	if (f->lock == tid) {
		if (f->lockcount == LONG_MAX)
			return -1;
		f->lockcount++;
		return 0;
	}
	if (f->lock < 0) f->lock = 0;
	if (f->lock || a_cas(&f->lock, 0, tid))
		return -1;
	f->lockcount = 1;
	return 0;
}
int sem_timedwait(sem_t *sem, const struct timespec *at)
{
	while (sem_trywait(sem)) {
		int r;
		a_inc(sem->__val+1);
		a_cas(sem->__val, 0, -1);
		r = __timedwait(sem->__val, -1, CLOCK_REALTIME, at, cleanup, sem->__val+1, 0);
		a_dec(sem->__val+1);
		if (r) {
			errno = r;
			return -1;
		}
	}
	return 0;
}
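/* Usage sketch tying sem_post and sem_timedwait together (not musl
 * code): a bounded wait for work posted by another thread. The deadline
 * is absolute CLOCK_REALTIME, as in the implementation above.
 * wait_for_item is a hypothetical helper. */
#include <errno.h>
#include <semaphore.h>
#include <time.h>

int wait_for_item(sem_t *work_available)
{
	struct timespec at;
	clock_gettime(CLOCK_REALTIME, &at);
	at.tv_sec += 5;	/* give the producer up to five seconds */

	if (sem_timedwait(work_available, &at)) {
		/* errno is ETIMEDOUT if the deadline passed, or EINTR if a
		 * signal interrupted the wait. */
		return -1;
	}
	return 0;	/* a sem_post from the producer let us through */
}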
int ftrylockfile(FILE *f)
{
	pthread_t self = __pthread_self();
	int tid = self->tid;
	int owner = f->lock;
	if ((owner & ~MAYBE_WAITERS) == tid) {
		if (f->lockcount == LONG_MAX)
			return -1;
		f->lockcount++;
		return 0;
	}
	if (owner < 0) f->lock = owner = 0;
	if (owner || a_cas(&f->lock, 0, tid))
		return -1;
	__register_locked_file(f, self);
	return 0;
}
static void handler(int sig)
{
	struct chain ch;
	int old_errno = errno;

	sem_init(&ch.target_sem, 0, 0);
	sem_init(&ch.caller_sem, 0, 0);

	ch.tid = __syscall(SYS_gettid);

	do ch.next = head;
	while (a_cas_p(&head, ch.next, &ch) != ch.next);

	if (a_cas(&target_tid, ch.tid, 0) == (ch.tid | 0x80000000))
		__syscall(SYS_futex, &target_tid, FUTEX_UNLOCK_PI|FUTEX_PRIVATE);

	sem_wait(&ch.target_sem);

	callback(context);

	sem_post(&ch.caller_sem);
	sem_wait(&ch.target_sem);

	errno = old_errno;
}
int __pthread_rwlock_trywrlock(pthread_rwlock_t *rw)
{
	/* A writer claims the whole lock word: 0 means unlocked,
	 * 0x7fffffff means write-locked. */
	if (a_cas(&rw->_rw_lock, 0, 0x7fffffff)) return EBUSY;
	return 0;
}
int __pthread_mutex_trylock(pthread_mutex_t *m)
{
	if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL)
		/* Mask off the waiters bit so a held lock reports EBUSY
		 * and an uncontended acquisition reports 0. */
		return a_cas(&m->_m_lock, 0, EBUSY) & EBUSY;
	return __pthread_mutex_trylock_owner(m);
}