/* Wait on a process-private barrier. Returns PTHREAD_BARRIER_SERIAL_THREAD
 * for exactly one caller per barrier cycle and 0 for all others. */
int pthread_barrier_wait(pthread_barrier_t *b)
{
	int limit = b->_b_limit;
	struct instance *inst;

	/* Trivial case: count was set at 1 */
	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Process-shared barriers require a separate, inefficient wait */
	if (limit < 0) return pshared_barrier_wait(b);

	/* Otherwise we need a lock on the barrier object */
	while (a_swap(&b->_b_lock, 1))
		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);
	inst = b->_b_inst;

	/* First thread to enter the barrier becomes the "instance owner".
	 * The instance lives on the owner's stack, so the owner must not
	 * return until every other thread has exited the barrier. */
	if (!inst) {
		struct instance new_inst = { 0 };
		int spins = 10000;
		b->_b_inst = inst = &new_inst;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		/* Spin briefly hoping the last exiter arrives soon, then
		 * fall back to a futex sleep on the finished flag. */
		while (spins-- && !inst->finished)
			a_spin();
		a_inc(&inst->finished);
		while (inst->finished == 1)
			__syscall(SYS_futex, &inst->finished, FUTEX_WAIT,1,0);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	/* Last thread to enter the barrier wakes all non-instance-owners */
	if (++inst->count == limit) {
		/* Detach the instance so the next cycle starts fresh. */
		b->_b_inst = 0;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		a_store(&inst->last, 1);
		if (inst->waiters)
			__wake(&inst->last, -1, 1);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		__wait(&inst->last, &inst->waiters, 0, 1);
	}

	/* Last thread to exit the barrier wakes the instance owner */
	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
		__wake(&inst->finished, 1, 1);

	return 0;
}
/* Slow-path wait for process-shared barriers. Uses counting in the lock
 * word itself (taken with value <limit>) so that the final recursive
 * unlock can support self-synchronized destruction. */
static int pshared_barrier_wait(pthread_barrier_t *b)
{
	/* For pshared barriers, _b_limit stores count-1 with the sign bit
	 * set; mask and add 1 to recover the thread count. */
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Acquire the barrier lock; each arriving waiter's exit will
	 * decrement it, so it is taken with value <limit>, not 1. */
	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	if (++b->_b_count == limit) {
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	__vm_lock_impl(+1);

	/* Ensure all threads have a vm lock before proceeding: count goes
	 * negative as threads pass; the last one (reaching 1-limit) resets
	 * it and releases the rest. */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Perform a recursive unlock suitable for self-sync'd destruction:
	 * decrement the per-waiter count held in the lock word, treating
	 * INT_MIN+1 (destroy-pending, one holder left) as full release. */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

	__vm_unlock_impl();

	return ret;
}
int __pthread_once(pthread_once_t *control, void (*init)(void))
{
	/* Return immediately if init finished before, but ensure that
	 * effects of the init routine are visible to the caller. */
	if (*control == 2) {
		a_barrier();
		return 0;
	}

	/* Try to enter initializing state. Four possibilities:
	 *  0 - we're the first or the other cancelled; run init
	 *  1 - another thread is running init; wait
	 *  2 - another thread finished running init; just return
	 *  3 - another thread is running init, waiters present; wait */
	for (;;) switch (a_cas(control, 0, 1)) {
	case 0:
		/* The cleanup handler (undo) runs if init() is cancelled,
		 * allowing another thread to retry the initialization. */
		pthread_cleanup_push(undo, control);
		init();
		pthread_cleanup_pop(0);

		/* Publish completion; old state 3 means waiters exist. */
		if (a_swap(control, 2) == 3)
			__wake(control, -1, 1);
		return 0;
	case 1:
		/* If this fails, so will __wait. */
		a_cas(control, 1, 3);
		/* fallthrough */
	case 3:
		__wait(control, 0, 3, 1);
		continue;
	case 2:
		return 0;
	}
}
/* Slow path of pthread_once: same 4-state protocol as __pthread_once
 * but without the fast already-initialized early return. */
int __pthread_once_full(pthread_once_t *control, void (*init)(void))
{
	/* Try to enter initializing state. Four possibilities:
	 *  0 - we're the first or the other cancelled; run init
	 *  1 - another thread is running init; wait
	 *  2 - another thread finished running init; just return
	 *  3 - another thread is running init, waiters present; wait */
	for (;;) switch (a_cas(control, 0, 1)) {
	case 0:
		/* The cleanup handler (undo) runs if init() is cancelled,
		 * allowing another thread to retry the initialization. */
		pthread_cleanup_push(undo, control);
		init();
		pthread_cleanup_pop(0);

		/* Publish completion; old state 3 means waiters exist. */
		if (a_swap(control, 2) == 3)
			__wake(control, -1, 1);
		return 0;
	case 1:
		/* If this fails, so will __wait. */
		a_cas(control, 1, 3);
		/* fallthrough */
	case 3:
		__wait(control, 0, 3, 1);
		continue;
	case 2:
		return 0;
	}
}
int pthread_once(pthread_once_t *control, void (*init)(void))
{
	/* Waiter count shared by all once-controls using this function. */
	static int waiters;

	/* Return immediately if init finished before */
	if (*control == 2) return 0;

	/* Try to enter initializing state. Three possibilities:
	 *  0 - we're the first or the other cancelled; run init
	 *  1 - another thread is running init; wait
	 *  2 - another thread finished running init; just return */
	for (;;) switch (a_swap(control, 1)) {
	case 0:
		pthread_cleanup_push(undo, control);
		init();
		pthread_cleanup_pop(0);

		a_store(control, 2);
		if (waiters) __wake(control, -1, 0);
		return 0;
	case 1:
		__wait(control, &waiters, 1, 0);
		continue;
	case 2:
		/* The unconditional swap above clobbered the finished
		 * state (2 -> 1); restore it before returning. */
		a_store(control, 2);
		return 0;
	}
}
int __pthread_once(pthread_once_t *control, void (*init)(void))
{
	/* Waiter count shared by all once-controls using this function. */
	static int waiters;

	/* Return immediately if init finished before, but ensure that
	 * effects of the init routine are visible to the caller. */
	if (*control == 2) {
		a_barrier();
		return 0;
	}

	/* Try to enter initializing state. Three possibilities:
	 *  0 - we're the first or the other cancelled; run init
	 *  1 - another thread is running init; wait
	 *  2 - another thread finished running init; just return */
	for (;;) switch (a_cas(control, 0, 1)) {
	case 0:
		/* The cleanup handler (undo) runs if init() is cancelled,
		 * allowing another thread to retry the initialization. */
		pthread_cleanup_push(undo, control);
		init();
		pthread_cleanup_pop(0);

		a_store(control, 2);
		if (waiters) __wake(control, -1, 1);
		return 0;
	case 1:
		__wait(control, &waiters, 1, 1);
		continue;
	case 2:
		return 0;
	}
}
/* Adjust the vm lock count by inc (+1 or -1 per the callers' usage).
 * Positive and negative counts are mutually exclusive: when the current
 * count's sign opposes the requested change, sleep on the futex until
 * it changes, then retry the compare-and-swap. */
void __vm_lock(int inc)
{
	for (;;) {
		int cur = vmlock[0];
		if (inc*cur < 0) {
			/* Opposite-sign holders present; wait for release. */
			__wait(vmlock, vmlock+1, cur, 1);
			continue;
		}
		if (a_cas(vmlock, cur, cur+inc) == cur)
			return;
	}
}
/* Two-phase futex lock: 0 = unlocked, 1 = locked uncontended,
 * 2 = locked with (possible) waiters. */
static inline void lock(volatile int *l)
{
	/* Fast path: uncontended acquisition with value 1. */
	if (!a_cas(l, 0, 1)) return;
	/* Contended: advertise waiters by upgrading 1 -> 2, then sleep
	 * until released, re-acquiring with the contended value 2 so the
	 * eventual unlock knows a wake may be needed. */
	a_cas(l, 1, 2);
	do __wait(l, 0, 2, 1);
	while (a_cas(l, 0, 2));
}
/* Acquire the stdio stream lock for the calling thread.
 * Returns 0 if the caller already holds it (recursive entry),
 * 1 if the lock was newly taken (caller must unlock later). */
int __lockfile(FILE *f)
{
	int tid = __pthread_self()->tid;

	if (f->lock == tid)
		return 0;

	for (;;) {
		int holder = a_cas(&f->lock, 0, tid);
		if (!holder) break;
		/* Lock word holds the owner's tid; sleep until it changes. */
		__wait(&f->lock, &f->waiters, holder, 1);
	}
	return 1;
}
int pthread_join(pthread_t t, void **res)
{
	int tmp = t->tid;

	/* Joining is a cancellation point: futex-wait until t->tid no
	 * longer holds its snapshot value. NOTE(review): presumably the
	 * tid is cleared by the kernel on thread exit (child-tid-clear
	 * mechanism) — confirm against thread-creation code. */
	CANCELPT_BEGIN;
	if (tmp) __wait(&t->tid, 0, tmp, 1);
	CANCELPT_END;

	if (res) *res = t->result;

	/* Reclaim the dead thread's mapping (stack/descriptor) if the
	 * implementation owns it. */
	if (t->map_base) munmap(t->map_base, t->map_size);

	return 0;
}
int pthread_cond_broadcast(pthread_cond_t *c)
{
	pthread_mutex_t *m;

	if (!c->_c_waiters) return 0;

	/* Bump the sequence so current waiters stop waiting. */
	a_inc(&c->_c_seq);

#ifdef __EMSCRIPTEN__
	// XXX Emscripten: TODO: This is suboptimal but works naively
	// correctly for now. The Emscripten-specific code path below has
	// a bug and does not work for some reason. Figure it out and
	// remove this code block. (Everything after this return is dead
	// code in Emscripten builds.)
	__wake(&c->_c_seq, -1, 0);
	return 0;
#endif

	/* If cond var is process-shared, simply wake all waiters. */
	if (c->_c_mutex == (void *)-1) {
		__wake(&c->_c_seq, -1, 0);
		return 0;
	}

	/* Block waiters from returning so we can use the mutex. */
	while (a_swap(&c->_c_lock, 1))
		__wait(&c->_c_lock, &c->_c_lockwait, 1, 1);
	if (!c->_c_waiters)
		goto out;
	m = c->_c_mutex;

	/* Move waiter count to the mutex */
	a_fetch_add(&m->_m_waiters, c->_c_waiters2);
	c->_c_waiters2 = 0;

#ifdef __EMSCRIPTEN__
	int futexResult;
	do {
		// XXX Emscripten: Bug, this does not work correctly.
		futexResult = emscripten_futex_wake_or_requeue(&c->_c_seq,
			!m->_m_type || (m->_m_lock&INT_MAX)!=pthread_self()->tid,
			c->_c_seq, &m->_m_lock);
	} while(futexResult == -EAGAIN);
#else
	/* Perform the futex requeue, waking one waiter unless we know
	 * that the calling thread holds the mutex. */
	__syscall(SYS_futex, &c->_c_seq, FUTEX_REQUEUE,
		!m->_m_type || (m->_m_lock&INT_MAX)!=pthread_self()->tid,
		INT_MAX, &m->_m_lock);
#endif

out:
	a_store(&c->_c_lock, 0);
	if (c->_c_lockwait) __wake(&c->_c_lock, 1, 0);

	return 0;
}
/* Decompiler output (machine-assigned names/types).
 * NOTE(review): __wait() is called with no arguments here — likely a
 * platform wait-for-event primitive; confirm against the original
 * binary. If the global word at 006C is nonzero, it is cleared, its
 * old value is latched into 006E, and fn053A is invoked. Meaning of
 * the two globals is unknown — verify against the memory map. */
void fn0128(ci16 r4, word16 pc)
{
	__wait();
	word16 r0_3 = globals->w006C;
	if (r0_3 != 0x00) {
		globals->w006C = 0x00;
		globals->w006E = r0_3;
		fn053A(r4, pc);
	}
	return;
}
/* Cancellation-aware wrapper around the __wait syscall stub: marks the
 * calling thread cancellable for the duration of the blocking call. */
pid_t _wait(int *istat)
{
	struct pthread *self = _get_curthread();
	pid_t result;

	_thr_cancel_enter(self);
	result = __wait(istat);
	_thr_cancel_leave(self, 1);

	return result;
}
/* Destroy a barrier. Only process-shared barriers (_b_limit < 0) need
 * real work: mark the lock word for destruction (sign bit) and drain
 * remaining in-barrier threads before allowing memory reuse. */
int pthread_barrier_destroy(pthread_barrier_t *b)
{
	if (b->_b_limit >= 0)
		return 0;

	if (b->_b_lock) {
		/* Set the destroy flag, then wait until the holder count
		 * (low 31 bits) drains to zero. */
		a_or(&b->_b_lock, INT_MIN);
		for (;;) {
			int cur = b->_b_lock;
			if (!(cur & INT_MAX)) break;
			__wait(&b->_b_lock, 0, cur, 0);
		}
	}
	/* Ensure no thread still holds a vm lock on the mapping. */
	__vm_wait();
	return 0;
}
/* Entry point for a newly created thread (the function passed to the
 * kernel thread-creation call); p is the new thread's descriptor. */
static int start(void *p)
{
	pthread_t self = p;

	/* If the creator set a startlock, wait for it to release us.
	 * If the lock word is still set afterwards, creation failed
	 * (or was cancelled): exit without running the start function. */
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			/* Mark as detached-dead so no join is expected. */
			self->detached = 2;
			pthread_exit(0);
		}
		/* Restore the signal mask saved by the creating thread. */
		__restore_sigs(self->sigmask);
	}

	/* Unblock the implementation-internal signal set if requested
	 * (NOTE(review): presumably set when cancellation must be
	 * deliverable immediately — confirm against pthread_create). */
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);

	pthread_exit(self->start(self->start_arg));
	/* Unreachable: pthread_exit does not return. */
	return 0;
}
int pthread_cond_timedwait(pthread_cond_t *c, pthread_mutex_t *m, const struct timespec *ts)
{
	/* Context passed to the cancellation cleanup handler. */
	struct cm cm = { .c=c, .m=m };
	int r, e=0, seq;

	/* For non-normal mutex types, the caller must hold the mutex. */
	if (m->_m_type && (m->_m_lock&INT_MAX) != pthread_self()->tid)
		return EPERM;

	if (ts && ts->tv_nsec >= 1000000000UL)
		return EINVAL;

	pthread_testcancel();

	a_inc(&c->_c_waiters);

	/* Non-process-shared: bind the cond var to this mutex and count
	 * ourselves in waiters2 (the count broadcast can requeue onto the
	 * mutex), under the cond var's internal lock. */
	if (c->_c_mutex != (void *)-1) {
		c->_c_mutex = m;
		while (a_swap(&c->_c_lock, 1))
			__wait(&c->_c_lock, &c->_c_lockwait, 1, 1);
		c->_c_waiters2++;
		a_store(&c->_c_lock, 0);
		if (c->_c_lockwait) __wake(&c->_c_lock, 1, 1);
	}

	/* Snapshot the sequence before unlocking: any signal/broadcast
	 * after this point changes _c_seq and ends the wait. */
	seq = c->_c_seq;

	pthread_mutex_unlock(m);

	do e = __timedwait(&c->_c_seq, seq, c->_c_clock, ts, cleanup, &cm, 0);
	while (c->_c_seq == seq && (!e || e==EINTR));
	/* Spurious interruption is not reported to the caller. */
	if (e == EINTR) e = 0;

	/* Deregister as a waiter (handles requeue bookkeeping). */
	unwait(c, m);

	if ((r=pthread_mutex_lock(m))) return r;

	return e;
}
/* Deregister a (possibly timed-out or cancelled) waiter from the cond
 * var, fixing up whichever waiter count it currently lives in. */
static void unwait(pthread_cond_t *c, pthread_mutex_t *m)
{
	/* Removing a waiter is non-trivial if we could be using requeue
	 * based broadcast signals, due to mutex access issues, etc. */

	if (c->_c_mutex == (void *)-1) {
		/* Process-shared: no requeue is used, so only the waiter
		 * count needs adjusting; wake a destroyer waiting for the
		 * count to drain. */
		a_dec(&c->_c_waiters);
		if (c->_c_destroy) __wake(&c->_c_waiters, 1, 0);
		return;
	}

	/* Take the cond var's internal lock. */
	while (a_swap(&c->_c_lock, 1))
		__wait(&c->_c_lock, &c->_c_lockwait, 1, 1);

	/* If a broadcast already moved our count to the mutex, decrement
	 * there; otherwise we are still counted in waiters2. */
	if (c->_c_waiters2) c->_c_waiters2--;
	else a_dec(&m->_m_waiters);

	a_store(&c->_c_lock, 0);
	if (c->_c_lockwait) __wake(&c->_c_lock, 1, 1);

	a_dec(&c->_c_waiters);
	if (c->_c_destroy) __wake(&c->_c_waiters, 1, 1);
}
int pthread_cond_broadcast(pthread_cond_t *c)
{
	pthread_mutex_t *m;

	if (!c->_c_waiters) return 0;

	/* Bump the sequence so current waiters stop waiting. */
	a_inc(&c->_c_seq);

	/* If cond var is process-shared, simply wake all waiters. */
	if (c->_c_mutex == (void *)-1) {
		__wake(&c->_c_seq, -1, 0);
		return 0;
	}

	/* Block waiters from returning so we can use the mutex. */
	while (a_swap(&c->_c_lock, 1))
		__wait(&c->_c_lock, &c->_c_lockwait, 1, 1);
	if (!c->_c_waiters)
		goto out;
	m = c->_c_mutex;

	/* Move waiter count to the mutex */
	a_fetch_add(&m->_m_waiters, c->_c_waiters2);
	c->_c_waiters2 = 0;

	/* Perform the futex requeue, waking one waiter unless we know
	 * that the calling thread holds the mutex (in which case the
	 * woken thread would immediately block on it anyway). */
	__syscall(SYS_futex, &c->_c_seq, FUTEX_REQUEUE,
		!m->_m_type || (m->_m_lock&INT_MAX)!=pthread_self()->tid,
		INT_MAX, &m->_m_lock);

out:
	a_store(&c->_c_lock, 0);
	if (c->_c_lockwait) __wake(&c->_c_lock, 1, 0);

	return 0;
}
/* Take a simple binary futex lock; the waiter count lives in the word
 * immediately following the lock word. */
static inline void lock(volatile int *lk)
{
	/* Single-threaded process: no synchronization needed. */
	if (!libc.threads_minus_1) return;

	while (a_swap(lk, 1))
		__wait(lk, lk+1, 1, 1);
}
/* Take a simple binary futex lock; the waiter count lives in the word
 * immediately following the lock word. */
static void lock(volatile int *lk)
{
	/* Only synchronize once the process has more than one thread. */
	if (libc.threads_minus_1) {
		while (a_swap(lk, 1))
			__wait(lk, lk+1, 1, 1);
	}
}
/// Stop all io_service objects in the pool. void stop() { __wait(); }
/* Called by simulation code: records that task <pid> has blocked
 * waiting for IO, bracketing the bookkeeping in overhead accounting. */
void Waiting(int pid)
{
	task_t *task;

	start_overhead();
	task = __wait(pid);
	task->last_state = WAIT;
	stop_overhead();
}
/* Thin wrapper exposing __wait() under the ph_ namespace.
 * NOTE(review): err is forwarded as __wait()'s only argument and the
 * result is returned unchanged as uint_t — confirm the exact status
 * semantics against __wait()'s declaration. */
uint_t ph_wait(int *err)
{
	return __wait(err);
}
/* Block until the vm lock count reaches zero, i.e. no thread holds a
 * vm lock in either direction; the waiter count is in vmlock[1]. */
void __vm_wait()
{
	int cur;
	while ((cur = vmlock[0]) != 0)
		__wait(vmlock, vmlock+1, cur, 1);
}