static void
setup_nonblocking_pipe(ethr_event *e)
{
    int flgs;
    int res;

    res = pipe(e->fd);
    if (res != 0)
        ETHR_FATAL_ERROR__(errno);

    ETHR_ASSERT(e->fd[0] >= 0 && e->fd[1] >= 0);

    flgs = fcntl(e->fd[0], F_GETFL, 0);
    fcntl(e->fd[0], F_SETFL, flgs | O_NONBLOCK);
    flgs = fcntl(e->fd[1], F_GETFL, 0);
    fcntl(e->fd[1], F_SETFL, flgs | O_NONBLOCK);

#ifndef __DARWIN__
    if (e->fd[0] >= FD_SETSIZE)
        ETHR_FATAL_ERROR__(ENOTSUP);
#else
    {
        int nmasks;
        ethr_event_fdsets__ *fdsets;
        size_t mem_size;

        nmasks = (e->fd[0]+NFDBITS)/NFDBITS;
        mem_size = 2*nmasks*sizeof(fd_mask);
        if (mem_size < 2*sizeof(fd_set)) {
            mem_size = 2*sizeof(fd_set);
            nmasks = mem_size/(2*sizeof(fd_mask));
        }

        fdsets = malloc(sizeof(ethr_event_fdsets__)
                        + mem_size
                        - sizeof(fd_mask));
        if (!fdsets)
            ETHR_FATAL_ERROR__(ENOMEM);
        fdsets->rsetp = (fd_set *) (char *) &fdsets->mem[0];
        fdsets->esetp = (fd_set *) (char *) &fdsets->mem[nmasks];
        fdsets->mem_size = mem_size;
        e->fdsets = fdsets;
    }
#endif

    ETHR_MEMBAR(ETHR_StoreStore);
}
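/*
 * Illustration only (not part of the ethread sources): a minimal sketch of
 * the self-pipe wake-up pattern that setup_nonblocking_pipe() enables. A
 * signaller writes one byte to the write end; a waiter blocked in select()
 * on the read end wakes up and drains the pipe, which is why the read end
 * is made O_NONBLOCK above. All names below are hypothetical.
 */
#include <errno.h>
#include <string.h>
#include <sys/select.h>
#include <unistd.h>

struct pipe_event { int fd[2]; };     /* fd[0] = read end, fd[1] = write end */

/* Signaller side: a single byte written to fd[1] makes fd[0] readable. */
void pipe_event_signal(struct pipe_event *pe)
{
    ssize_t w;
    do {
        w = write(pe->fd[1], "!", 1);
    } while (w < 0 && errno == EINTR); /* EAGAIN is fine: pipe already non-empty */
}

/* Waiter side: block in select() until fd[0] becomes readable, then drain it. */
int pipe_event_wait(struct pipe_event *pe, struct timeval *tmo)
{
    fd_set rset;
    char buf[64];
    int res;

    FD_ZERO(&rset);
    FD_SET(pe->fd[0], &rset);
    res = select(pe->fd[0] + 1, &rset, NULL, NULL, tmo);
    if (res > 0)
        while (read(pe->fd[0], buf, sizeof(buf)) > 0)
            ;                          /* drain; read end is non-blocking */
    return res;                        /* 0 = timeout, <0 = error, >0 = woken */
}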
static ERTS_INLINE int
leader_update(ErtsThrPrgrData *tpd)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
    if (!tpd->leader) {
        /* Probably need to block... */
        block_thread(tpd);
    }
    else {
        ErtsThrPrgrVal current;
        int ix, chk_next_ix, umrefc_ix, my_ix, no_managed, waiting_unmanaged;
        erts_aint32_t lflgs;
        ErtsThrPrgrVal next;
        erts_aint_t refc;

        my_ix = tpd->id;

        if (tpd->leader_state.current == ERTS_THR_PRGR_VAL_WAITING) {
            /* Took over as leader from another thread */
            tpd->leader_state.current = read_nob(&erts_thr_prgr__.current);
            tpd->leader_state.next = tpd->leader_state.current;
            tpd->leader_state.next++;
            if (tpd->leader_state.next == ERTS_THR_PRGR_VAL_WAITING)
                tpd->leader_state.next = 0;
            tpd->leader_state.chk_next_ix = intrnl->misc.data.chk_next_ix;
            tpd->leader_state.umrefc_ix.waiting
                = intrnl->misc.data.umrefc_ix.waiting;
            tpd->leader_state.umrefc_ix.current =
                (int) erts_atomic32_read_nob(&intrnl->misc.data.umrefc_ix.current);

            if (tpd->confirmed == tpd->leader_state.current) {
                ErtsThrPrgrVal val = tpd->leader_state.current + 1;
                if (val == ERTS_THR_PRGR_VAL_WAITING)
                    val = 0;
                tpd->confirmed = val;
                set_mb(&intrnl->thr[my_ix].data.current, val);
            }
        }

        next = tpd->leader_state.next;

        waiting_unmanaged = 0;
        umrefc_ix = -1; /* Shut up annoying warning */

        chk_next_ix = tpd->leader_state.chk_next_ix;
        no_managed = intrnl->managed.no;
        ASSERT(0 <= chk_next_ix && chk_next_ix <= no_managed);
        /* Check managed threads */
        if (chk_next_ix < no_managed) {
            for (ix = chk_next_ix; ix < no_managed; ix++) {
                ErtsThrPrgrVal tmp;
                if (ix == my_ix)
                    continue;
                tmp = read_nob(&intrnl->thr[ix].data.current);
                if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
                    tpd->leader_state.chk_next_ix = ix;
                    ASSERT(erts_thr_progress_has_passed__(next, tmp));
                    goto done;
                }
            }
        }

        /* Check unmanaged threads */
        waiting_unmanaged = tpd->leader_state.umrefc_ix.waiting != -1;
        umrefc_ix = (waiting_unmanaged
                     ? tpd->leader_state.umrefc_ix.waiting
                     : tpd->leader_state.umrefc_ix.current);
        refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
        ASSERT(refc >= 0);
        if (refc != 0) {
            int new_umrefc_ix;

            if (waiting_unmanaged)
                goto done;

            new_umrefc_ix = (umrefc_ix + 1) & 0x1;
            tpd->leader_state.umrefc_ix.waiting = umrefc_ix;
            tpd->leader_state.chk_next_ix = no_managed;
            erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
                                  (erts_aint32_t) new_umrefc_ix);
            ETHR_MEMBAR(ETHR_StoreLoad);
            refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
            ASSERT(refc >= 0);
            waiting_unmanaged = 1;
            if (refc != 0)
                goto done;
        }

        /* Make progress */
        current = next;

        next++;
        if (next == ERTS_THR_PRGR_VAL_WAITING)
            next = 0;

        set_nob(&intrnl->thr[my_ix].data.current, next);
        set_mb(&erts_thr_prgr__.current, current);
        tpd->confirmed = next;
        tpd->leader_state.next = next;
        tpd->leader_state.current = current;

#if ERTS_THR_PRGR_PRINT_VAL
        if (current % 1000 == 0)
            erts_fprintf(stderr, "%b64u\n", current);
#endif
        handle_wakeup_requests(current);

        if (waiting_unmanaged) {
            waiting_unmanaged = 0;
            tpd->leader_state.umrefc_ix.waiting = -1;
            erts_atomic32_read_band_nob(&intrnl->misc.data.lflgs,
                                        ~ERTS_THR_PRGR_LFLG_WAITING_UM);
        }
        tpd->leader_state.chk_next_ix = 0;

    done:

        if (tpd->active) {
            lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
            if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
                (void) block_thread(tpd);
        }
        else {
            int force_wakeup_check = 0;
            erts_aint32_t set_flags = ERTS_THR_PRGR_LFLG_NO_LEADER;
            tpd->leader = 0;
            tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
#if ERTS_THR_PRGR_PRINT_LEADER
            erts_fprintf(stderr, "L <- %d\n", tpd->id);
#endif
            ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);

            intrnl->misc.data.umrefc_ix.waiting
                = tpd->leader_state.umrefc_ix.waiting;
            if (waiting_unmanaged)
                set_flags |= ERTS_THR_PRGR_LFLG_WAITING_UM;

            lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
                                                set_flags);
            lflgs |= set_flags;
            if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
                lflgs = block_thread(tpd);

            if (waiting_unmanaged) {
                /* Need to check umrefc again */
                ETHR_MEMBAR(ETHR_StoreLoad);
                refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
                if (refc == 0) {
                    /* Need to force wakeup check */
                    force_wakeup_check = 1;
                }
            }

            if ((force_wakeup_check
                 || ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
                               | ERTS_THR_PRGR_LFLG_WAITING_UM
                               | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
                     == ERTS_THR_PRGR_LFLG_NO_LEADER))
                && got_sched_wakeups()) {
                /* Someone needs to make progress */
                wakeup_managed(0);
            }
        }
    }

    return tpd->leader;
}
static ETHR_INLINE int
wait__(ethr_event *e, int spincount, ethr_sint64_t timeout)
{
    unsigned sc = spincount;
    int res;
    ethr_sint32_t val;
    int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
    ethr_sint64_t time = 0; /* SHUT UP annoying faulty warning... */
    struct timespec ts, *tsp;
#ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
    ethr_sint64_t start = 0; /* SHUT UP annoying faulty warning... */
#endif

    if (spincount < 0)
        ETHR_FATAL_ERROR__(EINVAL);

    if (timeout < 0) {
        tsp = NULL;
    }
    else {
#ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
        start = ethr_get_monotonic_time();
#endif
        tsp = &ts;
        time = timeout;
        if (spincount == 0) {
            val = ethr_atomic32_read(&e->futex);
            if (val == ETHR_EVENT_ON__)
                goto return_event_on;
            goto set_timeout;
        }
    }

    while (1) {
        while (1) {
            val = ethr_atomic32_read(&e->futex);
            if (val == ETHR_EVENT_ON__)
                goto return_event_on;
            if (sc == 0)
                break;
            sc--;
            ETHR_SPIN_BODY;
            if (--until_yield == 0) {
                until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
                res = ETHR_YIELD();
                if (res != 0)
                    ETHR_FATAL_ERROR__(res);
            }
        }

        if (timeout >= 0) {
#ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
            time = timeout - (ethr_get_monotonic_time() - start);
#endif
        set_timeout:
            if (time <= 0) {
                val = ethr_atomic32_read(&e->futex);
                if (val == ETHR_EVENT_ON__)
                    goto return_event_on;
                return ETIMEDOUT;
            }
            ts.tv_sec = time / (1000*1000*1000);
            ts.tv_nsec = time % (1000*1000*1000);
        }

        if (val != ETHR_EVENT_OFF_WAITER__) {
            val = ethr_atomic32_cmpxchg(&e->futex,
                                        ETHR_EVENT_OFF_WAITER__,
                                        ETHR_EVENT_OFF__);
            if (val == ETHR_EVENT_ON__)
                goto return_event_on;
            ETHR_ASSERT(val == ETHR_EVENT_OFF__);
        }

        res = ETHR_FUTEX__(&e->futex,
                           ETHR_FUTEX_WAIT__,
                           ETHR_EVENT_OFF_WAITER__,
                           tsp);
        switch (res) {
        case EINTR:
        case ETIMEDOUT:
            return res;
        case 0:
        case EWOULDBLOCK:
            break;
        default:
            ETHR_FATAL_ERROR__(res);
        }
    }

return_event_on:

    ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);

    return 0;
}
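/*
 * Illustration only: a standalone sketch of the three-state protocol the futex
 * variant of wait__() above relies on (ON / OFF / OFF_WAITER), showing the
 * signalling side that the extract leaves implicit. The waiter CASes
 * OFF -> OFF_WAITER before sleeping; a signaller swaps the word to ON and only
 * issues a FUTEX_WAKE when it actually displaced a waiter. Linux-specific
 * sketch; the EVT_* constants and function names are hypothetical, not the
 * ethread API.
 */
#include <limits.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

#define EVT_ON          1
#define EVT_OFF         0
#define EVT_OFF_WAITER  (-1)

typedef struct { _Atomic int32_t word; } futex_event;

static long futex_op(_Atomic int32_t *uaddr, int op, int32_t val)
{
    return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Signaller: publish ON, and wake sleepers only if a waiter had announced itself. */
void futex_event_set(futex_event *e)
{
    int32_t prev = atomic_exchange(&e->word, EVT_ON); /* full barrier */
    if (prev == EVT_OFF_WAITER)
        futex_op(&e->word, FUTEX_WAKE, INT_MAX);       /* wake all waiters */
}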
static ETHR_INLINE int
wait__(ethr_event *e, int spincount, ethr_sint64_t timeout)
{
    int sc = spincount;
    ethr_sint32_t val;
    int res, ulres;
    int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
    ethr_sint64_t time = 0; /* SHUT UP annoying faulty warning... */
#ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
    ethr_sint64_t start = 0; /* SHUT UP annoying faulty warning... */
#endif
#ifdef ETHR_HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
    struct timespec cond_timeout;
#endif

    val = ethr_atomic32_read(&e->state);
    if (val == ETHR_EVENT_ON__)
        goto return_event_on;

    if (timeout < 0) {
        if (spincount == 0)
            goto set_event_off_waiter;
    }
    else if (timeout == 0)
        return ETIMEDOUT;
    else {
        time = timeout;
        switch (e->fd[0]) {
        case ETHR_EVENT_INVALID_FD__:
#ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
            start = ethr_get_monotonic_time();
#endif
            setup_nonblocking_pipe(e);
            break;
#ifdef ETHR_HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
        case ETHR_EVENT_COND_TIMEDWAIT__:
            time += ethr_get_monotonic_time();
            cond_timeout.tv_sec = time / (1000*1000*1000);
            cond_timeout.tv_nsec = time % (1000*1000*1000);
            if (spincount == 0)
                goto set_event_off_waiter;
            break;
#endif
        default:
            /* Already initialized pipe... */
            if (spincount == 0)
                goto set_select_timeout;
#ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
            start = ethr_get_monotonic_time();
#endif
            break;
        }
    }

    if (spincount < 0)
        ETHR_FATAL_ERROR__(EINVAL);

    while (1) {
        val = ethr_atomic32_read(&e->state);
        if (val == ETHR_EVENT_ON__)
            goto return_event_on;

        if (sc == 0)
            break;

        sc--;

        ETHR_SPIN_BODY;

        if (--until_yield == 0) {
            until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
            res = ETHR_YIELD();
            if (res != 0)
                ETHR_FATAL_ERROR__(res);
        }
    }

    if (timeout < 0
#ifdef ETHR_HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
        || e->fd[0] == ETHR_EVENT_COND_TIMEDWAIT__
#endif
        ) {

    set_event_off_waiter:

        if (val != ETHR_EVENT_OFF_WAITER__) {
            ethr_sint32_t act;
            act = ethr_atomic32_cmpxchg(&e->state,
                                        ETHR_EVENT_OFF_WAITER__,
                                        val);
            if (act == ETHR_EVENT_ON__)
                goto return_event_on;
            ETHR_ASSERT(act == val);
        }

        res = pthread_mutex_lock(&e->mtx);
        if (res != 0)
            ETHR_FATAL_ERROR__(res);

        while (1) {

            val = ethr_atomic32_read(&e->state);
            if (val == ETHR_EVENT_ON__) {
                ETHR_ASSERT(res == 0);
                ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
                break;
            }

#ifdef ETHR_HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
            if (timeout > 0) {
                res = pthread_cond_timedwait(&e->cnd, &e->mtx, &cond_timeout);
                if (res == EINTR || res == ETIMEDOUT)
                    break;
            }
            else
#endif
            {
                res = pthread_cond_wait(&e->cnd, &e->mtx);
                if (res == EINTR)
                    break;
            }
            if (res != 0)
                ETHR_FATAL_ERROR__(res);
        }

        ulres = pthread_mutex_unlock(&e->mtx);
        if (ulres != 0)
            ETHR_FATAL_ERROR__(ulres);

    }
    else {
        int fd;
        int sres;
        ssize_t rres;
#ifndef __DARWIN__
        fd_set rset, eset;
#endif
        fd_set *rsetp, *esetp;
        struct timeval select_timeout;

#ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
        time -= ethr_get_monotonic_time() - start;
        if (time <= 0)
            return ETIMEDOUT;
#endif

    set_select_timeout:

        ETHR_ASSERT(time > 0);

        /*
         * Timeout in nano-seconds, but we can only wait
         * for micro-seconds...
         */
        time = ((time - 1) / 1000) + 1;

        select_timeout.tv_sec = time / (1000*1000);
        select_timeout.tv_usec = time % (1000*1000);

        ETHR_ASSERT(val != ETHR_EVENT_ON__);

        fd = e->fd[0];

        /* Cleanup pipe... */
        do {
            char buf[64];
            rres = read(fd, buf, sizeof(buf));
        } while (rres > 0 || (rres < 0 && errno == EINTR));
        if (rres < 0 && errno != EAGAIN && errno != EWOULDBLOCK)
            ETHR_FATAL_ERROR__(errno);

        /*
         * Need to verify that state is still off
         * after cleaning the pipe...
         */
        if (val == ETHR_EVENT_OFF_WAITER_SELECT__) {
            val = ethr_atomic32_read(&e->state);
            if (val == ETHR_EVENT_ON__)
                goto return_event_on;
        }
        else {
            ethr_sint32_t act;
            act = ethr_atomic32_cmpxchg(&e->state,
                                        ETHR_EVENT_OFF_WAITER_SELECT__,
                                        val);
            if (act == ETHR_EVENT_ON__)
                goto return_event_on;
            ETHR_ASSERT(act == val);
        }

#ifdef __DARWIN__
        rsetp = e->fdsets->rsetp;
        esetp = e->fdsets->esetp;
        memset((void *) &e->fdsets->mem[0], 0, e->fdsets->mem_size);
#else
        FD_ZERO(&rset);
        FD_ZERO(&eset);
        rsetp = &rset;
        esetp = &eset;
#endif

        FD_SET(fd, rsetp);
        FD_SET(fd, esetp);

        sres = select(fd + 1, rsetp, NULL, esetp, &select_timeout);
        if (sres == 0)
            res = ETIMEDOUT;
        else {
            res = EINTR;
            if (sres < 0 && errno != EINTR)
                ETHR_FATAL_ERROR__(errno);
            /* else:
             *   Event is *probably* set, but it can be a
             *   lingering writer. That is, it is important
             *   that we verify that it actually is set. If
             *   it isn't, return EINTR (spurious wakeup).
             */
        }

        val = ethr_atomic32_read(&e->state);
        if (val == ETHR_EVENT_ON__)
            goto return_event_on;

    }

    return res;

return_event_on:

    ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);

    return 0;
}
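/*
 * Illustration only: a sketch of the condition-variable wake-up path that the
 * pthread variant of wait__() above pairs with, under the assumption that the
 * signaller publishes the ON state first and only takes the mutex when the
 * previous state indicated a waiter sleeping on the condition variable. The
 * waiter re-checks the state under the mutex (as in the loop above), so a
 * wake-up issued while holding the mutex cannot be lost. Hypothetical names;
 * not the ethread implementation.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

enum { CEVT_OFF = 0, CEVT_ON = 1, CEVT_OFF_WAITER = -1 };

typedef struct {
    _Atomic int32_t state;      /* ON / OFF / OFF_WAITER, as in the code above */
    pthread_mutex_t mtx;
    pthread_cond_t  cnd;
} cond_event;

/* Signaller: make the state visible, then wake anyone blocked in pthread_cond_wait(). */
void cond_event_set(cond_event *e)
{
    int32_t prev = atomic_exchange(&e->state, CEVT_ON);
    if (prev == CEVT_OFF_WAITER) {
        /* The mutex orders this broadcast after the waiter's state re-check. */
        pthread_mutex_lock(&e->mtx);
        pthread_cond_broadcast(&e->cnd);
        pthread_mutex_unlock(&e->mtx);
    }
}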
static ETHR_INLINE int
wait(ethr_event *e, int spincount, ethr_sint64_t timeout)
{
    DWORD code, tmo;
    int sc, res, until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;

    if (timeout < 0)
        tmo = INFINITE;
    else if (timeout == 0) {
        ethr_sint32_t state = ethr_atomic32_read(&e->state);
        if (state == ETHR_EVENT_ON__) {
            ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
            return 0;
        }
        return ETIMEDOUT;
    }
    else {
        /*
         * Timeout in nano-seconds, but we can only
         * wait for milli-seconds...
         */
        tmo = (DWORD) (timeout - 1) / (1000*1000) + 1;
    }

    if (spincount < 0)
        ETHR_FATAL_ERROR__(EINVAL);

    sc = spincount;

    while (1) {
        ethr_sint32_t state;
        while (1) {
            state = ethr_atomic32_read(&e->state);
            if (state == ETHR_EVENT_ON__) {
                ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
                return 0;
            }
            if (sc == 0)
                break;
            sc--;
            ETHR_SPIN_BODY;
            if (--until_yield == 0) {
                until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
                res = ETHR_YIELD();
                if (res != 0)
                    ETHR_FATAL_ERROR__(res);
            }
        }

        if (state != ETHR_EVENT_OFF_WAITER__) {
            state = ethr_atomic32_cmpxchg(&e->state,
                                          ETHR_EVENT_OFF_WAITER__,
                                          ETHR_EVENT_OFF__);
            if (state == ETHR_EVENT_ON__)
                return 0;
            ETHR_ASSERT(state == ETHR_EVENT_OFF__);
        }

        code = WaitForSingleObject(e->handle, tmo);
        if (code == WAIT_TIMEOUT)
            return ETIMEDOUT;
        if (code != WAIT_OBJECT_0)
            ETHR_FATAL_ERROR__(ethr_win_get_errno__());
    }
}