/*
 * Enter the thread-progress "blocked" state on behalf of 'tpd'.
 *
 * Sets ERTS_THR_PRGR_LFLG_BLOCK (racing with any other would-be blocker)
 * and clears the NOT_BLOCKING flag in 'block_count'.  When 'wait' is
 * non-zero, installs a wakeup event and sleeps until block_count reaches
 * zero.  Returns the block count as observed after the flag clear
 * (0 when fully blocked and 'wait' was set; always 0 for a nested call).
 */
static erts_aint32_t
thr_progress_block(ErtsThrPrgrData *tpd, int wait)
{
    erts_tse_t *event = NULL; /* Remove erroneous warning... sigh... */
    erts_aint32_t lflgs, bc;

    /* Nested block by the same thread: already blocking, nothing to do. */
    if (tpd->is_blocking++)
        return (erts_aint32_t) 0;

    /*
     * Compete for the BLOCK flag.  If another thread already owns it,
     * park in block_thread() until it unblocks, then retry.
     */
    while (1) {
        lflgs = erts_atomic32_read_bor_nob(&intrnl->misc.data.lflgs,
                                           ERTS_THR_PRGR_LFLG_BLOCK);
        if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
            block_thread(tpd);
        else
            break;
    }

#if ERTS_THR_PRGR_PRINT_BLOCKERS
    erts_fprintf(stderr, "block(%d)\n", tpd->id);
#endif

    /* We own the BLOCK flag now, so no blocker event may be installed. */
    ASSERT(ERTS_AINT_NULL
           == erts_atomic_read_nob(&intrnl->misc.data.blocker_event));

    if (wait) {
        /*
         * Publish our wakeup event before clearing NOT_BLOCKING below.
         * NOTE(review): presumably the threads that decrement block_count
         * use blocker_event to wake us -- confirm in the unblock path.
         */
        event = erts_tse_fetch();
        erts_tse_reset(event);
        erts_atomic_set_nob(&intrnl->misc.data.blocker_event,
                            (erts_aint_t) event);
    }
    if (tpd->is_managed)
        /* Don't count ourselves among the threads we wait for. */
        erts_atomic32_dec_nob(&intrnl->misc.data.block_count);
    /* Full-barrier read-and-clear of the NOT_BLOCKING flag. */
    bc = erts_atomic32_read_band_mb(&intrnl->misc.data.block_count,
                                    ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
    bc &= ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING;
    if (wait) {
        /*
         * Sleep until block_count hits zero.  Be prepared for spurious
         * wakeups: re-read the count (with acquire barrier) after every
         * wakeup and go back to sleep if it is still non-zero.
         */
        while (bc != 0) {
            erts_tse_wait(event);
            erts_tse_reset(event);
            bc = erts_atomic32_read_acqb(&intrnl->misc.data.block_count);
        }
    }
    return bc;
}
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * On return, all locks in 'need_locks' are held (any we could not grab
 * ourselves have been transferred to us by the releasing threads).
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 */
static void
wait_for_locks(Process *p,
               erts_pix_lock_t *pixlck,
               ErtsProcLocks locks,      /* full set caller ends up holding; only consistency-checked here */
               ErtsProcLocks need_locks, /* locks still to be acquired */
               ErtsProcLocks olflgs)     /* lock flags seen by caller; not used in this path */
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
    erts_tse_t *wtr;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);

    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_lock(pix_lock);
#endif
    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to acquire locks one at a time in lock order and set wait flag */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs == 0)
        /* Got them all without waiting. */
        erts_pix_unlock(pix_lock);
    else {
        /* We didn't get them all; need to wait... */

        ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

        /* Wait flag is set while the pix lock is still held, before we
         * allow anyone else at the waiter. */
        erts_atomic32_set_nob(&wtr->uaflgs, 1);
        erts_pix_unlock(pix_lock);

        while (1) {
            int res;
            erts_tse_reset(wtr);

            /* uaflgs == 0 signals that all needed locks have already
             * been handed to us; don't sleep in that case. */
            if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
                break;

            /*
             * Wait for needed locks. When we are woken all needed locks
             * have been acquired by other threads and transferred to us.
             * However, we need to be prepared for spurious wakeups.
             */
            do {
                res = erts_tse_wait(wtr); /* might return EINTR */
            } while (res != 0);
        }

        ASSERT(wtr->uflgs == 0);
    }

    /* Sanity: every lock in 'locks' is now held on p. */
    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr);
}
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * On return, all locks in 'need_locks' are held (any we could not grab
 * ourselves have been transferred to us by the releasing threads).
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 */
static void
wait_for_locks(Process *p,
               erts_pix_lock_t *pixlck,
               ErtsProcLocks locks,      /* full set caller ends up holding; only consistency-checked here */
               ErtsProcLocks need_locks, /* locks still to be acquired */
               ErtsProcLocks olflgs)     /* lock flags seen by caller; not used in this path */
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
    erts_tse_t *wtr;
    erts_proc_lock_queues_t *qs;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);

    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_lock(pix_lock);
#endif
    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

    /* Each waiter carries a spare queue structure in wtr->udata. */
    qs = wtr->udata;
    ASSERT(qs);

    /* Provide the process with waiter queues, if it doesn't have one. */
    if (!p->lock.queues) {
        qs->next = NULL;
        p->lock.queues = qs;
    }
    else {
        /* Process already has queues in use; stash ours on the spare list. */
        qs->next = p->lock.queues->next;
        p->lock.queues->next = qs;
    }

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to acquire locks one at a time in lock order and set wait flag */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs) {
        /* We didn't get them all; need to wait... */

        ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

        /* Wait flag is set while the pix lock is still held. */
        erts_atomic32_set_nob(&wtr->uaflgs, 1);
        erts_pix_unlock(pix_lock);

        while (1) {
            int res;
            erts_tse_reset(wtr);

            /* uaflgs == 0 signals that all needed locks have already
             * been handed to us; don't sleep in that case. */
            if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
                break;

            /*
             * Wait for needed locks. When we are woken all needed locks
             * have been acquired by other threads and transferred to us.
             * However, we need to be prepared for spurious wakeups.
             */
            do {
                res = erts_tse_wait(wtr); /* might return EINTR */
            } while (res != 0);
        }

        /* Re-take the pix lock; the queue recovery below is done under it.
         * (On the no-wait path it was never released.) */
        erts_pix_lock(pix_lock);

        ASSERT(wtr->uflgs == 0);
    }

    /*
     * Recover some queues to store in the waiter.
     */
    ERTS_LC_ASSERT(p->lock.queues);
    if (p->lock.queues->next) {
        /* Take a spare queue structure, not the one in use. */
        qs = p->lock.queues->next;
        p->lock.queues->next = qs->next;
    }
    else {
        /* Last one; the process lock no longer needs queues. */
        qs = p->lock.queues;
        p->lock.queues = NULL;
    }
    wtr->udata = qs;

    erts_pix_unlock(pix_lock);

    /* Sanity: every lock in 'locks' is now held on p. */
    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr, 0);
}
/*
 * Dequeue the next async job from thread queue 'q', blocking on 'tse'
 * until one is available.  Stores the job's prepared-enqueue handle in
 * '*prep_enq' and (when ERTS_USE_ASYNC_READY_Q) hands collected
 * finalize-dequeue state over to the job for later completion.
 */
static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
                                        erts_tse_t *tse,
                                        ErtsThrQPrepEnQ_t **prep_enq)
{
#if ERTS_USE_ASYNC_READY_Q
    int saved_fin_deq = 0;      /* 1 iff 'fin_deq' holds saved state */
    ErtsThrQFinDeQ_t fin_deq;
#endif
#ifdef USE_VM_PROBES
    int len;
#endif

    while (1) {
        ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q);
        if (a) {
#if ERTS_USE_ASYNC_READY_Q
            /* Attach pending finalize-dequeue work to the job itself. */
            *prep_enq = a->q.prep_enq;
            erts_thr_q_get_finalize_dequeue_data(q, &a->q.fin_deq);
            if (saved_fin_deq)
                erts_thr_q_append_finalize_dequeue_data(&a->q.fin_deq,
                                                        &fin_deq);
#endif
#ifdef USE_LTTNG_VM_TRACEPOINTS
            if (LTTNG_ENABLED(aio_pool_get)) {
                lttng_decl_portbuf(port_str);
                int length = erts_thr_q_length_dirty(q);
                lttng_portid_to_str(a->port, port_str);
                LTTNG2(aio_pool_get, port_str, length);
            }
#endif
#ifdef USE_VM_PROBES
            if (DTRACE_ENABLED(aio_pool_get)) {
                DTRACE_CHARBUF(port_str, 16);
                erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
                              "%T", a->port);
                /* DTRACE TODO: Get the length from erts_thr_q_dequeue() ? */
                len = -1;
                DTRACE2(aio_pool_get, port_str, len);
            }
#endif
            return a;
        }

        /* Queue looked empty: clean it; if still dirty, just retry the
         * dequeue, otherwise prepare to sleep. */
        if (ERTS_THR_Q_DIRTY != erts_thr_q_clean(q)) {
            ErtsThrQFinDeQ_t tmp_fin_deq;

            /* NOTE(review): event is reset before the inspect/wait below
             * to avoid lost wakeups -- relies on erts_tse reset/wait
             * semantics (a set after this point makes the wait return). */
            erts_tse_reset(tse);

#if ERTS_USE_ASYNC_READY_Q
        chk_fin_deq:
            /* Collect any finalize-dequeue work into local 'fin_deq'. */
            if (erts_thr_q_get_finalize_dequeue_data(q, &tmp_fin_deq)) {
                if (!saved_fin_deq) {
                    erts_thr_q_finalize_dequeue_state_init(&fin_deq);
                    saved_fin_deq = 1;
                }
                erts_thr_q_append_finalize_dequeue_data(&fin_deq,
                                                        &tmp_fin_deq);
            }
#endif

            switch (erts_thr_q_inspect(q, 1)) {
            case ERTS_THR_Q_DIRTY:
                /* Work appeared meanwhile; retry the dequeue. */
                break;
            case ERTS_THR_Q_NEED_THR_PRGR:
#ifdef ERTS_SMP
            {
                ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q);
                erts_thr_progress_wakeup(NULL, prgr);
                /*
                 * We do no dequeue finalizing in hope that a new async
                 * job will arrive before we are woken due to thread
                 * progress...
                 */
                erts_tse_wait(tse);
                break;
            }
#endif
            /* When !ERTS_SMP the NEED_THR_PRGR case deliberately falls
             * through to the CLEAN handling below. */
            case ERTS_THR_Q_CLEAN:
#if ERTS_USE_ASYNC_READY_Q
                if (saved_fin_deq) {
                    if (erts_thr_q_finalize_dequeue(&fin_deq))
                        goto chk_fin_deq; /* more to finalize; re-collect */
                    else
                        saved_fin_deq = 0;
                }
#endif
                erts_tse_wait(tse);
                break;
            default:
                ASSERT(0); /* erts_thr_q_inspect() returned unknown state */
                break;
            }
        }
    }
}