int ti_threadgroup_fork(ti_threadgroup_t *tg, int16_t ext_tid, void **bcast_val)
{
    if (tg->tid_map[ext_tid] == 0) {
        tg->envelope = bcast_val ? *bcast_val : NULL;
        cpu_sfence();
        tg->forked = 1;
        tg->group_sense = tg->thread_sense[0]->sense;

        // if it's possible that threads are sleeping, signal them
        if (tg->sleep_threshold) {
            uv_mutex_lock(&tg->alarm_lock);
            uv_cond_broadcast(&tg->alarm);
            uv_mutex_unlock(&tg->alarm_lock);
        }
    }
    else {
        // spin up to threshold cycles (count sheep), then sleep
        uint64_t spin_cycles, spin_start = rdtsc();
        while (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
            if (tg->sleep_threshold) {
                spin_cycles = rdtsc() - spin_start;
                if (spin_cycles >= tg->sleep_threshold) {
                    uv_mutex_lock(&tg->alarm_lock);
                    if (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
                        uv_cond_wait(&tg->alarm, &tg->alarm_lock);
                    }
                    uv_mutex_unlock(&tg->alarm_lock);
                    spin_start = rdtsc();
                    continue;
                }
            }
            cpu_pause();
        }
        cpu_lfence();
        if (bcast_val)
            *bcast_val = tg->envelope;
    }
    return 0;
}
static PyObject *
Condition_func_broadcast(Condition *self)
{
    RAISE_IF_NOT_INITIALIZED(self, NULL);

    /* Release the GIL around the libuv call. */
    Py_BEGIN_ALLOW_THREADS
    uv_cond_broadcast(&self->uv_condition);
    Py_END_ALLOW_THREADS

    Py_RETURN_NONE;
}
JL_DLLEXPORT void jl_wakeup_thread(int16_t tid)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    /* ensure thread tid is awake if necessary */
    if (ptls->tid != tid && !_threadedregion && tid != -1) {
        uv_mutex_lock(&sleep_lock);
        uv_cond_broadcast(&sleep_alarm); // TODO: make this uv_cond_signal / just wake up correct thread
        uv_mutex_unlock(&sleep_lock);
    }
    if (_threadedregion && jl_uv_mutex.owner != jl_thread_self())
        jl_wake_libuv();
    else
        uv_stop(jl_global_event_loop());
}
void Future::internal_set(ScopedMutex& lock) {
  is_set_ = true;
  // Wake any threads blocked on this future's condition variable
  uv_cond_broadcast(&cond_);
  if (callback_) {
    if (loop_.load() == NULL) {
      // No event loop attached: run the callback inline, outside the lock
      Callback callback = callback_;
      void* data = data_;
      lock.unlock();
      callback(CassFuture::to(this), data);
    } else {
      run_callback_on_work_thread();
    }
  }
}
int ti_threadgroup_fork(ti_threadgroup_t *tg, int16_t ext_tid, void **bcast_val)
{
    if (tg->tid_map[ext_tid] == 0) {
        tg->envelope = bcast_val ? *bcast_val : NULL;
        cpu_sfence();
        tg->forked = 1;
        tg->group_sense = tg->thread_sense[0]->sense;

        // if it's possible that threads are sleeping, signal them
        if (tg->sleep_threshold) {
            uv_mutex_lock(&tg->alarm_lock);
            uv_cond_broadcast(&tg->alarm);
            uv_mutex_unlock(&tg->alarm_lock);
        }
    }
    else {
        // spin up to threshold ns (count sheep), then sleep
        uint64_t spin_ns;
        uint64_t spin_start = 0;
        while (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
            if (tg->sleep_threshold) {
                if (!spin_start) {
                    // Lazily initialize spin_start since uv_hrtime is expensive
                    spin_start = uv_hrtime();
                    continue;
                }
                spin_ns = uv_hrtime() - spin_start;
                // In case uv_hrtime is not monotonic, we'll sleep earlier
                if (spin_ns >= tg->sleep_threshold) {
                    uv_mutex_lock(&tg->alarm_lock);
                    if (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
                        uv_cond_wait(&tg->alarm, &tg->alarm_lock);
                    }
                    uv_mutex_unlock(&tg->alarm_lock);
                    spin_start = 0;
                    continue;
                }
            }
            cpu_pause();
        }
        cpu_lfence();
        if (bcast_val)
            *bcast_val = tg->envelope;
    }
    return 0;
}
static mrb_value
mrb_uv_cond_broadcast(mrb_state *mrb, mrb_value self)
{
  return uv_cond_broadcast((uv_cond_t*)mrb_uv_get_ptr(mrb, self, &mrb_uv_cond_type)), self;
}
void notify_all() { uv_cond_broadcast(&cond_); }
/* The caller must hold page descriptor lock. */
void pg_cache_wake_up_waiters_unsafe(struct rrdeng_page_cache_descr *descr)
{
    if (descr->waiters)
        uv_cond_broadcast(&descr->cond);
}
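Most of the snippets above follow the same shape: waiters loop on a predicate inside uv_cond_wait() while holding a uv_mutex_t, and the notifier updates that predicate under the same mutex before calling uv_cond_broadcast(). Below is a minimal, self-contained sketch of that pattern; it is not taken from any of the projects above, and the names (mutex, cond, ready, worker) are purely illustrative.

#include <stdio.h>
#include <uv.h>

/* Shared state guarded by the mutex. */
static uv_mutex_t mutex;
static uv_cond_t cond;
static int ready = 0;

/* Worker: block until the main thread flips `ready` and broadcasts. */
static void worker(void *arg) {
    int id = *(int *)arg;
    uv_mutex_lock(&mutex);
    while (!ready)                  /* loop guards against spurious wakeups */
        uv_cond_wait(&cond, &mutex);
    uv_mutex_unlock(&mutex);
    printf("worker %d woke up\n", id);
}

int main(void) {
    uv_thread_t threads[4];
    int ids[4];

    uv_mutex_init(&mutex);
    uv_cond_init(&cond);

    for (int i = 0; i < 4; i++) {
        ids[i] = i;
        uv_thread_create(&threads[i], worker, &ids[i]);
    }

    /* Publish the state change under the lock, then wake every waiter. */
    uv_mutex_lock(&mutex);
    ready = 1;
    uv_cond_broadcast(&cond);
    uv_mutex_unlock(&mutex);

    for (int i = 0; i < 4; i++)
        uv_thread_join(&threads[i]);

    uv_cond_destroy(&cond);
    uv_mutex_destroy(&mutex);
    return 0;
}

The while loop around uv_cond_wait() is what makes broadcast safe to use: each woken thread rechecks the predicate before proceeding, so spurious or extra wakeups are harmless, which is the same reason the threadgroup code above rechecks group_sense after reacquiring alarm_lock.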