/*
 * Give up the GVL so another waiting thread can run, then re-acquire it.
 *
 * Protocol (all flag accesses are under vm->gvl.lock):
 *   - If another thread is already mid-yield (wait_yield set), just wait
 *     for that handoff to finish and go straight to re-acquisition.
 *   - If someone is waiting for the GVL, set need_yield/wait_yield and
 *     block on switch_cond until the waiter has taken the GVL and
 *     cleared need_yield (see gvl_acquire_common).
 *   - Otherwise nobody wants the GVL: drop the lock and sched_yield()
 *     to give the scheduler a chance anyway.
 */
static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);

    gvl_release_common(vm);

    /* Another thread is already processing a GVL yield. */
    if (UNLIKELY(vm->gvl.wait_yield)) {
        while (vm->gvl.wait_yield)
            native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
        goto acquire;
    }

    if (vm->gvl.waiting > 0) {
        /* Wait until another waiting thread has taken the GVL. */
        vm->gvl.need_yield = 1;
        vm->gvl.wait_yield = 1;
        while (vm->gvl.need_yield)
            native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
        vm->gvl.wait_yield = 0;
    }
    else {
        /* No waiters: briefly release the lock and let the OS scheduler
         * pick another runnable thread. */
        native_mutex_unlock(&vm->gvl.lock);
        sched_yield();
        native_mutex_lock(&vm->gvl.lock);
    }

    /* Wake any threads parked above in the wait_yield loop. */
    native_cond_broadcast(&vm->gvl.switch_wait_cond);
  acquire:
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
/*
 * Take the GVL, blocking on vm->gvl.cond while another thread holds it.
 * Caller must hold vm->gvl.lock (see gvl_yield, which calls this with
 * the lock held).
 *
 * Also participates in the yield handshake: if the releasing thread set
 * need_yield, signal switch_cond so it knows the handoff completed.
 */
static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {
        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
            /*
             * Wake up timer thread iff timer thread is slept.
             * When timer thread is polling mode, we don't want to
             * make confusing timer thread interval time.
             */
            rb_thread_wakeup_timer_thread_low();
        }

        while (vm->gvl.acquired) {
            native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
        }

        vm->gvl.waiting--;

        if (vm->gvl.need_yield) {
            /* Acknowledge the yielding thread waiting on switch_cond. */
            vm->gvl.need_yield = 0;
            native_cond_signal(&vm->gvl.switch_cond);
        }
    }

    vm->gvl.acquired = 1;
}
/*
 * Take the GVL, blocking on vm->gvl.cond while another thread holds it.
 * Caller must hold vm->gvl.lock.
 *
 * Variant that unconditionally wakes the timer thread into polling mode
 * when this thread becomes the first waiter (compare the sibling version
 * using rb_thread_wakeup_timer_thread_low()).
 */
static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {
        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
            /* transit to polling mode */
            rb_thread_wakeup_timer_thread();
        }

        while (vm->gvl.acquired) {
            native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
        }

        vm->gvl.waiting--;

        if (vm->gvl.need_yield) {
            /* Acknowledge the yielding thread waiting on switch_cond. */
            vm->gvl.need_yield = 0;
            native_cond_signal(&vm->gvl.switch_cond);
        }
    }

    vm->gvl.acquired = 1;
}
/*
 * Acquire the GVL for thread th.
 *
 * With GVL_SIMPLE_LOCK the GVL is just a mutex.  Otherwise a FIFO wait
 * queue is maintained: the thread pushes itself onto the waiting list
 * and sleeps on its per-thread condvar until the GVL is free AND it is
 * at the head of the queue (waiting_threads == th), which gives fair,
 * ordered handoff.
 */
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
#if GVL_SIMPLE_LOCK
    native_mutex_lock(&vm->gvl.lock);
#else
    native_mutex_lock(&vm->gvl.lock);
    if (vm->gvl.waiting > 0 || vm->gvl.acquired != 0) {
        if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): sleep\n", th);
        gvl_waiting_push(vm, th);
        if (GVL_DEBUG) gvl_show_waiting_threads(vm);

        /* Sleep until the GVL is free and we are first in the queue. */
        while (vm->gvl.acquired != 0 || vm->gvl.waiting_threads != th) {
            native_cond_wait(&th->native_thread_data.gvl_cond, &vm->gvl.lock);
        }
        gvl_waiting_shift(vm, th);
    }
    else {
        /* do nothing */
    }
    vm->gvl.acquired = 1;
    native_mutex_unlock(&vm->gvl.lock);
#endif
    if (GVL_DEBUG) gvl_show_waiting_threads(vm);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}
/*
 * Acquire the write lock of a preferring-writers rwlock.
 *
 * Takes the internal mutex (held until the matching unlock) and, if
 * readers are active, registers as a waiting writer and sleeps on
 * no_active_readers until they drain.  Returns 0.
 */
int rw_pr_wrlock(rw_pr_lock_t *rwlock)
{
    native_mutex_lock(&rwlock->lock);

    if (rwlock->active_readers != 0) {
        /* There are active readers. We have to wait until they are gone. */
        rwlock->writers_waiting_readers++;

        while (rwlock->active_readers != 0)
            native_cond_wait(&rwlock->no_active_readers, &rwlock->lock);

        rwlock->writers_waiting_readers--;
    }

    /*
       We own 'lock' mutex so there is no active writers.
       Also there are no active readers.
       This means that we can grant wr-lock.
       Not releasing 'lock' mutex until unlock will block
       both requests for rd and wr-locks.
       Set 'active_writer' flag to simplify unlock.

       Thanks to the fact wr-lock/unlock in the absence of
       contention from readers is essentially mutex lock/unlock
       with a few simple checks make this rwlock implementation
       wr-lock optimized.
    */
    rwlock->active_writer= TRUE;
#ifdef SAFE_MUTEX
    rwlock->writer_thread= pthread_self();
#endif
    return 0;
}
/*
 * Put the current Ruby thread to sleep, releasing the GVL for the
 * duration.  With timeout_tv == NULL sleeps until signalled; otherwise
 * sleeps at most the given relative timeout.
 *
 * The unblock function (ubf_pthread_cond_signal) is installed under
 * th->interrupt_lock so another thread can wake us; an interrupt that
 * arrived before we got here is detected and skips the wait entirely.
 * Spurious/early wakeups are the caller's responsibility.
 */
static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    rb_nativethread_lock_t *lock = &th->interrupt_lock;
    rb_nativethread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        /* Solaris cond_timedwait() return EINVAL if an argument is greater than
         * current_time + 100,000,000.  So cut up to 100,000,000.  This is
         * considered as a kind of spurious wakeup.  The caller to native_sleep
         * should care about spurious wakeup.
         *
         * See also [Bug #1341] [ruby-core:29702]
         * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
         */
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        native_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted. return immediate */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        native_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();

    thread_debug("native_sleep done\n");
}
/*
 * Wait on an NDB condition variable with the given mutex held.
 * Returns 1 on a NULL argument, otherwise the underlying
 * cond-wait result (0 on success).
 */
int NdbCondition_Wait(struct NdbCondition* p_cond,
                      NdbMutex* p_mutex)
{
  if (p_cond == NULL || p_mutex == NULL)
    return 1;

#ifdef NDB_MUTEX_STRUCT
  return pthread_cond_wait(&p_cond->cond, &p_mutex->mutex);
#else
  return native_cond_wait(&p_cond->cond, p_mutex);
#endif
}
/*
 * Put the current Ruby thread to sleep, releasing the GVL for the
 * duration.  With timeout_tv == NULL sleeps until signalled; otherwise
 * sleeps at most the given relative timeout.
 *
 * Fix: struct timeval carries microseconds but struct timespec wants
 * nanoseconds; the previous code copied tv_usec straight into tv_nsec,
 * making timed sleeps ~1000x shorter than requested.  Multiply by 1000
 * (as the sibling native_sleep implementation does).  Also dropped the
 * unused local `tvn`.
 */
static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    pthread_mutex_t *lock = &th->interrupt_lock;
    rb_thread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        /* microseconds -> nanoseconds */
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted. return immediate */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();

    thread_debug("native_sleep done\n");
}
static int lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms) { int interrupted = 0; int err = 0; mutex->cond_waiting++; for (;;) { if (!mutex->th) { mutex->th = th; break; } if (RUBY_VM_INTERRUPTED(th)) { interrupted = 1; break; } if (err == ETIMEDOUT) { interrupted = 2; break; } if (timeout_ms) { struct timespec timeout_rel; struct timespec timeout; timeout_rel.tv_sec = 0; timeout_rel.tv_nsec = timeout_ms * 1000 * 1000; timeout = native_cond_timeout(&mutex->cond, timeout_rel); err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout); } else { native_cond_wait(&mutex->cond, &mutex->lock); err = 0; } } mutex->cond_waiting--; return interrupted; }