static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {
        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
            /*
             * Wake up the timer thread only if it is sleeping.
             * If the timer thread is in polling mode, we do not
             * want to disturb its polling interval.
             */
            rb_thread_wakeup_timer_thread_low();
        }

        while (vm->gvl.acquired) {
            native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
        }

        vm->gvl.waiting--;

        if (vm->gvl.need_yield) {
            vm->gvl.need_yield = 0;
            native_cond_signal(&vm->gvl.switch_cond);
        }
    }
    vm->gvl.acquired = 1;
}
static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {          /* unlocked fast-path check */
        native_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        if (cached_thread_root) {      /* recheck under the lock */
            cached_thread_root = entry->next;
            *entry->th_area = th;
            result = 1;
        }
        if (result) {
            native_cond_signal(entry->cond);
        }
        native_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}
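The unlocked read of cached_thread_root above is only a fast-path hint; the real decision is remade while holding thread_cache_lock. A minimal sketch of that check/recheck shape, using raw pthreads and hypothetical names (cache_node, cache_pop) rather than Ruby's types:

#include <pthread.h>
#include <stddef.h>

/* Hypothetical cache node; names are illustrative, not Ruby's. */
struct cache_node {
    struct cache_node *next;
    void *payload;
};

static struct cache_node *cache_root;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pop one node, mirroring the check/recheck shape of use_cached_thread():
 * the unlocked read is only a cheap hint; the decision is remade under
 * the lock, so a stale hint costs at most one useless lock round-trip. */
static struct cache_node *
cache_pop(void)
{
    struct cache_node *node = NULL;

    if (cache_root) {                  /* fast-path hint, no lock held */
        pthread_mutex_lock(&cache_lock);
        if (cache_root) {              /* recheck: another thread may have won */
            node = cache_root;
            cache_root = node->next;
        }
        pthread_mutex_unlock(&cache_lock);
    }
    return node;
}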
static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    native_cond_signal(&th->native_thread_data.sleep_cond);
}
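ubf_pthread_cond_signal is an unblocking function: it wakes a thread that is sleeping on its own per-thread condition variable. A self-contained sketch of the wait/wake pairing such a UBF assumes, in plain pthreads (the struct and field names here are illustrative, not Ruby's):

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical per-thread sleep state; field names are illustrative. */
struct sleeper {
    pthread_mutex_t lock;
    pthread_cond_t  sleep_cond;
    bool            interrupted;
};

/* Wait side of the pattern the UBF targets: the sleeping thread blocks
 * on its own condvar and rechecks a predicate, so a wakeup delivered
 * before the wait begins is not lost. */
static void
sleeper_wait(struct sleeper *s)
{
    pthread_mutex_lock(&s->lock);
    while (!s->interrupted)
        pthread_cond_wait(&s->sleep_cond, &s->lock);
    s->interrupted = false;
    pthread_mutex_unlock(&s->lock);
}

/* Wake side, the UBF analogue: set the predicate, then signal. */
static void
sleeper_wake(struct sleeper *s)
{
    pthread_mutex_lock(&s->lock);
    s->interrupted = true;
    pthread_cond_signal(&s->sleep_cond);
    pthread_mutex_unlock(&s->lock);
}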
static void
gvl_release_common(rb_vm_t *vm)
{
    vm->gvl.acquired = 0;
    if (vm->gvl.waiting > 0)
        native_cond_signal(&vm->gvl.cond);
}
static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {
        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
            /* transition the timer thread to polling mode */
            rb_thread_wakeup_timer_thread();
        }

        while (vm->gvl.acquired) {
            native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
        }

        vm->gvl.waiting--;

        if (vm->gvl.need_yield) {
            vm->gvl.need_yield = 0;
            native_cond_signal(&vm->gvl.switch_cond);
        }
    }
    vm->gvl.acquired = 1;
}
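Both gvl_acquire_common variants above pair with gvl_release_common through one mutex, one condition variable, an acquired flag, and a waiting count. A minimal GVL-like lock built from just those pieces — a sketch with raw pthreads in place of the native_* wrappers, and with the timer-thread and need_yield machinery omitted:

#include <pthread.h>

/* Minimal GVL-like lock, assuming the same fields the snippets above use. */
struct gvl {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int acquired;
    int waiting;
};

static void
gvl_acquire(struct gvl *g)
{
    pthread_mutex_lock(&g->lock);
    while (g->acquired) {              /* queue up behind the current holder */
        g->waiting++;
        pthread_cond_wait(&g->cond, &g->lock);
        g->waiting--;
    }
    g->acquired = 1;
    pthread_mutex_unlock(&g->lock);
}

static void
gvl_release(struct gvl *g)
{
    pthread_mutex_lock(&g->lock);
    g->acquired = 0;
    if (g->waiting > 0)                /* wake exactly one waiter, as above */
        pthread_cond_signal(&g->cond);
    pthread_mutex_unlock(&g->lock);
}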
static const char *
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
{
    const char *err = NULL;

    native_mutex_lock(&mutex->lock);
    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != th) {
        err = "Attempt to unlock a mutex which is locked by another thread";
    }
    else {
        mutex->th = 0;
        if (mutex->cond_waiting > 0)
            native_cond_signal(&mutex->cond);
    }
    native_mutex_unlock(&mutex->lock);

    if (!err) {
        /* remove the mutex from th's list of held mutexes */
        rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
        while (*th_mutex != mutex) {
            th_mutex = &(*th_mutex)->next_mutex;
        }
        *th_mutex = mutex->next_mutex;
        mutex->next_mutex = NULL;
    }

    return err;
}
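The cleanup loop at the end of rb_mutex_unlock_th walks a pointer to the "next" slot itself, so removing from th->keeping_mutexes needs no special case for the head of the list. The idiom in isolation, with a generic node type (and, like the original, assuming the target is present):

#include <stddef.h>

struct node { struct node *next; };

/* Pointer-to-pointer unlink: `slot` always points at the link that
 * currently holds `target`, so removal is a single store. */
static void
list_unlink(struct node **head, struct node *target)
{
    struct node **slot = head;
    while (*slot != target)            /* assumes target is in the list */
        slot = &(*slot)->next;
    *slot = target->next;              /* splice it out */
    target->next = NULL;
}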
int rw_pr_unlock(rw_pr_lock_t *rwlock)
{
  if (rwlock->active_writer)
  {
    /* We are unlocking a wr-lock. */
#ifdef SAFE_MUTEX
    rwlock->writer_thread= 0;
#endif
    rwlock->active_writer= FALSE;
    if (rwlock->writers_waiting_readers)
    {
      /*
        Avoid an expensive cond signal when there is no contention
        or the lock is used wr-only.

        Note that from a performance point of view it would be better
        to signal the condition variable after unlocking the mutex
        (as that reduces the number of context switches).

        Unfortunately, that would mean such an rwlock could not be
        safely used by the MDL subsystem, which relies on the fact
        that it is OK to destroy an rwlock once it is in the unlocked
        state.
      */
      native_cond_signal(&rwlock->no_active_readers);
    }
    native_mutex_unlock(&rwlock->lock);
  }
  else
  {
    /* We are unlocking an rd-lock. */
    native_mutex_lock(&rwlock->lock);
    rwlock->active_readers--;
    if (rwlock->active_readers == 0 && rwlock->writers_waiting_readers)
    {
      /* We are the last reader; wake up any waiting writer. */
      native_cond_signal(&rwlock->no_active_readers);
    }
    native_mutex_unlock(&rwlock->lock);
  }
  return 0;
}
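Note that the writer branch above releases rwlock->lock without acquiring it first, which only makes sense if the write-lock path leaves the mutex held for the entire write-side critical section. A plausible sketch of that counterpart — not MySQL's actual rw_pr_wrlock, just the shape the unlock code implies, using the same types and native_* wrappers as the snippet above:

/* Sketch only: illustrative counterpart to rw_pr_unlock(). */
int rw_pr_wrlock_sketch(rw_pr_lock_t *rwlock)
{
  native_mutex_lock(&rwlock->lock);
  rwlock->writers_waiting_readers++;
  while (rwlock->active_readers > 0)   /* wait for readers to drain */
    native_cond_wait(&rwlock->no_active_readers, &rwlock->lock);
  rwlock->writers_waiting_readers--;
  rwlock->active_writer= TRUE;
  /* The mutex stays held until rw_pr_unlock() releases it. */
  return 0;
}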
int
NdbCondition_Signal(struct NdbCondition* p_cond)
{
    int result;

    if (p_cond == NULL)
        return 1;

    result = native_cond_signal(&p_cond->cond);
    return result;
}
static void
gvl_release(rb_vm_t *vm)
{
#if GVL_SIMPLE_LOCK
    native_mutex_unlock(&vm->gvl.lock);
#else
    native_mutex_lock(&vm->gvl.lock);
    if (vm->gvl.waiting > 0) {
        rb_thread_t *th = vm->gvl.waiting_threads;
        if (GVL_DEBUG) fprintf(stderr, "gvl release (%p): wakeup: %p\n", GET_THREAD(), th);
        native_cond_signal(&th->native_thread_data.gvl_cond);
    }
    else {
        if (GVL_DEBUG) fprintf(stderr, "gvl release (%p): wakeup: %p\n", GET_THREAD(), NULL);
        /* do nothing */
    }
    vm->gvl.acquired = 0;
    native_mutex_unlock(&vm->gvl.lock);
#endif
}