/*
 * Non-blocking lock attempt (Objective-C dispatch entry point).
 * Returns Qtrue and records the current thread as the owner when the
 * underlying pthread mutex could be acquired immediately; Qfalse otherwise.
 */
static VALUE
rb_mutex_trylock(VALUE self, SEL sel)
{
    rb_vm_mutex_t *m = GetMutexPtr(self);

    if (pthread_mutex_trylock(&m->mutex) != 0) {
	return Qfalse;
    }
    m->thread = GetThreadPtr(rb_vm_current_thread());
    return Qtrue;
}
/*
 * call-seq:
 *    mutex.locked?  -> true or false
 *
 * Returns +true+ if this lock is currently held by some thread.
 */
VALUE
rb_mutex_locked_p(VALUE self)
{
    rb_mutex_t *mutex;

    GetMutexPtr(self, mutex);
    /* A non-zero owner field means some thread holds the lock. */
    return mutex->th != 0 ? Qtrue : Qfalse;
}
/*
 * Sets whether this mutex may be operated on from a trap (signal)
 * handler context; +val+ is stored as a boolean flag.
 */
void
rb_mutex_allow_trap(VALUE self, int val)
{
    rb_mutex_t *mutex;

    GetMutexPtr(self, mutex);
    mutex->allow_trap = val;
}
/*
 * Blocking lock (Objective-C dispatch entry point).
 *
 * Raises ThreadError on recursive locking by the owning thread.
 * Otherwise marks the thread as sleeping, blocks on the pthread mutex,
 * restores the thread's previous status, records ownership, and appends
 * the mutex to the thread's +mutexes+ array (created lazily) so held
 * locks can be tracked per thread.
 *
 * BUG FIX: "GC_WB(&current->mutexes, ...)" had been corrupted into
 * "GC_WB(¤t->mutexes, ...)" — an HTML-entity mangling of "&curren(t)" —
 * which is not valid C. Restored the address-of expression.
 */
static VALUE
rb_mutex_lock(VALUE self, SEL sel)
{
    rb_vm_thread_t *current = GetThreadPtr(rb_vm_current_thread());
    rb_vm_mutex_t *m = GetMutexPtr(self);
    rb_vm_thread_status_t prev_status;

    if (m->thread == current) {
	rb_raise(rb_eThreadError, "deadlock; recursive locking");
    }

    /* Save the prior status so a thread that was not ALIVE (e.g. dying)
     * is not incorrectly revived after the wait. */
    prev_status = current->status;
    if (current->status == THREAD_ALIVE) {
	current->status = THREAD_SLEEP;
    }
    current->wait_for_mutex_lock = true;
    pthread_assert(pthread_mutex_lock(&m->mutex));
    current->wait_for_mutex_lock = false;
    current->status = prev_status;
    m->thread = current;

    /* Lazily create the per-thread list of held mutexes. */
    if (current->mutexes == Qnil) {
	GC_WB(&current->mutexes, rb_ary_new());
	OBJ_UNTRUST(current->mutexes);
    }
    rb_ary_push(current->mutexes, self);
    return self;
}
/*
 * Records that thread +th+ now holds +self+: the mutex is pushed onto
 * the head of the thread's singly-linked keeping_mutexes list.
 */
static void
mutex_locked(rb_thread_t *th, VALUE self)
{
    rb_mutex_t *m;

    GetMutexPtr(self, m);
    if (th->keeping_mutexes) {
	/* Chain the previous list head behind the new entry. */
	m->next_mutex = th->keeping_mutexes;
    }
    th->keeping_mutexes = m;
}
/*
 * Cleanup for a dying thread that was blocked on a mutex: if the thread
 * actually owns that mutex, abandon it (and its chain), then clear the
 * thread's locking_mutex record.
 */
static void
rb_mutex_abandon_locking_mutex(rb_thread_t *th)
{
    rb_mutex_t *m;

    if (!th->locking_mutex) {
	return;
    }

    GetMutexPtr(th->locking_mutex, m);
    if (m->th == th) {
	rb_mutex_abandon_all(m);
    }
    th->locking_mutex = Qfalse;
}
/*
 * call-seq:
 *    mutex.unlock    -> self
 *
 * Releases the lock.
 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
 */
VALUE
rb_mutex_unlock(VALUE self)
{
    rb_mutex_t *mutex;
    const char *errmsg;

    GetMutexPtr(self, mutex);
    /* The helper reports failure as a message string, NULL on success. */
    errmsg = rb_mutex_unlock_th(mutex, GET_THREAD());
    if (errmsg != NULL) {
	rb_raise(rb_eThreadError, "%s", errmsg);
    }
    return self;
}
/*
 * call-seq:
 *    mutex.owned?  -> true or false
 *
 * Returns +true+ if this lock is currently held by current thread.
 */
VALUE
rb_mutex_owned_p(VALUE self)
{
    rb_thread_t *th = GET_THREAD();
    rb_mutex_t *mutex;

    GetMutexPtr(self, mutex);
    return mutex->th == th ? Qtrue : Qfalse;
}
/*
 * Core unlock helper.
 *
 * When the mutex is unlockable (rb_mutex_can_unlock may raise if
 * +assert_unlockable+ is set), optionally removes +self+ from the owning
 * thread's +mutexes+ array, releases the pthread mutex, and clears the
 * owner field. Does nothing when the mutex cannot be unlocked.
 */
static void
rb_mutex_unlock0(VALUE self, bool assert_unlockable,
		 bool delete_from_thread_mutexes)
{
    rb_vm_mutex_t *m = GetMutexPtr(self);

    if (!rb_mutex_can_unlock(m, assert_unlockable)) {
	return;
    }

    if (delete_from_thread_mutexes) {
	assert(m->thread->mutexes != Qnil);
	rb_ary_delete(m->thread->mutexes, self);
    }
    pthread_assert(pthread_mutex_unlock(&m->mutex));
    m->thread = NULL;
}
/*
 * Non-blocking lock attempt (Objective-C dispatch entry point).
 *
 * On success, records the current thread as owner and appends the mutex
 * to the thread's +mutexes+ array (created lazily), returning Qtrue;
 * returns Qfalse if the pthread mutex is already held.
 *
 * BUG FIX: "GC_WB(&current->mutexes, ...)" had been corrupted into
 * "GC_WB(¤t->mutexes, ...)" — an HTML-entity mangling of "&curren(t)" —
 * which is not valid C. Restored the address-of expression.
 */
static VALUE
rb_mutex_trylock(VALUE self, SEL sel)
{
    rb_vm_mutex_t *m = GetMutexPtr(self);

    if (pthread_mutex_trylock(&m->mutex) == 0) {
	rb_vm_thread_t *current = GetThreadPtr(rb_vm_current_thread());
	m->thread = current;
	/* Lazily create the per-thread list of held mutexes. */
	if (current->mutexes == Qnil) {
	    GC_WB(&current->mutexes, rb_ary_new());
	    OBJ_UNTRUST(current->mutexes);
	}
	rb_ary_push(current->mutexes, self);
	return Qtrue;
    }
    return Qfalse;
}
/*
 * call-seq:
 *    mutex.try_lock  -> true or false
 *
 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
 * lock was granted.
 */
VALUE
rb_mutex_trylock(VALUE self)
{
    rb_mutex_t *mutex;
    VALUE result = Qfalse;

    GetMutexPtr(self, mutex);

    /* The internal native lock serializes access to the owner field. */
    native_mutex_lock(&mutex->lock);
    if (mutex->th == 0) {
	rb_thread_t *th = GET_THREAD();
	mutex->th = th;
	result = Qtrue;
	mutex_locked(th, self);
    }
    native_mutex_unlock(&mutex->lock);

    return result;
}
/*
 * Blocking lock (Objective-C dispatch entry point).
 *
 * Raises ThreadError on recursive locking by the owning thread; otherwise
 * marks the thread as sleeping, blocks on the pthread mutex, marks the
 * thread alive again, records ownership, and appends the mutex to the
 * thread's +mutexes+ array (created lazily).
 *
 * BUG FIX: "GC_WB(&current->mutexes, ...)" had been corrupted into
 * "GC_WB(¤t->mutexes, ...)" — an HTML-entity mangling of "&curren(t)" —
 * which is not valid C. Restored the address-of expression.
 */
static VALUE
rb_mutex_lock(VALUE self, SEL sel)
{
    rb_vm_thread_t *current = GetThreadPtr(rb_vm_current_thread());
    rb_vm_mutex_t *m = GetMutexPtr(self);

    if (m->thread == current) {
	rb_raise(rb_eThreadError, "deadlock; recursive locking");
    }

    current->status = THREAD_SLEEP;
    pthread_assert(pthread_mutex_lock(&m->mutex));
    current->status = THREAD_ALIVE;
    m->thread = current;

    /* Lazily create the per-thread list of held mutexes. */
    if (current->mutexes == Qnil) {
	GC_WB(&current->mutexes, rb_ary_new());
    }
    rb_ary_push(current->mutexes, self);
    return self;
}
/*
 * Returns Qtrue when some thread currently owns this mutex
 * (Objective-C dispatch entry point).
 */
static VALUE
rb_mutex_locked_p(VALUE self, SEL sel)
{
    return GetMutexPtr(self)->thread != 0 ? Qtrue : Qfalse;
}
/*
 * Initializes the wrapped pthread mutex with default attributes
 * (Objective-C dispatch entry point for Mutex#initialize).
 */
static VALUE
mutex_initialize(VALUE self, SEL sel)
{
    rb_vm_mutex_t *m = GetMutexPtr(self);

    pthread_assert(pthread_mutex_init(&m->mutex, NULL));
    return self;
}
/*
 * call-seq:
 *    mutex.lock  -> self
 *
 * Attempts to grab the lock and waits if it isn't available.
 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
 */
VALUE
rb_mutex_lock(VALUE self)
{
    rb_thread_t *th = GET_THREAD();
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);

    /* When running trap handler: locking a mutex that does not allow it
     * from trap context is an error. */
    if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
	rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    /* Fast path: try to take the lock without blocking first. */
    if (rb_mutex_trylock(self) == Qfalse) {
	if (mutex->th == th) {
	    rb_raise(rb_eThreadError, "deadlock; recursive locking");
	}

	/* Slow path: loop until this thread becomes the owner. Each
	 * iteration parks the thread on the mutex's native lock with the
	 * GVL released, then re-checks ownership. */
	while (mutex->th != th) {
	    int interrupted;
	    enum rb_thread_status prev_status = th->status;
	    volatile int timeout_ms = 0;
	    struct rb_unblock_callback oldubf;

	    /* Install an unblock callback so Thread#kill / interrupts can
	     * wake this thread out of lock_func(). */
	    set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
	    th->status = THREAD_STOPPED_FOREVER;
	    th->locking_mutex = self;

	    native_mutex_lock(&mutex->lock);
	    th->vm->sleeper++;
	    /*
	     * Carefully! while some contended threads are in lock_func(),
	     * vm->sleeper is an unstable value. we have to avoid both
	     * deadlock and busy loop.
	     *
	     * If every living thread appears to be sleeping, one thread
	     * volunteers as the "patrol thread" and waits with a finite
	     * timeout so deadlock detection eventually runs.
	     */
	    if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
		!patrol_thread) {
		timeout_ms = 100;
		patrol_thread = th;
	    }

	    /* Block on the native lock with the GVL released; mutex->lock
	     * is held around lock_func per its calling convention. */
	    GVL_UNLOCK_BEGIN();
	    interrupted = lock_func(th, mutex, (int)timeout_ms);
	    native_mutex_unlock(&mutex->lock);
	    GVL_UNLOCK_END();

	    if (patrol_thread == th)
		patrol_thread = NULL;

	    reset_unblock_function(th, &oldubf);

	    th->locking_mutex = Qfalse;
	    /* interrupted == 2 appears to mean the patrol timeout fired
	     * while the mutex is still held elsewhere — run deadlock
	     * detection (NOTE(review): confirm against lock_func). */
	    if (mutex->th && interrupted == 2) {
		rb_check_deadlock(th->vm);
	    }
	    /* Restore the pre-wait status unless something else already
	     * changed it while we were blocked. */
	    if (th->status == THREAD_STOPPED_FOREVER) {
		th->status = prev_status;
	    }
	    th->vm->sleeper--;

	    /* If we won the mutex, record it on the thread's held list. */
	    if (mutex->th == th) mutex_locked(th, self);

	    /* Service any pending interrupts before possibly looping. */
	    if (interrupted) {
		RUBY_VM_CHECK_INTS_BLOCKING(th);
	    }
	}
    }
    return self;
}
/* Releases the thread group's internal mutex. */
static inline void
thgroup_unlock(rb_thread_group_t *tg)
{
    rb_vm_mutex_t *m = GetMutexPtr(tg->mutex);

    pthread_assert(pthread_mutex_unlock(&m->mutex));
}