static VALUE
rb_thread_priority_set(VALUE thread, SEL sel, VALUE prio)
{
    // Thread#priority=: clamp the requested priority into the scheduler's
    // valid range for the thread's current policy, then apply it.
    // FIXME this doesn't really mimic what 1.9 does, but do we care?
    int policy;
    struct sched_param param;
    rb_secure(4);
    pthread_assert(pthread_getschedparam(GetThreadPtr(thread)->thread,
		&policy, &param));

    const int max = sched_get_priority_max(policy);
    const int min = sched_get_priority_min(policy);

    // Clamp into [min, max]. The original compared `max > priority`,
    // which forced every in-range priority up to `max`.
    int priority = FIX2INT(prio);
    if (priority < min) {
	priority = min;
    }
    else if (priority > max) {
	priority = max;
    }

    param.sched_priority = priority;
    pthread_assert(pthread_setschedparam(GetThreadPtr(thread)->thread,
		policy, &param));
    return Qnil;
}
static VALUE
rb_mutex_lock(VALUE self, SEL sel)
{
    // Mutex#lock: acquire the mutex, blocking until it is available.
    // Raises ThreadError if the owning thread tries to lock it again.
    rb_vm_thread_t *current = GetThreadPtr(rb_vm_current_thread());
    rb_vm_mutex_t *m = GetMutexPtr(self);

    if (m->thread == current) {
	rb_raise(rb_eThreadError, "deadlock; recursive locking");
    }

    // Remember the status so it can be restored after the wait; a live
    // thread is reported as sleeping while it blocks on the lock.
    rb_vm_thread_status_t prev_status = current->status;
    if (prev_status == THREAD_ALIVE) {
	current->status = THREAD_SLEEP;
    }
    current->wait_for_mutex_lock = true;
    pthread_assert(pthread_mutex_lock(&m->mutex));
    current->wait_for_mutex_lock = false;
    current->status = prev_status;

    // Record ownership and append to the thread's held-mutex list,
    // lazily creating (and untrusting) the array on first use.
    m->thread = current;
    if (current->mutexes == Qnil) {
	GC_WB(&current->mutexes, rb_ary_new());
	OBJ_UNTRUST(current->mutexes);
    }
    rb_ary_push(current->mutexes, self);
    return self;
}
static VALUE
rb_thread_priority(VALUE thread, SEL sel)
{
    // Thread#priority: report the scheduler priority of the thread.
    // FIXME this doesn't really mimic what 1.9 does, but do we care?
    //
    // POSIX does not allow a NULL policy pointer for
    // pthread_getschedparam(), so pass a dummy local instead of NULL.
    int policy;
    struct sched_param param;
    pthread_assert(pthread_getschedparam(GetThreadPtr(thread)->thread,
		&policy, &param));
    return INT2FIX(param.sched_priority);
}
static void
rb_mutex_unlock0(VALUE self, bool assert_unlockable,
	bool delete_from_thread_mutexes)
{
    // Shared unlock helper. When `assert_unlockable` is set,
    // rb_mutex_can_unlock() is expected to raise on an illegal unlock;
    // otherwise an un-unlockable mutex is silently left alone.
    rb_vm_mutex_t *m = GetMutexPtr(self);

    if (!rb_mutex_can_unlock(m, assert_unlockable)) {
	return;
    }
    // Optionally drop the mutex from its owner's held-mutex list
    // (skipped when the caller is already iterating that list).
    if (delete_from_thread_mutexes) {
	assert(m->thread->mutexes != Qnil);
	rb_ary_delete(m->thread->mutexes, self);
    }
    pthread_assert(pthread_mutex_unlock(&m->mutex));
    m->thread = NULL;
}
static VALUE
rb_mutex_lock(VALUE self, SEL sel)
{
    // Mutex#lock: acquire the mutex, blocking until it is available.
    // Raises ThreadError if the owning thread tries to lock it again.
    rb_vm_thread_t *current = GetThreadPtr(rb_vm_current_thread());
    rb_vm_mutex_t *m = GetMutexPtr(self);
    if (m->thread == current) {
	rb_raise(rb_eThreadError, "deadlock; recursive locking");
    }
    // Save and restore the previous status instead of unconditionally
    // forcing THREAD_ALIVE after the wait: a thread that was not ALIVE
    // before blocking must not be resurrected by taking a lock. This
    // matches the save/restore done by the other rb_mutex_lock variant.
    rb_vm_thread_status_t prev_status = current->status;
    if (current->status == THREAD_ALIVE) {
	current->status = THREAD_SLEEP;
    }
    pthread_assert(pthread_mutex_lock(&m->mutex));
    current->status = prev_status;
    m->thread = current;
    if (current->mutexes == Qnil) {
	GC_WB(&current->mutexes, rb_ary_new());
    }
    rb_ary_push(current->mutexes, self);
    return self;
}
static VALUE
thread_join_m(VALUE self, SEL sel, int argc, VALUE *argv)
{
    // Thread#join([timeout]): wait for the thread to finish. With a
    // timeout, returns nil if the thread is still running afterwards;
    // otherwise returns self. Re-raises the exception that killed the
    // joined thread, if any.
    VALUE timeout;
    rb_scan_args(argc, argv, "01", &timeout);

    rb_vm_thread_t *t = GetThreadPtr(self);
    if (t->status != THREAD_DEAD) {
	if (timeout == Qnil) {
	    // No timeout given: block until the thread finishes.
	    pthread_assert(pthread_join(t->thread, NULL));
	}
	else {
	    // Timeout given: sleep then check if the thread is dead.
	    // TODO do multiple sleeps instead of only one.
	    struct timeval tv = rb_time_interval(timeout);
	    struct timespec ts = {
		.tv_sec = tv.tv_sec,
		.tv_nsec = tv.tv_usec * 1000
	    };
	    // Normalize so tv_nsec stays below one second.
	    for (; ts.tv_nsec >= 1000000000; ts.tv_nsec -= 1000000000) {
		ts.tv_sec += 1;
	    }
	    nanosleep(&ts, NULL);
	    if (t->status != THREAD_DEAD) {
		return Qnil;
	    }
	}
    }
    // If the thread was terminated because of an exception, we need to
    // propagate it.
    if (t->exception != Qnil) {
	rb_exc_raise(t->exception);
    }
    return self;
}
//! Execute \a func exactly once across all threads, guarded by \a once_state.
static void atomic_once( void (*func)(), pthread_once_t &once_state ) {
    int rc = pthread_once( &once_state, func );
    pthread_assert( rc, "pthread_once failed" );
}
//! Destroy the spin lock that guards the counter.
~atomic_incrementer() {
    int rc = pthread_spin_destroy( &my_lock );
    pthread_assert( rc, "pthread_spin_destroy failed" );
}
//! Atomically read the current counter value.
operator size_t() {
    pthread_assert( pthread_spin_lock( &my_lock ), "pthread_spin_lock failed" );
    size_t snapshot = my_val;
    pthread_assert( pthread_spin_unlock( &my_lock ), "pthread_spin_unlock failed" );
    return snapshot;
}
//! Atomic post-increment; returns the value held before the increment.
size_t operator++(int) {
    pthread_assert( pthread_spin_lock( &my_lock ), "pthread_spin_lock failed" );
    size_t old_value = my_val;
    ++my_val;
    pthread_assert( pthread_spin_unlock( &my_lock ), "pthread_spin_unlock failed" );
    return old_value;
}
//! Reset the counter to zero and create its process-private spin lock.
void init() {
    my_val = 0;
    int rc = pthread_spin_init( &my_lock, PTHREAD_PROCESS_PRIVATE );
    pthread_assert( rc, "pthread_spin_init failed" );
}
static VALUE
mutex_initialize(VALUE self, SEL sel)
{
    // Mutex#initialize: set up the underlying POSIX mutex with
    // default attributes.
    rb_vm_mutex_t *m = GetMutexPtr(self);
    pthread_assert(pthread_mutex_init(&m->mutex, NULL));
    return self;
}
// Release the mutex protecting a thread group's internal state.
static inline void
thgroup_unlock(rb_thread_group_t *tg)
{
    rb_vm_mutex_t *m = GetMutexPtr(tg->mutex);
    pthread_assert(pthread_mutex_unlock(&m->mutex));
}