/*
 * Thread#priority= — clamps the requested priority into the scheduling
 * policy's valid [min, max] range and applies it to the underlying
 * POSIX thread. Returns nil.
 * FIXME this doesn't really minic what 1.9 does, but do we care?
 */
static VALUE
rb_thread_priority_set(VALUE thread, SEL sel, VALUE prio)
{
    int policy;
    struct sched_param param;
    rb_secure(4);
    pthread_assert(pthread_getschedparam(GetThreadPtr(thread)->thread,
		&policy, &param));

    const int max = sched_get_priority_max(policy);
    const int min = sched_get_priority_min(policy);

    int priority = FIX2INT(prio);
    if (priority < min) {
	priority = min;
    }
    else if (priority > max) {
	// BUG FIX: the original tested `max > priority`, which clamped
	// every in-range value up to the policy maximum; clamp only when
	// the requested priority actually exceeds the maximum.
	priority = max;
    }

    param.sched_priority = priority;
    pthread_assert(pthread_setschedparam(GetThreadPtr(thread)->thread,
		policy, &param));
    return Qnil;
}
/*
 * Thread#initialize — sets up and launches a new VM thread running the
 * given block. Raises ThreadError when no block is given or when the
 * Thread object was already initialized.
 */
static VALUE
thread_initialize(VALUE thread, SEL sel, int argc, const VALUE *argv)
{
    if (!rb_block_given_p()) {
	rb_raise(rb_eThreadError, "must be called with a block");
    }
    rb_vm_block_t *b = rb_vm_current_block();
    assert(b != NULL);

    rb_vm_thread_t *t = GetThreadPtr(thread);
    // Guard against #initialize being called twice on the same object,
    // which would leak the previously created pthread and its retain.
    if (t->thread != 0) {
	rb_raise(rb_eThreadError, "already initialized thread");
    }
    rb_vm_thread_pre_init(t, b, argc, argv, rb_vm_create_vm());

    // The thread's group is always the parent's one.
    rb_thgroup_add(GetThreadPtr(rb_vm_current_thread())->group, thread);

    // Retain the Thread object to avoid a potential GC, the corresponding
    // release is done in rb_vm_thread_run().
    rb_objc_retain((void *)thread);

    if (pthread_create(&t->thread, NULL, (void *(*)(void *))rb_vm_thread_run,
	    (void *)thread) != 0) {
	rb_sys_fail("pthread_create() failed");
    }
    return thread;
}
/*
 * Thread#kill — terminates the receiver. Killing the main thread exits
 * the whole process with a success status; any other thread not already
 * being killed is cancelled through the VM layer. Returns the thread.
 */
static VALUE
rb_thread_kill(VALUE thread, SEL sel)
{
    rb_vm_thread_t *target = GetThreadPtr(thread);
    rb_vm_thread_t *main_th = GetThreadPtr(rb_vm_main_thread());

    if (target->thread == main_th->thread) {
	rb_exit(EXIT_SUCCESS);
    }
    if (target->status != THREAD_KILLED) {
	rb_vm_thread_cancel(target);
    }
    return thread;
}
// Debug helper: dump the raw VM control-frame stack of the given thread.
void
stack_dump_th(VALUE thval)
{
    rb_thread_t *target;
    GetThreadPtr(thval, target);
    vm_stack_dump_raw(target, target->cfp);
}
// Convert a Thread VALUE into its underlying rb_thread_t pointer.
static rb_thread_t *
thval2thread_t(VALUE thval)
{
    rb_thread_t *result;
    GetThreadPtr(thval, result);
    return result;
}
// Thread#abort_on_exception= — sets the per-thread flag from the
// truthiness of +val+ and returns the argument unchanged.
static VALUE
rb_thread_abort_exc_set(VALUE thread, SEL sel, VALUE val)
{
    rb_secure(4);
    rb_vm_thread_t *t = GetThreadPtr(thread);
    t->abort_on_exception = RTEST(val);
    return val;
}
// Thread#run — wakes the target thread up, then yields the current
// thread's time slice so the woken thread gets a chance to be scheduled.
static VALUE
rb_thread_run(VALUE thread, SEL sel)
{
    rb_vm_thread_t *target = GetThreadPtr(thread);
    rb_vm_thread_wakeup(target);
    pthread_yield_np();
    return thread;
}
/*
 * Count the control frames on the current thread's VM stack and return
 * the count as a Fixnum. Walks from the frame below the current one
 * toward the stack limit, skipping over frames that do not look valid.
 */
static VALUE
frame_count(VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(rb_thread_current(), th);
    // Start one frame up so the frame created for this call is excluded.
    rb_control_frame_t *cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
    // One past the last frame slot: th->stack + th->stack_size.
    rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);
    int i = 1;
    while (cfp < limit_cfp) {
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	// Walked past the limit: the count so far is final.
	if (cfp >= limit_cfp) return INT2FIX(i);
	// skip invalid frames
	if (!valid_frame_p(cfp, limit_cfp)) cfp = find_valid_frame(cfp, limit_cfp);
	// No further valid frame could be found — stop counting.
	if (!cfp) break;
	i++;
    }
    return INT2FIX(i);
}
/*
 * Mutex#lock — blocks until the mutex is acquired. Raises ThreadError
 * on recursive locking. While waiting, the thread's status is set to
 * sleep (restored afterwards); on success the mutex is recorded in the
 * thread's owned-mutex list, created lazily. Returns self.
 */
static VALUE
rb_mutex_lock(VALUE self, SEL sel)
{
    rb_vm_thread_t *current = GetThreadPtr(rb_vm_current_thread());
    rb_vm_mutex_t *m = GetMutexPtr(self);
    rb_vm_thread_status_t prev_status;
    if (m->thread == current) {
	rb_raise(rb_eThreadError, "deadlock; recursive locking");
    }

    prev_status = current->status;
    if (current->status == THREAD_ALIVE) {
	current->status = THREAD_SLEEP;
    }
    current->wait_for_mutex_lock = true;
    pthread_assert(pthread_mutex_lock(&m->mutex));
    current->wait_for_mutex_lock = false;
    current->status = prev_status;
    m->thread = current;
    if (current->mutexes == Qnil) {
	// BUG FIX: "&current" had been garbled into the HTML entity
	// sequence "¤t", which does not compile.
	GC_WB(&current->mutexes, rb_ary_new());
	OBJ_UNTRUST(current->mutexes);
    }
    rb_ary_push(current->mutexes, self);
    return self;
}
// Debug helper: print the register state of the given thread's current
// control frame.
void
rb_vmdebug_thread_dump_regs(VALUE thval)
{
    rb_thread_t *target;
    GetThreadPtr(thval, target);
    rb_vmdebug_debug_print_register(target);
}
// Debug helper: dump the raw control-frame stack of the given thread.
void
rb_vmdebug_stack_dump_th(VALUE thval)
{
    rb_thread_t *target;
    GetThreadPtr(thval, target);
    rb_vmdebug_stack_dump_raw(target, target->cfp);
}
/*
 * Thread#initialize — sets up and launches a new detached VM thread
 * running the given block. Raises ThreadError when no block is given or
 * when the Thread object was already initialized. Returns the thread.
 */
static VALUE
thread_initialize(VALUE thread, SEL sel, int argc, const VALUE *argv)
{
    if (!rb_block_given_p()) {
	rb_raise(rb_eThreadError, "must be called with a block");
    }
    rb_vm_block_t *b = rb_vm_current_block();
    assert(b != NULL);

    rb_vm_thread_t *t = GetThreadPtr(thread);
    // A non-zero pthread handle means #initialize already ran once.
    if (t->thread != 0) {
	rb_raise(rb_eThreadError, "already initialized thread");
    }
    rb_vm_thread_pre_init(t, b, argc, argv, rb_vm_create_vm());

    // The thread's group is always the parent's one.
    // The parent group might be nil (ex. if created from GCD).
    VALUE group = GetThreadPtr(rb_vm_current_thread())->group;
    if (group != Qnil) {
	thgroup_add_m(group, thread, false);
    }

    // Retain the Thread object to avoid a potential GC, the corresponding
    // release is done in rb_vm_thread_run().
    GC_RETAIN(thread);

    // Prepare attributes for the thread: system scope, detached, and not
    // inheriting the creator's scheduling attributes.
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED);
    pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

    // Register the thread to the core. We are doing this before actually
    // running it because the current thread might perform a method poking at
    // the current registered threads (such as Kernel#sleep) right after that.
    rb_vm_register_thread(thread);

    // Launch it.
    if (pthread_create(&t->thread, &attr, (void *(*)(void *))rb_vm_thread_run,
	    (void *)thread) != 0) {
	rb_sys_fail("pthread_create() failed");
    }
    pthread_attr_destroy(&attr);

    return thread;
}
// st_foreach callback: remove every event hook from the thread stored in
// the table key, then continue iterating.
static int
clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
{
    rb_thread_t *target;
    GetThreadPtr((VALUE)key, target);
    rb_threadptr_remove_event_hook(target, 0, Qundef);
    return ST_CONTINUE;
}
/*
 * Thread#status — for a live thread, returns its status string; for a
 * dead thread, returns false when it terminated normally and nil when it
 * died with an exception.
 */
static VALUE
rb_thread_status(VALUE thread, SEL sel)
{
    rb_vm_thread_t *t = GetThreadPtr(thread);
    if (t->status != THREAD_DEAD) {
	return rb_str_new2(rb_thread_status_cstr(thread));
    }
    return t->exception == Qnil ? Qfalse : Qnil;
}
// Thread#add_trace_func — install +trace+ as an additional trace func on
// the given thread and return it.
static VALUE
thread_add_trace_func_m(VALUE obj, VALUE trace)
{
    rb_thread_t *target;
    GetThreadPtr(obj, target);
    thread_add_trace_func(target, trace);
    return trace;
}
// Thread#kill — cancel the thread unless it is already being killed.
// Returns the thread.
static VALUE
rb_thread_kill(VALUE thread, SEL sel)
{
    rb_vm_thread_t *target = GetThreadPtr(thread);
    if (target->status != THREAD_KILLED) {
	rb_vm_thread_cancel(target);
    }
    return thread;
}
// Mutex#try_lock — attempt to grab the lock without blocking. On success
// records the current thread as owner and returns true; otherwise false.
static VALUE
rb_mutex_trylock(VALUE self, SEL sel)
{
    rb_vm_mutex_t *m = GetMutexPtr(self);
    if (pthread_mutex_trylock(&m->mutex) != 0) {
	return Qfalse;
    }
    m->thread = GetThreadPtr(rb_vm_current_thread());
    return Qtrue;
}
/*
 * Thread#priority — returns the underlying POSIX thread's scheduling
 * priority as a Fixnum.
 * FIXME this doesn't really minic what 1.9 does, but do we care?
 */
static VALUE
rb_thread_priority(VALUE thread, SEL sel)
{
    int policy;
    struct sched_param param;
    // BUG FIX: pthread_getschedparam() requires valid pointers for both
    // out-arguments; the original passed NULL for the policy, which is
    // not permitted by POSIX. Also repairs "&param" that had been
    // garbled into the HTML entity sequence "¶m".
    pthread_assert(pthread_getschedparam(GetThreadPtr(thread)->thread,
	    &policy, &param));
    return INT2FIX(param.sched_priority);
}
// Build the backtrace array for the given thread, or nil when the thread
// is being killed or already dead.
static VALUE
thread_backtrace_to_ary(int argc, const VALUE *argv, VALUE thval, int to_str)
{
    rb_thread_t *target;
    GetThreadPtr(thval, target);

    if (target->to_kill || target->status == THREAD_KILLED) {
	return Qnil;
    }
    return vm_backtrace_to_ary(target, argc, argv, 0, 0, to_str);
}
/*
 * Point +iseqdat+'s cref (lexical class reference) stack at a freshly
 * allocated node for +klass+ with visibility +noex+, chained onto the
 * cref stack of the iseq in the current thread's control frame.
 */
static void
set_cref_stack(rb_iseq_t * iseqdat, VALUE klass, VALUE noex)
{
    VALUE thread = rb_thread_current();
    rb_thread_t * th;
    rb_control_frame_t * cfp;

    GetThreadPtr(thread, th);
    cfp = getcfp(th, th->cfp);

    iseqdat->cref_stack = NEW_BLOCK(klass);
    iseqdat->cref_stack->nd_visi = noex;
    // Chain to the enclosing lexical scope taken from the caller's iseq.
    iseqdat->cref_stack->nd_next = cfp->iseq->cref_stack;
    /* TODO: use lfp? */
}
/*
 * Create a new TracePoint object hooked on +events+, invoking +func+
 * with +data+. When +target_thval+ is given, the tracepoint is bound to
 * that thread only; otherwise it is global.
 */
VALUE
rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events,
    void (*func)(VALUE, void *), void *data)
{
    rb_thread_t *th = 0;

    if (RTEST(target_thval)) {
	/* TODO: Test it!
	 * Warning: This function is not tested.
	 */
	GetThreadPtr(target_thval, th);
    }
    return tracepoint_new(rb_cTracePoint, th, events, func, data, Qundef);
}
/*
 * Finalizer for Thread objects: if the thread died from an exception
 * nobody joined on, report it on stderr, then chain to the previous
 * finalizer implementation if one was saved.
 */
static void
thread_finalize_imp(void *rcv, SEL sel)
{
    rb_vm_thread_t *t = GetThreadPtr(rcv);
    if (t->exception != Qnil && !t->joined_on_exception) {
	// BUG FIX: %p requires a void * argument; pass the pthread
	// handle through an explicit cast so the variadic call is
	// well-defined.
	fprintf(stderr,
		"*** Thread %p exited prematurely because of an uncaught exception:\n%s\n",
		(void *)t->thread,
		rb_str_cstr(rb_format_exception_message(t->exception)));
    }
    if (thread_finalize_imp_super != NULL) {
	((void(*)(void *, SEL))thread_finalize_imp_super)(rcv, sel);
    }
}
// Debug helper: print the pc/sp and frame pointers of the thread's
// current control frame to stderr. Always returns nil.
VALUE
rb_vmdebug_thread_dump_state(VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(self, th);
    rb_control_frame_t *cfp = th->cfp;

    fprintf(stderr, "Thread state dump:\n");
    fprintf(stderr, "pc : %p, sp : %p\n", (void *)cfp->pc, (void *)cfp->sp);
    fprintf(stderr, "cfp: %p, lfp: %p, dfp: %p\n",
	    (void *)cfp, (void *)cfp->lfp, (void *)cfp->dfp);

    return Qnil;
}
// Initialize an rb_thread_t for +self+, attaching it to the current VM
// with no top wrapper and the VM's top self. Returns +self+.
static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th;
    // Capture the VM before th_init() touches the structure.
    rb_vm_t *vm = GET_THREAD()->vm;
    GetThreadPtr(self, th);

    th_init(th, self);
    th->vm = vm;
    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    return self;
}
// Thread#set_trace_func — drop any existing call_trace_func hook first,
// then install +trace+ unless it is nil (nil just clears the hook).
static VALUE
thread_set_trace_func_m(VALUE obj, VALUE trace)
{
    rb_thread_t *target;
    GetThreadPtr(obj, target);
    rb_threadptr_remove_event_hook(target, call_trace_func, Qundef);

    if (NIL_P(trace)) {
	return Qnil;
    }
    thread_add_trace_func(target, trace);
    return trace;
}
/*
 * Thread#raise — raise an exception in the target thread. Raising in
 * the current thread raises directly; a dead target is silently
 * ignored. Returns nil.
 */
static VALUE
thread_raise_m(VALUE self, SEL sel, int argc, VALUE *argv)
{
    VALUE exc = rb_make_exception(argc, argv);
    rb_vm_thread_t *target = GetThreadPtr(self);

    if (target->thread == pthread_self()) {
	rb_exc_raise(exc);
    }
    else if (target->status != THREAD_DEAD) {
	rb_vm_thread_raise(target, exc);
    }
    return Qnil;
}
/*
 * Remove +thread+ from its thread group, if any, and clear its group
 * reference. Aborts the process if the group's thread list is
 * inconsistent (the thread is not in the group it points to).
 */
void
rb_thread_remove_from_group(VALUE thread)
{
    rb_vm_thread_t *t = GetThreadPtr(thread);
    if (t->group != Qnil) {
	rb_thread_group_t *tg = GetThreadGroupPtr(t->group);
	thgroup_lock(tg);
	if (rb_ary_delete(tg->threads, thread) != thread) {
	    // BUG FIX: a diagnostic immediately preceding abort() must go
	    // to stderr — stdout is buffered and the message could be
	    // lost when the process aborts.
	    fprintf(stderr, "trying to remove a thread (%p) from a group that doesn't "\
		    "contain it\n", (void *)thread);
	    abort();
	}
	thgroup_unlock(tg);
	t->group = Qnil;
    }
}
static void cont_mark(void *ptr) { RUBY_MARK_ENTER("cont"); if (ptr) { rb_context_t *cont = ptr; rb_gc_mark(cont->value); rb_thread_mark(&cont->saved_thread); rb_gc_mark(cont->saved_thread.self); if (cont->vm_stack) { #ifdef CAPTURE_JUST_VALID_VM_STACK rb_gc_mark_locations(cont->vm_stack, cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen); #else rb_gc_mark_locations(cont->vm_stack, cont->vm_stack, cont->saved_thread.stack_size); #endif } if (cont->machine.stack) { if (cont->type == CONTINUATION_CONTEXT) { /* cont */ rb_gc_mark_locations(cont->machine.stack, cont->machine.stack + cont->machine.stack_size); } else { /* fiber */ rb_thread_t *th; rb_fiber_t *fib = (rb_fiber_t*)cont; GetThreadPtr(cont->saved_thread.self, th); if ((th->fiber != fib) && fib->status == RUNNING) { rb_gc_mark_locations(cont->machine.stack, cont->machine.stack + cont->machine.stack_size); } } } #ifdef __ia64 if (cont->machine.register_stack) { rb_gc_mark_locations(cont->machine.register_stack, cont->machine.register_stack + cont->machine.register_stack_size); } #endif } RUBY_MARK_LEAVE("cont"); }
/*
 * Mutex#try_lock — non-blocking acquire. On success, records the
 * current thread as owner and appends the mutex to the thread's
 * owned-mutex list (created lazily); returns true. Returns false when
 * the mutex is already held.
 */
static VALUE
rb_mutex_trylock(VALUE self, SEL sel)
{
    rb_vm_mutex_t *m = GetMutexPtr(self);
    if (pthread_mutex_trylock(&m->mutex) == 0) {
	rb_vm_thread_t *current = GetThreadPtr(rb_vm_current_thread());
	m->thread = current;
	if (current->mutexes == Qnil) {
	    // BUG FIX: "&current" had been garbled into the HTML entity
	    // sequence "¤t", which does not compile.
	    GC_WB(&current->mutexes, rb_ary_new());
	    OBJ_UNTRUST(current->mutexes);
	}
	rb_ary_push(current->mutexes, self);
	return Qtrue;
    }
    return Qfalse;
}
// Map a thread's internal status onto the C string used by
// Thread#status and friends.
static const char *
rb_thread_status_cstr(VALUE thread)
{
    switch (GetThreadPtr(thread)->status) {
	case THREAD_ALIVE:
	    return "run";
	case THREAD_SLEEP:
	    return "sleep";
	case THREAD_KILLED:
	    return "aborting";
	case THREAD_DEAD:
	    return "dead";
    }
    return "unknown";
}