/*
 * Switch execution into the fiber `fib`, delivering argc/argv as the
 * value(s) the target fiber will receive.
 *
 * is_resume: non-zero for resume-style switching (the current fiber is
 * recorded as cont->prev so control can later return to it); zero for a
 * plain transfer.
 *
 * Returns the value passed back when this context is eventually switched
 * into again.  Raises FiberError if the fiber belongs to another thread,
 * is called across a trap handler, or is already dead.
 */
static inline VALUE
fiber_switch(VALUE fib, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetContPtr(fib, cont);

    /* Validity checks: a fiber is bound to its creating thread and may not
     * be entered from a trap context or after it has finished. */
    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.trap_tag != th->trap_tag) {
        rb_raise(rb_eFiberError, "fiber called across trap");
    }
    else if (!cont->alive) {
        rb_raise(rb_eFiberError, "dead fiber called");
    }

    if (is_resume) {
        /* Remember who resumed us so a later yield can come back here. */
        cont->prev = rb_fiber_current();
    }

    /* Package the arguments for the target fiber to pick up. */
    cont->value = make_passing_arg(argc, argv);

    /* fiber_store() saves the current context.  A Qundef result means the
     * context was just saved and we must jump into the target via
     * cont_restore_0(), which does not return (hence the rb_bug marker);
     * any other value means we were switched back into and `value` is what
     * was handed to us. */
    if ((value = fiber_store(cont)) == Qundef) {
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }

    /* Service any interrupts that became pending while we were away. */
    RUBY_VM_CHECK_INTS();

    return value;
}
/** Destructs the VM.
 *
 * Runs the VM finalization processes as well as ruby_finalize(), and frees
 * resources used by the VM.
 *
 * @param ex Default value for the return value.
 * @return Non-zero if an error occurred; otherwise, the given ex.
 * @note This function does not raise any exception.
 */
int ruby_cleanup(volatile int ex)
{
    int state;
    volatile VALUE errs[2];          /* populated later during finalization (not visible in this chunk) */
    rb_thread_t *th = GET_THREAD();
    int nerr;

    /* Wake the main thread and flush pending signals before tearing down. */
    rb_threadptr_interrupt(th);
    rb_threadptr_check_signal(th);
    PUSH_TAG();
    if ((state = EXEC_TAG()) == 0) {
        /* Run any pending interrupts under the root jmpbuf so a raised
         * exception is caught by the EXEC_TAG above. */
        SAVE_ROOT_JMPBUF(th, { RUBY_VM_CHECK_INTS(th); });
        /* NOTE(review): the remainder of this function is truncated in this chunk. */
/*
 * Block the current thread (win32 backend).  A NULL `tv` sleeps until
 * explicitly woken; otherwise the timeval is converted to milliseconds.
 * The GVL is released for the duration of the wait, and pending VM
 * interrupts are serviced on the way out.
 */
static void
native_sleep(rb_thread_t *th, struct timeval *tv)
{
    const DWORD timeout_ms = tv ? (DWORD)(tv->tv_sec * 1000 + tv->tv_usec / 1000)
                                : INFINITE;

    GVL_UNLOCK_BEGIN();
    {
        DWORD wait_result;
        const int saved_status = th->status;

        th->status = THREAD_STOPPED;
        /* Install the unblock callback before checking for pending
         * interrupts (ordering matters for wakeup delivery). */
        th->unblock_function = ubf_handle;
        th->unblock_function_arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* An interrupt is already pending: skip the wait entirely. */
        }
        else {
            thread_debug("native_sleep start (%d)\n", (int)timeout_ms);
            wait_result = w32_wait_events(0, 0, timeout_ms, th);
            thread_debug("native_sleep done (%d)\n", wait_result);
        }

        /* Deregister the callback and restore the thread's prior state. */
        th->unblock_function = 0;
        th->unblock_function_arg = 0;
        th->status = saved_status;
    }
    GVL_UNLOCK_END();
    RUBY_VM_CHECK_INTS();
}
/*
 * Invoke the method described by `body` (a method NODE) on `recv` from C,
 * outside the normal bytecode dispatch path.
 *
 *   id   - the name the method was called with
 *   oid  - the original method id (relevant for aliased methods)
 *   argc/argv - arguments
 *   nosuper   - super-dispatch flag; only written here (see NODE_ZSUPER),
 *               the write has no caller-visible effect in this function
 *
 * Returns the method's result.  Raises ArgumentError for attr
 * reader/writer arity mismatches; falls back to method_missing when a
 * zsuper chain runs off the top of the class hierarchy.
 */
static inline VALUE
vm_call0(rb_thread_t * th, VALUE klass, VALUE recv, VALUE id, ID oid,
         int argc, const VALUE *argv, const NODE *body, int nosuper)
{
    VALUE val;
    rb_block_t *blockptr = 0;

    /* Disabled debug trace (kept intentionally behind `if (0)`). */
    if (0) printf("id: %s, nd: %s, argc: %d, passed: %p\n",
                  rb_id2name(id), ruby_node_name(nd_type(body)),
                  argc, (void *)th->passed_block);

    /* Consume the block handed over by the caller, if any. */
    if (th->passed_block) {
        blockptr = th->passed_block;
        th->passed_block = 0;
    }
  again:
    switch (nd_type(body)) {
      case RUBY_VM_METHOD_NODE:{
        /* Ruby-defined method: push receiver + args onto the VM stack,
         * set up the method frame, and run the interpreter to completion. */
        rb_control_frame_t *reg_cfp;
        VALUE iseqval = (VALUE)body->nd_body;
        int i;
        rb_vm_set_finish_env(th);
        reg_cfp = th->cfp;
        CHECK_STACK_OVERFLOW(reg_cfp, argc + 1);
        *reg_cfp->sp++ = recv;
        for (i = 0; i < argc; i++) {
            *reg_cfp->sp++ = argv[i];
        }
        vm_setup_method(th, reg_cfp, argc, blockptr, 0, iseqval, recv);
        val = vm_exec(th);
        break;
      }
      case NODE_CFUNC: {
        /* C-implemented method: push a CFUNC frame, call directly, and
         * fire c-call/c-return event hooks around it. */
        EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, id, klass);
        {
            rb_control_frame_t *reg_cfp = th->cfp;
            rb_control_frame_t *cfp =
                vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC,
                              recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);
            cfp->method_id = oid;
            cfp->method_class = klass;
            val = call_cfunc(body->nd_cfnc, recv, body->nd_argc, argc, argv);
            /* Sanity check: the callee must have left exactly its own frame. */
            if (reg_cfp != th->cfp + 1) {
                SDR2(reg_cfp);
                SDR2(th->cfp-5);
                rb_bug("cfp consistency error - call0");
                th->cfp = reg_cfp;
            }
            vm_pop_frame(th);
        }
        EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, id, klass);
        break;
      }
      case NODE_ATTRSET:{
        /* attr_writer: exactly one argument, store into the ivar. */
        if (argc != 1) {
            rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", argc);
        }
        val = rb_ivar_set(recv, body->nd_vid, argv[0]);
        break;
      }
      case NODE_IVAR: {
        /* attr_reader: no arguments, fetch the ivar. */
        if (argc != 0) {
            rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", argc);
        }
        val = rb_attr_get(recv, body->nd_vid);
        break;
      }
      case NODE_BMETHOD:{
        /* Method defined from a block (define_method). */
        val = vm_call_bmethod(th, oid, body->nd_cval, recv, klass,
                              argc, (VALUE *)argv, blockptr);
        break;
      }
      case NODE_ZSUPER:{
        /* super: re-resolve in the superclass and dispatch again. */
        klass = RCLASS_SUPER(klass);
        if (!klass || !(body = rb_method_node(klass, id))) {
            return method_missing(recv, id, argc, argv, 0);
        }
        RUBY_VM_CHECK_INTS();
        nosuper = CALL_SUPER;
        body = body->nd_body;
        goto again;
      }
      default:
        rb_bug("unsupported: vm_call0(%s)", ruby_node_name(nd_type(body)));
    }
    RUBY_VM_CHECK_INTS();
    return val;
}
/*
 * Core method dispatch (method-entry based VM).
 *
 * Dispatches the call described by (id, me, recv, num args on cfp->sp,
 * blockptr, call-site flags).  For ISEQ methods a new frame is set up and
 * Qundef is returned — presumably so the caller resumes the VM loop in the
 * new frame (the frame setup itself happens in vm_setup_method).  All other
 * method types are executed inline here and their result returned.
 *
 * me == NULL, visibility violations (private/protected), and safe-level
 * failures fall through to method_missing handling / SecurityError.
 */
static inline VALUE
vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, int num,
               const rb_block_t *blockptr, VALUE flag, ID id,
               const rb_method_entry_t *me, VALUE recv)
{
    VALUE val;

  start_method_dispatch:
    if (me != 0) {
        /* flag == 0 means no visibility/safe restrictions apply. */
        if ((me->flag == 0)) {
          normal_method_dispatch:
            switch (me->def->type) {
              case VM_METHOD_TYPE_ISEQ:{
                /* Ruby-defined method: push a frame, let the VM run it. */
                vm_setup_method(th, cfp, recv, num, blockptr, flag, me);
                return Qundef;
              }
              case VM_METHOD_TYPE_NOTIMPLEMENTED:
              case VM_METHOD_TYPE_CFUNC:{
                val = vm_call_cfunc(th, cfp, num, recv, blockptr, me);
                break;
              }
              case VM_METHOD_TYPE_ATTRSET:{
                /* attr_writer: one argument; pop receiver + arg (sp -= 2). */
                if (num != 1) {
                    rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", num);
                }
                val = rb_ivar_set(recv, me->def->body.attr.id, *(cfp->sp - 1));
                cfp->sp -= 2;
                break;
              }
              case VM_METHOD_TYPE_IVAR:{
                /* attr_reader: no arguments; pop receiver (sp -= 1). */
                if (num != 0) {
                    rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", num);
                }
                val = rb_attr_get(recv, me->def->body.attr.id);
                cfp->sp -= 1;
                break;
              }
              case VM_METHOD_TYPE_MISSING:{
                /* Entry marks the method as missing: forward to
                 * recv.method_missing(original_id, *args). */
                VALUE *argv = ALLOCA_N(VALUE, num+1);
                argv[0] = ID2SYM(me->def->original_id);
                MEMCPY(argv+1, cfp->sp - num, VALUE, num);
                cfp->sp += - num - 1;   /* pop args + receiver */
                th->passed_block = blockptr;
                val = rb_funcall2(recv, rb_intern("method_missing"), num+1, argv);
                break;
              }
              case VM_METHOD_TYPE_BMETHOD:{
                /* define_method-style method backed by a block. */
                VALUE *argv = ALLOCA_N(VALUE, num);
                MEMCPY(argv, cfp->sp - num, VALUE, num);
                cfp->sp += - num - 1;   /* pop args + receiver */
                val = vm_call_bmethod(th, recv, num, argv, blockptr, me);
                break;
              }
              case VM_METHOD_TYPE_ZSUPER:{
                /* super: re-resolve the entry in the superclass and loop. */
                VALUE klass = RCLASS_SUPER(me->klass);
                me = rb_method_entry(klass, id);
                if (me != 0) {
                    goto normal_method_dispatch;
                }
                else {
                    goto start_method_dispatch;
                }
              }
              case VM_METHOD_TYPE_OPTIMIZED:{
                switch (me->def->body.optimize_type) {
                  case OPTIMIZED_METHOD_TYPE_SEND: {
                    /* Specialized Kernel#send: first stack value is the
                     * method name; shift it out and re-dispatch as FCALL. */
                    rb_control_frame_t *reg_cfp = cfp;
                    rb_num_t i = num - 1;
                    VALUE sym;

                    if (num == 0) {
                        rb_raise(rb_eArgError, "no method name given");
                    }

                    sym = TOPN(i);
                    id = SYMBOL_P(sym) ? SYM2ID(sym) : rb_to_id(sym);
                    /* shift arguments */
                    if (i > 0) {
                        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
                    }
                    me = rb_method_entry(CLASS_OF(recv), id);
                    num -= 1;
                    DEC_SP(1);
                    flag |= VM_CALL_FCALL_BIT | VM_CALL_OPT_SEND_BIT;

                    goto start_method_dispatch;
                  }
                  case OPTIMIZED_METHOD_TYPE_CALL: {
                    /* Specialized Proc#call: invoke the proc directly. */
                    rb_proc_t *proc;
                    int argc = num;
                    VALUE *argv = ALLOCA_N(VALUE, num);
                    GetProcPtr(recv, proc);
                    MEMCPY(argv, cfp->sp - num, VALUE, num);
                    cfp->sp -= num + 1;   /* pop args + receiver */

                    val = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, blockptr);
                    break;
                  }
                  default:
                    rb_bug("eval_invoke_method: unsupported optimized method type (%d)",
                           me->def->body.optimize_type);
                }
                break;
              }
              default:{
                rb_bug("eval_invoke_method: unsupported method type (%d)", me->def->type);
                break;
              }
            }
        }
        else {
            /* Visibility / safe-level checks. */
            int noex_safe;

            if (!(flag & VM_CALL_FCALL_BIT) &&
                (me->flag & NOEX_MASK) & NOEX_PRIVATE) {
                /* Private method called with an explicit receiver. */
                int stat = NOEX_PRIVATE;
                if (flag & VM_CALL_VCALL_BIT) {
                    stat |= NOEX_VCALL;
                }
                val = vm_method_missing(th, id, recv, num, blockptr, stat);
            }
            else if (!(flag & VM_CALL_OPT_SEND_BIT) &&
                     (me->flag & NOEX_MASK) & NOEX_PROTECTED) {
                /* Protected: caller's self must be kind_of the defining
                 * class (unwrap include-class wrappers first). */
                VALUE defined_class = me->klass;

                if (RB_TYPE_P(defined_class, T_ICLASS)) {
                    defined_class = RBASIC(defined_class)->klass;
                }

                if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
                    val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
                }
                else {
                    goto normal_method_dispatch;
                }
            }
            else if ((noex_safe = NOEX_SAFE(me->flag)) > th->safe_level &&
                     (noex_safe > 2)) {
                rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
            }
            else {
                goto normal_method_dispatch;
            }
        }
    }
    else {
        /* method missing */
        int stat = 0;
        if (flag & VM_CALL_VCALL_BIT) {
            stat |= NOEX_VCALL;
        }
        if (flag & VM_CALL_SUPER_BIT) {
            stat |= NOEX_SUPER;
        }
        if (id == idMethodMissing) {
            /* method_missing itself is undefined: raise NoMethodError
             * machinery directly instead of recursing. */
            VALUE *argv = ALLOCA_N(VALUE, num);
            vm_method_missing_args(th, argv, num - 1, 0, stat);
            rb_raise_method_missing(th, num, argv, recv, stat);
        }
        else {
            val = vm_method_missing(th, id, recv, num, blockptr, stat);
        }
    }

    RUBY_VM_CHECK_INTS();
    return val;
}
/*
 * Core method dispatch (older, NODE-based VM).
 *
 * `mn` is the resolved method node (NULL when lookup failed).  ISEQ methods
 * get a new frame via vm_setup_method and return Qundef — presumably so the
 * caller resumes execution in that frame; every other method kind is run
 * inline and its result returned.  Visibility violations and failed lookup
 * route to vm_method_missing; unsafe calls at high $SAFE raise SecurityError.
 */
static inline VALUE
vm_call_method(rb_thread_t * const th, rb_control_frame_t * const cfp,
               const int num, rb_block_t * const blockptr, const VALUE flag,
               const ID id, const NODE * mn, const VALUE recv)
{
    VALUE val;

  start_method_dispatch:
    if (mn != 0) {
        /* nd_noex == 0: no visibility/safe restrictions on this method. */
        if ((mn->nd_noex == 0)) {
            /* dispatch method */
            NODE *node;

          normal_method_dispatch:
            node = mn->nd_body;
            switch (nd_type(node)) {
              case RUBY_VM_METHOD_NODE:{
                /* Ruby-defined method: push a frame, let the VM run it. */
                vm_setup_method(th, cfp, num, blockptr, flag, (VALUE)node->nd_body, recv);
                return Qundef;
              }
              case NODE_CFUNC:{
                /* C function (nd_file is reused here as the original ID). */
                val = vm_call_cfunc(th, cfp, num, id, (ID)mn->nd_file,
                                    recv, mn->nd_clss, flag, node, blockptr);
                break;
              }
              case NODE_ATTRSET:{
                /* attr_writer: store top of stack, pop arg + receiver. */
                val = rb_ivar_set(recv, node->nd_vid, *(cfp->sp - 1));
                cfp->sp -= 2;
                break;
              }
              case NODE_IVAR:{
                /* attr_reader: no args; pop receiver only. */
                if (num != 0) {
                    rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", num);
                }
                val = rb_attr_get(recv, node->nd_vid);
                cfp->sp -= 1;
                break;
              }
              case NODE_BMETHOD:{
                /* define_method-style method backed by a block. */
                VALUE *argv = ALLOCA_N(VALUE, num);
                MEMCPY(argv, cfp->sp - num, VALUE, num);
                cfp->sp += - num - 1;   /* pop args + receiver */
                val = vm_call_bmethod(th, (ID)mn->nd_file, node->nd_cval,
                                      recv, mn->nd_clss, num, argv, blockptr);
                break;
              }
              case NODE_ZSUPER:{
                /* super: re-resolve in the superclass and loop. */
                VALUE klass;
                klass = RCLASS_SUPER(mn->nd_clss);
                mn = rb_method_node(klass, id);

                if (mn != 0) {
                    goto normal_method_dispatch;
                }
                else {
                    goto start_method_dispatch;
                }
              }
              default:{
                printf("node: %s\n", ruby_node_name(nd_type(node)));
                rb_bug("eval_invoke_method: unreachable");
                /* unreachable */
                break;
              }
            }
        }
        else {
            /* Visibility / safe-level checks. */
            int noex_safe;

            if (!(flag & VM_CALL_FCALL_BIT) &&
                (mn->nd_noex & NOEX_MASK) & NOEX_PRIVATE) {
                /* Private method called with an explicit receiver. */
                int stat = NOEX_PRIVATE;
                if (flag & VM_CALL_VCALL_BIT) {
                    stat |= NOEX_VCALL;
                }
                val = vm_method_missing(th, id, recv, num, blockptr, stat);
            }
            else if (((mn->nd_noex & NOEX_MASK) & NOEX_PROTECTED) &&
                     !(flag & VM_CALL_SEND_BIT)) {
                /* Protected: caller's self must be kind_of the (real)
                 * defining class. */
                VALUE defined_class = mn->nd_clss;

                if (TYPE(defined_class) == T_ICLASS) {
                    defined_class = RBASIC(defined_class)->klass;
                }

                if (!rb_obj_is_kind_of(cfp->self, rb_class_real(defined_class))) {
                    val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
                }
                else {
                    goto normal_method_dispatch;
                }
            }
            else if ((noex_safe = NOEX_SAFE(mn->nd_noex)) > th->safe_level &&
                     (noex_safe > 2)) {
                rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
            }
            else {
                goto normal_method_dispatch;
            }
        }
    }
    else {
        /* method missing */
        if (id == idMethodMissing) {
            /* method_missing itself missing is a VM invariant violation. */
            rb_bug("method missing");
        }
        else {
            int stat = 0;
            if (flag & VM_CALL_VCALL_BIT) {
                stat |= NOEX_VCALL;
            }
            if (flag & VM_CALL_SUPER_BIT) {
                stat |= NOEX_SUPER;
            }
            val = vm_method_missing(th, id, recv, num, blockptr, stat);
        }
    }

    RUBY_VM_CHECK_INTS();
    return val;
}
/*
 * Block the current thread (pthread backend).  A NULL `tv` waits
 * indefinitely on the thread's sleep condvar; otherwise the relative
 * timeval is converted to an absolute timespec deadline for
 * pthread_cond_timedwait.  The GVL is released for the duration of the
 * wait; the unblock callback (ubf_pthread_cond_signal) is registered under
 * interrupt_lock so another thread can wake us.  Pending VM interrupts are
 * serviced on the way out.
 */
static void
native_sleep(rb_thread_t *th, struct timeval *tv)
{
    int prev_status = th->status;
    struct timespec ts;
    struct timeval tvn;

    if (tv) {
        /* Convert relative timeout to an absolute deadline.
         * Max nsec here is (999999+999999)*1000 < 2e9, so a single
         * normalization step suffices. */
        gettimeofday(&tvn, NULL);
        ts.tv_sec = tvn.tv_sec + tv->tv_sec;
        ts.tv_nsec = (tvn.tv_usec + tv->tv_usec) * 1000;
        if (ts.tv_nsec >= 1000000000) {
            ts.tv_sec += 1;
            ts.tv_nsec -= 1000000000;
        }
    }

    th->status = THREAD_STOPPED;
    thread_debug("native_sleep %ld\n", tv ? tv->tv_sec : -1);
    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(&th->interrupt_lock);
        /* Register the wakeup callback before checking for interrupts, all
         * under interrupt_lock, so a wakeup cannot be lost in between. */
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted. return immediate */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            /* tv_sec < tvn.tv_sec means the deadline addition overflowed;
             * fall back to an untimed wait in that case too. */
            if (tv == 0 || ts.tv_sec < tvn.tv_sec /* overflow */ ) {
                int r;
                thread_debug("native_sleep: pthread_cond_wait start\n");
                r = pthread_cond_wait(&th->native_thread_data.sleep_cond,
                                      &th->interrupt_lock);
                if (r) rb_bug("pthread_cond_wait: %d", r);
                thread_debug("native_sleep: pthread_cond_wait end\n");
            }
            else {
                int r;
                thread_debug("native_sleep: pthread_cond_timedwait start (%ld, %ld)\n",
                             (unsigned long)ts.tv_sec, ts.tv_nsec);
                r = pthread_cond_timedwait(&th->native_thread_data.sleep_cond,
                                           &th->interrupt_lock, &ts);
                /* ETIMEDOUT is the normal expiry path, not an error. */
                if (r && r != ETIMEDOUT) rb_bug("pthread_cond_timedwait: %d", r);
                thread_debug("native_sleep: pthread_cond_timedwait end (%d)\n", r);
            }
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(&th->interrupt_lock);
        th->status = prev_status;
    }
    GVL_UNLOCK_END();
    RUBY_VM_CHECK_INTS();

    thread_debug("native_sleep done\n");
}