/* collect_caller_bindings callback for Ruby-level (iseq) frames: record self,
 * the frame's class and iseq, and a GC-guarded cfp pointer; the Binding
 * itself is created later from the guarded cfp. */
static void
collect_caller_bindings_iseq(void *arg, const rb_control_frame_t *cfp)
{
    struct collect_caller_bindings_data *data = (struct collect_caller_bindings_data *)arg;
    VALUE frame = rb_ary_new2(5);

    rb_ary_store(frame, CALLER_BINDING_SELF, cfp->self);
    rb_ary_store(frame, CALLER_BINDING_CLASS, get_klass(cfp));
    rb_ary_store(frame, CALLER_BINDING_BINDING, GC_GUARDED_PTR(cfp)); /* create later */
    rb_ary_store(frame, CALLER_BINDING_ISEQ, cfp->iseq ? cfp->iseq->self : Qnil);
    rb_ary_store(frame, CALLER_BINDING_CFP, GC_GUARDED_PTR(cfp));

    rb_ary_push(data->ary, frame);
}
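The GC_GUARDED_PTR / GC_GUARDED_PTR_REF pair used throughout these functions stores a raw C pointer in a VALUE slot with a low tag bit set, so the garbage collector treats the slot as an immediate value rather than as an object reference. A minimal, self-contained sketch of that tagging idea follows; the MY_* macro names and the exact bit masks are illustrative assumptions, not the VM's own definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t VALUE;   /* stand-in for the VM's VALUE type */

/* Assumed shape of the guard macros: set bit 0 so the slot no longer looks
 * like an object pointer to the GC; clear the low bits to recover it. */
#define MY_GUARDED_PTR(p)     ((VALUE)((uintptr_t)(p) | 0x01))
#define MY_GUARDED_PTR_REF(v) ((void *)((uintptr_t)(v) & ~(uintptr_t)0x03))

int
main(void)
{
    int frame = 42;                        /* stands in for a control frame */
    VALUE slot = MY_GUARDED_PTR(&frame);   /* store the tagged pointer in a VALUE slot */

    assert(slot & 0x01);                   /* tag bit set: not a markable object */

    int *ref = MY_GUARDED_PTR_REF(slot);   /* untag to get the original pointer back */
    assert(ref == &frame);
    printf("recovered: %d\n", *ref);
    return 0;
}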
/* Array#each block inlining: if the current block is an iseq block, build
 * (or fetch) a specialized iseq that runs the each loop inside the block and
 * yield to it; return Qundef when the block cannot be specialized, so the
 * caller falls back to the generic implementation. */
VALUE
yarv_invoke_Array_each_special_block(VALUE ary)
{
    rb_thread_t *th = GET_THREAD();
    rb_block_t *orig_block = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);

    if (BUILTIN_TYPE(orig_block->iseq) != T_NODE) {
        VALUE tsiseqval = yarv_iseq_special_block(orig_block->iseq, build_Array_each_node);
        rb_iseq_t *tsiseq;
        VALUE argv[2];

        if (tsiseqval) {
            VALUE val;
            rb_block_t block = *orig_block;

            GetISeqPtr(tsiseqval, tsiseq);
            block.iseq = tsiseq;
            th->cfp->lfp[0] = GC_GUARDED_PTR(&block);

            argv[0] = 0;
            argv[1] = ary;
            val = th_invoke_yield(th, 2, argv);

            if (val == Qundef) {
                return ary;
            }
            else {
                return val;
            }
        }
    }
    return Qundef;
}
/* Range#each block inlining: same scheme as Array#each above, choosing the
 * exclusive (<) or inclusive (<=) loop builder depending on the range. */
VALUE
yarv_invoke_Range_each_special_block(VALUE range, VALUE beg, VALUE end, int excl)
{
    rb_thread_t *th = GET_THREAD();
    rb_block_t *orig_block = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);

    if (BUILTIN_TYPE(orig_block->iseq) != T_NODE) {
        void *builder = excl ? build_Range_each_node_LT : build_Range_each_node_LE;
        VALUE tsiseqval = yarv_iseq_special_block(orig_block->iseq, builder);
        rb_iseq_t *tsiseq;
        VALUE argv[2];

        if (tsiseqval) {
            VALUE val;
            rb_block_t block = *orig_block;

            GetISeqPtr(tsiseqval, tsiseq);
            block.iseq = tsiseq;
            th->cfp->lfp[0] = GC_GUARDED_PTR(&block);

            argv[0] = beg;
            argv[1] = end;
            val = th_invoke_yield(th, 2, argv);

            if (val == Qundef) {
                return range;
            }
            else {
                return val;
            }
        }
    }
    return Qundef;
}
/* Integer#times block inlining: yield the counter range [0, num) to a
 * specialized iseq when the block allows it. */
VALUE
yarv_invoke_Integer_times_special_block(VALUE num)
{
    rb_thread_t *th = GET_THREAD();
    rb_block_t *orig_block = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);

    if (orig_block && BUILTIN_TYPE(orig_block->iseq) != T_NODE) {
        VALUE tsiseqval = yarv_iseq_special_block(orig_block->iseq, build_Integer_times_node);
        rb_iseq_t *tsiseq;
        VALUE argv[2], val;

        if (tsiseqval) {
            rb_block_t block = *orig_block;

            GetISeqPtr(tsiseqval, tsiseq);
            block.iseq = tsiseq;
            th->cfp->lfp[0] = GC_GUARDED_PTR(&block);

            argv[0] = INT2FIX(0);
            argv[1] = num;
            val = th_invoke_yield(th, 2, argv);

            if (val == Qundef) {
                return num;
            }
            else {
                return val;
            }
        }
    }
    return Qundef;
}
static inline rb_control_frame_t *
vm_push_frame(rb_thread_t *th, const rb_iseq_t *iseq, VALUE type,
              VALUE self, VALUE specval, const VALUE *pc,
              VALUE *sp, VALUE *lfp, int local_size)
{
    rb_control_frame_t * const cfp = th->cfp - 1;
    int i;

    if ((void *)(sp + local_size) >= (void *)cfp) {
        rb_exc_raise(sysstack_error);
    }
    th->cfp = cfp;

    /* setup vm value stack */

    /* nil initialize */
    for (i=0; i < local_size; i++) {
        *sp = Qnil;
        sp++;
    }

    /* set special val */
    *sp = GC_GUARDED_PTR(specval);

    if (lfp == 0) {
        lfp = sp;
    }

    /* setup vm control frame stack */

    cfp->pc = (VALUE *)pc;
    cfp->sp = sp + 1;
    cfp->bp = sp + 1;
    cfp->iseq = (rb_iseq_t *) iseq;
    cfp->flag = type;
    cfp->self = self;
    cfp->lfp = lfp;
    cfp->dfp = sp;
    cfp->block_iseq = 0;
    cfp->proc = 0;
    cfp->me = 0;

#define COLLECT_PROFILE 0
#if COLLECT_PROFILE
    cfp->prof_time_self = clock();
    cfp->prof_time_chld = 0;
#endif

    if (VMDEBUG == 2) {
        SDR();
    }

    return cfp;
}
/* collect_caller_bindings callback for C-function frames: binding and iseq
 * are not available for cfuncs, so only self, class and the cfp are recorded. */
static void
collect_caller_bindings_cfunc(void *arg, const rb_control_frame_t *cfp, ID mid)
{
    struct collect_caller_bindings_data *data = (struct collect_caller_bindings_data *)arg;
    VALUE frame = rb_ary_new2(5);

    rb_ary_store(frame, CALLER_BINDING_SELF, cfp->self);
    rb_ary_store(frame, CALLER_BINDING_CLASS, get_klass(cfp));
    rb_ary_store(frame, CALLER_BINDING_BINDING, Qnil); /* not available */
    rb_ary_store(frame, CALLER_BINDING_ISEQ, Qnil); /* not available */
    rb_ary_store(frame, CALLER_BINDING_CFP, GC_GUARDED_PTR(cfp));

    rb_ary_push(data->ary, frame);
}
/* Wrap *block in a new Proc: if the frame itself was passed a block, turn
 * that block into a Proc first (blockprocval) so it survives, then move the
 * frame's environment to the heap and copy the block fields into the Proc. */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    if (GC_GUARDED_PTR_REF(cfp->lfp[0])) {
        rb_proc_t *p;

        blockprocval = vm_make_proc_from_block(
            th, (rb_block_t *)GC_GUARDED_PTR_REF(*cfp->lfp));

        GetProcPtr(blockprocval, p);
        *cfp->lfp = GC_GUARDED_PTR(&p->block);
    }

    envval = rb_vm_make_env_object(th, cfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);

    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.lfp = block->lfp;
    proc->block.dfp = block->dfp;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->dfp");
        }
        if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->lfp");
        }
    }

    return procval;
}
/* Invoke a block from C: iseq blocks get a BLOCK (or LAMBDA) control frame
 * pushed and are run through vm_exec(); ifunc blocks (NODE) are dispatched
 * to vm_yield_with_cfunc(). */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        rb_control_frame_t *ncfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        int type = block_proc_is_lambda(block->proc) ?
            VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        rb_vm_set_finish_env(th);

        cfp = th->cfp;
        CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        ncfp = vm_push_frame(th, iseq, type,
                             self, GC_GUARDED_PTR(block->dfp),
                             iseq->iseq_encoded + opt_pc, cfp->sp + arg_size,
                             block->lfp, iseq->local_size - arg_size);
        ncfp->me = th->passed_me;
        th->passed_me = 0;
        th->passed_block = blockptr;

        if (cref) {
            th->cfp->dfp[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
/* Yield to an ifunc (C-level) block: pack the arguments according to
 * lambda-ness, materialize the passed block as a Proc if needed, push an
 * IFUNC frame and call the stored C function directly. */
static inline VALUE
vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockargptr)
{
    NODE *ifunc = (NODE *) block->iseq;
    VALUE val, arg, blockarg;
    int lambda = block_proc_is_lambda(block->proc);

    if (lambda) {
        arg = rb_ary_new4(argc, argv);
    }
    else if (argc == 0) {
        arg = Qnil;
    }
    else {
        arg = argv[0];
    }

    if (blockargptr) {
        if (blockargptr->proc) {
            blockarg = blockargptr->proc;
        }
        else {
            blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
        }
    }
    else {
        blockarg = Qnil;
    }

    vm_push_frame(th, (rb_iseq_t *)ifunc, VM_FRAME_MAGIC_IFUNC,
                  self, (VALUE)block->dfp,
                  0, th->cfp->sp, block->lfp, 1);

    if (blockargptr) {
        th->cfp->lfp[0] = GC_GUARDED_PTR((VALUE)blockargptr);
    }

    val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv, blockarg);

    th->cfp++; /* pop the IFUNC frame */
    return val;
}
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
{
    rb_iseq_t *iseq;
    rb_block_t * const block = th->base_block;

    GetISeqPtr(iseqval, iseq);

    /* for return */
    rb_vm_set_finish_env(th);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
                  GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
                  th->cfp->sp, block->lfp, iseq->local_size);

    if (cref) {
        th->cfp->dfp[-1] = (VALUE)cref;
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
/* Yield to the current block with self (and the cref) replaced, so the block
 * body runs with the given receiver under `under`. */
static VALUE
yield_under(VALUE under, VALUE self, VALUE values)
{
    rb_thread_t *th = GET_THREAD();
    rb_block_t block, *blockptr;
    NODE *cref = vm_cref_push(th, under, NOEX_PUBLIC);

    if ((blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0])) != 0) {
        block = *blockptr;
        block.self = self;
        th->cfp->lfp[0] = GC_GUARDED_PTR(&block);
    }

    if (values == Qundef) {
        return vm_yield_with_cref(th, 0, 0, cref);
    }
    else {
        return vm_yield_with_cref(th, RARRAY_LEN(values), RARRAY_PTR(values), cref);
    }
}
/* Build (or return the cached) specialized iseq for a block, using the given
 * node builder.  Only blocks with at most one argument and simple argument
 * handling qualify; a block caches at most one specialization, so a request
 * with a different builder returns 0. */
static VALUE
yarv_iseq_special_block(rb_iseq_t *iseq, void *builder)
{
#if OPT_BLOCKINLINING
    VALUE parent = Qfalse;
    VALUE iseqval;

    if (iseq->argc > 1 || iseq->arg_simple == 0) {
        /* argument check */
        return 0;
    }

    if (iseq->cached_special_block_builder) {
        if (iseq->cached_special_block_builder == builder) {
            return iseq->cached_special_block;
        }
        else {
            return 0;
        }
    }
    else {
        iseq->cached_special_block_builder = (void *)1;
    }

    if (iseq->parent_iseq) {
        parent = iseq->parent_iseq->self;
    }

    iseqval = rb_iseq_new_with_bopt(iseq->node, iseq->name, iseq->filename,
                                    parent, iseq->type, GC_GUARDED_PTR(builder));
    if (0) {
        printf("%s\n", RSTRING_PTR(ruby_iseq_disasm(iseqval)));
    }

    iseq->cached_special_block = iseqval;
    iseq->cached_special_block_builder = builder;
    return iseqval;
#else
    return 0;
#endif
}
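The cached_special_block / cached_special_block_builder fields above implement a single-slot cache: the first builder that specializes a block owns the slot, and any other builder is refused. A toy, self-contained sketch of that policy follows; every name in it is hypothetical, not a VM API.

#include <stdio.h>

/* Hypothetical stand-ins for the two cache fields on rb_iseq_t. */
struct toy_iseq {
    void *cached_builder;   /* builder that owns the cache, or (void *)1 while building */
    long  cached_block;     /* cached specialized result (0 = none) */
};

static long
toy_special_block(struct toy_iseq *iseq, void *builder, long (*build)(void))
{
    if (iseq->cached_builder) {
        /* one specialization per block: hit for the same builder, refuse others */
        return iseq->cached_builder == builder ? iseq->cached_block : 0;
    }
    iseq->cached_builder = (void *)1;   /* mark the slot as "being built" */
    iseq->cached_block = build();
    iseq->cached_builder = builder;     /* record the owning builder */
    return iseq->cached_block;
}

static long build_A(void) { return 100; }
static long build_B(void) { return 200; }

int
main(void)
{
    struct toy_iseq iseq = { 0, 0 };
    printf("%ld\n", toy_special_block(&iseq, (void *)build_A, build_A)); /* 100: built */
    printf("%ld\n", toy_special_block(&iseq, (void *)build_A, build_A)); /* 100: cache hit */
    printf("%ld\n", toy_special_block(&iseq, (void *)build_B, build_B)); /* 0: other builder refused */
    return 0;
}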
/* Move the environment (local variables) referenced by envptr from the VM
 * stack into a heap-allocated rb_env_t, recursively heapifying the previous
 * environment reachable through the guarded pointer first.  The stack slot
 * is replaced by the env object and cfp->lfp/dfp are redirected into it. */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            while (pcfp->dfp != penvptr) {
                pcfp++;
                if (pcfp->dfp == 0) {
                    SDR();
                    rb_bug("invalid dfp");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            cfp->lfp = pcfp->lfp;
            *envptr = GC_GUARDED_PTR(pcfp->dfp);
        }
    }

    /* allocate env */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    env->env_size = local_size + 1 + 2;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack);
        dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear value stack for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    *envptr = envval;   /* GC mark */
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;    /* frame self */
    nenvptr[2] = penvval;   /* frame prev env object */

    /* reset lfp/dfp in cfp */
    cfp->dfp = nenvptr;
    if (envptr == endptr) {
        cfp->lfp = nenvptr;
    }

    /* as Binding */
    env->block.self = cfp->self;
    env->block.lfp = cfp->lfp;
    env->block.dfp = cfp->dfp;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* TODO */
        env->block.iseq = 0;
    }

    return envval;
}