Module* module() {
  if(multiple_scopes_p()) {
    return top_scope_->module();
  } else {
    return scope->module();
  }
}
FREObject FlashRuby_eval(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[]) {
  // Pull the ActionScript string argument out as a UTF-8 buffer.
  uint32_t length = 0;
  const uint8_t* fl_str = NULL;
  FREGetObjectAsUTF8(argv[0], &length, &fl_str);

  // Build a synthetic call frame so the eval has a caller context.
  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(0);
  frame->prepare(0);
  frame->previous = NULL;
  frame->dispatch_data = NULL;
  frame->flags = 0;

  CompiledMethod* cm = CompiledMethod::create(state);
  cm->metadata(state, state->symbol("__script__"));
  cm->name(state, state->symbol("__script__"));
  frame->cm = cm;

  StackVariables* scope = ALLOCA_STACKVARIABLES(0);
  scope->initialize(G(main), cNil, G(object), 0);
  scope->on_heap_ = VariableScope::synthesize(state, cm, G(object), cNil, G(main), cNil,
                                              state->new_object<Tuple>(G(tuple)));
  frame->scope = scope;

  Arguments* arguments = new Arguments(state->symbol("script"), G(main), cNil, 0, 0);
  frame->arguments = arguments;

  state->set_call_frame(frame);

  // Wrap the source in a Ruby String and instance_eval it against main.
  String* str = String::create(state, (const char*)fl_str);
  Array* eval_args = Array::create(state, 1);
  eval_args->append(state, str);

  Object* result_obj = G(main)->send(state, frame, state->symbol("instance_eval"), eval_args);

  // Convert the result back into an ActionScript string.
  const char* result_c_str = result_obj->to_s(state)->c_str_null_safe(state);

  FREObject result_str;
  FRENewObjectFromUTF8(strlen(result_c_str), (const uint8_t*)result_c_str, &result_str);
  return result_str;
}
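// For context, an FREFunction like FlashRuby_eval only becomes callable from
// ActionScript once it appears in the function table returned by the
// extension's context initializer. The sketch below shows roughly what that
// wiring looks like; the initializer name, the exported "eval" name, and the
// table are assumptions for illustration, and only the FRENamedFunction /
// context-initializer shapes come from Adobe's FlashRuntimeExtensions.h.
static FRENamedFunction flashruby_functions[] = {
  { (const uint8_t*)"eval", NULL, &FlashRuby_eval }
};

void FlashRubyContextInitializer(void* extData, const uint8_t* ctxType, FREContext ctx,
                                 uint32_t* numFunctionsToSet,
                                 const FRENamedFunction** functionsToSet) {
  // Hand the runtime the table of native functions available on this context.
  *numFunctionsToSet = sizeof(flashruby_functions) / sizeof(flashruby_functions[0]);
  *functionsToSet = flashruby_functions;
}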
Object* MachineCode::execute_as_script(STATE, CompiledCode* code, CallFrame* previous) {
  MachineCode* mcode = code->machine_code();

  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);
  // Originally, I tried using msg.module directly, but what happens is if
  // super is used, that field is read. If you combine that with the method
  // being called recursively, msg.module can change, causing super() to
  // look in the wrong place.
  //
  // Thus, we have to cache the value in the StackVariables.
  scope->initialize(G(main), cNil, G(object), mcode->number_of_locals);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);
  frame->prepare(mcode->stack_size);

  Arguments args(state->symbol("__script__"), G(main), cNil, 0, 0);

  frame->previous = previous;
  frame->constant_scope_ = 0;
  frame->dispatch_data = 0;
  frame->compiled_code = code;
  frame->flags = 0;
  frame->optional_jit_data = 0;
  frame->top_scope_ = 0;
  frame->scope = scope;
  frame->arguments = &args;

  // Do NOT check if we should JIT this. We NEVER want to jit a script.

  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;

  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  // Don't generate profiling info here, it's expected
  // to be done by the caller.

  return (*mcode->run)(state, mcode, frame);
}
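// ALLOCA_STACKVARIABLES and ALLOCA_CALLFRAME are not defined in this section.
// Judging from the older snippets below that spell the allocation out by hand
// (sizeof(StackVariables) + number_of_locals * sizeof(Object*), via alloca),
// they presumably expand to something like the following sketch; treat these
// as assumptions, not the actual Rubinius definitions.
#define ALLOCA_STACKVARIABLES(locals) \
  reinterpret_cast<StackVariables*>( \
      alloca(sizeof(StackVariables) + (sizeof(Object*) * (locals))))

#define ALLOCA_CALLFRAME(stack_size) \
  reinterpret_cast<InterpreterCallFrame*>( \
      alloca(sizeof(InterpreterCallFrame) + (sizeof(Object*) * (stack_size))))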
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* env, Arguments& args,
                                              BlockInvocation& invocation)
{
  // Don't use env->machine_code() because it might lock and the work should
  // already be done.
  MachineCode* const mcode = env->compiled_code_->machine_code();

  if(!mcode) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  if(mcode->call_count >= 0) {
    if(mcode->call_count >= state->shared().config.jit_threshold_compile) {
      OnStack<1> os(state, env);

      G(jit)->compile_soon(state, env->compiled_code(), previous,
                           invocation.self->direct_class(state), env, true);
    } else {
      mcode->call_count++;
    }
  }
#endif

  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);

  Module* mod = invocation.module;
  if(!mod) mod = env->module();

  Object* block = cNil;
  if(VariableScope* vs = env->top_scope_) {
    if(!vs->nil_p()) block = vs->block();
  }

  scope->initialize(invocation.self, block, mod, mcode->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);

  frame->prepare(mcode->stack_size);

  frame->previous = previous;
  frame->constant_scope_ = invocation.constant_scope;

  frame->arguments = &args;
  frame->dispatch_data = env;
  frame->compiled_code = env->compiled_code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cMultipleScopes | CallFrame::cBlock;

  if(!GenericArguments::call(state, frame, mcode, scope, args, invocation.flags)) {
    if(state->vm()->thread_state()->raise_reason() == cNone) {
      Exception* exc =
        Exception::make_argument_error(state, mcode->required_args, args.total(), mcode->name());
      exc->locations(state, Location::from_call_stack(state, previous));
      state->raise_exception(exc);
    }
    return NULL;
  }

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->singleton())) {
        mod = ma;
      }
    }

    OnStack<2> os(state, env, mod);

    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    tooling::BlockEntry method(state, env, mod);
    return (*mcode->run)(state, mcode, frame);
  } else {
    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    return (*mcode->run)(state, mcode, frame);
  }
#else
  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;
  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  return (*mcode->run)(state, mcode, frame);
#endif
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* const env, Arguments& args,
                                              BlockInvocation& invocation)
{
  VMMethod* const vmm = env->vmmethod(state);

  if(!vmm) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_soon(state, env->code(), env);
    } else {
      vmm->call_count++;
    }
  }
#endif

  size_t scope_size = sizeof(StackVariables) + (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope = reinterpret_cast<StackVariables*>(alloca(scope_size));

  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(), mod, vmm->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->static_scope_ = invocation.static_scope;

  frame->arguments = &args;
  frame->dispatch_data = reinterpret_cast<BlockEnvironment*>(env);
  frame->cm = env->code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomStaticScope
                                  | CallFrame::cMultipleScopes
                                  | CallFrame::cBlock;

  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  if(state->detect_stack_condition(frame)) {
    if(!state->check_interrupts(frame, frame)) return NULL;
  }

  state->global_lock().checkpoint(state, frame);

  if(unlikely(state->interrupts.check)) {
    state->interrupts.checked();
    if(state->interrupts.perform_gc) {
      state->interrupts.perform_gc = false;
      state->collect_maybe(frame);
    }
  }

#ifdef RBX_PROFILER
  if(unlikely(state->tooling())) {
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->attached_instance())) {
        mod = ma;
      }
    }

    tooling::BlockEntry method(state, env, mod);
    return (*vmm->run)(state, vmm, frame);
  } else {
    return (*vmm->run)(state, vmm, frame);
  }
#else
  return (*vmm->run)(state, vmm, frame);
#endif
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* const env, Arguments& args,
                                              BlockInvocation& invocation)
{
  if(!env->vmm) {
    env->method_->formalize(state, false);
    env->vmm = env->method_->backend_method();

    // Not sure why we hit this case currently, so just disable the JIT
    // for them altogether.
    env->vmm->call_count = -1;
  }

  VMMethod* const vmm = env->vmm;

#ifdef ENABLE_LLVM
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);

      if(state->shared.config.jit_inline_blocks) {
        if(VMMethod* parent = vmm->parent()) {
          while(VMMethod* next = parent->parent()) {
            parent = next;
          }

          if(parent->call_count >= 200) {
            ls->compile_soon(state, parent);
          }
        }
      }

      ls->compile_soon(state, vmm, env);
    } else {
      vmm->call_count++;
    }
  }
#endif

  size_t scope_size = sizeof(StackVariables) + (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope = reinterpret_cast<StackVariables*>(alloca(scope_size));

  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(), mod, vmm->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->static_scope_ = invocation.static_scope;

  frame->msg = NULL;
  frame->cm = env->method_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomStaticScope | CallFrame::cMultipleScopes;

#ifdef RBX_PROFILER
  if(unlikely(state->shared.profiling())) {
    profiler::MethodEntry method(state, env->top_scope_->method()->name(),
                                 scope->module(), env->method_);
    return (*vmm->run)(state, vmm, frame, args);
  } else {
    return (*vmm->run)(state, vmm, frame, args);
  }
#else
  return (*vmm->run)(state, vmm, frame, args);
#endif
}
Object* MachineCode::execute_specialized(STATE, CallFrame* previous,
                                         Executable* exec, Module* mod, Arguments& args)
{
  CompiledCode* code = as<CompiledCode>(exec);
  MachineCode* mcode = code->machine_code();

  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);
  // Originally, I tried using msg.module directly, but what happens is if
  // super is used, that field is read. If you combine that with the method
  // being called recursively, msg.module can change, causing super() to
  // look in the wrong place.
  //
  // Thus, we have to cache the value in the StackVariables.
  scope->initialize(args.recv(), args.block(), mod, mcode->number_of_locals);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);

  // If argument handling fails..
  if(ArgumentHandler::call(state, mcode, scope, args) == false) {
    Exception* exc =
      Exception::make_argument_error(state, mcode->total_args, args.total(), args.name());
    exc->locations(state, Location::from_call_stack(state, previous));
    state->raise_exception(exc);
    return NULL;
  }

  frame->prepare(mcode->stack_size);

  frame->previous = previous;
  frame->constant_scope_ = 0;
  frame->dispatch_data = 0;
  frame->compiled_code = code;
  frame->flags = 0;
  frame->optional_jit_data = 0;
  frame->top_scope_ = 0;
  frame->scope = scope;
  frame->arguments = &args;

  GCTokenImpl gct;

#ifdef ENABLE_LLVM
  // A negative call_count means we've disabled usage based JIT
  // for this method.
  if(mcode->call_count >= 0) {
    if(mcode->call_count >= state->shared().config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);

      OnStack<3> os(state, exec, mod, code);

      ls->compile_callframe(state, gct, code, frame);
    } else {
      mcode->call_count++;
    }
  }
#endif

  OnStack<3> os(state, exec, mod, code);

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    tooling::MethodEntry method(state, exec, mod, args, code);

    RUBINIUS_METHOD_ENTRY_HOOK(state, mod, args.name(), previous);
    Object* result = (*mcode->run)(state, mcode, frame);
    RUBINIUS_METHOD_RETURN_HOOK(state, mod, args.name(), previous);
    return result;
  } else {
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    RUBINIUS_METHOD_ENTRY_HOOK(state, mod, args.name(), previous);
    Object* result = (*mcode->run)(state, mcode, frame);
    RUBINIUS_METHOD_RETURN_HOOK(state, mod, args.name(), previous);
    return result;
  }
#else
  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  RUBINIUS_METHOD_ENTRY_HOOK(state, mod, args.name(), previous);
  Object* result = (*mcode->run)(state, mcode, frame);
  RUBINIUS_METHOD_RETURN_HOOK(state, mod, args.name(), previous);
  return result;
#endif
}
VariableScope* promote_scope(STATE) {
  if(!scope) rubinius::bug("bad CallFrame to promote");
  if(VariableScope* vs = scope->on_heap()) return vs;
  return promote_scope_full(state);
}
Module* module() { return scope->module(); }
Object* self() { return scope->self(); }
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* env, Arguments& args,
                                              BlockInvocation& invocation)
{
  // Don't use env->machine_code() because it might lock and the work should
  // already be done.
  MachineCode* const mcode = env->compiled_code_->machine_code();

  if(!mcode) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  if(mcode->call_count >= 0) {
    if(mcode->call_count >= state->shared().config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      GCTokenImpl gct;

      OnStack<1> os(state, env);

      ls->compile_soon(state, gct, env->compiled_code(), previous,
                       invocation.self->lookup_begin(state), env, true);
    } else {
      mcode->call_count++;
    }
  }
#endif

  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);

  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(), mod, mcode->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);

  frame->prepare(mcode->stack_size);

  frame->previous = previous;
  frame->constant_scope_ = invocation.constant_scope;

  frame->arguments = &args;
  frame->dispatch_data = env;
  frame->compiled_code = env->compiled_code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomConstantScope
                                  | CallFrame::cMultipleScopes
                                  | CallFrame::cBlock;

  // TODO: this is a quick hack to process block arguments in 1.9.
  if(!LANGUAGE_18_ENABLED(state)) {
    if(!GenericArguments::call(state, frame, mcode, scope, args, invocation.flags)) {
      return NULL;
    }
  }

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->attached_instance())) {
        mod = ma;
      }
    }

    OnStack<2> os(state, env, mod);

    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    tooling::BlockEntry method(state, env, mod);
    return (*mcode->run)(state, mcode, frame);
  } else {
    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    return (*mcode->run)(state, mcode, frame);
  }
#else
  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;
  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  return (*mcode->run)(state, mcode, frame);
#endif
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* env, Arguments& args,
                                              BlockInvocation& invocation)
{
  // Don't use env->vmmethod() because it might lock and the work should
  // already be done.
  VMMethod* const vmm = env->code_->backend_method();

  if(!vmm) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared().config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_soon(state, env->code(), env, true);
    } else {
      vmm->call_count++;
    }
  }
#endif

  size_t scope_size = sizeof(StackVariables) + (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope = reinterpret_cast<StackVariables*>(alloca(scope_size));

  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(), mod, vmm->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->static_scope_ = invocation.static_scope;

  frame->arguments = &args;
  frame->dispatch_data = reinterpret_cast<BlockEnvironment*>(env);
  frame->cm = env->code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomStaticScope
                                  | CallFrame::cMultipleScopes
                                  | CallFrame::cBlock;
  frame->stack_top_ptr_ptr = NULL;

  // TODO: this is a quick hack to process block arguments in 1.9.
  if(!LANGUAGE_18_ENABLED(state)) {
    if(!GenericArguments::call(state, frame, vmm, scope, args, invocation.flags)) {
      return NULL;
    }
  }

  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;

  if(state->detect_stack_condition(frame)) {
    if(!state->check_interrupts(gct, frame, frame)) return NULL;
  }

  state->checkpoint(gct, frame);

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->attached_instance())) {
        mod = ma;
      }
    }

    tooling::BlockEntry method(state, env, mod);
    return (*vmm->run)(state, vmm, frame);
  } else {
    return (*vmm->run)(state, vmm, frame);
  }
#else
  return (*vmm->run)(state, vmm, frame);
#endif
}
Object* VMMethod::execute_specialized(STATE, CallFrame* previous, Dispatch& msg, Arguments& args) {
  CompiledMethod* cm = as<CompiledMethod>(msg.method);
  VMMethod* vmm = cm->backend_method();

#ifdef ENABLE_LLVM
  // A negative call_count means we've disabled usage based JIT
  // for this method.
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_callframe(state, cm, previous);
    } else {
      vmm->call_count++;
    }
  }
#endif

  size_t scope_size = sizeof(StackVariables) + (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope = reinterpret_cast<StackVariables*>(alloca(scope_size));
  // Originally, I tried using msg.module directly, but what happens is if
  // super is used, that field is read. If you combine that with the method
  // being called recursively, msg.module can change, causing super() to
  // look in the wrong place.
  //
  // Thus, we have to cache the value in the StackVariables.
  scope->initialize(args.recv(), args.block(), msg.module, vmm->number_of_locals);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  // If argument handling fails..
  if(ArgumentHandler::call(state, vmm, scope, args) == false) {
    Exception* exc =
      Exception::make_argument_error(state, vmm->required_args, args.total(), msg.name);
    exc->locations(state, Location::from_call_stack(state, previous));
    state->thread_state()->raise_exception(exc);
    return NULL;
  }

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->flags = 0;
  frame->arguments = &args;
  frame->dispatch_data = &msg;
  frame->cm = cm;
  frame->scope = scope;

#ifdef RBX_PROFILER
  if(unlikely(state->shared.profiling())) {
    profiler::MethodEntry method(state, msg, args, cm);
    return (*vmm->run)(state, vmm, frame);
  } else {
    return (*vmm->run)(state, vmm, frame);
  }
#else
  return (*vmm->run)(state, vmm, frame);
#endif
}
VariableScope* promote_scope(STATE) {
  if(VariableScope* vs = scope->on_heap()) return vs;
  return promote_scope_full(state);
}