// Invoke this Proc against an explicit receiver. The receiver travels as
// the first entry in args; lambdas additionally enforce strict arity.
Object* Proc::call_on_object(STATE, CallFrame* call_frame,
                             Executable* exec, Module* mod, Arguments& args) {
  const bool is_lambda = !lambda_->nil_p();
  int invocation_flags = 0;

  if(is_lambda) {
    invocation_flags = CallFrame::cIsLambda;

    // One slot is consumed by the receiver, so the block itself sees
    // args.total() - 1 arguments.
    int req = block_->code()->required_args()->to_native();
    if(args.total() < 1 || (req >= 0 && (size_t)req != args.total() - 1)) {
      Exception* error = Exception::make_argument_error(
          state, req, args.total(), state->symbol("__block__"));
      error->locations(state, Location::from_call_stack(state, call_frame));
      state->thread_state()->raise_exception(error);
      return NULL;
    }
  }

  // Procs may be constructed without a block attached; refuse to run those.
  if(block_->nil_p()) {
    Exception* error = Exception::make_type_error(
        state, BlockEnvironment::type, block_, "Invalid proc style");
    error->locations(state, Location::from_call_stack(state, call_frame));
    state->thread_state()->raise_exception(error);
    return NULL;
  }

  return block_->call_on_object(state, call_frame, args, invocation_flags);
}
// Drain pending asynchronous events for this thread: deferred signals, an
// interrupt-delivered exception, and thread-kill requests. Returns false
// when an exception/kill has been raised and the caller must unwind.
bool State::process_async(CallFrame* call_frame) {
  set_call_frame(call_frame);
  vm_->clear_check_local_interrupts();

  if(vm_->run_signals_) {
    if(!vm_->shared.signal_handler()->deliver_signals(this, call_frame)) {
      return false;
    }
  }

  Exception* pending = vm_->interrupted_exception_.get();
  if(!pending->nil_p()) {
    vm_->interrupted_exception_.set(nil<Exception>());

    // Attach a backtrace only when the exception does not already carry one.
    if(pending->locations()->nil_p() || pending->locations()->size() == 0) {
      pending->locations(this, Location::from_call_stack(this, call_frame));
    }

    vm_->thread_state_.raise_exception(pending);
    return false;
  }

  if(vm_->interrupt_by_kill()) {
    vm_->clear_interrupt_by_kill();
    vm_->thread_state_.raise_thread_kill();
    return false;
  }

  return true;
}
// Deliver queued signals, then report whether the thread is clear to
// continue. A pending exception gets a backtrace attached here if it has
// none; any raise reason other than cNone aborts the caller.
bool VM::process_async(CallFrame* call_frame) {
  check_local_interrupts = false;

  if(run_signals_) {
    shared.signal_handler()->deliver_signals(call_frame);
  }

  switch(thread_state_.raise_reason()) {
  case cNone:
    return true;

  case cException: {
    Exception* pending = thread_state_.current_exception();
    // Only write the locations if there are none.
    if(pending->locations()->nil_p() || pending->locations()->size() == 0) {
      pending->locations(this, Location::from_call_stack(this, call_frame));
    }
    return false;
  }

  default:
    return false;
  }
}
/* Run when a NativeFunction is executed. Executes the related C function. */
Object* NativeFunction::execute(STATE, Executable* exec, Module* mod, Arguments& args) {
  NativeFunction* nfunc = as<NativeFunction>(exec);

  try {
    // Pin exec/mod so the GC can see (and update) them while the C
    // function runs.
    OnStack<2> os(state, exec, mod);

#ifdef RBX_PROFILER
    if(unlikely(state->vm()->tooling())) {
      // Tooling enabled: wrap the call in a MethodEntry so profilers
      // observe this FFI invocation.
      tooling::MethodEntry method(state, exec, mod, args);

      RUBINIUS_METHOD_FFI_ENTRY_HOOK(state, mod, args.name());
      Object* ret = nfunc->call(state, args);
      RUBINIUS_METHOD_FFI_RETURN_HOOK(state, mod, args.name());
      return ret;
    } else {
      RUBINIUS_METHOD_FFI_ENTRY_HOOK(state, mod, args.name());
      Object* ret = nfunc->call(state, args);
      RUBINIUS_METHOD_FFI_RETURN_HOOK(state, mod, args.name());
      return ret;
    }
#else
    RUBINIUS_METHOD_FFI_ENTRY_HOOK(state, mod, args.name());
    Object* ret = nfunc->call(state, args);
    RUBINIUS_METHOD_FFI_RETURN_HOOK(state, mod, args.name());
    return ret;
#endif

  } catch(TypeError &e) {
    // Translate the C++ TypeError into a Ruby exception carrying a
    // backtrace, then raise it on the VM.
    Exception* exc = Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state));
    state->raise_exception(exc);

    // Fire the return hook even on failure so entry/return events stay
    // balanced for observers.
    RUBINIUS_METHOD_FFI_RETURN_HOOK(state, mod, args.name());
    return NULL;
  }
}
/* Run when a NativeFunction is executed. Executes the related C function. */
Object* NativeFunction::execute(STATE, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  NativeFunction* nfunc = as<NativeFunction>(msg.method);

  // Record the frame so backtraces and GC can see the caller while the
  // C function runs.
  state->set_call_frame(call_frame);

  try {
#ifdef RBX_PROFILER
    if(unlikely(state->tooling())) {
      // Tooling enabled: wrap the call so profilers observe entry/exit.
      tooling::MethodEntry method(state, msg, args);
      return nfunc->call(state, args, msg, call_frame);
    } else {
      return nfunc->call(state, args, msg, call_frame);
    }
#else
    return nfunc->call(state, args, msg, call_frame);
#endif
  } catch(TypeError &e) {
    // Translate the C++ TypeError into a Ruby exception with a backtrace.
    Exception* exc = Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));
    state->thread_state()->raise_exception(exc);
    return NULL;
  }
}
// Pretty-print an Exception for the inspector: header, message, then the
// captured backtrace locations.
void Exception::Info::show(STATE, Object* self, int level) {
  Exception* exc = as<Exception>(self);

  class_header(state, self);

  ++level;
  indent_attribute(level, "message");
  exc->message()->show(state, level);

  indent_attribute(level, "locations");
  exc->locations()->show_simple(state, level);

  close_body(level);
}
// JIT helper: build an ArgumentError describing the arity mismatch, record
// where it happened, and hand it to the VM to raise. Always returns NULL.
Object* rbx_arg_error(STATE, CallFrame* call_frame, Arguments& args, int required) {
  Exception* error =
      Exception::make_argument_error(state, required, args.total(), args.name());
  error->locations(state, Location::from_call_stack(state, call_frame));
  state->raise_exception(error);
  return NULL;
}
// JIT helper (Dispatch variant): raise an ArgumentError for an arity
// mismatch, tagging it with a backtrace taken from the current frame.
Object* rbx_arg_error(STATE, CallFrame* call_frame, Dispatch& msg, Arguments& args, int required) {
  Exception* error =
      Exception::make_argument_error(state, required, args.total(), msg.name);
  error->locations(state, System::vm_backtrace(state, Fixnum::from(0), call_frame));
  state->thread_state()->raise_exception(error);
  return NULL;
}
// Invoke this Proc against an explicit receiver (passed as the first
// argument). In lambda mode the arity is checked strictly, and a non-local
// return/break terminates at the lambda itself.
Object* Proc::call_on_object(STATE, Executable* exec, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  const bool is_lambda = !lambda_->nil_p();
  int invocation_flags = 0;

  if(is_lambda) {
    invocation_flags = CallFrame::cIsLambda;

    // One slot is consumed by the receiver.
    int req = block_->method()->required_args()->to_native();
    if(args.total() < 1 || (req >= 0 && (size_t)req != args.total() - 1)) {
      Exception* error = Exception::make_argument_error(
          state, req, args.total(), state->symbol("__block__"));
      error->locations(state, System::vm_backtrace(state, Fixnum::from(0), call_frame));
      state->thread_state()->raise_exception(error);
      return NULL;
    }
  }

  // A Proc may have been created without a block; refuse to run it.
  if(block_->nil_p()) {
    Exception* error = Exception::make_type_error(
        state, BlockEnvironment::type, block_, "Invalid proc style");
    error->locations(state, System::vm_backtrace(state, Fixnum::from(0), call_frame));
    state->thread_state()->raise_exception(error);
    return NULL;
  }

  Object* result = block_->call_on_object(state, call_frame, args, invocation_flags);

  // Lambda semantics: a return/break raised inside becomes this call's
  // ordinary return value.
  if(is_lambda && !result) {
    RaiseReason reason = state->thread_state()->raise_reason();
    if(reason == cReturn || reason == cBreak) {
      // TODO investigate if we should check the destination_scope here.
      // It doesn't appear that MRI checks anything similar.
      result = state->thread_state()->raise_value();
      state->thread_state()->clear_exception(true);
    }
  }

  return result;
}
// JIT helper: duplicate obj as a String. A non-String argument raises a
// Ruby TypeError (converted from the C++ exception) and yields NULL.
Object* rbx_string_dup(STATE, CallFrame* call_frame, Object* obj) {
  try {
    String* str = as<String>(obj);
    return str->string_dup(state);
  } catch(TypeError& e) {
    Exception* error = Exception::make_type_error(state, e.type, e.object, e.reason);
    error->locations(state, Location::from_call_stack(state, call_frame));
    state->thread_state()->raise_exception(error);
    return NULL;
  }
}
// Primitive behind Proc#call. In lambda mode the arity is checked up
// front (honoring the splat setting); then the call is routed to either
// the attached block, a bound NativeMethod, or a generic #__yield__ send.
Object* Proc::call_prim(STATE, Executable* exec, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  bool lambda_style = RTEST(lambda_);
  int flags = 0;

  // Check the arity in lambda mode
  if(lambda_style) {
    flags = CallFrame::cIsLambda;
    int required = block_->method()->required_args()->to_native();

    bool arity_ok = false;
    if(Fixnum* fix = try_as<Fixnum>(block_->method()->splat())) {
      // A splat is present: -2 accepts anything, otherwise any count at
      // or above the required minimum is fine.
      if(fix->to_native() == -2) {
        arity_ok = true;
      } else if(args.total() >= (size_t)required) {
        arity_ok = true;
      }

    // Bug-to-bug compatibility with MRI: when required is 1, any number
    // of args is accepted (presumably they arrive as an array).
    } else if(required == 1) {
      arity_ok = true;
    } else {
      arity_ok = ((size_t)required == args.total());
    }

    if(!arity_ok) {
      Exception* exc = Exception::make_argument_error(
          state, required, args.total(), state->symbol("__block__"));
      exc->locations(state, Location::from_call_stack(state, call_frame));
      state->thread_state()->raise_exception(exc);
      return NULL;
    }
  }

  Object* ret;
  if(bound_method_->nil_p()) {
    // Plain proc/lambda: run the attached block directly.
    ret = block_->call(state, call_frame, args, flags);
  } else if(NativeMethod* nm = try_as<NativeMethod>(bound_method_)) {
    // Bound to a C-extension method: dispatch #call on it explicitly.
    Dispatch dis(state->symbol("call"));
    dis.method = nm;
    dis.module = G(rubinius);
    ret = nm->execute(state, call_frame, dis, args);
  } else {
    // Anything else bound: fall back to sending #__yield__.
    Dispatch dis(state->symbol("__yield__"));
    ret = dis.send(state, call_frame, args);
  }

  return ret;
}
// Handle pending asynchronous thread events: an interrupt-delivered
// exception, a thread-kill request, and debugger single-stepping.
// Returns true when an exception/kill was raised and the caller must
// unwind; false when execution may continue.
bool VM::check_thread_raise_or_kill(STATE) {
  Exception* exc = interrupted_exception();
  if(!exc->nil_p()) {
    clear_interrupted_exception();

    // Only write the locations if there are none.
    if(exc->locations()->nil_p() || exc->locations()->size() == 0) {
      exc->locations(this, Location::from_call_stack(state));
    }

    thread_state_.raise_exception(exc);
    return true;
  }

  if(interrupt_by_kill()) {
    Fiber* fib = current_fiber.get();
    if(fib->nil_p() || fib->root_p()) {
      // Running on the root fiber: the kill is consumed here.
      clear_interrupt_by_kill();
    } else {
      // Running on a non-root fiber: leave the interrupt pending so the
      // kill is seen again once control returns to the root fiber.
      set_check_local_interrupts();
    }
    thread_state_.raise_thread_kill();
    return true;
  }

  // If the current thread is trying to step, debugger wise, then assist!
  if(thread_step()) {
    clear_thread_step();
    if(!Helpers::yield_debugger(state, cNil)) return true;
  }

  return false;
}
// Run a C-extension method: validate arity, install a native frame and a
// longjmp-based exception point, invoke the C function, then restore the
// previous native/call frame state.
Object* NativeMethod::executor_implementation(STATE, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  NativeMethod* nm = as<NativeMethod>(msg.method);

  // A fixed arity (>= 0) must match the argument count exactly;
  // negative arities are variadic and skip this check.
  int arity = nm->arity()->to_int();
  if(arity >= 0 && (size_t)arity != args.total()) {
    Exception* exc = Exception::make_argument_error(
        state, arity, args.total(), msg.name);
    exc->locations(state, System::vm_backtrace(state, Fixnum::from(1), call_frame));
    state->thread_state()->raise_exception(exc);
    return NULL;
  }

  NativeMethodEnvironment* env = native_method_environment.get();
  NativeMethodFrame nmf(env->current_native_frame());

  // Save the environment's current frame/block so they can be restored
  // after the C call, then install ours.
  CallFrame* saved_frame = env->current_call_frame();
  Object* saved_block = env->block();
  env->set_current_call_frame(call_frame);
  env->set_current_native_frame(&nmf);
  env->set_current_block(args.block());

  Object* ret;
  ExceptionPoint ep(env);
  // Longjmp target: C extensions raise by jumping back to this point.
  PLACE_EXCEPTION_POINT(ep);

  if(unlikely(ep.jumped_to())) {
    // An exception was raised inside the C extension.
    ret = NULL;
  } else {
#ifdef RBX_PROFILER
    if(unlikely(state->shared.profiling())) {
      // Profiler enabled: record entry/exit around the call.
      profiler::MethodEntry method(state, msg, args);
      ret = nm->call(state, env, args);
    } else {
      ret = nm->call(state, env, args);
    }
#else
    ret = nm->call(state, env, args);
#endif
  }

  // Restore the saved environment state and pop the exception point.
  env->set_current_block(saved_block);
  env->set_current_call_frame(saved_frame);
  env->set_current_native_frame(nmf.previous());
  ep.pop(env);

  return ret;
}
// Top-level entry into the bytecode interpreter for the current call
// frame. Any C++ exception escaping the instruction chain is converted
// into a Ruby exception and raised on the VM; returns 0 in that case.
intptr_t Interpreter::execute(STATE, MachineCode* const machine_code) {
  InterpreterState is;
  Exception* exception = 0;
  intptr_t* opcodes = (intptr_t*)machine_code->opcodes;

  CallFrame* call_frame = state->vm()->call_frame();
  // Start one slot below stk so the first push lands on stk[0].
  call_frame->stack_ptr_ = call_frame->stk - 1;
  call_frame->machine_code = machine_code;
  call_frame->is = &is;

  try {
    // Dispatch into the instruction at the current ip; instructions chain
    // to one another and eventually return the method's result.
    return ((instructions::Instruction)opcodes[call_frame->ip()])(state, call_frame, opcodes);
  } catch(TypeError& e) {
    exception = Exception::make_type_error(state, e.type, e.object, e.reason);
    exception->locations(state, Location::from_call_stack(state));

    call_frame->scope->flush_to_heap(state);
  } catch(RubyException& exc) {
    // Keep an existing backtrace if the exception already carries one.
    if(exc.exception->locations()->nil_p()) {
      exc.exception->locations(state, Location::from_call_stack(state));
    }
    exception = exc.exception;
  } catch(std::exception& e) {
    exception = Exception::make_interpreter_error(state, e.what());
    exception->locations(state, Location::from_call_stack(state));

    call_frame->scope->flush_to_heap(state);
  } catch(...) {
    // Unknown foreign exception: report it rather than crashing.
    exception = Exception::make_interpreter_error(state, "unknown C++ exception thrown");
    exception->locations(state, Location::from_call_stack(state));

    call_frame->scope->flush_to_heap(state);
  }

  state->raise_exception(exception);
  return 0;
}
// Executor for Thunk: a zero-arity callable that simply yields its stored
// value. Any arguments at all produce an ArgumentError.
Object* Thunk::thunk_executor(STATE, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  Thunk* self = as<Thunk>(msg.method);

  if(args.total() == 0) return self->value();

  Exception* error =
      Exception::make_argument_error(state, 0, args.total(), msg.name);
  error->locations(state, Location::from_call_stack(state, call_frame));
  state->thread_state()->raise_exception(error);
  return NULL;
}
// Executor for Thunk (Executable variant): returns the stored value when
// called with no arguments, otherwise raises an ArgumentError.
Object* Thunk::thunk_executor(STATE, CallFrame* call_frame, Executable* exec, Module* mod, Arguments& args) {
  Thunk* self = as<Thunk>(exec);

  if(args.total() == 0) return self->value();

  Exception* error =
      Exception::make_argument_error(state, 0, args.total(), args.name());
  error->locations(state, Location::from_call_stack(state, call_frame));
  state->raise_exception(error);
  return NULL;
}
// Run this block against an explicit receiver, which rides along as the
// first argument. Raises an ArgumentError when the receiver is missing.
Object* BlockEnvironment::call_on_object(STATE, CallFrame* call_frame,
                                         Arguments& args, int flags) {
  if(args.total() < 1) {
    Exception* error = Exception::make_argument_error(
        state, 1, args.total(), state->symbol("__block__"));
    error->locations(state, Location::from_call_stack(state, call_frame));
    state->thread_state()->raise_exception(error);
    return NULL;
  }

  Object* self = args.shift(state);
  BlockInvocation invocation(self, code_->scope(), flags);
  return invoke(state, call_frame, this, args, invocation);
}
// Run this block against an explicit receiver supplied as the first
// argument; missing receiver raises an ArgumentError.
Object* BlockEnvironment::call_on_object(STATE, CallFrame* call_frame,
                                         Arguments& args, int flags) {
  if(args.total() < 1) {
    Exception* error = Exception::make_argument_error(
        state, 1, args.total(), state->symbol("__block__"));
    error->locations(state, System::vm_backtrace(state, Fixnum::from(0), call_frame));
    state->thread_state()->raise_exception(error);
    return NULL;
  }

  Object* self = args.shift(state);
  BlockInvocation invocation(self, method_->scope(), flags);
  return (*execute)(state, call_frame, this, args, invocation);
}
// Verify that reopening `cls` with the given superclass is consistent.
// A nil superclass means "reopen without checking"; a mismatch raises a
// TypeError and returns NULL.
static Class* check_superclass(STATE, CallFrame* call_frame, Class* cls, Object* super) {
  if(super->nil_p()) return cls;
  if(cls->true_superclass(state) == super) return cls;

  std::ostringstream message;
  message << "Superclass mismatch: given "
          << as<Module>(super)->debug_str(state)
          << " but previously set to "
          << cls->true_superclass(state)->debug_str(state);

  Exception* error = Exception::make_type_error(
      state, Class::type, super, message.str().c_str());
  error->locations(state, Location::from_call_stack(state, call_frame));
  state->raise_exception(error);
  return NULL;
}
// Run this block with an explicit receiver and StaticScope, both supplied
// as leading arguments (used by instance_eval-style invocation).
Object* BlockEnvironment::call_under(STATE, Executable* exec, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  if(args.total() < 2) {
    Exception* error = Exception::make_argument_error(
        state, 2, args.total(), state->symbol("__block__"));
    error->locations(state, Location::from_call_stack(state, call_frame));
    state->thread_state()->raise_exception(error);
    return NULL;
  }

  Object* self = args.shift(state);
  StaticScope* scope = as<StaticScope>(args.shift(state));

  BlockInvocation invocation(self, scope, 0);
  return invoke(state, call_frame, this, args, invocation);
}
// Frameless variant: run this block against the receiver supplied as the
// first argument, reusing the block's own constant scope.
Object* BlockEnvironment::call_on_object(STATE, Arguments& args, int flags) {
  if(args.total() < 1) {
    Exception* error = Exception::make_argument_error(
        state, 1, args.total(), compiled_code()->name());
    error->locations(state, Location::from_call_stack(state));
    state->raise_exception(error);
    return NULL;
  }

  Object* self = args.shift(state);
  BlockInvocation invocation(self, constant_scope(), flags);
  return invoke(state, this, args, invocation);
}
// Run this block with an explicit receiver and ConstantScope, both
// supplied as leading arguments (used by instance_eval-style invocation).
Object* BlockEnvironment::call_under(STATE, CallFrame* call_frame, Executable* exec, Module* mod, Arguments& args) {
  if(args.total() < 2) {
    Exception* error = Exception::make_argument_error(
        state, 2, args.total(), compiled_code_->name());
    error->locations(state, Location::from_call_stack(state, call_frame));
    state->raise_exception(error);
    return NULL;
  }

  Object* self = args.shift(state);
  ConstantScope* scope = as<ConstantScope>(args.shift(state));

  BlockInvocation invocation(self, scope, 0);
  return invoke(state, call_frame, this, args, invocation);
}
// Yield to this Proc. With no bound method, the attached block (or the
// regular #call path) runs; a bound NativeMethod/NativeFunction is
// invoked directly; anything else bound is a TypeError.
Object* Proc::yield(STATE, CallFrame* call_frame, Arguments& args) {
  if(bound_method_->nil_p()) {
    if(block_->nil_p()) {
      return call(state, call_frame, args);
    }
    int invocation_flags = CBOOL(lambda_) ? CallFrame::cIsLambda : 0;
    return block_->call(state, call_frame, args, invocation_flags);
  }

  if(NativeMethod* nm = try_as<NativeMethod>(bound_method_)) {
    return nm->execute(state, call_frame, nm, G(object), args);
  }
  if(NativeFunction* nf = try_as<NativeFunction>(bound_method_)) {
    return nf->call(state, args, call_frame);
  }

  Exception* error = Exception::make_type_error(
      state, BlockEnvironment::type, bound_method_,
      "NativeMethod nor NativeFunction bound to proc");
  error->locations(state, Location::from_call_stack(state, call_frame));
  state->raise_exception(error);
  return NULL;
}
// Yield to this Proc. A bound NativeMethod/NativeFunction is invoked
// directly; otherwise the attached block runs (ignoring lambda-ness, to
// match MRI), and a Proc with neither is a TypeError.
Object* Proc::yield(STATE, CallFrame* call_frame, Arguments& args) {
  if(!bound_method_->nil_p()) {
    if(NativeMethod* nm = try_as<NativeMethod>(bound_method_)) {
      return nm->execute(state, call_frame, nm, G(object), args);
    }
    if(NativeFunction* nf = try_as<NativeFunction>(bound_method_)) {
      return nf->call(state, args, call_frame);
    }
    return call(state, call_frame, args);
  }

  if(block_->nil_p()) {
    Exception* error = Exception::make_type_error(
        state, BlockEnvironment::type, block_, "No code bound to proc");
    error->locations(state, Location::from_call_stack(state, call_frame));
    state->raise_exception(error);
    return NULL;
  }

  // NOTE: to match MRI semantics, yield explicitly ignores lambda_ and
  // always invokes the block with non-lambda flags.
  return block_->call(state, call_frame, args, 0);
}
// Primitive behind Proc#call (older variant). Lambdas check arity (with
// an MRI-compatibility carve-out), then the call is routed to the
// attached block or a bound method via #call dispatch.
Object* Proc::call_prim(STATE, Executable* exec, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  bool lambda_style = !lambda_->nil_p();
  int flags = 0;

  // Check the arity in lambda mode
  if(lambda_style) {
    flags = CallFrame::cIsLambda;
    int required = block_->method()->required_args()->to_native();

    // Bug-to-bug compatibility with MRI: when required is 0 or 1, any
    // number of args is accepted. That is why the test starts at 2 here.
    if(required >= 2 && (size_t)required != args.total()) {
      Exception* exc = Exception::make_argument_error(
          state, required, args.total(), state->symbol("__block__"));
      exc->locations(state, System::vm_backtrace(state, Fixnum::from(0), call_frame));
      state->thread_state()->raise_exception(exc);
      return NULL;
    }
  }

  Object* ret;
  if(bound_method_->nil_p()) {
    // Plain proc/lambda: run the attached block directly.
    ret = block_->call(state, call_frame, args, flags);
  } else {
    // Bound method (e.g. from Method#to_proc): send #call on it.
    Dispatch dis(G(sym_call));
    ret = dis.send(state, call_frame, args);
  }

  // Lambda semantics: return/break raised inside the lambda terminates
  // at the lambda; convert the raise into an ordinary return value.
  if(lambda_style && !ret) {
    RaiseReason reason = state->thread_state()->raise_reason();
    if(reason == cReturn || reason == cBreak) {
      // TODO investigate if we should check the destination_scope here.
      // It doesn't appear that MRI checks anything similar.
      ret = state->thread_state()->raise_value();
      state->thread_state()->clear_exception(true);
    }
  }

  return ret;
}
// Frameless call_under: expects receiver, ConstantScope, and a
// visibility flag as the first three arguments.
Object* BlockEnvironment::call_under(STATE, Executable* exec, Module* mod, Arguments& args) {
  if(args.total() < 3) {
    Exception* error = Exception::make_argument_error(
        state, 3, args.total(), compiled_code()->name());
    error->locations(state, Location::from_call_stack(state));
    state->raise_exception(error);
    return NULL;
  }

  Object* self = args.shift(state);
  ConstantScope* scope = as<ConstantScope>(args.shift(state));
  Object* visibility = args.shift(state);

  int flags = CBOOL(visibility) ? CallFrame::cTopLevelVisibility : 0;
  BlockInvocation invocation(self, scope, flags);
  return invoke(state, this, args, invocation);
}
// Resume interpretation under the debugger: the DISPATCH macro checks
// for a breakpoint before every instruction. Exception/break/return
// unwinding is handled via the `exception:` label below.
Object* VMMethod::debugger_interpreter_continue(STATE,
                                                VMMethod* const vmm,
                                                CallFrame* const call_frame,
                                                int sp,
                                                InterpreterState& is,
                                                int current_unwind,
                                                UnwindInfo* unwinds)
{

  // Computed-goto label table for the instruction implementations.
#include "vm/gen/instruction_locations.hpp"

  GCTokenImpl gct;
  opcode* stream = vmm->opcodes;
  // Rebuild the operand-stack pointer from the saved stack depth.
  Object** stack_ptr = call_frame->stk + sp;

continue_to_run:
  try {

#undef DISPATCH
// Debugger dispatch: consult breakpoints before each instruction, then
// jump to the next instruction's label.
#define DISPATCH \
    if(Object* bp = call_frame->find_breakpoint(state)) { \
      if(!Helpers::yield_debugger(state, gct, call_frame, bp)) goto exception; \
    } \
    goto *insn_locations[stream[call_frame->inc_ip()]];

#undef next_int
#undef cache_ip
#undef flush_ip

#define next_int ((opcode)(stream[call_frame->inc_ip()]))
#define cache_ip(which)
#define flush_ip()

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc = Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));
    state->raise_exception(exc);

    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state, Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // No reason to be here!
  rubinius::bug("Control flow error in interpreter");

  // Instructions jump here when a raise/break/return needs unwinding.
exception:
  ThreadState* th = state->vm()->thread_state();
  //
  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      // Resume at the innermost registered handler (rescue/ensure).
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      // No handler in this frame; propagate to the caller.
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      if(info->for_ensure()) {
        stack_position(info->stack_depth);
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else { // It's cBreak thats not for us!
      call_frame->scope->flush_to_heap(state);
      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;
  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
// Address-threaded bytecode interpreter. Called once at boot with
// state == 0 purely to export the computed-goto label table; afterwards
// it runs instructions by jumping through pre-resolved label addresses.
Object* VMMethod::interpreter(STATE,
                              VMMethod* const vmm,
                              InterpreterCallFrame* const call_frame)
{

#include "vm/gen/instruction_locations.hpp"

  if(unlikely(state == 0)) {
    // Bootstrap call: publish the label table and do no interpretation.
    VMMethod::instructions = const_cast<void**>(insn_locations);
    return NULL;
  }

  InterpreterState is;
  GCTokenImpl gct;

  // ip_ptr walks vmm->addresses (label addresses + inline operands)
  // rather than raw opcodes — this is the threaded-dispatch fast path.
  register void** ip_ptr = vmm->addresses;

  // Start one slot below stk so the first push lands on stk[0].
  Object** stack_ptr = call_frame->stk - 1;

  int current_unwind = 0;
  UnwindInfo unwinds[kMaxUnwindInfos];

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH goto **ip_ptr++

#undef next_int
#define next_int ((opcode)(*ip_ptr++))
#define cache_ip(which) ip_ptr = vmm->addresses + which
#define flush_ip() call_frame->calculate_ip(ip_ptr)

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc = Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));
    state->raise_exception(exc);

    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state, Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // There is no reason to be here. Either the bytecode loop exits,
  // or it jumps to exception;
  rubinius::bug("Control flow error in interpreter");

  // If control finds it's way down here, there is an exception.
exception:
  ThreadState* th = state->vm()->thread_state();
  //
  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      // Resume at the innermost registered handler (rescue/ensure).
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      // No handler in this frame; propagate to the caller.
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      if(info->for_ensure()) {
        stack_position(info->stack_depth);
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else { // Not for us!
      call_frame->scope->flush_to_heap(state);
      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;
  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
// Run a C-extension method (capi variant): validate arity, optionally
// flush cached capi handles, install a native frame plus a longjmp-based
// exception point, invoke through ArgumentHandler, restore state, and
// finally process any async events that arrived during the call.
Object* NativeMethod::executor_implementation(STATE, CallFrame* call_frame, Dispatch& msg, Arguments& args) {
  NativeMethod* nm = as<NativeMethod>(msg.method);

  // A fixed arity (>= 0) must match the argument count exactly;
  // negative arities are variadic and skip this check.
  int arity = nm->arity()->to_int();
  if(arity >= 0 && (size_t)arity != args.total()) {
    Exception* exc = Exception::make_argument_error(
        state, arity, args.total(), msg.name);
    exc->locations(state, Location::from_call_stack(state, call_frame));
    state->thread_state()->raise_exception(exc);
    return NULL;
  }

  NativeMethodEnvironment* env = native_method_environment.get();

  // Optionally get the handles back to the proper state.
  if(state->shared.config.capi_global_flush) {
    capi::Handles* handles = state->shared.cached_handles();

    if(handles->size() > 0) {
      for(capi::Handles::Iterator i(*handles); i.more(); i.advance()) {
        i->update(env);
      }
    }
  }

  // Register the CallFrame, because we might GC below this.
  state->set_call_frame(call_frame);

  NativeMethodFrame nmf(env->current_native_frame());
  CallFrame* saved_frame = env->current_call_frame();
  env->set_current_call_frame(call_frame);
  env->set_current_native_frame(&nmf);

  // Be sure to do this after installing nmf as the current
  // native frame.
  nmf.setup(
      env->get_handle(args.recv()),
      env->get_handle(args.block()),
      env->get_handle(msg.method),
      env->get_handle(msg.module));

  Object* ret;
  ExceptionPoint ep(env);
  // Longjmp target: C extensions raise by jumping back to this point.
  PLACE_EXCEPTION_POINT(ep);

  if(unlikely(ep.jumped_to())) {
    // An exception was raised inside the C extension.
    ret = NULL;
  } else {
#ifdef RBX_PROFILER
    if(unlikely(state->tooling())) {
      // Tooling enabled: record entry/exit around the call.
      tooling::MethodEntry method(state, msg, args);
      ret = ArgumentHandler::invoke(state, nm, env, args);
    } else {
      ret = ArgumentHandler::invoke(state, nm, env, args);
    }
#else
    ret = ArgumentHandler::invoke(state, nm, env, args);
#endif
  }

  // Restore the previous native frame / call frame and pop the
  // exception point.
  env->set_current_call_frame(saved_frame);
  env->set_current_native_frame(nmf.previous());
  ep.pop(env);

  // Handle any signals that occurred while the native method
  // was running.
  if(!state->check_async(call_frame)) return NULL;

  return ret;
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* env,
                                              Arguments& args,
                                              BlockInvocation& invocation)
{
  // Don't use env->machine_code() because it might lock and the work should
  // already be done.
  MachineCode* const mcode = env->compiled_code_->machine_code();

  if(!mcode) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  // Count invocations and queue the block for JIT compilation once the
  // threshold is reached (a negative call_count disables counting).
  if(mcode->call_count >= 0) {
    if(mcode->call_count >= state->shared().config.jit_threshold_compile) {
      OnStack<1> os(state, env);

      G(jit)->compile_soon(state, env->compiled_code(), previous,
          invocation.self->direct_class(state), env, true);
    } else {
      mcode->call_count++;
    }
  }
#endif

  // Stack-allocate the locals and the interpreter call frame.
  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);

  Module* mod = invocation.module;
  if(!mod) mod = env->module();

  Object* block = cNil;
  // NOTE(review): the inner `scope` intentionally shadows the
  // StackVariables* above; it only reads the top scope's block.
  if(VariableScope* scope = env->top_scope_) {
    if(!scope->nil_p()) block = scope->block();
  }

  scope->initialize(invocation.self, block, mod, mcode->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);

  frame->prepare(mcode->stack_size);

  frame->previous = previous;
  frame->constant_scope_ = invocation.constant_scope;

  frame->arguments = &args;
  frame->dispatch_data = env;
  frame->compiled_code = env->compiled_code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags
               | CallFrame::cMultipleScopes
               | CallFrame::cBlock;

  // Bind the incoming arguments into the locals; on failure raise an
  // ArgumentError unless something else was already raised.
  if(!GenericArguments::call(state, frame, mcode, scope, args, invocation.flags)) {
    if(state->vm()->thread_state()->raise_reason() == cNone) {
      Exception* exc = Exception::make_argument_error(
          state, mcode->required_args, args.total(), mcode->name());
      exc->locations(state, Location::from_call_stack(state, previous));
      state->raise_exception(exc);
    }
    return NULL;
  }

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    // Attribute blocks defined on a singleton class to the underlying
    // module for reporting purposes.
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->singleton())) {
        mod = ma;
      }
    }

    OnStack<2> os(state, env, mod);

    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;
    state->checkpoint(gct, frame);

    tooling::BlockEntry method(state, env, mod);
    return (*mcode->run)(state, mcode, frame);
  } else {
    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;
    if(!state->check_interrupts(gct, frame, frame)) return NULL;
    state->checkpoint(gct, frame);

    return (*mcode->run)(state, mcode, frame);
  }
#else
  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;
  if(!state->check_interrupts(gct, frame, frame)) return NULL;
  state->checkpoint(gct, frame);

  return (*mcode->run)(state, mcode, frame);
#endif
}