Tuple* System::vm_thread_state(STATE) {
  ThreadState* ts = state->thread_state();
  Tuple* tuple = Tuple::create(state, 5);

  Symbol* reason = 0;

  switch(ts->raise_reason()) {
  case cNone:
    reason = state->symbol("none");
    break;
  case cException:
    reason = state->symbol("exception");
    break;
  case cReturn:
    reason = state->symbol("return");
    break;
  case cBreak:
    reason = state->symbol("break");
    break;
  case cExit:
    reason = state->symbol("exit");
    break;
  case cCatchThrow:
    reason = state->symbol("catch_throw");
    break;
  default:
    reason = state->symbol("unknown");
  }

  tuple->put(state, 0, reason);
  tuple->put(state, 1, ts->raise_value());
  tuple->put(state, 2, ts->destination_scope());
  tuple->put(state, 3, ts->current_exception());
  tuple->put(state, 4, ts->throw_dest());

  return tuple;
}
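// The tuple built above is a snapshot of the VM's current raise state. A
// minimal sketch (hypothetical caller, not part of this file) of how its
// slots could be unpacked on the C++ side, assuming Tuple#at and the as<>
// cast helper are available:
//
//   Tuple* info = System::vm_thread_state(state);
//   Symbol* reason      = as<Symbol>(info->at(state, 0)); // :none, :exception, ...
//   Object* raise_value = info->at(state, 1);
//   Object* dest_scope  = info->at(state, 2);
//   Object* current_exc = info->at(state, 3);
//   Object* throw_dest  = info->at(state, 4);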
Object* VMMethod::interpreter(STATE, VMMethod* const vmm,
                              InterpreterCallFrame* const call_frame)
{

#include "vm/gen/instruction_locations.hpp"

  if(unlikely(state == 0)) {
    VMMethod::instructions = const_cast<void**>(insn_locations);
    return NULL;
  }

  InterpreterState is;
  GCTokenImpl gct;

  register void** ip_ptr = vmm->addresses;

  Object** stack_ptr = call_frame->stk - 1;

  int current_unwind = 0;
  UnwindInfo unwinds[kMaxUnwindInfos];

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH goto **ip_ptr++

#undef next_int
#define next_int ((opcode)(*ip_ptr++))

#define cache_ip(which) ip_ptr = vmm->addresses + which
#define flush_ip() call_frame->calculate_ip(ip_ptr)

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
        Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // There is no reason to be here. Either the bytecode loop exits,
  // or it jumps to exception.
  rubinius::bug("Control flow error in interpreter");

  // If control finds its way down here, there is an exception.
exception:
  ThreadState* th = state->vm()->thread_state();

  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      if(info->for_ensure()) {
        stack_position(info->stack_depth);
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else {
      // Not for us!
      call_frame->scope->flush_to_heap(state);

      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;

  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
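/* The DISPATCH above is direct-threaded dispatch built on GCC's computed-goto
 * extension: the bootstrap call with state == 0 publishes the label addresses
 * (insn_locations) so the opcode stream can be pre-translated elsewhere into
 * the jump targets that vmm->addresses holds and ip_ptr walks. A
 * self-contained toy sketch of the pattern (toy opcodes and names, not the
 * real instruction set). For simplicity it looks labels up per opcode, the
 * way the debugger and uncommon loops below do, rather than pre-translating:
 *
 *   int toy_interpret(const int* code) {
 *     static void* labels[] = { &&op_push, &&op_add, &&op_halt };
 *     int stack[8];
 *     int sp = -1;
 *     const int* ip = code;
 *   #define TOY_DISPATCH goto *labels[*ip++]
 *     TOY_DISPATCH;
 *   op_push: stack[++sp] = *ip++; TOY_DISPATCH;
 *   op_add:  sp--; stack[sp] += stack[sp + 1]; TOY_DISPATCH;
 *   op_halt: return stack[sp];
 *   #undef TOY_DISPATCH
 *   }
 *
 *   // int code[] = { 0, 2, 0, 3, 1, 2 };  // push 2, push 3, add, halt
 *   // toy_interpret(code) == 5
 */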
Object* VMMethod::debugger_interpreter_continue(STATE,
                                                VMMethod* const vmm,
                                                CallFrame* const call_frame,
                                                int sp,
                                                InterpreterState& is,
                                                int current_unwind,
                                                UnwindInfo* unwinds)
{

#include "vm/gen/instruction_locations.hpp"

  GCTokenImpl gct;
  opcode* stream = vmm->opcodes;

  Object** stack_ptr = call_frame->stk + sp;

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH \
    if(Object* bp = call_frame->find_breakpoint(state)) { \
      if(!Helpers::yield_debugger(state, gct, call_frame, bp)) goto exception; \
    } \
    goto *insn_locations[stream[call_frame->inc_ip()]];

#undef next_int
#undef cache_ip
#undef flush_ip

#define next_int ((opcode)(stream[call_frame->inc_ip()]))
#define cache_ip(which)
#define flush_ip()

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
        Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // No reason to be here!
  rubinius::bug("Control flow error in interpreter");

exception:
  ThreadState* th = state->vm()->thread_state();

  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      if(info->for_ensure()) {
        stack_position(info->stack_depth);
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else {
      // It's a cBreak that's not for us!
      call_frame->scope->flush_to_heap(state);

      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;

  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
Object* VMMethod::uncommon_interpreter(STATE,
                                       VMMethod* const vmm,
                                       CallFrame* const call_frame,
                                       int32_t entry_ip,
                                       native_int sp,
                                       CallFrame* const method_call_frame,
                                       jit::RuntimeDataHolder* rd,
                                       int32_t unwind_count,
                                       int32_t* input_unwinds)
{
  VMMethod* method_vmm = method_call_frame->cm->backend_method();

  if(++method_vmm->uncommon_count > state->shared().config.jit_deoptimize_threshold) {
    if(state->shared().config.jit_uncommon_print) {
      std::cerr << "[[[ Deoptimizing uncommon method ]]]\n";
      call_frame->print_backtrace(state);

      std::cerr << "Method Call Frame:\n";
      method_call_frame->print_backtrace(state);
    }

    method_vmm->uncommon_count = 0;
    method_vmm->deoptimize(state, method_call_frame->cm, rd);
  }

#include "vm/gen/instruction_locations.hpp"

  opcode* stream = vmm->opcodes;
  InterpreterState is;
  GCTokenImpl gct;

  Object** stack_ptr = call_frame->stk + sp;

  int current_unwind = unwind_count;
  UnwindInfo unwinds[kMaxUnwindInfos];

  for(int i = 0, j = 0; j < unwind_count; i += 3, j++) {
    UnwindInfo& uw = unwinds[j];
    uw.target_ip = input_unwinds[i];
    uw.stack_depth = input_unwinds[i + 1];
    uw.type = (UnwindType)input_unwinds[i + 2];
  }

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH goto *insn_locations[stream[call_frame->inc_ip()]];

#undef next_int
#undef cache_ip
#undef flush_ip

#define next_int ((opcode)(stream[call_frame->inc_ip()]))
#define cache_ip(which)
#define flush_ip()

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
        Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // No reason to be here!
  rubinius::bug("Control flow error in interpreter");

exception:
  ThreadState* th = state->vm()->thread_state();

  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      if(info->for_ensure()) {
        stack_position(info->stack_depth);
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else {
      // It's a cBreak that's not for us!
      call_frame->scope->flush_to_heap(state);

      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;

  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
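/* The JIT hands the active handlers to uncommon_interpreter as a flat int32_t
 * array, three words per handler, which the loop near the top of the function
 * unpacks into UnwindInfo records. A minimal sketch of that layout (the
 * values below are illustrative only):
 *
 *   //   input_unwinds[3*j + 0] -> target_ip   (where to resume)
 *   //   input_unwinds[3*j + 1] -> stack_depth (operand stack depth to restore)
 *   //   input_unwinds[3*j + 2] -> type        (cast to UnwindType)
 *
 *   int32_t flat[] = {
 *     12, 0, 0,   // handler 0
 *     30, 2, 1    // handler 1
 *   };
 *   // With unwind_count == 2, the unpacking loop rebuilds two UnwindInfo
 *   // entries from this array before the bytecode loop starts.
 */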
/* The debugger interpreter loop is used to run a method when a breakpoint
 * has been set. It has additional overhead, since it needs to inspect
 * each opcode for the breakpoint flag. It is installed on the VMMethod when
 * a breakpoint is set on the compiled method.
 */
Object* VMMethod::debugger_interpreter(STATE, VMMethod* const vmm,
                                       InterpreterCallFrame* const call_frame)
{

#include "vm/gen/instruction_locations.hpp"

  opcode* stream = vmm->opcodes;
  InterpreterState is;

  int current_unwind = 0;
  UnwindInfo unwinds[kMaxUnwindInfos];

  // TODO: ug, cut and paste of the whole interpreter above. Needs to be fast,
  // maybe could use a function template?
  //
  // The only thing different is the DISPATCH macro, to check for debugging
  // instructions.

  Object** stack_ptr = call_frame->stk - 1;

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH \
    if(Object* bp = call_frame->find_breakpoint(state)) { \
      if(!Helpers::yield_debugger(state, call_frame, bp)) goto exception; \
    } \
    goto *insn_locations[stream[call_frame->inc_ip()]];

#undef next_int
#undef cache_ip
#undef flush_ip

#define next_int ((opcode)(stream[call_frame->inc_ip()]))
#define cache_ip(which)
#define flush_ip()

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->thread_state()->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
        Location::from_call_stack(state, call_frame));
    state->thread_state()->raise_exception(exc.exception);
    return NULL;
  }

  // No reason to be here!
  abort();

  // If control finds its way down here, there is an exception.
exception:
  ThreadState* th = state->thread_state();

  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);

      if(info->for_ensure()) {
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else {
      // It's a cBreak that's not for us!
      call_frame->scope->flush_to_heap(state);

      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;

  default:
    break;
  } // switch

  std::cout << "bug!\n";
  call_frame->print_backtrace(state);
  abort();
  return NULL;
}
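/* The TODO in debugger_interpreter notes that these loops are cut-and-paste
 * copies of one another and suggests a function template. A self-contained
 * toy sketch of that idea (toy types and names, not the Rubinius API): the
 * loop body is written once, and the per-variant work -- nothing for the
 * plain loop, a breakpoint check for the debugger loop -- is a policy type
 * the compiler can inline away, so the fast path pays no extra cost.
 *
 *   struct PlainPolicy {
 *     static bool before_op() { return true; }
 *   };
 *   struct DebugPolicy {
 *     static bool before_op() {
 *       // a breakpoint check would go here; returning false bails out
 *       return true;
 *     }
 *   };
 *
 *   template <class Policy>
 *   int run(const int* code, int len) {
 *     int acc = 0;
 *     for(int i = 0; i < len; i++) {
 *       if(!Policy::before_op()) return -1;  // analogous to goto exception
 *       acc += code[i];
 *     }
 *     return acc;
 *   }
 *
 *   // run<PlainPolicy>(...) and run<DebugPolicy>(...) share one loop body,
 *   // yet the PlainPolicy instantiation carries no per-opcode check.
 */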