Example #1
// Prints one unwind code. Because an unwind code can occupy up to 3 slots in
// the unwind codes array, this function requires that the correct number of
// slots is provided.
void Dumper::printUnwindCode(const UnwindInfo& UI, ArrayRef<UnwindCode> UC) {
  assert(UC.size() >= getNumUsedSlots(UC[0]));

  SW.startLine() << format("0x%02X: ", unsigned(UC[0].u.CodeOffset))
                 << getUnwindCodeTypeName(UC[0].getUnwindOp());

  switch (UC[0].getUnwindOp()) {
  case UOP_PushNonVol:
    OS << " reg=" << getUnwindRegisterName(UC[0].getOpInfo());
    break;

  case UOP_AllocLarge:
    OS << " size="
       << ((UC[0].getOpInfo() == 0) ? UC[1].FrameOffset * 8
                                    : getLargeSlotValue(UC));
    break;

  case UOP_AllocSmall:
    OS << " size=" << (UC[0].getOpInfo() + 1) * 8;
    break;

  case UOP_SetFPReg:
    if (UI.getFrameRegister() == 0)
      OS << " reg=<invalid>";
    else
      OS << " reg=" << getUnwindRegisterName(UI.getFrameRegister())
         << format(", offset=0x%X", UI.getFrameOffset() * 16);
    break;

  case UOP_SaveNonVol:
    OS << " reg=" << getUnwindRegisterName(UC[0].getOpInfo())
       << format(", offset=0x%X", UC[1].FrameOffset * 8);
    break;

  case UOP_SaveNonVolBig:
    OS << " reg=" << getUnwindRegisterName(UC[0].getOpInfo())
       << format(", offset=0x%X", getLargeSlotValue(UC));
    break;

  case UOP_SaveXMM128:
    OS << " reg=XMM" << static_cast<uint32_t>(UC[0].getOpInfo())
       << format(", offset=0x%X", UC[1].FrameOffset * 16);
    break;

  case UOP_SaveXMM128Big:
    OS << " reg=XMM" << static_cast<uint32_t>(UC[0].getOpInfo())
       << format(", offset=0x%X", getLargeSlotValue(UC));
    break;

  case UOP_PushMachFrame:
    OS << " errcode=" << (UC[0].getOpInfo() == 0 ? "no" : "yes");
    break;
  }

  OS << "\n";
}
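Example #1 relies on two helpers that are not shown: getNumUsedSlots, which says how many 16-bit slots an unwind code occupies, and getLargeSlotValue, which reassembles a 32-bit value from the two slots that follow the opcode. Sketches consistent with the documented Windows x64 unwind-code encoding might look like this (treat them as illustrative, not as the dumper's actual source):

static unsigned getNumUsedSlots(const UnwindCode &UnwindCode) {
  switch (UnwindCode.getUnwindOp()) {
  default:
  case UOP_PushNonVol:
  case UOP_AllocSmall:
  case UOP_SetFPReg:
  case UOP_PushMachFrame:
    return 1;                                     // opcode slot only
  case UOP_AllocLarge:
    return (UnwindCode.getOpInfo() == 0) ? 2 : 3; // 16-bit or 32-bit size
  case UOP_SaveNonVol:
  case UOP_SaveXMM128:
    return 2;                                     // opcode + 16-bit offset
  case UOP_SaveNonVolBig:
  case UOP_SaveXMM128Big:
    return 3;                                     // opcode + 32-bit offset
  }
}

static uint32_t getLargeSlotValue(ArrayRef<UnwindCode> UC) {
  // A "large" value spans the two slots after the opcode, low half first.
  if (UC.size() < 3)
    return 0;
  return UC[1].FrameOffset + (static_cast<uint32_t>(UC[2].FrameOffset) << 16);
}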
Example #2
void Dumper::printUnwindInfo(const Context &Ctx, const coff_section *Section,
                             off_t Offset, const UnwindInfo &UI) {
  DictScope UIS(SW, "UnwindInfo");
  SW.printNumber("Version", UI.getVersion());
  SW.printFlags("Flags", UI.getFlags(), makeArrayRef(UnwindFlags));
  SW.printNumber("PrologSize", UI.PrologSize);
  if (UI.getFrameRegister()) {
    SW.printEnum("FrameRegister", UI.getFrameRegister(),
                 makeArrayRef(UnwindOpInfo));
    SW.printHex("FrameOffset", UI.getFrameOffset());
  } else {
    SW.printString("FrameRegister", StringRef("-"));
    SW.printString("FrameOffset", StringRef("-"));
  }

  SW.printNumber("UnwindCodeCount", UI.NumCodes);
  {
    ListScope UCS(SW, "UnwindCodes");
    ArrayRef<UnwindCode> UC(&UI.UnwindCodes[0], UI.NumCodes);
    for (const UnwindCode *UCI = UC.begin(), *UCE = UC.end(); UCI < UCE; ++UCI) {
      unsigned UsedSlots = getNumUsedSlots(*UCI);
      if (UsedSlots > UC.size()) {
        errs() << "corrupt unwind data";
        return;
      }

      printUnwindCode(UI, ArrayRef<UnwindCode>(UCI, UCE));
      UCI = UCI + UsedSlots - 1;
    }
  }

  uint64_t LSDAOffset = Offset + getOffsetOfLSDA(UI);
  if (UI.getFlags() & (UNW_ExceptionHandler | UNW_TerminateHandler)) {
    SW.printString("Handler",
                   formatSymbol(Ctx, Section, LSDAOffset,
                                UI.getLanguageSpecificHandlerOffset()));
  } else if (UI.getFlags() & UNW_ChainInfo) {
    if (const RuntimeFunction *Chained = UI.getChainedFunctionEntry()) {
      DictScope CS(SW, "Chained");
      printRuntimeFunctionEntry(Ctx, Section, LSDAOffset, *Chained);
    }
  }
}
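Both examples above read the packed UNWIND_INFO header through small accessors. Per the documented Windows x64 layout, the version and flags share the first byte, and the frame register and scaled frame offset share the fourth; a minimal sketch of that packing (member names here are illustrative) is:

struct UnwindInfo {
  uint8_t VersionAndFlags;        // version in the low 3 bits, flags in the upper 5
  uint8_t PrologSize;
  uint8_t NumCodes;
  uint8_t FrameRegisterAndOffset; // register in the low 4 bits, offset/16 in the upper 4
  UnwindCode UnwindCodes[1];      // NumCodes entries follow

  uint8_t getVersion() const       { return VersionAndFlags & 0x07; }
  uint8_t getFlags() const         { return (VersionAndFlags >> 3) & 0x1f; }
  uint8_t getFrameRegister() const { return FrameRegisterAndOffset & 0x0f; }
  uint8_t getFrameOffset() const   { return (FrameRegisterAndOffset >> 4) & 0x0f; }
};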
Example #3
Object* VMMethod::interpreter(STATE,
                              VMMethod* const vmm,
                              InterpreterCallFrame* const call_frame)
{

#include "vm/gen/instruction_locations.hpp"

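  // Bootstrap call: invoking the interpreter with a null state only publishes
  // the computed-goto label table (insn_locations) and returns immediately.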
  if(unlikely(state == 0)) {
    VMMethod::instructions = const_cast<void**>(insn_locations);
    return NULL;
  }

  InterpreterState is;
  GCTokenImpl gct;

  register void** ip_ptr = vmm->addresses;

  Object** stack_ptr = call_frame->stk - 1;

  int current_unwind = 0;
  UnwindInfo unwinds[kMaxUnwindInfos];

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH goto **ip_ptr++

#undef next_int
#define next_int ((opcode)(*ip_ptr++))

#define cache_ip(which) ip_ptr = vmm->addresses + which
#define flush_ip() call_frame->calculate_ip(ip_ptr)

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
          Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // There is no reason to be here. Either the bytecode loop exits,
  // or it jumps to the exception label.
  rubinius::bug("Control flow error in interpreter");

  // If control finds its way down here, there is an exception.
exception:
  ThreadState* th = state->vm()->thread_state();
  //
  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      if(info->for_ensure()) {
        stack_position(info->stack_depth);
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else { // Not for us!
      call_frame->scope->flush_to_heap(state);
      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;
  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
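The exception path in Example #3 (and in the variants that follow) walks a fixed-size array of UnwindInfo records: rescue handlers are only taken for cException, while ensure blocks are also run during return/break unwinds. The code touches three fields and one predicate, so a minimal sketch of the record it implies (the enum names cRescue/cEnsure are assumptions) is:

enum UnwindType {
  cRescue,   // target is a rescue handler
  cEnsure    // target is an ensure block; also run during return/break unwinds
};

struct UnwindInfo {
  uint32_t   target_ip;    // ip to resume at when this handler fires
  int        stack_depth;  // operand-stack depth to restore first
  UnwindType type;

  bool for_ensure() const { return type == cEnsure; }
};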
Example #4
Object* VMMethod::debugger_interpreter_continue(STATE,
                                       VMMethod* const vmm,
                                       CallFrame* const call_frame,
                                       int sp,
                                       InterpreterState& is,
                                       int current_unwind,
                                       UnwindInfo* unwinds)
{

#include "vm/gen/instruction_locations.hpp"

  GCTokenImpl gct;
  opcode* stream = vmm->opcodes;

  Object** stack_ptr = call_frame->stk + sp;

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH \
    if(Object* bp = call_frame->find_breakpoint(state)) { \
      if(!Helpers::yield_debugger(state, gct, call_frame, bp)) goto exception; \
    } \
    goto *insn_locations[stream[call_frame->inc_ip()]];

#undef next_int
#undef cache_ip
#undef flush_ip

#define next_int ((opcode)(stream[call_frame->inc_ip()]))
#define cache_ip(which)
#define flush_ip()

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
          Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // No reason to be here!
  rubinius::bug("Control flow error in interpreter");

exception:
  ThreadState* th = state->vm()->thread_state();
  //
  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      if(info->for_ensure()) {
        stack_position(info->stack_depth);
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else { // It's a cBreak that's not for us!
      call_frame->scope->flush_to_heap(state);
      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;
  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
Example #5
Object* VMMethod::uncommon_interpreter(STATE,
                                       VMMethod* const vmm,
                                       CallFrame* const call_frame,
                                       int32_t entry_ip,
                                       native_int sp,
                                       CallFrame* const method_call_frame,
                                       jit::RuntimeDataHolder* rd,
                                       int32_t unwind_count,
                                       int32_t* input_unwinds)
{

  VMMethod* method_vmm = method_call_frame->cm->backend_method();

  if(++method_vmm->uncommon_count > state->shared().config.jit_deoptimize_threshold) {
    if(state->shared().config.jit_uncommon_print) {
      std::cerr << "[[[ Deoptimizing uncommon method ]]]\n";
      call_frame->print_backtrace(state);

      std::cerr << "Method Call Frame:\n";
      method_call_frame->print_backtrace(state);
    }

    method_vmm->uncommon_count = 0;
    method_vmm->deoptimize(state, method_call_frame->cm, rd);
  }

#include "vm/gen/instruction_locations.hpp"

  opcode* stream = vmm->opcodes;
  InterpreterState is;
  GCTokenImpl gct;

  Object** stack_ptr = call_frame->stk + sp;

  int current_unwind = unwind_count;
  UnwindInfo unwinds[kMaxUnwindInfos];

  for(int i = 0, j = 0; j < unwind_count; i += 3, j++) {
    UnwindInfo& uw = unwinds[j];
    uw.target_ip = input_unwinds[i];
    uw.stack_depth = input_unwinds[i + 1];
    uw.type = (UnwindType)input_unwinds[i + 2];
  }

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH goto *insn_locations[stream[call_frame->inc_ip()]];

#undef next_int
#undef cache_ip
#undef flush_ip

#define next_int ((opcode)(stream[call_frame->inc_ip()]))
#define cache_ip(which)
#define flush_ip()

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
          Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // No reason to be here!
  rubinius::bug("Control flow error in interpreter");

exception:
  ThreadState* th = state->vm()->thread_state();
  //
  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      if(info->for_ensure()) {
        stack_position(info->stack_depth);
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else { // It's a cBreak that's not for us!
      call_frame->scope->flush_to_heap(state);
      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;
  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
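Example #5 receives its unwind state from JIT-compiled code as a flat int32_t array, three entries per record in the order (target_ip, stack_depth, type), which the loop near the top unpacks back into UnwindInfo structs. A hypothetical caller-side helper performing the matching packing would look like:

static void pack_unwinds(const UnwindInfo* unwinds, int32_t count, int32_t* out) {
  // Inverse of the unpacking loop in uncommon_interpreter: three int32_t
  // slots per unwind record.
  for(int32_t j = 0, i = 0; j < count; j++, i += 3) {
    out[i]     = (int32_t)unwinds[j].target_ip;
    out[i + 1] = (int32_t)unwinds[j].stack_depth;
    out[i + 2] = (int32_t)unwinds[j].type;
  }
}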
Example #6
/* The debugger interpreter loop is used to run a method when a breakpoint
 * has been set. It has additional overhead, since it needs to inspect
 * each opcode for the breakpoint flag. It is installed on the VMMethod when
 * a breakpoint is set on the compiled method.
 */
Object* VMMethod::debugger_interpreter(STATE,
                                       VMMethod* const vmm,
                                       InterpreterCallFrame* const call_frame)
{

#include "vm/gen/instruction_locations.hpp"

  opcode* stream = vmm->opcodes;
  InterpreterState is;

  int current_unwind = 0;
  UnwindInfo unwinds[kMaxUnwindInfos];

  // TODO: ugh, a cut and paste of the whole interpreter above. It needs to be
  // fast; maybe it could use a function template?
  //
  // The only thing different is the DISPATCH macro, to check for debugging
  // instructions.

  Object** stack_ptr = call_frame->stk - 1;

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH \
    if(Object* bp = call_frame->find_breakpoint(state)) { \
      if(!Helpers::yield_debugger(state, call_frame, bp)) goto exception; \
    } \
    goto *insn_locations[stream[call_frame->inc_ip()]];

#undef next_int
#undef cache_ip
#undef flush_ip

#define next_int ((opcode)(stream[call_frame->inc_ip()]))
#define cache_ip(which)
#define flush_ip()

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->thread_state()->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
          Location::from_call_stack(state, call_frame));
    state->thread_state()->raise_exception(exc.exception);
    return NULL;
  }

  // no reason to be here!
  abort();

  // If control finds its way down here, there is an exception.
exception:
  ThreadState* th = state->thread_state();
  //
  switch(th->raise_reason()) {
  case cException:
    if(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);
      call_frame->set_ip(info->target_ip);
      cache_ip(info->target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(current_unwind > 0) {
      UnwindInfo* info = &unwinds[--current_unwind];
      stack_position(info->stack_depth);

      if(info->for_ensure()) {
        stack_position(info->stack_depth);
        call_frame->set_ip(info->target_ip);
        cache_ip(info->target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else { // It's a cBreak that's not for us!
      call_frame->scope->flush_to_heap(state);
      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;
  default:
    break;
  } // switch

  std::cout << "bug!\n";
  call_frame->print_backtrace(state);
  abort();
  return NULL;
}
Example #7
static uint64_t getOffsetOfLSDA(const UnwindInfo& UI) {
  return static_cast<const char*>(UI.getLanguageSpecificData())
         - reinterpret_cast<const char*>(&UI);
}
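Example #7 only works because getLanguageSpecificData() points just past the unwind-code array, with the code count rounded up to an even number of 16-bit slots so the LSDA stays 4-byte aligned. A sketch of that accessor under the documented layout (assuming the struct from the sketch after Example #2):

const void* UnwindInfo::getLanguageSpecificData() const {
  // The LSDA (or the chained RUNTIME_FUNCTION entry) begins immediately
  // after the unwind codes, padded to an even slot count.
  return reinterpret_cast<const void*>(&UnwindCodes[(NumCodes + 1) & ~1]);
}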
Example #8
/* The tooling interpreter calls the registered tool for every instruction
 * executed.
 */
Object* MachineCode::tooling_interpreter(STATE,
                                         MachineCode* const mcode,
                                         InterpreterCallFrame* const call_frame)
{

#include "vm/gen/instruction_locations.hpp"

  opcode* stream = mcode->opcodes;
  InterpreterState is;
  GCTokenImpl gct;

  UnwindInfoSet unwinds;

  Object** stack_ptr = call_frame->stk - 1;

continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH \
    state->shared().tool_broker()->at_ip(state, mcode, call_frame->ip()); \
    goto *insn_locations[stream[call_frame->inc_ip()]];

#undef next_int
#undef cache_ip
#undef flush_ip

#define next_int ((opcode)(stream[call_frame->inc_ip()]))
#define cache_ip(which)
#define flush_ip()

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
          Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // no reason to be here!
  rubinius::bug("Control flow error in interpreter");

  // If control finds its way down here, there is an exception.
exception:
  VMThreadState* th = state->vm()->thread_state();
  //
  switch(th->raise_reason()) {
  case cException:
    if(unwinds.has_unwinds()) {
      UnwindInfo info = unwinds.pop();
      stack_position(info.stack_depth);
      call_frame->set_ip(info.target_ip);
      cache_ip(info.target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
  case cThreadKill:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(unwinds.has_unwinds()) {
      UnwindInfo info = unwinds.pop();
      stack_position(info.stack_depth);

      if(info.for_ensure()) {
        stack_position(info.stack_depth);
        call_frame->set_ip(info.target_ip);
        cache_ip(info.target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else { // It's a cBreak that's not for us!
      call_frame->scope->flush_to_heap(state);
      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;
  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
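Unlike Examples #3 through #6, which manage a raw UnwindInfo[kMaxUnwindInfos] array plus an index by hand, Example #8 goes through an UnwindInfoSet object, of which only has_unwinds() and pop() appear above. A minimal container that would satisfy that usage (the VM's real class may differ, e.g. by reusing a fixed-size buffer) is:

#include <vector>

class UnwindInfoSet {
  std::vector<UnwindInfo> unwinds_;

public:
  void push(uint32_t target_ip, int stack_depth, UnwindType type) {
    UnwindInfo info = { target_ip, stack_depth, type };
    unwinds_.push_back(info);
  }

  bool has_unwinds() const { return !unwinds_.empty(); }

  UnwindInfo pop() {
    UnwindInfo info = unwinds_.back();
    unwinds_.pop_back();
    return info;
  }
};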