Example #1
GrowableArray<MonitorInfo*>* interpretedVFrame::monitors() const {
  GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(5);
  for (BasicObjectLock* current = (fr().previous_monitor_in_interpreter_frame(fr().interpreter_frame_monitor_begin()));
       current >= fr().interpreter_frame_monitor_end();
       current = fr().previous_monitor_in_interpreter_frame(current)) {
    result->push(new MonitorInfo(current->obj(), current->lock(), false, false));
  }
  return result;
}
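
The loop walks the interpreter frame's monitor area from interpreter_frame_monitor_begin() down to interpreter_frame_monitor_end(), wrapping each BasicObjectLock in a MonitorInfo. A minimal, self-contained sketch of that iteration pattern over a downward-growing region (hypothetical Slot type and previous() helper, not the HotSpot API):

// Sketch only: walk a region from just below 'begin' (high address) down to
// 'end' (low address), stepping one slot at a time, as the monitor loop does.
#include <cstdio>
#include <vector>

struct Slot { int id; };

static Slot* previous(Slot* current) { return current - 1; }  // step downward

int main() {
  // area[0] is padding so the terminating pointer stays inside the array.
  Slot area[5] = {{-1}, {0}, {1}, {2}, {3}};
  Slot* monitor_end   = &area[1];   // lowest slot in the region
  Slot* monitor_begin = &area[5];   // one past the highest slot
  std::vector<int> result;
  for (Slot* current = previous(monitor_begin);
       current >= monitor_end;
       current = previous(current)) {
    result.push_back(current->id);  // visits 3, 2, 1, 0
  }
  for (int id : result) std::printf("%d\n", id);
  return 0;
}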
Example #2
InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();

  // Calculate the size of the frame we'll build, including
  // any adjustments to the caller's frame that we'll make.
  int extra_locals  = 0;
  int monitor_words = 0;
  int stack_words   = 0;

  if (!method->is_native()) {
    extra_locals = method->max_locals() - method->size_of_parameters();
    stack_words  = method->max_stack();
  }
  if (method->is_synchronized()) {
    monitor_words = frame::interpreter_frame_monitor_size();
  }
  stack->overflow_check(
    extra_locals + header_words + monitor_words + stack_words, CHECK_NULL);

  // Adjust the caller's stack frame to accommodate any additional
  // local variables we have contiguously with our parameters.
  for (int i = 0; i < extra_locals; i++)
    stack->push(0);

  intptr_t *locals;
  if (method->is_native())
    locals = stack->sp() + (method->size_of_parameters() - 1);
  else
    locals = stack->sp() + (method->max_locals() - 1);

  stack->push(0); // next_frame, filled in later
  intptr_t *fp = stack->sp();
  assert(fp - stack->sp() == next_frame_off, "should be");

  stack->push(INTERPRETER_FRAME);
  assert(fp - stack->sp() == frame_type_off, "should be");

  interpreterState istate =
    (interpreterState) stack->alloc(sizeof(BytecodeInterpreter));
  assert(fp - stack->sp() == istate_off, "should be");

  istate->set_locals(locals);
  istate->set_method(method);
  istate->set_self_link(istate);
  istate->set_prev_link(NULL);
  istate->set_thread(thread);
  istate->set_bcp(method->is_native() ? NULL : method->code_base());
  istate->set_constants(method->constants()->cache());
  istate->set_msg(BytecodeInterpreter::method_entry);
  istate->set_oop_temp(NULL);
  istate->set_mdx(NULL);
  istate->set_callee(NULL);

  istate->set_monitor_base((BasicObjectLock *) stack->sp());
  if (method->is_synchronized()) {
    BasicObjectLock *monitor =
      (BasicObjectLock *) stack->alloc(monitor_words * wordSize);
    oop object;
    if (method->is_static())
      object = method->constants()->pool_holder()->java_mirror();
    else
      object = (oop) locals[0];
    monitor->set_obj(object);
  }

  istate->set_stack_base(stack->sp());
  istate->set_stack(stack->sp() - 1);
  if (stack_words)
    stack->alloc(stack_words * wordSize);
  istate->set_stack_limit(stack->sp() - 1);

  return (InterpreterFrame *) fp;
}
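
InterpreterFrame::build works against a stack that grows toward lower addresses: the next_frame slot and the frame-type marker are pushed first, and the interpreter state, monitor, and expression stack are then allocated below them, which is why each asserted offset is measured as fp minus the current sp. A rough, self-contained model of that push/alloc discipline (hypothetical MiniStack type with fixed capacity and no overflow checking, not the real ZeroStack):

// Sketch only: a toy downward-growing stack mirroring push()/alloc()/sp().
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct MiniStack {
  intptr_t  data[64];
  intptr_t* top;                          // sp(); moves toward lower addresses
  MiniStack() : top(data + 64) {}
  intptr_t* sp() const { return top; }
  void push(intptr_t v) { *--top = v; }
  void* alloc(size_t bytes) {             // reserve whole words below sp
    top -= bytes / sizeof(intptr_t);
    return top;
  }
};

int main() {
  MiniStack stack;
  stack.push(0);                                     // next_frame, filled in later
  intptr_t* fp = stack.sp();                         // frame pointer marks that slot
  stack.push(1);                                     // frame type marker
  void* istate = stack.alloc(4 * sizeof(intptr_t));  // interpreter state area
  (void) istate;
  std::printf("fp - sp = %td words\n", fp - stack.sp());  // prints 5
  return 0;
}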
Example #3
int CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
  // Make sure method is native and not abstract
  assert(method->is_native() && !method->is_abstract(), "should be");

  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();

  // Allocate and initialize our frame
  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
  thread->push_zero_frame(frame);
  interpreterState istate = frame->interpreter_state();
  intptr_t *locals = istate->locals();

  // Update the invocation counter
  if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
    InvocationCounter *counter = method->invocation_counter();
    counter->increment();
    if (counter->reached_InvocationLimit()) {
      CALL_VM_NOCHECK(
        InterpreterRuntime::frequency_counter_overflow(thread, NULL));
      if (HAS_PENDING_EXCEPTION)
        goto unwind_and_return;
    }
  }

  // Lock if necessary
  BasicObjectLock *monitor;
  monitor = NULL;
  if (method->is_synchronized()) {
    monitor = (BasicObjectLock*) istate->stack_base();
    oop lockee = monitor->obj();
    markOop disp = lockee->mark()->set_unlocked();

    monitor->lock()->set_displaced_header(disp);
    if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
      if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
        monitor->lock()->set_displaced_header(NULL);
      }
      else {
        CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
        if (HAS_PENDING_EXCEPTION)
          goto unwind_and_return;
      }
    }
  }

  // Get the signature handler
  InterpreterRuntime::SignatureHandler *handler; {
    address handlerAddr = method->signature_handler();
    if (handlerAddr == NULL) {
      CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
      if (HAS_PENDING_EXCEPTION)
        goto unlock_unwind_and_return;

      handlerAddr = method->signature_handler();
      assert(handlerAddr != NULL, "eh?");
    }
    if (handlerAddr == (address) InterpreterRuntime::slow_signature_handler) {
      CALL_VM_NOCHECK(handlerAddr =
        InterpreterRuntime::slow_signature_handler(thread, method, NULL, NULL));
      if (HAS_PENDING_EXCEPTION)
        goto unlock_unwind_and_return;
    }
    handler =
      InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
  }

  // Get the native function entry point
  address function;
  function = method->native_function();
  assert(function != NULL, "should be set if signature handler is");

  // Build the argument list
  stack->overflow_check(handler->argument_count() * 2, THREAD);
  if (HAS_PENDING_EXCEPTION)
    goto unlock_unwind_and_return;

  void **arguments;
  void *mirror; {
    arguments =
      (void **) stack->alloc(handler->argument_count() * sizeof(void **));
    void **dst = arguments;

    void *env = thread->jni_environment();
    *(dst++) = &env;

    if (method->is_static()) {
      istate->set_oop_temp(
        method->constants()->pool_holder()->java_mirror());
      mirror = istate->oop_temp_addr();
      *(dst++) = &mirror;
    }

    intptr_t *src = locals;
    for (int i = dst - arguments; i < handler->argument_count(); i++) {
      ffi_type *type = handler->argument_type(i);
      if (type == &ffi_type_pointer) {
        if (*src) {
          stack->push((intptr_t) src);
          *(dst++) = stack->sp();
        }
        else {
          *(dst++) = src;
        }
        src--;
      }
      else if (type->size == 4) {
        *(dst++) = src--;
      }
      else if (type->size == 8) {
        src--;
        *(dst++) = src--;
      }
      else {
        ShouldNotReachHere();
      }
    }
  }

  // Set up the Java frame anchor
  thread->set_last_Java_frame();

  // Change the thread state to _thread_in_native
  ThreadStateTransition::transition_from_java(thread, _thread_in_native);

  // Make the call
  intptr_t result[4 - LogBytesPerWord];
  ffi_call(handler->cif(), (void (*)()) function, result, arguments);

  // Change the thread state back to _thread_in_Java.
  // ThreadStateTransition::transition_from_native() cannot be used
  // here because it does not check for asynchronous exceptions.
  // We have to manage the transition ourselves.
  thread->set_thread_state(_thread_in_native_trans);

  // Make sure new state is visible in the GC thread
  if (os::is_MP()) {
    if (UseMembar) {
      OrderAccess::fence();
    }
    else {
      InterfaceSupport::serialize_memory(thread);
    }
  }

  // Handle safepoint operations, pending suspend requests,
  // and pending asynchronous exceptions.
  if (SafepointSynchronize::do_call_back() ||
      thread->has_special_condition_for_native_trans()) {
    JavaThread::check_special_condition_for_native_trans(thread);
    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops());
  }

  // Finally we can change the thread state to _thread_in_Java.
  thread->set_thread_state(_thread_in_Java);
  fixup_after_potential_safepoint();

  // Clear the frame anchor
  thread->reset_last_Java_frame();

  // If the result was an oop, unbox it and store it in oop_temp,
  // where the garbage collector can see it, before we release the
  // handle it might be protected by.
  if (handler->result_type() == &ffi_type_pointer) {
    if (result[0])
      istate->set_oop_temp(*(oop *) result[0]);
    else
      istate->set_oop_temp(NULL);
  }

  // Reset handle block
  thread->active_handles()->clear();

 unlock_unwind_and_return:

  // Unlock if necessary
  if (monitor) {
    BasicLock *lock = monitor->lock();
    markOop header = lock->displaced_header();
    oop rcvr = monitor->obj();
    monitor->set_obj(NULL);

    if (header != NULL) {
      if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
        monitor->set_obj(rcvr); {
          HandleMark hm(thread);
          CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));
        }
      }
    }
  }

 unwind_and_return:

  // Unwind the current activation
  thread->pop_zero_frame();

  // Pop our parameters
  stack->set_sp(stack->sp() + method->size_of_parameters());

  // Push our result
  if (!HAS_PENDING_EXCEPTION) {
    BasicType type = result_type_of(method);
    stack->set_sp(stack->sp() - type2size[type]);

    switch (type) {
    case T_VOID:
      break;

    case T_BOOLEAN:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jboolean *) result != 0, 0);
      break;

    case T_CHAR:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerShort);
#endif
      SET_LOCALS_INT(*(jchar *) result, 0);
      break;

    case T_BYTE:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jbyte *) result, 0);
      break;

    case T_SHORT:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerShort);
#endif
      SET_LOCALS_INT(*(jshort *) result, 0);
      break;

    case T_INT:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerInt);
#endif
      SET_LOCALS_INT(*(jint *) result, 0);
      break;

    case T_LONG:
      SET_LOCALS_LONG(*(jlong *) result, 0);
      break;

    case T_FLOAT:
      SET_LOCALS_FLOAT(*(jfloat *) result, 0);
      break;

    case T_DOUBLE:
      SET_LOCALS_DOUBLE(*(jdouble *) result, 0);
      break;

    case T_OBJECT:
    case T_ARRAY:
      SET_LOCALS_OBJECT(istate->oop_temp(), 0);
      break;

    default:
      ShouldNotReachHere();
    }
  }

  // No deoptimized frames on the stack
  return 0;
}
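
native_entry packs the Java locals into a void** argument vector (each slot pointing at the storage for one argument) and dispatches the native method through libffi's ffi_call, then widens or narrows the word-sized result back to the Java return type. A minimal stand-alone libffi call, independent of HotSpot and assuming libffi is installed, looks roughly like this:

// Sketch only: call puts("...") through ffi_call, the same dispatch mechanism
// native_entry uses; each arg_values slot points at one argument's storage.
#include <stdio.h>
#include <ffi.h>

int main() {
  ffi_cif cif;
  ffi_type *arg_types[1]   = { &ffi_type_pointer };
  const char *message      = "Hello from ffi_call";
  void      *arg_values[1] = { &message };
  ffi_arg    rc;                       // integral return, widened to a word

  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) == FFI_OK)
    ffi_call(&cif, FFI_FN(puts), &rc, arg_values);
  return 0;
}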
Example #4
void vframeArrayElement::fill_in(compiledVFrame* vf) {

  // Copy the information from the compiled vframe to the
  // interpreter frame we will be creating to replace vf.

  _method = vf->method();
  _bci    = vf->raw_bci();
  _reexecute = vf->should_reexecute();

  int index;

  // Get the monitors off-stack

  GrowableArray<MonitorInfo*>* list = vf->monitors();
  if (list->is_empty()) {
    _monitors = NULL;
  } else {

    // Allocate monitor chunk
    _monitors = new MonitorChunk(list->length());
    vf->thread()->add_monitor_chunk(_monitors);

    // Migrate the BasicLocks from the stack to the monitor chunk
    for (index = 0; index < list->length(); index++) {
      MonitorInfo* monitor = list->at(index);
      assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already");
      assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
      BasicObjectLock* dest = _monitors->at(index);
      dest->set_obj(monitor->owner());
      monitor->lock()->move_to(monitor->owner(), dest->lock());
    }
  }

  // Convert the vframe locals and expressions to off-stack
  // values. Because we will not GC, all oops can be converted to
  // intptr_t (i.e. a stack slot) and we are fine. This is
  // good since we are inside a HandleMark and the oops in our
  // collection would go away between packing them here and
  // unpacking them in unpack_on_stack.

  // First the locals go off-stack

  // FIXME: this seems silly; it creates a StackValueCollection
  // just to get the size, then copies the values and converts the
  // types to intptr_t-sized slots. It seems like it could do this
  // in place... Still, it uses less memory than the old way.

  StackValueCollection *locs = vf->locals();
  _locals = new StackValueCollection(locs->size());
  for(index = 0; index < locs->size(); index++) {
    StackValue* value = locs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead local.  Will be initialized to null/zero.
        _locals->add( new StackValue());
        break;
      case T_INT:
        _locals->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Now the expressions off-stack
  // Same silliness as above

  StackValueCollection *exprs = vf->expressions();
  _expressions = new StackValueCollection(exprs->size());
  for(index = 0; index < exprs->size(); index++) {
    StackValue* value = exprs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead stack element.  Will be initialized to null/zero.
        // This can occur when the compiler emits a state in which stack
        // elements are known to be dead (because of an imminent exception).
        _expressions->add( new StackValue());
        break;
      case T_INT:
        _expressions->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }
}
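
fill_in snapshots the compiled frame's monitors, locals, and expression stack into heap-allocated collections so they outlive the compiled frame they came from; oops are stored as raw intptr_t slots (safe because no GC happens before unpack_on_stack), and dead T_CONFLICT slots are recorded as empty values. A simplified, self-contained model of that copy step (hypothetical Tag/Slot types, not HotSpot's StackValue and StackValueCollection):

// Sketch only: copy tagged stack slots into an off-stack collection; dead
// slots become zero so nothing stale is carried into the new frame.
#include <cstdint>
#include <cstdio>
#include <vector>

enum class Tag { Object, Int, Conflict };

struct Slot {
  Tag      tag;
  intptr_t bits;   // an oop-sized pointer or a widened primitive
};

std::vector<Slot> snapshot(const std::vector<Slot>& live) {
  std::vector<Slot> copy;
  copy.reserve(live.size());
  for (const Slot& s : live) {
    if (s.tag == Tag::Conflict)
      copy.push_back({Tag::Conflict, 0});   // dead local or stack element
    else
      copy.push_back(s);                    // preserve type and raw bits
  }
  return copy;
}

int main() {
  std::vector<Slot> live = {{Tag::Int, 42}, {Tag::Conflict, 7}, {Tag::Object, 0}};
  std::vector<Slot> off_stack = snapshot(live);
  std::printf("copied %zu slots\n", off_stack.size());
  return 0;
}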
void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
                                         int callee_parameters,
                                         int callee_locals,
                                         frame* caller,
                                         bool is_top_frame,
                                         bool is_bottom_frame,
                                         int exec_mode) {
  JavaThread* thread = (JavaThread*) Thread::current();

  // Look at bci and decide on bcp and continuation pc
  address bcp;
  // C++ interpreter doesn't need a pc since it will figure out what to do when it
  // begins execution
  address pc;
  bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
                             // rather than the one associated with bcp
  if (raw_bci() == SynchronizationEntryBCI) {
    // We are deoptimizing while hanging in prologue code for a synchronized method
    bcp = method()->bcp_from(0); // first byte code
    pc  = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
  } else if (should_reexecute()) { //reexecute this bytecode
    assert(is_top_frame, "reexecute allowed only for the top frame");
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_reexecute_entry(method(), bcp);
  } else {
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
    use_next_mdp = true;
  }
  assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");

  // Monitorenter and pending exceptions:
  //
  // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
  // because there is no safepoint at the null pointer check (it is either handled explicitly
  // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
  // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER).  If an asynchronous
  // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
  // the monitorenter to place it in the proper exception range.
  //
  // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
  // in which case bcp should point to the monitorenter since it is within the exception's range.

  assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
  assert(thread->deopt_nmethod() != NULL, "nmethod should be known");
  guarantee(!(thread->deopt_nmethod()->is_compiled_by_c2() &&
              *bcp == Bytecodes::_monitorenter             &&
              exec_mode == Deoptimization::Unpack_exception),
            "shouldn't get exception during monitorenter");

  int popframe_preserved_args_size_in_bytes = 0;
  int popframe_preserved_args_size_in_words = 0;
  if (is_top_frame) {
    JvmtiThreadState *state = thread->jvmti_thread_state();
    if (JvmtiExport::can_pop_frame() &&
        (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
      if (thread->has_pending_popframe()) {
        // Pop top frame after deoptimization
#ifndef CC_INTERP
        pc = Interpreter::remove_activation_preserving_args_entry();
#else
        // Do an uncommon trap type entry. The C++ interpreter will know
        // to pop frame and preserve the args
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
#endif
      } else {
        // Reexecute invoke in top frame
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
        // Note: the PopFrame-related extension of the expression stack size is done in
        // Deoptimization::fetch_unroll_info_helper
        popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
      }
    } else if (JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
      // Force early return from top frame after deoptimization
#ifndef CC_INTERP
      pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
#else
     // TBD: Need to implement ForceEarlyReturn for CC_INTERP (ia64)
#endif
    } else {
      // Possibly override the previous pc computation of the top (youngest) frame
      switch (exec_mode) {
      case Deoptimization::Unpack_deopt:
        // use what we've got
        break;
      case Deoptimization::Unpack_exception:
        // exception is pending
        pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
        // [phh] We're going to end up in some handler or other, so it doesn't
        // matter what mdp we point to.  See exception_handler_for_exception()
        // in interpreterRuntime.cpp.
        break;
      case Deoptimization::Unpack_uncommon_trap:
      case Deoptimization::Unpack_reexecute:
        // redo last byte code
        pc  = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        break;
      default:
        ShouldNotReachHere();
      }
    }
  }

  // Setup the interpreter frame

  assert(method() != NULL, "method must exist");
  int temps = expressions()->size();

  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();

  Interpreter::layout_activation(method(),
                                 temps + callee_parameters,
                                 popframe_preserved_args_size_in_words,
                                 locks,
                                 caller_actual_parameters,
                                 callee_parameters,
                                 callee_locals,
                                 caller,
                                 iframe(),
                                 is_top_frame,
                                 is_bottom_frame);

  // Update the pc in the frame object and overwrite the temporary pc
  // we placed in the skeletal frame now that we finally know the
  // exact interpreter address we should use.

  _frame.patch_pc(thread, pc);

  assert (!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors");

  BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
  for (int index = 0; index < locks; index++) {
    top = iframe()->previous_monitor_in_interpreter_frame(top);
    BasicObjectLock* src = _monitors->at(index);
    top->set_obj(src->obj());
    src->lock()->move_to(src->obj(), top->lock());
  }
  if (ProfileInterpreter) {
    iframe()->interpreter_frame_set_mdx(0); // clear out the mdp.
  }
  iframe()->interpreter_frame_set_bcx((intptr_t)bcp); // cannot use bcp because frame is not initialized yet
  if (ProfileInterpreter) {
    methodDataOop mdo = method()->method_data();
    if (mdo != NULL) {
      int bci = iframe()->interpreter_frame_bci();
      if (use_next_mdp) ++bci;
      address mdp = mdo->bci_to_dp(bci);
      iframe()->interpreter_frame_set_mdp(mdp);
    }
  }

  // Unpack the expression stack
  // If this is an intermediate frame (i.e. not the top frame) then this
  // only unpacks the part of the expression stack not used by the callee
  // as parameters. The callee parameters are unpacked as part of the
  // callee locals.
  int i;
  for(i = 0; i < expressions()->size(); i++) {
    StackValue *value = expressions()->at(i);
    intptr_t*   addr  = iframe()->interpreter_frame_expression_stack_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead stack slot.  Initialize to null in case it is an oop.
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }


  // Unpack the locals
  for(i = 0; i < locals()->size(); i++) {
    StackValue *value = locals()->at(i);
    intptr_t* addr  = iframe()->interpreter_frame_local_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead location. If it is an oop then we need a NULL to prevent GC from following it
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    // An interpreted frame was popped but it returns to a deoptimized
    // frame. The incoming arguments to the interpreted activation
    // were preserved in thread-local storage by the
    // remove_activation_preserving_args_entry in the interpreter; now
    // we put them back into the just-unpacked interpreter frame.
    // Note that this assumes that the locals arena grows toward lower
    // addresses.
    if (popframe_preserved_args_size_in_words != 0) {
      void* saved_args = thread->popframe_preserved_args();
      assert(saved_args != NULL, "must have been saved by interpreter");
#ifdef ASSERT
      assert(popframe_preserved_args_size_in_words <=
             iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
             "expression stack size should have been extended");
#endif // ASSERT
      int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
      intptr_t* base;
      if (frame::interpreter_frame_expression_stack_direction() < 0) {
        base = iframe()->interpreter_frame_expression_stack_at(top_element);
      } else {
        base = iframe()->interpreter_frame_expression_stack();
      }
      Copy::conjoint_jbytes(saved_args,
                            base,
                            popframe_preserved_args_size_in_bytes);
      thread->popframe_free_preserved_args();
    }
  }

#ifndef PRODUCT
  if (TraceDeoptimization && Verbose) {
    ttyLocker ttyl;
    tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
    iframe()->print_on(tty);
    RegisterMap map(thread);
    vframe* f = vframe::new_vframe(iframe(), &map, thread);
    f->print();

    tty->print_cr("locals size     %d", locals()->size());
    tty->print_cr("expression size %d", expressions()->size());

    method()->print_value();
    tty->cr();
    // method()->print_codes();
  } else if (TraceDeoptimization) {
    tty->print("     ");
    method()->print_value();
    Bytecodes::Code code = Bytecodes::java_code_at(method(), bcp);
    int bci = method()->bci_from(bcp);
    tty->print(" - %s", Bytecodes::name(code));
    tty->print(" @ bci %d ", bci);
    tty->print_cr("sp = " PTR_FORMAT, iframe()->sp());
  }
#endif // PRODUCT

  // The expression stack and locals are in the resource area; don't leave
  // a dangling pointer in the vframeArray we leave around for debug
  // purposes.

  _locals = _expressions = NULL;

}
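
unpack_on_stack is the inverse of fill_in: once the skeletal interpreter frame has been laid out, each saved value is written back into a raw word-sized frame slot, and dead T_CONFLICT slots are set to NULL_WORD so the garbage collector never follows stale bits. A small, self-contained sketch of that write-back (hypothetical Tag/Value types, not the HotSpot classes):

// Sketch only: write tagged values back into raw frame slots, zeroing dead
// slots so a later GC cannot misinterpret leftover data as an oop.
#include <cstdint>
#include <cstdio>

enum class Tag { Object, Int, Conflict };

struct Value { Tag tag; intptr_t bits; };

void unpack(const Value* values, int count, intptr_t* slots) {
  for (int i = 0; i < count; i++) {
    switch (values[i].tag) {
      case Tag::Int:
      case Tag::Object:
        slots[i] = values[i].bits;   // raw bits go straight back into the slot
        break;
      case Tag::Conflict:
        slots[i] = 0;                // dead slot: store NULL, never stale bits
        break;
    }
  }
}

int main() {
  Value saved[3] = {{Tag::Int, 42}, {Tag::Conflict, 7}, {Tag::Object, 0}};
  intptr_t frame_slots[3];
  unpack(saved, 3, frame_slots);
  for (intptr_t word : frame_slots) std::printf("%ld\n", (long) word);
  return 0;
}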