Code Example #1
bool MethodComparator::methods_EMCP(methodOop old_method, methodOop new_method) {
  if (old_method->code_size() != new_method->code_size())
    return false;
  int diagnosis = check_stack_and_locals_size(old_method, new_method);
  if (diagnosis != 0) {
    // RC_TRACE macro has an embedded ResourceMark
    RC_TRACE(0x00800000, ("Methods %s non-comparable with diagnosis %d",
      old_method->name()->as_C_string(),
      diagnosis));
    return false;
  }

  _old_cp = old_method->constants();
  _new_cp = new_method->constants();
  BytecodeStream s_old(old_method);
  BytecodeStream s_new(new_method);
  _s_old = &s_old;
  _s_new = &s_new;
  _switchable_test = false;
  Bytecodes::Code c_old, c_new;

  while ((c_old = s_old.next()) >= 0) {
    if ((c_new = s_new.next()) < 0 || c_old != c_new)
      return false;

    if (! args_same(c_old, c_new))
      return false;
  }
  return true;
}
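
methods_EMCP asks whether two method versions are "Equivalent Modulo Constant Pool" by walking both bytecode streams in lock step: same length, same opcode at every position, operands compared via args_same() against each method's own pool. As a rough standalone model of that lock-step walk (plain C++ with hypothetical types, not HotSpot code):

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified model: two opcode sequences are equivalent only if they have
// the same length and agree at every position.  (The real comparator also
// checks operands such as constant-pool indices via args_same().)
static bool streams_match(const std::vector<uint8_t>& old_code,
                          const std::vector<uint8_t>& new_code) {
  if (old_code.size() != new_code.size())
    return false;                      // mirrors the code_size() check
  for (std::size_t i = 0; i < old_code.size(); i++) {
    if (old_code[i] != new_code[i])
      return false;                    // mirrors c_old != c_new
  }
  return true;
}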
Code Example #2
bool MethodComparator::methods_switchable(methodOop old_method, methodOop new_method,
                                          BciMap &bci_map) {
  if (old_method->code_size() > new_method->code_size())
    // Something has definitely been deleted in the new method, compared to the old one.
    return false;

  if (check_stack_and_locals_size(old_method, new_method) != 0)
    return false;

  _old_cp = old_method->constants();
  _new_cp = new_method->constants();
  BytecodeStream s_old(old_method);
  BytecodeStream s_new(new_method);
  _s_old = &s_old;
  _s_new = &s_new;
  _bci_map = &bci_map;
  _switchable_test = true;
  GrowableArray<int> fwd_jmps(16);
  _fwd_jmps = &fwd_jmps;
  Bytecodes::Code c_old, c_new;

  while ((c_old = s_old.next()) >= 0) {
    if ((c_new = s_new.next()) < 0)
      return false;
    if (! (c_old == c_new && args_same(c_old, c_new))) {
      int old_bci = s_old.bci();
      int new_st_bci = s_new.bci();
      bool found_match = false;
      do {
        c_new = s_new.next();
        if (c_new == c_old && args_same(c_old, c_new)) {
          found_match = true;
          break;
        }
      } while (c_new >= 0);
      if (! found_match)
        return false;
      int new_end_bci = s_new.bci();
      bci_map.store_fragment_location(old_bci, new_st_bci, new_end_bci);
    }
  }

  // Now we can test all forward jumps
  for (int i = 0; i < fwd_jmps.length() / 2; i++) {
    if (! bci_map.old_and_new_locations_same(fwd_jmps.at(i*2), fwd_jmps.at(i*2+1))) {
      RC_TRACE(0x00800000,
        ("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d",
        fwd_jmps.at(i*2), bci_map.new_bci_for_old(fwd_jmps.at(i*2)),
        fwd_jmps.at(i*2+1)));
      return false;
    }
  }

  return true;
}
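
methods_switchable is more permissive than methods_EMCP: the new method may contain inserted code, and each inserted fragment is recorded in the BciMap so that forward-jump targets can be validated after the scan. A simplified standalone model of such a map (hypothetical Fragment type, not HotSpot's BciMap):

#include <vector>

// Each entry says "at old_bci the new method carries inserted code spanning
// [new_start_bci, new_end_bci)".  An old BCI translates to a new BCI by
// accumulating the sizes of all fragments inserted at or before it.
struct Fragment { int old_bci, new_start_bci, new_end_bci; };

static int new_bci_for_old(const std::vector<Fragment>& map, int old_bci) {
  int shift = 0;
  for (const Fragment& f : map) {
    if (f.old_bci <= old_bci)
      shift += f.new_end_bci - f.new_start_bci;
  }
  return old_bci + shift;
}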
Code Example #3
void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
                                                  frame*    caller,
                                                  frame*    current,
                                                  methodOop method,
                                                  intptr_t* locals,
                                                  intptr_t* stack,
                                                  intptr_t* stack_base,
                                                  intptr_t* monitor_base,
                                                  intptr_t* frame_bottom,
                                                  bool      is_top_frame) {
  istate->set_locals(locals);
  istate->set_method(method);
  istate->set_self_link(istate);
  istate->set_prev_link(NULL);
  // thread will be set by a hacky repurposing of frame::patch_pc()
  // bcp will be set by vframeArrayElement::unpack_on_stack()
  istate->set_constants(method->constants()->cache());
  istate->set_msg(BytecodeInterpreter::method_resume);
  istate->set_bcp_advance(0);
  istate->set_oop_temp(NULL);
  istate->set_mdx(NULL);
  if (caller->is_interpreted_frame()) {
    interpreterState prev = caller->get_interpreterState();
    prev->set_callee(method);
    if (*prev->bcp() == Bytecodes::_invokeinterface)
      prev->set_bcp_advance(5);
    else
      prev->set_bcp_advance(3);
  }
  istate->set_callee(NULL);
  istate->set_monitor_base((BasicObjectLock *) monitor_base);
  istate->set_stack_base(stack_base);
  istate->set_stack(stack);
  istate->set_stack_limit(stack_base - method->max_stack() - 1);
}
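
The last line pins down the frame geometry: the Zero expression stack grows downward from stack_base, so the limit sits max_stack + 1 words below it. A small self-contained illustration of that arithmetic (nothing here is HotSpot code):

#include <cstdint>
#include <cstdio>

int main() {
  intptr_t frame_words[64];                       // stand-in for the Zero frame
  intptr_t* stack_base  = frame_words + 32;
  int       max_stack   = 4;                      // from the method's class file
  intptr_t* stack_limit = stack_base - max_stack - 1;
  // The method may push at most max_stack words below stack_base.
  printf("%td words lie between limit and base\n", stack_base - stack_limit);
  return 0;
}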
Code Example #4
BasicType Bytecode_static::result_type(methodOop method) const {
  int index = java_hwrd_at(1);
  constantPoolOop constants = method->constants(); 
  symbolOop field_type = constants->signature_ref_at(index);
  BasicType basic_type = FieldType::basic_type(field_type);
  return basic_type;
}
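
The result type is recovered from the field descriptor that signature_ref_at() returns. A standalone sketch of the descriptor-to-type mapping FieldType::basic_type performs, simplified to a one-letter category (the descriptor letters are fixed by the class-file format):

// B byte, C char, D double, F float, I int, J long, S short, Z boolean,
// L...; object reference, [ array reference.
static char basic_kind(const char* descriptor) {
  switch (descriptor[0]) {
    case 'B': case 'C': case 'I': case 'S': case 'Z': return 'i'; // int family
    case 'J': return 'l';              // long
    case 'F': return 'f';              // float
    case 'D': return 'd';              // double
    case 'L': case '[': return 'a';    // reference
    default:  return '?';              // malformed descriptor
  }
}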
Code Example #5
static BasicType constant_pool_type(methodOop method, int index) {
  constantTag tag = method->constants()->tag_at(index);
       if (tag.is_int              ()) return T_INT;
  else if (tag.is_float            ()) return T_FLOAT;
  else if (tag.is_long             ()) return T_LONG;
  else if (tag.is_double           ()) return T_DOUBLE;
  else if (tag.is_string           ()) return T_OBJECT;
  else if (tag.is_unresolved_string()) return T_OBJECT;
  else if (tag.is_klass            ()) return T_OBJECT;
  ShouldNotReachHere();
  return T_ILLEGAL;
}
Code Example #6
int CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();
  intptr_t *locals = stack->sp();

  // Drop into the slow path if we need a safepoint check
  if (SafepointSynchronize::do_call_back()) {
    return normal_entry(method, 0, THREAD);
  }

  // Load the object pointer and drop into the slow path
  // if we have a NullPointerException
  oop object = LOCALS_OBJECT(0);
  if (object == NULL) {
    return normal_entry(method, 0, THREAD);
  }

  // Read the field index from the bytecode, which looks like this:
  //  0:  aload_0
  //  1:  getfield
  //  2:    index
  //  3:    index
  //  4:  ireturn/areturn
  // NB this is not raw bytecode: index is in machine order
  u1 *code = method->code_base();
  assert(code[0] == Bytecodes::_aload_0 &&
         code[1] == Bytecodes::_getfield &&
         (code[4] == Bytecodes::_ireturn ||
          code[4] == Bytecodes::_areturn), "should do");
  u2 index = Bytes::get_native_u2(&code[2]);

  // Get the entry from the constant pool cache, and drop into
  // the slow path if it has not been resolved
  constantPoolCacheOop cache = method->constants()->cache();
  ConstantPoolCacheEntry* entry = cache->entry_at(index);
  if (!entry->is_resolved(Bytecodes::_getfield)) {
    return normal_entry(method, 0, THREAD);
  }

  // Get the result and push it onto the stack
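  // long and double values occupy two stack slots; the receiver slot we
  // already own supplies one, so ltos/dtos results need one extra word.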
  switch (entry->flag_state()) {
  case ltos:
  case dtos:
    stack->overflow_check(1, CHECK_0);
    stack->alloc(wordSize);
    break;
  }
  if (entry->is_volatile()) {
    switch (entry->flag_state()) {
    case ctos:
      SET_LOCALS_INT(object->char_field_acquire(entry->f2()), 0);
      break;

    case btos:
      SET_LOCALS_INT(object->byte_field_acquire(entry->f2()), 0);
      break;

    case stos:
      SET_LOCALS_INT(object->short_field_acquire(entry->f2()), 0);
      break;

    case itos:
      SET_LOCALS_INT(object->int_field_acquire(entry->f2()), 0);
      break;

    case ltos:
      SET_LOCALS_LONG(object->long_field_acquire(entry->f2()), 0);
      break;

    case ftos:
      SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2()), 0);
      break;

    case dtos:
      SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2()), 0);
      break;

    case atos:
      SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2()), 0);
      break;

    default:
      ShouldNotReachHere();
    }
  }
  else {
    switch (entry->flag_state()) {
    case ctos:
      SET_LOCALS_INT(object->char_field(entry->f2()), 0);
      break;

    case btos:
      SET_LOCALS_INT(object->byte_field(entry->f2()), 0);
      break;

    case stos:
      SET_LOCALS_INT(object->short_field(entry->f2()), 0);
      break;

    case itos:
      SET_LOCALS_INT(object->int_field(entry->f2()), 0);
      break;

    case ltos:
      SET_LOCALS_LONG(object->long_field(entry->f2()), 0);
      break;

    case ftos:
      SET_LOCALS_FLOAT(object->float_field(entry->f2()), 0);
      break;

    case dtos:
      SET_LOCALS_DOUBLE(object->double_field(entry->f2()), 0);
      break;

    case atos:
      SET_LOCALS_OBJECT(object->obj_field(entry->f2()), 0);
      break;

    default:
      ShouldNotReachHere();
    }
  }

  // No deoptimized frames on the stack
  return 0;
}
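
The assert documents the exact five-byte shape this fast path relies on. A self-contained sketch of the same shape check, written against the standard JVM opcode values (aload_0 = 0x2a, getfield = 0xb4, ireturn = 0xac, areturn = 0xb0):

#include <cstddef>
#include <cstdint>

// A trivial accessor compiles to exactly these five bytes; anything else
// should fall back to the normal interpreter entry.
static bool looks_like_accessor(const uint8_t* code, std::size_t len) {
  return len == 5 &&
         code[0] == 0x2a &&                      // aload_0
         code[1] == 0xb4 &&                      // getfield
         (code[4] == 0xac || code[4] == 0xb0);   // ireturn / areturn
}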
Code Example #7
int CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
  // Make sure method is native and not abstract
  assert(method->is_native() && !method->is_abstract(), "should be");

  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();

  // Allocate and initialize our frame
  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
  thread->push_zero_frame(frame);
  interpreterState istate = frame->interpreter_state();
  intptr_t *locals = istate->locals();

  // Update the invocation counter
  if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
    InvocationCounter *counter = method->invocation_counter();
    counter->increment();
    if (counter->reached_InvocationLimit()) {
      CALL_VM_NOCHECK(
        InterpreterRuntime::frequency_counter_overflow(thread, NULL));
      if (HAS_PENDING_EXCEPTION)
        goto unwind_and_return;
    }
  }

  // Lock if necessary
  BasicObjectLock *monitor;
  monitor = NULL;
  if (method->is_synchronized()) {
    monitor = (BasicObjectLock*) istate->stack_base();
    oop lockee = monitor->obj();
    markOop disp = lockee->mark()->set_unlocked();

    monitor->lock()->set_displaced_header(disp);
    if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
      if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
        monitor->lock()->set_displaced_header(NULL);
      }
      else {
        CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
        if (HAS_PENDING_EXCEPTION)
          goto unwind_and_return;
      }
    }
  }

  // Get the signature handler
  InterpreterRuntime::SignatureHandler *handler; {
    address handlerAddr = method->signature_handler();
    if (handlerAddr == NULL) {
      CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
      if (HAS_PENDING_EXCEPTION)
        goto unlock_unwind_and_return;

      handlerAddr = method->signature_handler();
      assert(handlerAddr != NULL, "eh?");
    }
    if (handlerAddr == (address) InterpreterRuntime::slow_signature_handler) {
      CALL_VM_NOCHECK(handlerAddr =
        InterpreterRuntime::slow_signature_handler(thread, method, NULL, NULL));
      if (HAS_PENDING_EXCEPTION)
        goto unlock_unwind_and_return;
    }
    handler = \
      InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
  }

  // Get the native function entry point
  address function;
  function = method->native_function();
  assert(function != NULL, "should be set if signature handler is");

  // Build the argument list
  stack->overflow_check(handler->argument_count() * 2, THREAD);
  if (HAS_PENDING_EXCEPTION)
    goto unlock_unwind_and_return;

  void **arguments;
  void *mirror; {
    arguments =
      (void **) stack->alloc(handler->argument_count() * sizeof(void **));
    void **dst = arguments;

    void *env = thread->jni_environment();
    *(dst++) = &env;

    if (method->is_static()) {
      istate->set_oop_temp(
        method->constants()->pool_holder()->java_mirror());
      mirror = istate->oop_temp_addr();
      *(dst++) = &mirror;
    }

    intptr_t *src = locals;
    for (int i = dst - arguments; i < handler->argument_count(); i++) {
      ffi_type *type = handler->argument_type(i);
      if (type == &ffi_type_pointer) {
        if (*src) {
          stack->push((intptr_t) src);
          *(dst++) = stack->sp();
        }
        else {
          *(dst++) = src;
        }
        src--;
      }
      else if (type->size == 4) {
        *(dst++) = src--;
      }
      else if (type->size == 8) {
        src--;
        *(dst++) = src--;
      }
      else {
        ShouldNotReachHere();
      }
    }
  }

  // Set up the Java frame anchor
  thread->set_last_Java_frame();

  // Change the thread state to _thread_in_native
  ThreadStateTransition::transition_from_java(thread, _thread_in_native);

  // Make the call
  intptr_t result[4 - LogBytesPerWord];
  ffi_call(handler->cif(), (void (*)()) function, result, arguments);

  // Change the thread state back to _thread_in_Java.
  // ThreadStateTransition::transition_from_native() cannot be used
  // here because it does not check for asynchronous exceptions.
  // We have to manage the transition ourself.
  thread->set_thread_state(_thread_in_native_trans);

  // Make sure new state is visible in the GC thread
  if (os::is_MP()) {
    if (UseMembar) {
      OrderAccess::fence();
    }
    else {
      InterfaceSupport::serialize_memory(thread);
    }
  }

  // Handle safepoint operations, pending suspend requests,
  // and pending asynchronous exceptions.
  if (SafepointSynchronize::do_call_back() ||
      thread->has_special_condition_for_native_trans()) {
    JavaThread::check_special_condition_for_native_trans(thread);
    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops());
  }

  // Finally we can change the thread state to _thread_in_Java.
  thread->set_thread_state(_thread_in_Java);
  fixup_after_potential_safepoint();

  // Clear the frame anchor
  thread->reset_last_Java_frame();

  // If the result was an oop then unbox it and store it in
  // oop_temp where the garbage collector can see it before
  // we release the handle it might be protected by.
  if (handler->result_type() == &ffi_type_pointer) {
    if (result[0])
      istate->set_oop_temp(*(oop *) result[0]);
    else
      istate->set_oop_temp(NULL);
  }

  // Reset handle block
  thread->active_handles()->clear();

 unlock_unwind_and_return:

  // Unlock if necessary
  if (monitor) {
    BasicLock *lock = monitor->lock();
    markOop header = lock->displaced_header();
    oop rcvr = monitor->obj();
    monitor->set_obj(NULL);

    if (header != NULL) {
      if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
        monitor->set_obj(rcvr); {
          HandleMark hm(thread);
          CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));
        }
      }
    }
  }

 unwind_and_return:

  // Unwind the current activation
  thread->pop_zero_frame();

  // Pop our parameters
  stack->set_sp(stack->sp() + method->size_of_parameters());

  // Push our result
  if (!HAS_PENDING_EXCEPTION) {
    BasicType type = result_type_of(method);
    stack->set_sp(stack->sp() - type2size[type]);

    switch (type) {
    case T_VOID:
      break;

    case T_BOOLEAN:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jboolean *) result != 0, 0);
      break;

    case T_CHAR:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerShort);
#endif
      SET_LOCALS_INT(*(jchar *) result, 0);
      break;

    case T_BYTE:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jbyte *) result, 0);
      break;

    case T_SHORT:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerShort);
#endif
      SET_LOCALS_INT(*(jshort *) result, 0);
      break;

    case T_INT:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerInt);
#endif
      SET_LOCALS_INT(*(jint *) result, 0);
      break;

    case T_LONG:
      SET_LOCALS_LONG(*(jlong *) result, 0);
      break;

    case T_FLOAT:
      SET_LOCALS_FLOAT(*(jfloat *) result, 0);
      break;

    case T_DOUBLE:
      SET_LOCALS_DOUBLE(*(jdouble *) result, 0);
      break;

    case T_OBJECT:
    case T_ARRAY:
      SET_LOCALS_OBJECT(istate->oop_temp(), 0);
      break;

    default:
      ShouldNotReachHere();
    }
  }

  // No deoptimized frames on the stack
  return 0;
}
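
The native call itself is dispatched through libffi: the signature handler supplies a prepared ffi_cif describing the callee, and ffi_call() marshals the collected argument pointers and invokes the function. A minimal standalone libffi call showing the same mechanism (illustrative only, not the interpreter's handler; link with -lffi):

#include <ffi.h>
#include <cstdio>

int main() {
  ffi_cif   cif;
  ffi_type* arg_types[1]  = { &ffi_type_pointer };
  const char* s           = "hello via libffi";
  void*     arg_values[1] = { &s };
  ffi_arg   result;

  // Describe int puts(const char*), then call it indirectly.
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                   &ffi_type_sint, arg_types) == FFI_OK) {
    ffi_call(&cif, FFI_FN(puts), &result, arg_values);
    printf("puts returned %d\n", (int) result);
  }
  return 0;
}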
Code Example #8
File: interpreter.cpp Project: BaHbKaTX/openjdk
// If deoptimization happens, this function returns the entry point at which
// execution continues with the next bytecode
address AbstractInterpreter::deopt_continue_after_entry(methodOop method, address bcp, int callee_parameters, bool is_top_frame) {
  assert(method->contains(bcp), "just checkin'");
  Bytecodes::Code code   = Bytecodes::java_code_at(bcp);
  assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
  int             bci    = method->bci_from(bcp);
  int             length = -1; // initial value for debugging
  // compute continuation length
  length = Bytecodes::length_at(bcp);
  // compute result type
  BasicType type = T_ILLEGAL;

  switch (code) {
    case Bytecodes::_invokevirtual  :
    case Bytecodes::_invokespecial  :
    case Bytecodes::_invokestatic   :
    case Bytecodes::_invokeinterface: {
      Thread *thread = Thread::current();
      ResourceMark rm(thread);
      methodHandle mh(thread, method);
      type = Bytecode_invoke_at(mh, bci)->result_type(thread);
      // since the cache entry might not be initialized:
      // (NOT needed for the old calling convention)
      if (!is_top_frame) {
        int index = Bytes::get_native_u2(bcp+1);
        method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
      }
      break;
    }

    case Bytecodes::_invokedynamic: {
      Thread *thread = Thread::current();
      ResourceMark rm(thread);
      methodHandle mh(thread, method);
      type = Bytecode_invoke_at(mh, bci)->result_type(thread);
      // since the cache entry might not be initialized:
      // (NOT needed for the old calling convention)
      if (!is_top_frame) {
        int index = Bytes::get_native_u4(bcp+1);
        method->constants()->cache()->secondary_entry_at(index)->set_parameter_size(callee_parameters);
      }
      break;
    }

    case Bytecodes::_ldc   :
    case Bytecodes::_ldc_w : // fall through
    case Bytecodes::_ldc2_w:
      {
        Thread *thread = Thread::current();
        ResourceMark rm(thread);
        methodHandle mh(thread, method);
        type = Bytecode_loadconstant_at(mh, bci)->result_type();
        break;
      }

    default:
      type = Bytecodes::result_type(code);
      break;
  }

  // return entry point for computed continuation state & bytecode length
  return
    is_top_frame
    ? Interpreter::deopt_entry (as_TosState(type), length)
    : Interpreter::return_entry(as_TosState(type), length);
}
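
In every branch the continuation is "current bci plus the length of the bytecode at bcp", with the TosState picking the matching return entry. A standalone sketch of that arithmetic for the fixed-length bytecodes handled above, using their JVM-spec lengths:

#include <cstdint>

// invokevirtual/invokespecial/invokestatic are 3 bytes, invokeinterface and
// invokedynamic are 5, ldc is 2, ldc_w/ldc2_w are 3 (per the JVM spec).
static int continuation_bci(int bci, uint8_t opcode) {
  switch (opcode) {
    case 0xb6: case 0xb7: case 0xb8: return bci + 3;  // invoke{virtual,special,static}
    case 0xb9: case 0xba:            return bci + 5;  // invokeinterface, invokedynamic
    case 0x12:                       return bci + 2;  // ldc
    case 0x13: case 0x14:            return bci + 3;  // ldc_w, ldc2_w
    default:                         return -1;       // variable-length or unhandled here
  }
}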
Code Example #9
// --- build_repack_buffer ---------------------------------------------------
// Build an IFrame structure to help ASM code repack the 1 compiled frame into
// many interpreter (or C1) frames.  Takes in the current thread and a vframe;
// the vframe is pointing at the virtual Java frame needing to be repacked.
// It takes in the callee (which this frame is busy trying to call in its
// inlined code), and an array of IFrames.  It returns the updated IFrame
// buffer filled in for this frame.
void Deoptimization::build_repack_buffer( JavaThread *thread, frame fr, IFrame *buf, const DebugMap *dm, const DebugScope *ds, intptr_t *jexstk, objectRef *lckstk, bool is_deopt, bool is_c1, bool is_youngest) {
  assert( thread->_deopt_buffer->contains((char*)(buf+1)), "over-ran large deopt buffer?" );

  int bci = ds->bci();
  if( bci == InvocationEntryBci ) {
    // We deoptimized while hanging in prologue code for a synchronized
    // method.  We got the lock (after all, deopt happens after returning
    // from the blocking call).  We want to begin execution in the
    // interpreter at BCI 0, and after taking the lock.
    // Also it is possible to enter the deopt code through the br_s on method
    // entry before the first byte code.
    bci = 0;
  }

  const methodOop moop = ds->method().as_methodOop();
  if( ds->caller() ) {          // Do I have a caller?  Am I mid-call?
    // Initialize the constant pool entry for caller-parameter size.  It
    // might be the case that we inlined and compiled a callee, and are busy
    // calling it in the compiled code, and get deoptimized with that callee
    // in-progress AND we've never executed it in the interpreter - which
    // would have filled in the constant pool cache before making the call.
    // Fill it in now.
    const methodOop caller = ds->caller()->method().as_methodOop();
    int index = Bytes::get_native_u2(caller->bcp_from(ds->caller()->bci())+1);
    ConstantPoolCacheEntry *cpe = caller->constants()->cache()->entry_at(index);
    // Since we are setting the constant pool entry here, and another thread
    // could be busy resolving here we have a race condition setting the
    // flags.  Use a CAS to only set the flags if they are currently 0.
    intx *flags_adr = (intx*)((intptr_t)cpe + in_bytes(ConstantPoolCacheEntry::flags_offset()));
    if( !*flags_adr ) {         // Flags currently 0?
      // Set the flags, because the interpreter-return-entry points need some
      // info from them.  Not all fields are set, because it's too complex to
      // do it here... and not needed.  The cpCacheEntry is left "unresolved"
      // such that the next real use of it from the interpreter will be forced
      // to do a proper resolve, which will fill in the missing fields.

      // Compute new flags needed by the interpreter-return-entry
      intx flags = 
        (moop->size_of_parameters() & 0xFF) | 
        (1 << ConstantPoolCacheEntry::hotSwapBit) |
        (moop->result_type() << ConstantPoolCacheEntry::tosBits);
      // CAS 'em in, but only if there is currently a 0 flags
      assert0( sizeof(jlong)==sizeof(intx) );
      Atomic::cmpxchg((jlong)flags, (jlong*)flags_adr, 0);
      // We don't care about the result, because the cache is monomorphic.
      // Either our CAS succeeded and jammed    the right parameter count, or
      // another thread succeeded and jammed in the right parameter count.
    } 
  }

  if (TraceDeoptimization) {
    BufferedLoggerMark m(NOTAG, Log::M_DEOPT, TraceDeoptimization, true);
    m.out("DEOPT REPACK c%d: ", is_c1 ? 1 : 2);
    moop->print_short_name(m.stream());
    m.out(" @ bci %d %s", bci, ds->caller() ? "called by...": "   (oldest frame)" );
  }

  // If there was a suitable C1 frame, use it.
  // Otherwise, use an interpreter frame.
  if( 1 ) {
    // Build an interpreter-style IFrame.  Naked oops abound.
    assert0( !objectRef(moop).is_stack() );
    buf->_mref = objectRef(moop);
    buf->_cpc = moop->constants()->cacheRef();

    // Compute monitor list length.  If we have coarsened a lock we will end
    // up unlocking it and the repack buffer will not need to see it.
    uint mons_len = ds->numlocks();
    if( ds->is_extra_lock() ) { assert0( mons_len > 0 ); mons_len--; }
    assert0( mons_len < (256*sizeof(buf->_numlck)) );
    buf->_numlck = mons_len;
    
    // Set up the return pc for the next frame: the next frame is a younger
    // frame which will return to this older frame.  All middle frames return
    // back into the interpreter, just after a call with proper TOS state.
    // Youngest frames always start in vtos state because the uncommon-trap
    // blob sets them up that way.
    const address bcp = moop->bcp_from(bci);
    Bytecodes::Code c = Bytecodes::java_code(Bytecodes::cast(*bcp));
    BasicType return_type = T_VOID;

    bool handle_popframe = is_youngest && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution();

    int bci_bump = 0;
    if( !is_youngest ) {        // Middle-frame?
      bool from_call = (c == Bytecodes::_invokevirtual ||
                        c == Bytecodes::_invokespecial ||
                        c == Bytecodes::_invokestatic ||
                        c == Bytecodes::_invokeinterface );
      assert(from_call, "Middle frame is in the middle of a call");
      bci_bump = Bytecodes::length_at(bcp); // But need to know how much it will be bumped for the return address
      buf->_bci = bci;          // Save bci without bumping it; normal interpreter call returns bump the bci as needed
      buf[-1]._retadr = Interpreter::return_entry(vtos, bci_bump);

    } else if( thread->pending_exception() ) { 
      // Deopt-with-pending.  Throw up on return to interpreter, which is
      // handled by unpack_and_go.
      buf->_bci = bci;
      buf[-1]._retadr = Interpreter::unpack_and_go();

    } else if( !is_deopt ) {    // It is a C2-style uncommon-trap.
      // Do NOT increment the BCP!  We are re-executing the current bytecode.
      buf->_bci = bci;
      buf[-1]._retadr = Interpreter::unpack_and_go();
      
    } else {                    // It is a plain deopt
      // It is a deopt without exception.  See if we are C1 in mid-patch.
      // If so, we always need to re-execute the bytecode.
      bool is_C1_mid_patch = false;
      if( is_c1 ) {             // C1 codeblob?
        address caller_pc = fr.pc();
        if( NativeCall::is_call_before(caller_pc) ) {
          address target = nativeCall_at(caller_pc)->destination();
          is_C1_mid_patch = target == Runtime1::entry_for(Runtime1::load_klass_patching_id);
        }
      }
      if( is_C1_mid_patch ) {
        Untested("");
        // Do NOT increment the BCP!  We are re-executing the current bytecode.
      } else if( ds->bci() == InvocationEntryBci ) {
        // It is deopt while hanging on a method-entry lock.
        // Do not advance BCP, as we have not executed bci 0 yet.
        
      } else {                  // Else C2 or C1-not-mid-patch
        // It is a deopt.  Whether we re-execute the current bytecode or
        // assume it has completed depends on the bytecode.
        switch( c ) {
        case Bytecodes::_lookupswitch:
        case Bytecodes::_tableswitch:
        case Bytecodes::_fast_binaryswitch:
        case Bytecodes::_fast_linearswitch:
          // recompute conditional expression folded into _if<cond>
        case Bytecodes::_lcmp      :
        case Bytecodes::_fcmpl     :
        case Bytecodes::_fcmpg     :
        case Bytecodes::_dcmpl     :
        case Bytecodes::_dcmpg     :
        case Bytecodes::_ifnull    :
        case Bytecodes::_ifnonnull :
        case Bytecodes::_goto      :
        case Bytecodes::_goto_w    :
        case Bytecodes::_ifeq      :
        case Bytecodes::_ifne      :
        case Bytecodes::_iflt      :
        case Bytecodes::_ifge      :
        case Bytecodes::_ifgt      :
        case Bytecodes::_ifle      :
        case Bytecodes::_if_icmpeq :
        case Bytecodes::_if_icmpne :
        case Bytecodes::_if_icmplt :
        case Bytecodes::_if_icmpge :
        case Bytecodes::_if_icmpgt :
        case Bytecodes::_if_icmple :
        case Bytecodes::_if_acmpeq :
        case Bytecodes::_if_acmpne :
          // special cases
        case Bytecodes::_aastore:
          // We are re-executing the current bytecode.
          Untested("");
          break;
          // special cases
        case Bytecodes::_putstatic:
        case Bytecodes::_getstatic:
        case Bytecodes::_getfield:
        case Bytecodes::_putfield:
          // We are re-executing the current bytecode.
          break;
        case Bytecodes::_athrow    :
          break;                // Must be deopt-w-exception
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic: {
          methodHandle mh(thread, moop);
          return_type = Bytecode_invoke_at(mh, bci)->result_type(thread);
          if( !handle_popframe &&
              !ds->should_reexecute()) 
            bci_bump = 3; // Increment the BCP to post-call!!!  See below!
          break;
        }
        case Bytecodes::_invokeinterface: {
          methodHandle mh(thread, moop);
          return_type = Bytecode_invoke_at(mh, bci)->result_type(thread);
          if( !handle_popframe &&
              !ds->should_reexecute()) 
            bci_bump = 5; // Increment the BCP to post-call!!!  See below!
          break;
        }
        case Bytecodes::_ldc   : 
          Untested("");
          return_type = constant_pool_type(moop, *(bcp+1));
          if( !ds->should_reexecute()) bci_bump = 2; // Increment the BCP to post-call!!!  See below!
          break;
          
        case Bytecodes::_ldc_w : // fall through
        case Bytecodes::_ldc2_w: 
          return_type = constant_pool_type(moop, Bytes::get_Java_u2(bcp+1));
          if( !ds->should_reexecute()) bci_bump = 3; // Increment the BCP to post-call!!!  See below!
          break;
          
        default:
          return_type = Bytecodes::result_type(c);
          if( !ds->should_reexecute()) bci_bump = Bytecodes::length_at(bcp); // Increment the BCP to post-call!!!  See below!
          break;
        }
        if (ds->should_reexecute()) return_type = T_VOID;
      }
      // Save (possibly advanced) bci
      buf->_bci = bci+bci_bump;
      buf[-1]._retadr = Interpreter::unpack_and_go(); // Interpreter::return_entry(vtos, bci_bump);
    }

    // ---
    // Now all the Java locals.
    // First set the start of locals for the interpreter frame we are building.
    buf->_loc = (intptr_t)jexstk;

    uint loc_len = moop->max_locals();
    for( uint i = 0; i < loc_len; i++ ) {
      *jexstk++ = dm->get_value(ds->get_local(i), fr);
    }

    // Now that the locals have been unpacked if we have any deferred local writes
    // added by jvmti then we can free up that structure as the data is now in the
    // buffer
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    if( list ) {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      Unimplemented();
    }

    // ---
    // Now all the Java Expressions
    uint expr_len = ds->numstk();
    for( uint i = 0; i < expr_len; i++ )
      *jexstk++ = dm->get_value(ds->get_expr(i), fr);

    // If returning from a deoptimized call, we will have return values in
    // registers that need to end up on the Java execution stack.  They are
    // not recorded in the debug info, since they did not exist at the time
    // the call began.
    if( is_youngest && is_deopt ) { 
      if( type2size[return_type] > 0 ) {
        if( type2size[return_type]==2 ) {
          *jexstk++ = (intptr_t)frame::double_slot_primitive_type_empty_slot_id << 32;
        }
        *jexstk++ = pd_fetch_return_values( thread, return_type );
        // Need to adjust the final jexstk_top for the youngest frame
        // returning values.  These returned values are not accounted for in
        // the standard debug info.
        thread->_jexstk_top = jexstk;
      }
    }

    // JVMTI PopFrame support
    // Add the number of words of popframe preserved args to expr_len
    int popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
    int popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
    if (handle_popframe) {
      Unimplemented();
      expr_len += popframe_preserved_args_size_in_words;
      // An interpreted frame was popped but it returns to a deoptimized
      // frame. The incoming arguments to the interpreted activation
      // were preserved in thread-local storage by the
      // remove_activation_preserving_args_entry in the interpreter; now
      // we put them back into the just-unpacked interpreter frame.
      // Note that this assumes that the locals arena grows toward lower
      // addresses.
    }

    // Set the JEX stk top
    buf->_stk = (intptr_t)jexstk;

    // --- 
    // Now move locked objects to the interpreters lock-stack.
    // No need to inflate anything, as we're moving standard oops.
    int numlcks = ds->numlocks();
    if( ds->is_extra_lock() ) { // coarsened a lock
      Untested("");
      // The last lock is "coarsened" - kept locked when it should have been
      // unlocked and relocked.  With no deopt, keeping it locked saves the 2
      // sets of back-to-back CAS's and fences.  However, here we need to
      // unlock it to match the proper Java state.
      ObjectSynchronizer::unlock(ALWAYS_POISON_OBJECTREF((objectRef)dm->get_value(ds->get_lock(numlcks-1),fr)).as_oop());
      numlcks--;
    }
    for( int i = 0; i < numlcks; i++ ) {
      *lckstk++ = ALWAYS_POISON_OBJECTREF((objectRef)dm->get_value(ds->get_lock(i), fr));
    }

  } else {                    // Make a C1 frame
    
    Unimplemented();
    
  }
}
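
The flags CAS in build_repack_buffer is a one-time-publish idiom: any number of threads may race to initialize the cache entry, but only a transition from 0 can succeed, and losing the race is harmless because every racer computes identical flags. A self-contained model with std::atomic (not HotSpot's Atomic API):

#include <atomic>
#include <cstdint>

static std::atomic<intptr_t> flags_word{0};

// Install 'flags' only if the word is still 0; afterwards the word holds
// the correct value no matter which thread won the race.
static void publish_flags_once(intptr_t flags) {
  intptr_t expected = 0;
  flags_word.compare_exchange_strong(expected, flags);
}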