Example No. 1
inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
           && fp() == other.fp()
           && pc() == other.pc();
  assert(!ret || (cb() == other.cb() && _deopt_state == other._deopt_state), "inconsistent construction");
  return ret;
}
inline bool frame::equal(frame other) const {
  // Seems close enough
  return sp() == other.sp()
      && fp() == other.fp()
      && pc_addr() == other.pc_addr() 
      && pc() == other.pc();
}
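As a hedged illustration of the pattern in the first variant (the defining fields decide equality, while derived fields are only asserted to be consistent), here is a minimal self-contained sketch using a hypothetical MiniFrame type rather than the VM's real frame class:

#include <cassert>
#include <cstdint>

// Hypothetical, simplified stand-in for the VM's frame type.
struct MiniFrame {
  intptr_t*   sp;          // stack pointer
  intptr_t*   fp;          // frame pointer
  const void* pc;          // program counter
  const void* cb;          // code blob derived from pc
  int         deopt_state; // derived deoptimization state

  bool equal(const MiniFrame& other) const {
    bool ret = sp == other.sp && fp == other.fp && pc == other.pc;
    // If the defining fields match, the derived fields must match too.
    assert(!ret || (cb == other.cb && deopt_state == other.deopt_state));
    return ret;
  }
};

int main() {
  intptr_t stack[4] = {0, 0, 0, 0};
  MiniFrame a{stack, stack + 2, &stack[1], nullptr, 0};
  MiniFrame b = a;
  return a.equal(b) ? 0 : 1;
}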
Example No. 3
void FlatProfiler::record_tick_for_running_frame(frame fr) {
  // The tick happened in real code -> non VM code
  if (fr.is_interpreted_frame()) {
    methodOop method = fr.method();
    if (method == NULL) return;
    assert(method->is_method(), "must be method");
    FlatProfiler::interpreted_update(method, fr.receiver()->klass(), in_code);

  } else if (fr.is_compiled_frame()) {
    FlatProfiler::compiled_update(findNMethod(fr.pc()), in_code);

  } else if (PIC::in_heap(fr.pc())) {
    PIC* pic = PIC::find(fr.pc());
    FlatProfiler::compiled_update(findNMethod((char*) pic->compiled_ic()), in_pic);

  } else if (StubRoutines::contains(fr.pc())) {
    FlatProfiler::stub_ticks++;
  }
}
// --- locked ----------------------------------------------------------------
// Count times an oop is locked in this frame & codeblob
int CodeBlob::locked( frame fr, oop o ) const {
  methodCodeRef mcref = owner();
  if( mcref.is_null() ) return 0; // call stubs hit here
  methodCodeOop mcoop = mcref.as_methodCodeOop();
  if( mcoop->_blob != this ) return 0; // vtable stubs share the mcoop with the real blob
  const DebugMap *dbg = mcoop->_debuginfo;
  if( !dbg ) return 0;            // Commonly natives have no debug info
  const DebugScope *ds = dbg->get( fr.pc() -(address)this );
  if ( !ds ) return 0;
  return ds->count_locks( dbg, fr, o );
}
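A minimal sketch of the same guard-then-delegate shape, using hypothetical MiniDebugScope/oop stand-ins rather than the real methodCodeOop/DebugMap types (every missing piece of metadata means "zero locks"):

#include <vector>

// Hypothetical, simplified debug scope: the objects locked in one frame.
struct MiniDebugScope {
  std::vector<const void*> locked;
  int count_locks(const void* o) const {
    int n = 0;
    for (const void* p : locked)
      if (p == o) ++n;
    return n;
  }
};

// Same shape as CodeBlob::locked above: no owner, no debug info, or no scope
// for this pc all mean "zero locks"; only a fully resolved scope is counted.
int locked_in_scope(const MiniDebugScope* ds, const void* o) {
  if (!ds) return 0;            // e.g. natives commonly have no debug info
  return ds->count_locks(o);
}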
Example No. 5
void FlatProfiler::record_tick_for_calling_frame(frame fr) {
  // The tick happened in VM code

  TickPosition where = other;
  if (theCompiler) { 
    where = in_compiler;
  }
  if (fr.is_interpreted_frame()) {
    methodOop method = fr.method();
    if (method == NULL) return;
    assert(method->is_method(), "must be method");
    int bci = method->bci_from(fr.hp());
    if (Bytecodes::code_type((Bytecodes::Code) *method->codes(bci)) == Bytecodes::primitive_call) {
      where = in_primitive;
    }
    FlatProfiler::interpreted_update(method, fr.receiver()->klass(), where);

  } else if (fr.is_compiled_frame()) {
    nmethod* nm = findNMethod(fr.pc());
    relocIterator iter(nm);
    while (iter.next()) {
      if (iter.is_call() && iter.call_end() == fr.pc()) {
        if (iter.type() == relocInfo::prim_type)
          where = in_primitive;
      }
    }
    FlatProfiler::compiled_update(nm, where);

  } else {
    if (StubRoutines::contains(fr.pc())) {
      FlatProfiler::stub_ticks++;
    } else {
      FlatProfiler::unknown_ticks++;
    }
  }
}
//=============================================================================
// --- do_closure
// Convenience: apply closure to all oops
void OopMapStream::do_closure( CodeBlob *cb, frame fr, OopClosure *f ) {
  address pc = fr.pc();
  assert0( cb->contains(pc) );
  int rel_pc = pc-(address)cb;
  // Derived pointers first: need to record the derived offset before the base moves
  methodCodeRef mcref = cb->owner();
  if( mcref.not_null() ) {
    const DebugMap *dm = mcref.as_methodCodeOop()->_debuginfo;
    if( dm ) {
      const DebugScope *scope = dm->get(rel_pc);
      scope->gc_base_derived(fr,f);
    }
  }
  // Now normal oops
  cb->_oop_maps->do_closure(rel_pc,fr,f);
}
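The "derived pointers first" ordering matters because a derived pointer is rebuilt as base + offset, and that offset can only be computed while the base oop still holds its pre-move value. A hedged, self-contained sketch of the idea (hypothetical names, not the VM's actual DebugScope/OopClosure API):

#include <cstddef>
#include <cstdint>

// Record the base-relative offset of a derived pointer before GC moves the base.
struct DerivedFixup {
  intptr_t** derived_slot;  // stack slot holding base + offset
  ptrdiff_t  offset;        // derived - base, captured before the base moves
};

inline DerivedFixup record_derived(intptr_t** slot, intptr_t* base) {
  return DerivedFixup{ slot, *slot - base };
}

// After the base has been relocated, reapply the recorded offset.
inline void rebase_derived(const DerivedFixup& fx, intptr_t* new_base) {
  *fx.derived_slot = new_base + fx.offset;
}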
Example No. 7
void traceCompiledFrame(frame& f) {
    ResourceMark mark;

    // Find the nmethod containing the pc
    compiledVFrame* vf = (compiledVFrame*) vframe::new_vframe(&f);
    assert(vf->is_compiled_frame(), "must be compiled frame");
    nmethod* nm = vf->code();
    lprintf("Found nmethod: 0x%x\n", nm);
    nm->print_value_on(std);

    std->print("\n @%d called from %#x",
               vf->scope()->offset(),
               f.pc() - Assembler::sizeOfCall);
    std->cr();
    
    trace(vf, 0, 10);
}
Example No. 8
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we attempt to read nmethod::scopes_data at serialized_null (== 0),
    // or if we read some at other crazy offset,
    // we will decode garbage and make wild references into the heap,
    // leading to crashes in product mode.
    // (This isn't airtight, of course, since there are internal
    // offsets which are also crazy.)
#ifdef ASSERT
    if (WizardMode) {
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    _frame.pc(), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
#endif
    // Provide a cheap fallback in product mode.  (See comment above.)
    found_bad_method_frame();
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = methodOop(buffer.read_oop());
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}
Example No. 9
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we read nmethod::scopes_data at serialized_null (== 0)
    // or if we read at some other invalid offset, invalid values will be decoded.
    // Based on these values, invalid heap locations could be referenced
    // that could lead to crashes in product mode.
    // Therefore, do not use the decode offset if invalid, but fill the frame
    // as if it were a native compiled frame (no Java-level assumptions).
#ifdef ASSERT
    if (WizardMode) {
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    p2i(_frame.pc()), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
#endif
    // Provide a cheap fallback in product mode.  (See comment above.)
    found_bad_method_frame();
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = buffer.read_method();
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}
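The range check above boils down to one predicate: the decode offset must not be the serialized-null sentinel (== 0, per the comment), must be non-negative, and must lie inside the scopes data. A hedged stand-alone sketch (hypothetical names, not the real DebugInformationRecorder API):

// Hypothetical sketch of the guard above; kSerializedNull mirrors the
// serialized_null (== 0) sentinel mentioned in the comment.
constexpr int kSerializedNull = 0;

inline bool decode_offset_is_sane(int decode_offset, int scopes_data_size) {
  return decode_offset != kSerializedNull &&
         decode_offset >= 0 &&
         decode_offset <  scopes_data_size;
}
// A caller would decode the scope only when this returns true and otherwise
// fall back to filling the frame as a native compiled frame.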
// --- oops_arguments_do -----------------------------------------------------
// oops-do just for arguments when args are passed but the caller is
// not yet claiming responsibility for them (e.g., mid-call resolution).
void CodeBlob::oops_arguments_do(frame fr, OopClosure* f) const {

  // Native calls have all their args handlized.
  if( _type == native ) return;

  // C1 and C2 can call directly into the C++ runtime without a stub, but none
  // of these calls need their arguments GC'd (i.e., by the time a GC point
  // is reached all arguments get handlized).
  if( _type == c1 || _type == c2 ) return;

  guarantee( _type == runtime_stubs, "we only get here for runtime stubs" );

  StubCodeDesc *stub = StubCodeDesc::desc_for(fr.pc());
  Runtime1::StubID id = Runtime1::contains(fr.pc());
  // In a resolve_and_patch_call stub, we have to parse the outgoing signature
  // to find any argument oops.  The call site itself doesn't cover the
  // outgoing oops, and there is no target method yet.
  if( fr.pc() == CodeCache::_caller_must_gc_args_0 ||
      fr.pc() == CodeCache::_caller_must_gc_args_1 ) {
    assert0( (stub && !strcmp(stub->name(), "resolve_and_patch_handler")) ||
             id == Runtime1::frequency_counter_overflow_wrapper_id);
    oops_arguments_do_impl(fr,f);
    return;
  }

  // Assert sanity for stub-generator stubs
  if( stub ) {
    if( !strcmp(stub->name(), "sba_escape_handler") ) return; // no unhandled args from a failed STA/SVB
    if( !strcmp(stub->name(), "new_handler") ) return; // no unhandled args from a new allocation request
    // If we are at an inline-cache we need to construct an official j.l.NPE
    // object.  However, no oops are in callee-save registers and the caller's
    // args are dead because the call isn't happening.  (If we are not at an
    // inline-cache then we just reset the PC to the proper handler and do not
    // do any object construction).
    if( !strcmp(stub->name(), "handler_for_null_ptr_exception") ) return;
    // If we are throwing, then the caller's args are long long gone.
    if( !strcmp(stub->name(), "forward_exception") ) return;
    // For safepoints, we generously allow any register to hold oops.
    if( !strcmp(stub->name(), "safepoint_handler") ) return;
    // The args for a blocking lock will be handlerized in the VM
    if( !strcmp(stub->name(), "blocking_lock_stub") ) return;
    // The args for registering a finalized object will be handlerized in the VM
    if( !strcmp(stub->name(), "register_finalizer") ) return;
    // There are no args saved in registers across an uncommon trap
    if( !strcmp(stub->name(), "deoptimize") ) return;
    if( !strcmp(stub->name(), "uncommon_trap") ) return;
    ShouldNotReachHere();
  }

  // Not a StubGenerator stub; check for sane C1 stubs
  if (id != -1) {
    switch(id) {
    case Runtime1::access_field_patching_id:        return;
    case Runtime1::load_klass_patching_id:          return;
    case Runtime1::monitorenter_id:                 return;
    case Runtime1::new_array_id:                    return;
    case Runtime1::new_instance_id:                 return;
    case Runtime1::new_multi_array_id:              return;
    case Runtime1::register_finalizer_id:           return;
    case Runtime1::throw_array_store_exception_id:  return;
    case Runtime1::throw_class_cast_exception_id:   return;
    case Runtime1::throw_div0_exception_id:         return;
    case Runtime1::throw_index_exception_id:        return;
    case Runtime1::throw_null_pointer_exception_id: return;
    case Runtime1::throw_range_check_failed_id:     return;
    default: tty->print_cr("Unimplemented Runtime1 stub ID %s (%d)", Runtime1::name_for(id), id); Unimplemented();
    }
  }

  // Probably most other stubs are simple returns, but a few need special handling.
  ShouldNotReachHere();
}
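A hedged sketch of the stub-name screening used above: stubs known to leave no unhandled argument oops simply return, and anything unrecognized is treated as "should not reach here". The list reuses the names from the source; the helper itself is hypothetical.

#include <cstring>

inline bool stub_args_never_need_gc(const char* name) {
  static const char* const safe[] = {
    "sba_escape_handler", "new_handler", "handler_for_null_ptr_exception",
    "forward_exception", "safepoint_handler", "blocking_lock_stub",
    "register_finalizer", "deoptimize", "uncommon_trap",
  };
  for (const char* s : safe)
    if (strcmp(name, s) == 0) return true;
  return false;  // caller should treat an unknown stub as ShouldNotReachHere()
}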
Example No. 11
inline bool vframeStreamCommon::fill_from_frame() {
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != NULL && cb()->is_nmethod()) {
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == NULL) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (like AsyncGetCallTrace would do) then this is an
        // acceptable result. [ This is assuming that safe_for_sender
        // is so bullet proof that we can trust the frames it produced. ]
        //
        // So if we see that the thread is not safepoint safe
        // then simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting). If, however, the thread is safepoint safe, this
        // is the sign of a compiler bug and we'll let
        // fill_from_compiled_frame handle it.


        JavaThreadState state = _thread->thread_state();

        // in_Java should be good enough to test safepoint safety.
        // If the state were, say, in_Java_trans, we'd expect the pc
        // to have already been slightly adjusted to one that would
        // produce a pcDesc, since the trans state is one that might
        // in fact anticipate a safepoint.

        if (state == _thread_in_Java ) {
          // This will yield the method, a bci of zero, and no inlining.
          // It might be nice to have a unique bci to signify this
          // particular case, but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the
          // stack, since there is evidence that walking any further
          // could produce a bad stack chain. However, until we see
          // evidence that allowing this produces frames bad enough
          // to cause segvs or assertion failures, we don't do it:
          // while we may get a bad call chain, the probability is
          // much higher (by several orders of magnitude) that we
          // get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  return false;
}
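A hedged sketch of the decision made when pc_desc is NULL (hypothetical enum and helper; the real code feeds serialized_null into fill_from_compiled_frame so that its range check takes the fallback path there):

enum class FillAction {
  decode_scope,     // found a PcDesc (or let fill_from_compiled_frame complain)
  native_fallback   // async sample off a safepoint: method at bci 0, no inlining
};

inline FillAction choose_fill(bool have_pc_desc, bool thread_in_java) {
  if (have_pc_desc) return FillAction::decode_scope;
  // No PcDesc: tolerate it only for a thread sampled outside a safepoint.
  return thread_in_java ? FillAction::native_fallback
                        : FillAction::decode_scope;
}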
Example No. 12
 address frame_pc() const { return _frame.pc(); }
inline bool frame::equal(frame other) const {
  return sp() == other.sp()
      && fp() == other.fp()
      && pc() == other.pc();
}
// --- build_repack_buffer ---------------------------------------------------
// Build an IFrame structure to help ASM code repack the 1 compiled frame into
// many interpreter (or C1) frames.  Takes in the current thread and a vframe;
// the vframe points at the virtual Java frame needing to be repacked.
// It takes in the callee (which this frame is busy trying to call in its
// inlined code), and an array of IFrames.  It returns the updated IFrame
// buffer filled in for this frame.
void Deoptimization::build_repack_buffer( JavaThread *thread, frame fr, IFrame *buf, const DebugMap *dm, const DebugScope *ds, intptr_t *jexstk, objectRef *lckstk, bool is_deopt, bool is_c1, bool is_youngest) {
  assert( thread->_deopt_buffer->contains((char*)(buf+1)), "over-ran large deopt buffer?" );

  int bci = ds->bci();
  if( bci == InvocationEntryBci ) {
    // We deoptimized while hanging in prologue code for a synchronized
    // method.  We got the lock (after all, deopt happens after returning
    // from the blocking call).  We want to begin execution in the
    // interpreter at BCI 0, and after taking the lock.
    // Also it is possible to enter the deopt code through the br_s on method
    // entry before the first byte code.
    bci = 0;
  }

  const methodOop moop = ds->method().as_methodOop();
  if( ds->caller() ) {          // Do I have a caller?  Am I mid-call?
    // Initialize the constant pool entry for caller-parameter size.  It
    // might be the case that we inlined and compiled a callee, and are busy
    // calling it in the compiled code, and get deoptimized with that callee
    // in-progress AND we've never executed it in the interpreter - which
    // would have filled in the constant pool cache before making the call.
    // Fill it in now.
    const methodOop caller = ds->caller()->method().as_methodOop();
    int index = Bytes::get_native_u2(caller->bcp_from(ds->caller()->bci())+1);
    ConstantPoolCacheEntry *cpe = caller->constants()->cache()->entry_at(index);
    // Since we are setting the constant pool entry here, and another thread
    // could be busy resolving here we have a race condition setting the
    // flags.  Use a CAS to only set the flags if they are currently 0.
    intx *flags_adr = (intx*)((intptr_t)cpe + in_bytes(ConstantPoolCacheEntry::flags_offset()));
    if( !*flags_adr ) {         // Flags currently 0?
      // Set the flags, because the interpreter-return-entry points need some
      // info from them.  Not all fields are set, because it's too complex to
      // do it here... and not needed.  The cpCacheEntry is left "unresolved"
      // such that the next real use of it from the interpreter will be forced
      // to do a proper resolve, which will fill in the missing fields.

      // Compute new flags needed by the interpreter-return-entry
      intx flags = 
        (moop->size_of_parameters() & 0xFF) | 
        (1 << ConstantPoolCacheEntry::hotSwapBit) |
        (moop->result_type() << ConstantPoolCacheEntry::tosBits);
      // CAS 'em in, but only if there is currently a 0 flags
      assert0( sizeof(jlong)==sizeof(intx) );
      Atomic::cmpxchg((jlong)flags, (jlong*)flags_adr, 0);
      // We don't care about the result, because the cache is monomorphic.
      // Either our CAS succeeded and jammed in the right parameter count, or
      // another thread succeeded and jammed in the right parameter count.
    } 
  }

  if (TraceDeoptimization) {
    BufferedLoggerMark m(NOTAG, Log::M_DEOPT, TraceDeoptimization, true);
    m.out("DEOPT REPACK c%d: ", is_c1 ? 1 : 2);
    moop->print_short_name(m.stream());
    m.out(" @ bci %d %s", bci, ds->caller() ? "called by...": "   (oldest frame)" );
  }

  // If there was a suitable C1 frame, use it.
  // Otherwise, use an interpreter frame.
  if( 1 ) {
    // Build an interpreter-style IFrame.  Naked oops abound.
    assert0( !objectRef(moop).is_stack() );
    buf->_mref = objectRef(moop);
    buf->_cpc = moop->constants()->cacheRef();

    // Compute monitor list length.  If we have coarsened a lock we will end
    // up unlocking it and the repack buffer will not need to see it.
    uint mons_len = ds->numlocks();
    if( ds->is_extra_lock() ) { assert0( mons_len > 0 ); mons_len--; }
    assert0( mons_len < (256*sizeof(buf->_numlck)) );
    buf->_numlck = mons_len;
    
    // Set up the return pc for the next frame: the next frame is a younger
    // frame which will return to this older frame.  All middle frames return
    // back into the interpreter, just after a call with proper TOS state.
    // Youngest frames always start in vtos state because the uncommon-trap
    // blob sets them up that way.
    const address bcp = moop->bcp_from(bci);
    Bytecodes::Code c = Bytecodes::java_code(Bytecodes::cast(*bcp));
    BasicType return_type = T_VOID;

    bool handle_popframe = is_youngest && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution();

    int bci_bump = 0;
    if( !is_youngest ) {        // Middle-frame?
      bool from_call = (c == Bytecodes::_invokevirtual   ||
                        c == Bytecodes::_invokespecial   ||
                        c == Bytecodes::_invokestatic    ||
                        c == Bytecodes::_invokeinterface );
      assert(from_call, "Middle frame is in the middle of a call");
      bci_bump = Bytecodes::length_at(bcp); // But need to know how much it will be bumped for the return address
      buf->_bci = bci;          // Save bci without bumping it; normal interpreter call returns bump the bci as needed
      buf[-1]._retadr = Interpreter::return_entry(vtos, bci_bump);

    } else if( thread->pending_exception() ) { 
      // Deopt-with-pending.  Throw up on return to interpreter, which is
      // handled by unpack_and_go.
      buf->_bci = bci;
      buf[-1]._retadr = Interpreter::unpack_and_go();

    } else if( !is_deopt ) {    // It is a C2-style uncommon-trap.
      // Do NOT increment the BCP!  We are re-executing the current bytecode.
      buf->_bci = bci;
      buf[-1]._retadr = Interpreter::unpack_and_go();
      
    } else {                    // It is a plain deopt
      // It is a deopt without exception.  See if we are C1 in mid-patch.
      // If so, we always need to re-execute the bytecode.
      bool is_C1_mid_patch = false;
      if( is_c1 ) {             // C1 codeblob?
        address caller_pc = fr.pc();
        if( NativeCall::is_call_before(caller_pc) ) {
          address target = nativeCall_at(caller_pc)->destination();
          is_C1_mid_patch = target == Runtime1::entry_for(Runtime1::load_klass_patching_id);
        }
      }
      if( is_C1_mid_patch ) {
        Untested("");
        // Do NOT increment the BCP!  We are re-executing the current bytecode.
      } else if( ds->bci() == InvocationEntryBci ) {
        // It is deopt while hanging on a method-entry lock.
        // Do not advance BCP, as we have not executed bci 0 yet.
        
      } else {                  // Else C2 or C1-not-mid-patch
        // It is a deopt.  Whether we re-execute the current bytecode or
        // assume it has completed depends on the bytecode.
        switch( c ) {
        case Bytecodes::_lookupswitch:
        case Bytecodes::_tableswitch:
        case Bytecodes::_fast_binaryswitch:
        case Bytecodes::_fast_linearswitch:
          // recompute conditional expression folded into _if<cond>
        case Bytecodes::_lcmp      :
        case Bytecodes::_fcmpl     :
        case Bytecodes::_fcmpg     :
        case Bytecodes::_dcmpl     :
        case Bytecodes::_dcmpg     :
        case Bytecodes::_ifnull    :
        case Bytecodes::_ifnonnull :
        case Bytecodes::_goto      :
        case Bytecodes::_goto_w    :
        case Bytecodes::_ifeq      :
        case Bytecodes::_ifne      :
        case Bytecodes::_iflt      :
        case Bytecodes::_ifge      :
        case Bytecodes::_ifgt      :
        case Bytecodes::_ifle      :
        case Bytecodes::_if_icmpeq :
        case Bytecodes::_if_icmpne :
        case Bytecodes::_if_icmplt :
        case Bytecodes::_if_icmpge :
        case Bytecodes::_if_icmpgt :
        case Bytecodes::_if_icmple :
        case Bytecodes::_if_acmpeq :
        case Bytecodes::_if_acmpne :
          // special cases
        case Bytecodes::_aastore:
          // We are re-executing the current bytecode.
          Untested("");
          break;
          // special cases
        case Bytecodes::_putstatic:
        case Bytecodes::_getstatic:
        case Bytecodes::_getfield:
        case Bytecodes::_putfield:
          // We are re-executing the current bytecode.
          break;
        case Bytecodes::_athrow    :
          break;                // Must be deopt-w-exception
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic: {
          methodHandle mh(thread, moop);
          return_type = Bytecode_invoke_at(mh, bci)->result_type(thread);
          if( !handle_popframe &&
              !ds->should_reexecute()) 
            bci_bump = 3; // Increment the BCP to post-call!!!  See below!
          break;
        }
        case Bytecodes::_invokeinterface: {
          methodHandle mh(thread, moop);
          return_type = Bytecode_invoke_at(mh, bci)->result_type(thread);
          if( !handle_popframe &&
              !ds->should_reexecute()) 
            bci_bump = 5; // Increment the BCP to post-call!!!  See below!
          break;
        }
        case Bytecodes::_ldc   : 
          Untested("");
          return_type = constant_pool_type(moop, *(bcp+1));
          if( !ds->should_reexecute()) bci_bump = 2; // Increment the BCP to post-call!!!  See below!
          break;
          
        case Bytecodes::_ldc_w : // fall through
        case Bytecodes::_ldc2_w: 
          return_type = constant_pool_type(moop, Bytes::get_Java_u2(bcp+1));
          if( !ds->should_reexecute()) bci_bump = 3; // Increment the BCP to post-call!!!  See below!
          break;
          
        default:
          return_type = Bytecodes::result_type(c);
          if( !ds->should_reexecute()) bci_bump = Bytecodes::length_at(bcp); // Increment the BCP to post-call!!!  See below!
          break;
        }
        if (ds->should_reexecute()) return_type = T_VOID;
      }
      // Save (possibly advanced) bci
      buf->_bci = bci+bci_bump;
      buf[-1]._retadr = Interpreter::unpack_and_go(); // Interpreter::return_entry(vtos, bci_bump);
    }

    // ---
    // Now all the Java locals.
    // First set the start of locals for the interpreter frame we are building.
    buf->_loc = (intptr_t)jexstk;

    uint loc_len = moop->max_locals();
    for( uint i = 0; i < loc_len; i++ ) {
      *jexstk++ = dm->get_value(ds->get_local(i),fr);
    }

    // Now that the locals have been unpacked, if we have any deferred local
    // writes added by JVMTI then we can free up that structure, as the data
    // is now in the buffer.
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    if( list ) {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      Unimplemented();
    }

    // ---
    // Now all the Java Expressions
    uint expr_len = ds->numstk();
    for( uint i = 0; i < expr_len; i++ )
      *jexstk++ = dm->get_value(ds->get_expr(i),fr);

    // If returning from a deoptimized call, we will have return values in
    // registers that need to end up on the Java execution stack.  They are
    // not recorded in the debug info, since they did not exist at the time
    // the call began.
    if( is_youngest && is_deopt ) { 
      if( type2size[return_type] > 0 ) {
        if( type2size[return_type]==2 ) {
          *jexstk++ = (intptr_t)frame::double_slot_primitive_type_empty_slot_id << 32;
        }
        *jexstk++ = pd_fetch_return_values( thread, return_type );
        // Need to adjust the final jexstk_top for the youngest frame
        // returning values.  These returned values are not accounted for in
        // the standard debug info.
        thread->_jexstk_top = jexstk;
      }
    }

    // JVMTI PopFrame support
    // Add the number of words of popframe preserved args to expr_len
    int popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
    int popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
    if (handle_popframe) {
      Unimplemented();
      expr_len += popframe_preserved_args_size_in_words;
      // An interpreted frame was popped but it returns to a deoptimized
      // frame. The incoming arguments to the interpreted activation
      // were preserved in thread-local storage by the
      // remove_activation_preserving_args_entry in the interpreter; now
      // we put them back into the just-unpacked interpreter frame.
      // Note that this assumes that the locals arena grows toward lower
      // addresses.
    }

    // Set the JEX stk top
    buf->_stk = (intptr_t)jexstk;

    // --- 
    // Now move locked objects to the interpreter's lock-stack.
    // No need to inflate anything, as we're moving standard oops.
    int numlcks = ds->numlocks();
    if( ds->is_extra_lock() ) { // coarsened a lock
      Untested("");
      // The last lock is "coarsened" - kept locked when it should have been
      // unlocked and relocked.  With no deopt, keeping it locked saves the 2
      // sets of back-to-back CAS's and fences.  However, here we need to
      // unlock it to match the proper Java state.
      ObjectSynchronizer::unlock(ALWAYS_POISON_OBJECTREF((objectRef)dm->get_value(ds->get_lock(numlcks-1),fr)).as_oop());
      numlcks--;
    }
    for( int i = 0; i < numlcks; i++ ) {
      *lckstk++ = ALWAYS_POISON_OBJECTREF((objectRef)dm->get_value(ds->get_lock(i),fr));
    }

  } else {                    // Make a C1 frame
    
    Unimplemented();
    
  }
}
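One detail worth isolating from the function above is the "CAS the flags in only if they are still zero" idiom used for the constant pool cache entry. A hedged, self-contained sketch using std::atomic (not the VM's Atomic::cmpxchg API):

#include <atomic>
#include <cstdint>

// Install flags only if the field is still zero; losing the race is fine
// because every racer computes the same monomorphic value.
inline void install_flags_once(std::atomic<intptr_t>& flags, intptr_t value) {
  intptr_t expected = 0;
  flags.compare_exchange_strong(expected, value);
  // Result deliberately ignored: either we stored `value`, or another
  // thread already stored the identical value.
}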