// Iteration void next() { // handle frames with inlining if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return; // handle general case do { _frame = _frame.sender(&_reg_map); } while (!fill_from_frame()); }
// top-frame will be skipped vframeStream::vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub) : vframeStreamCommon(thread) { _stop_at_java_call_stub = stop_at_java_call_stub; // skip top frame, as it may not be at safepoint _frame = top_frame.sender(&_reg_map); while (!fill_from_frame()) { _frame = _frame.sender(&_reg_map); } }
void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) {
  // Stack picture:
  //   unpack_frame
  //   [new interpreter frames]   (skeletal but walkable)
  //   caller_frame
  //
  // Fill in the missing data for the skeletal interpreter frames above.

  JavaThread* current_thread = JavaThread::current();
  RegisterMap reg_map(current_thread, false);

  // Pass 1: record, youngest first, which (possibly skeletal) physical frame
  // each element will be unpacked into.
  frame walker = unpack_frame.sender(&reg_map);
  for (int i = 0; i < frames(); i++) {
    *element(i)->iframe() = walker;
    walker = walker.sender(&reg_map);  // caller frame (possibly skeletal)
  }

  // Pass 2: unpack from the oldest activation (frames() - 1) down to the
  // youngest (index 0, the top activation, which has no callee).
  frame* caller_frame = &walker;
  for (int i = frames() - 1; i >= 0; i--) {
    vframeArrayElement* el = element(i);

    // Compute the callee's parameter/local sizes; the top activation has none.
    int callee_parameters, callee_locals;
    if (i == 0) {
      callee_parameters = callee_locals = 0;
    } else {
      methodHandle caller_mh = el->method();
      methodHandle callee_mh = element(i - 1)->method();
      Bytecode_invoke invoke(caller_mh, el->bci());
      // invokedynamic instructions don't have a class but obviously don't
      // have a MemberName appendix.
      // NOTE: Use machinery here that avoids resolving of any kind.
      const bool has_member_arg =
          !invoke.is_invokedynamic() &&
          MethodHandles::has_member_arg(invoke.klass(), invoke.name());
      callee_parameters = callee_mh->size_of_parameters() + (has_member_arg ? 1 : 0);
      callee_locals = callee_mh->max_locals();
    }

    el->unpack_on_stack(caller_actual_parameters,
                        callee_parameters,
                        callee_locals,
                        caller_frame,
                        i == 0,
                        i == frames() - 1,
                        exec_mode);
    if (i == frames() - 1) {
      // Oldest frame: restore callee-saved values captured at deopt time.
      Deoptimization::unwind_callee_save_values(el->iframe(), this);
    }
    caller_frame = el->iframe();
    caller_actual_parameters = callee_parameters;
  }

  deallocate_monitor_chunks();
}
// --- oops_arguments_do_impl ------------------------------------------------
// Visit the oop-typed outgoing arguments of a pending call while stopped in a
// resolution trampoline.  Recovers the callee's signature from the CALLER's
// callsite (the callee itself is not resolved yet), sizes and types the
// argument list, asks the calling convention where each argument lives, and
// applies the closure to every T_OBJECT/T_ARRAY slot.
void CodeBlob::oops_arguments_do_impl(frame fr, OopClosure* f) const {
  ResourceMark rm;
  // We are in some trampoline (resolve_and_patch_call) doing a GC.  The
  // pending call has arguments that need GC'ing, but we do not yet know the
  // target method and cannot resolve the target method yet.
  frame cc = fr.sender();       // Compiled caller expected
  symbolOop meth_sig;           // callee's signature, recovered from the callsite
  bool is_static;               // static calls have no receiver slot
  if( cc.is_entry_frame() ) {
    // There's a rare race condition where the caller is an entry frame, but
    // the target got patched not-entrant before the call could be made.
    // The entry frame's call wrapper still knows the intended callee.
    methodOop moop = cc.entry_frame_call_wrapper()->callee_method();
    meth_sig = moop->signature();
    is_static = moop->is_static();
  } else {
    Bytecode_invoke*call;
    if(cc.is_interpreted_frame()){
      // There's a rare race condition where we might need to GC in
      // resolve_and_patch_call but the caller is an interpreted frame.
      // Read the invoke bytecode at the interpreter frame's current bci.
      call = Bytecode_invoke_at(cc.interpreter_frame_method(), cc.interpreter_frame_bci());
    } else {
      // Normal case: find caller's callsite via the code cache and its
      // debug info for the caller's pc.
      CodeBlob*cb=CodeCache::find_blob(cc.pc());
      const DebugScope *ds = cb->debuginfo(cc.pc());
      call = Bytecode_invoke_at(ds->method(), ds->bci());
    }
    meth_sig = call->signature();
    is_static = (call->adjusted_invoke_code() == Bytecodes::_invokestatic);
  }

  // First pass over the signature: count Java argument slots
  // (longs/doubles take two).
  int argcnt = 0;               // Size of signature
  if( !is_static ) argcnt++;    // Add receiver
  for(SignatureStream ss(meth_sig);!ss.at_return_type();ss.next()){
    argcnt++;
    if (ss.type() == T_LONG || ss.type() == T_DOUBLE) argcnt++;
  }

  // Second pass: materialize the per-slot BasicType array expected by the
  // calling convention (T_VOID marks the high half of a 2-slot value).
  BasicType * sig_bt = NEW_RESOURCE_ARRAY(BasicType ,argcnt);
  VReg::VR * regs = NEW_RESOURCE_ARRAY(VReg::VR,argcnt);
  int i=0;
  if( !is_static ) sig_bt[i++] = T_OBJECT;  // receiver slot
  for(SignatureStream ss(meth_sig);!ss.at_return_type();ss.next()){
    sig_bt[i++] = ss.type();    // Collect remaining bits of signature
    if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
      sig_bt[i++] = T_VOID;     // Longs & doubles take 2 Java slots
  }
  assert0(i==argcnt);

  // Now get the re-packed compiled-Java layout.  Registers are numbered from
  // the callee's point of view.
  SharedRuntime::java_calling_convention(sig_bt,regs,argcnt,true);

  // Find the oop locations and do the GC thing: translate each oop-holding
  // callee-view register to an address in the caller frame and visit it.
  for(int i=0;i<argcnt;i++){
    if ((sig_bt[i] == T_OBJECT) || (sig_bt[i] == T_ARRAY)) {
      objectRef *loc = cc.reg_to_addr_oop(VReg::as_VOopReg(regs[i]));
      f->do_oop(loc);
    }
  }
}
p->print(); tty->cr(); if (p->has_last_Java_frame()) { // If the last_Java_fp is set we are in C land and // can call the standard stack_trace function. #ifdef PRODUCT p->print_stack(); } else { tty->print_cr("Cannot find the last Java frame, printing stack disabled."); #else // !PRODUCT p->trace_stack(); } else { frame f = os::current_frame(); RegisterMap reg_map(p); f = f.sender(®_map); tty->print("(guessing starting frame id=%#p based on current fp)\n", f.id()); p->trace_stack_from(vframe::new_vframe(&f, ®_map, p)); pd_ps(f); #endif // PRODUCT } } extern "C" void pfl() { // print frame layout Command c("pfl"); JavaThread* p = JavaThread::active(); tty->print(" for thread: "); p->print(); tty->cr();