// If the bci matches, adjust the delta in the change jump request. bool adjust(int jump_bci, int delta) { if (bci() == jump_bci) { if (_delta > 0) _delta += delta; else _delta -= delta; return true; } return false; }
// Print the stack-trace element for this frame, followed by one
// "- locked <addr> (a ClassName)" line per monitor held in the frame.
void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  if (_locked_monitors == NULL) {
    return;
  }
  const int count = _locked_monitors->length();
  for (int idx = 0; idx < count; idx++) {
    oop monitor = _locked_monitors->at(idx);
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)",
                 p2i(monitor), monitor->klass()->external_name());
  }
}
// Finish initializing the profile entry for a switch bytecode once the
// MethodData has been laid out.  For every case target (and the default)
// the absolute target bci is translated into a data-index displacement
// relative to this entry, so the interpreter can step directly to the
// target's profile data.
void MultiBranchData::post_initialize(BytecodeStream* stream,
                                      MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  int my_di;
  int target_di;
  int offset;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    int len = sw.length();
    // One cell group per case plus one for the default target.
    assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
    for (int count = 0; count < len; count++) {
      // Case targets are encoded as offsets relative to the switch bci.
      target = sw.dest_offset_at(count) + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    // The default target occupies the extra (len + 1)-th slot.
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  } else {
    // Otherwise this must be a lookupswitch; same scheme, keyed pairs.
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    int npairs = sw.number_of_pairs();
    assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
    for (int count = 0; count < npairs; count++) {
      LookupswitchPair pair = sw.pair_at(count);
      target = pair.offset() + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  }
}
// Fetch a value via bci(), zero it when bit 0 is clear but bit 1 is set,
// and convert the result through bcf().
float Foo () {
  const int raw = bci(0.0f);
  const int low_bit = raw & 1;
  int selected;
  if (low_bit == 0 && (raw & 2) != 0) {
    selected = 0;
  } else {
    selected = raw;
  }
  return bcf(selected);
}
// Print the prefix shared by all ProfileData entries: the bci, the
// entry-type name, and any recorded trap state.
void ProfileData::print_shared(outputStream* st, const char* name) {
  st->print("bci: %d", bci());
  st->fill_to(tab_width_one);
  st->print("%s", name);
  tab(st);
  const int trap = trap_state();
  if (trap == 0) {
    return;
  }
  char buf[100];
  st->print("trap(%s) ",
            Deoptimization::format_trap_state(buf, sizeof(buf), trap));
}
//--------------------------profile_not_taken_branch--------------------------- void Parse::profile_not_taken_branch(bool force_update) { if (method_data_update() || force_update) { ciMethodData* md = method()->method_data(); assert(md != NULL, "expected valid ciMethodData"); ciProfileData* data = md->bci_to_data(bci()); assert(data->is_BranchData(), "need BranchData for not taken branch"); increment_md_counter_at(md, data, BranchData::not_taken_offset()); } }
//--------------------------profile_null_checkcast---------------------------- void Parse::profile_null_checkcast() { // Set the null-seen flag, done in conjunction with the usual null check. We // never unset the flag, so this is a one-way switch. if (!method_data_update()) return; ciMethodData* md = method()->method_data(); assert(md != NULL, "expected valid ciMethodData"); ciProfileData* data = md->bci_to_data(bci()); assert(data->is_BitData(), "need BitData for checkcast"); set_md_flag_at(md, data, BitData::null_seen_byte_constant()); }
// Collect the local-variable slots of this interpreted frame into a
// StackValueCollection, boxing oop slots as handles and everything else
// as raw integer slots, per the oop map at the current bci.
StackValueCollection* interpretedVFrame::locals() const {
  // For native methods max_locals() is not telling the truth; the size
  // of the parameters describes the locals area instead.
  const int slots = method()->is_native()
                      ? method()->size_of_parameters()
                      : method()->max_locals();
  StackValueCollection* result = new StackValueCollection(slots);

  // Oop map describing which slots hold oops at the current bci.
  InterpreterOopMap oop_mask;
  if (TraceDeoptimization && Verbose) {
    methodHandle m_h(thread(), method());
    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
  } else {
    method()->mask_for(bci(), &oop_mask);
  }

  for (int slot = 0; slot < slots; slot++) {
    intptr_t* addr = locals_addr_at(slot);
    StackValue* sv;
    if (oop_mask.is_oop(slot)) {
      // Oop slot: wrap in a handle.
      Handle h(*(oop*)addr);
      sv = new StackValue(h);
    } else {
      // Plain integer slot.
      sv = new StackValue(*addr);
    }
    assert(sv != NULL, "sanity check");
    result->add(sv);
  }
  return result;
}
// Print this scope descriptor on st: a header line, the raw decode
// offsets (WizardMode only), then the locals, expression stack and
// monitors, and finally the sender scope, recursively.
void ScopeDesc::print_on(outputStream* st) const {
  // header
  // NOTE(review): "%lx" with an address argument assumes pointer-sized
  // long; consider INTPTR_FORMAT here -- confirm platform formats.
  st->print("ScopeDesc[%d]@0x%lx ", _decode_offset, _code->instructions_begin());
  print_value_on(st);
  // decode offsets
  if (WizardMode) {
    st->print_cr("offset: %d", _decode_offset);
    st->print_cr("bci: %d", bci());
    st->print_cr("locals: %d", _locals_decode_offset);
    st->print_cr("stack: %d", _expressions_decode_offset);
    st->print_cr("monitor: %d", _monitors_decode_offset);
    st->print_cr("sender: %d", _sender_decode_offset);
  }
  // locals
  { GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->locals();
    if (l != NULL) {
      // Fixed: was tty->print_cr("Locals"); every other section writes to
      // the caller-supplied stream, so the header must go to st as well.
      st->print_cr("Locals");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - l%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // expressions
  { GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->expressions();
    if (l != NULL) {
      st->print_cr("Expression stack");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // monitors
  { GrowableArray<MonitorValue*>* l = ((ScopeDesc*) this)->monitors();
    if (l != NULL) {
      st->print_cr("Monitor stack");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // Recurse into the caller's scope, if any.
  if (!is_top()) {
    st->print_cr("Sender:");
    sender()->print_on(st);
  }
}
/*
 * Worker routine for fetching references and/or values
 * for a particular bci in the interpretedVFrame.
 *
 * Returns data for either "locals" or "expressions",
 * using bci relative oop_map (mask) information.
 *
 * @param expressions bool switch controlling what data to return
 *                    (false == locals / true == expressions)
 */
StackValueCollection* interpretedVFrame::stack_data(bool expressions) const {
  // Oop map for the current bci.
  InterpreterOopMap mask;
  if (TraceDeoptimization && Verbose) {
    methodHandle m_h(Thread::current(), method());
    OopMapCache::compute_one_oop_map(m_h, bci(), &mask);
  } else {
    method()->mask_for(bci(), &mask);
  }

  const int entries = mask.number_of_entries();

  // If the method is native, method()->max_locals() is not telling the
  // truth; the size of the parameters describes the locals area instead.
  const int locals_len = method()->is_native()
      ? method()->size_of_parameters()
      : method()->max_locals();
  assert(entries >= locals_len, "invariant");

  // Expression entries are whatever the mask holds beyond the locals.
  const int count = expressions ? (entries - locals_len) : locals_len;
  assert(count >= 0, "invariant");

  StackValueCollection* const result = new StackValueCollection(count);
  if (count == 0) {
    return result;
  }

  if (expressions) {
    stack_expressions(result, count, locals_len, mask, fr());
  } else {
    stack_locals(result, count, mask, fr());
  }
  assert(count == result->size(), "invariant");
  return result;
}
// Collect the expression-stack slots of this interpreted frame, boxing
// oop slots as handles and everything else as raw integer slots, per
// the oop map at the current bci.
StackValueCollection* interpretedVFrame::expressions() const {
  int depth = fr().interpreter_frame_expression_stack_size();
  if (method()->is_native()) {
    // Native methods have no expression stack.
    depth = 0;
  }
  const int nof_locals = method()->max_locals();
  StackValueCollection* result = new StackValueCollection(depth);

  // Oop map describing which slots hold oops at the current bci.
  InterpreterOopMap oop_mask;
  if (TraceDeoptimization && Verbose) {
    methodHandle m_h(method());
    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
  } else {
    method()->mask_for(bci(), &oop_mask);
  }

  for (int i = 0; i < depth; i++) {
    intptr_t* addr = fr().interpreter_frame_expression_stack_at(i);
    // Expression-stack slots follow the locals in the oop map.
    StackValue* sv;
    if (oop_mask.is_oop(i + nof_locals)) {
      Handle h(*(oop*)addr);
      sv = new StackValue(h);
    } else {
      sv = new StackValue(*addr);
    }
    assert(sv != NULL, "sanity check");
    result->add(sv);
  }
  return result;
}
// Dump this oop map on tty: the method, bci, entry count, and the
// indices of the slots that hold oops.  When dead-local zapping is
// enabled, dead slots are also printed, marked with a trailing '+'.
void InterpreterOopMap::print() {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
    // Note: under ENABLE_ZAP_DEAD_LOCALS the is_oop test becomes the
    // else-branch of the is_dead test -- a slot prints at most once.
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (is_dead(i)) tty->print("%d+ ", i); else
#endif
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}
//------------------------------make_jvmpi_method_entry------------------------ // JVMPI -- record entry to a method if compiled while JVMPI is turned on void GraphKit::make_jvmpi_method_entry() { const TypeFunc *call_type = OptoRuntime::jvmpi_method_entry_Type(); address call_address = OptoRuntime::jvmpi_method_entry_Java(); const char *call_name = OptoRuntime::stub_name( call_address ); assert(bci() == InvocationEntryBci, "must be outside all blocks"); const TypeInstPtr *method_type = TypeInstPtr::make(TypePtr::Constant, method()->klass(), true, method(), 0); Node *methodOop_node = _gvn.transform( new ConPNode(method_type) ); Node *receiver_node = (method() && !method()->is_static()) // IF (virtual call) ? map()->in(TypeFunc::Parms) // THEN 'this' pointer, receiver, : null(); // ELSE NULL kill_dead_locals(); make_slow_call( call_type, call_address, NULL, control(), methodOop_node, receiver_node ); }
// Finish initializing this jump's profile entry once the MethodData has
// been laid out: translate the branch target bci into a data-index
// displacement relative to this entry.
void JumpData::post_initialize(BytecodeStream* stream, methodDataOop mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  const Bytecodes::Code c = stream->code();
  // Wide forms carry a 4-byte branch offset.
  const bool is_wide = (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w);
  const int target = is_wide ? stream->dest_w() : stream->dest();
  const int my_di = mdo->dp_to_di(dp());
  const int target_di = mdo->bci_to_di(target);
  set_displacement(target_di - my_di);
}
//-----------------------------profile_switch_case----------------------------- void Parse::profile_switch_case(int table_index) { if (!method_data_update()) return; ciMethodData* md = method()->method_data(); assert(md != NULL, "expected valid ciMethodData"); ciProfileData* data = md->bci_to_data(bci()); assert(data->is_MultiBranchData(), "need MultiBranchData for switch case"); if (table_index >= 0) { increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index)); } else { increment_md_counter_at(md, data, MultiBranchData::default_count_offset()); } }
// Emit a simple branch bytecode targeting the given label; returns false
// when label bookkeeping fails to allocate a literal slot.
bool AbstractByteCode::GenSimpleBranchByteCode(fint offset,
                                               fint length,
                                               oop label,
                                               ByteCodeKind op) {
  const int32 literalIndex =
    labelSet->RecordLabelInfo(this, label, stack_depth, simpleBranch, bci());
  if (literalIndex == -1) {
    return false;
  }
  branchSet->RecordBranch(false, literalIndex, label);
  GenCode(offset, length,
          BuildCode(op, GenIndex(offset, length, literalIndex)));
  return true;
}
// Before each bytecode: in verbose mode, dump any stackmap entry recorded
// at this bci (flagging whether the checker considers it redundant), then
// print the bci and the bytecode mnemonic.
void BytecodePrintClosure::bytecode_prolog(JVM_SINGLE_ARG_TRAPS) {
  if (_verbose) {
    StackmapList stack_maps = method()->stackmaps();
    if (stack_maps.not_null()) {
      const int count = stack_maps.entry_count();
      for (int i = 0; i < count; i++) {
        if (stack_maps.get_bci(i) != bci()) {
          continue;
        }
        bool redundant = StackmapChecker::is_redundant(method(), i JVM_CHECK);
        _st->print_cr(redundant ? "**REDUNDANT**" : "**NECESSARY**");
        _st->print(" ");
        stack_maps.print_entry_on(_st, i, false);
        _st->cr();
        break;
      }
    }
  }
  _st->print("%3d: ", bci());
  const Bytecodes::Code code (current_bytecode());
  _st->print(_verbose ? "[ %-25s ] " : "%s ", Bytecodes::name(code));
}
// Write the supplied values back into this interpreted frame's locals,
// storing each slot as an oop or a raw integer according to the oop map
// at the current bci.  A NULL or empty collection is a no-op.
void interpretedVFrame::set_locals(StackValueCollection* values) const {
  if (values == NULL || values->size() == 0) {
    return;
  }
  // For native methods max_locals() is not telling the truth; the size
  // of the parameters describes the locals area instead.
  const int slots = method()->is_native()
                      ? method()->size_of_parameters()
                      : method()->max_locals();
  assert(slots == values->size(), "Mismatch between actual stack format and supplied data");

  // Oop map describing which slots hold oops at the current bci.
  InterpreterOopMap oop_mask;
  if (TraceDeoptimization && Verbose) {
    methodHandle m_h(thread(), method());
    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
  } else {
    method()->mask_for(bci(), &oop_mask);
  }

  for (int slot = 0; slot < slots; slot++) {
    intptr_t* addr = locals_addr_at(slot);
    StackValue* sv = values->at(slot);
    assert(sv != NULL, "sanity check");
    if (oop_mask.is_oop(slot)) {
      // Oop slot: unwrap the handle and store the raw oop.
      *(oop*)addr = (sv->get_obj())();
    } else {
      // Plain integer slot.
      *addr = sv->get_int();
    }
  }
}
// Emit an indexed branch bytecode dispatching over a vector of labels;
// returns false when label bookkeeping fails to allocate a literal slot.
bool AbstractByteCode::GenBranchIndexedByteCode(fint offset,
                                                fint length,
                                                objVectorOop labels) {
  // The branch consumes the dispatch value.
  --stack_depth;
  assert(stack_depth >= 0, "negative stack?");
  const int32 literalIndex =
    labelSet->RecordLabelVectorInfo(this, labels, stack_depth, bci());
  if (literalIndex == -1) {
    return false;
  }
  branchSet->RecordBranch(true, literalIndex, literals->obj_at(literalIndex));
  GenCode(offset, length,
          BuildCode(BRANCH_INDEXED_CODE, GenIndex(offset, length, literalIndex)));
  return true;
}
// Emit machine code for a primitive call node.  Emission order matters:
// the pc descriptor (when the primitive can walk the stack) must precede
// the call, and the register-mask / NLR data words must directly follow
// the call instruction so the returning callee can skip over them.
void PrimNode::gen() {
  BasicNode::gen();
  assert(bci() != IllegalBCI, "should have legal bci");
  if (pd->canWalkStack()) genPcDesc();
  theAssembler->CallP(first_inst_addr(pd->fn()));
  fint skip = pd->canScavenge() ? oopSize : 0; // reg. mask
  if (pd->needsNLRCode())
    skip += sendDesc::abortable_prim_end_offset - sendDesc::nonabortable_prim_end_offset;
  if (skip) {
    // skip register mask / NLR code upon return
    theAssembler->AddI(CalleeReturnAddr, skip, CalleeReturnAddr);
    theAssembler->Data(mask());
    if (pd->needsNLRCode()) nlrCode();
  } else {
    // Nothing to skip; pad so the return site layout stays uniform.
    theAssembler->Nop();
  }
}
// Record a (JVMTI-style) write to local `index` of this frame.  The value
// is not written into the frame directly; it is queued on the thread's
// deferred-locals list and applied when the frame is reconstituted after
// deoptimization.  An existing deferred entry for the same local is
// overwritten in place.
void vframe::update_local(JavaThread* thread, BasicType type, int index, jvalue value) {
  frame fr = this->get_frame();
  // AZUL - We use extra slots to accomodate tags for longs and doubles
  // in the compiler as well.
  if(type==T_LONG||type==T_DOUBLE){
    index=index+1;
  }
#ifdef ASSERT
  Unimplemented();
  //CodeBlob* b = CodeCache::find_blob(fr.pc());
  //assert(b->is_patched_for_deopt(), "frame must be scheduled for deoptimization");
#endif /* ASSERT */
  GrowableArray<jvmtiDeferredLocalVariableSet*>*deferred=thread->deferred_locals();
  if (deferred != NULL ) {
    // See if this vframe has already had locals with deferred writes
    int f;
    for ( f = 0 ; f < deferred->length() ; f++ ) {
      if (deferred->at(f)->matches(this)) {
        // Matching vframe; now see if the local already had a deferred write
        GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
        int l;
        for (l = 0 ; l < locals->length() ; l++ ) {
          if (locals->at(l)->index() == index) {
            locals->at(l)->set_value(value);
            return;
          }
        }
        // No matching local already present. Push a new value onto the deferred collection
        locals->push(new jvmtiDeferredLocalVariable(index, type, value));
        return;
      }
    }
    // No matching vframe; must push a new vframe set below
  } else {
    // No deferred updates pending for this thread.
    // allocate in C heap -- the list must survive this resource scope
    deferred = new(ResourceObj::C_HEAP) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
    thread->set_deferred_locals(deferred);
  }
  // Because the frame is patched for deopt and we will push in
  // registers in uncommon_trap, we will use the sender's sp to compare
  deferred->push(new jvmtiDeferredLocalVariableSet(method(), bci(), fr.pd_sender().sp()));
  assert(deferred->top()->id() == fr.pd_sender().sp(), "Huh? Must match");
  deferred->top()->set_local_at(index, type, value);
}
// This routine needs to atomically update the RetData structure, so the // caller needs to hold the RetData_lock before it gets here. Since taking // the lock can block (and allow GC) and since RetData is a ProfileData is a // wrapper around a derived oop, taking the lock in _this_ method will // basically cause the 'this' pointer's _data field to contain junk after the // lock. We require the caller to take the lock before making the ProfileData // structure. Currently the only caller is InterpreterRuntime::update_mdp_for_ret address RetData::fixup_ret(int return_bci, methodDataHandle h_mdo) { // First find the mdp which corresponds to the return bci. address mdp = h_mdo->bci_to_dp(return_bci); // Now check to see if any of the cache slots are open. for (uint row = 0; row < row_limit(); row++) { if (bci(row) == no_bci) { set_bci_displacement(row, mdp - dp()); set_bci_count(row, DataLayout::counter_increment); // Barrier to ensure displacement is written before the bci; allows // the interpreter to read displacement without fear of race condition. release_set_bci(row, return_bci); break; } } return mdp; }
//----------------------------profile_taken_branch----------------------------- void Parse::profile_taken_branch(int target_bci, bool force_update) { // This is a potential osr_site if we have a backedge. int cur_bci = bci(); bool osr_site = (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement; // If we are going to OSR, restart at the target bytecode. set_bci(target_bci); // To do: factor out the the limit calculations below. These duplicate // the similar limit calculations in the interpreter. if (method_data_update() || force_update) { ciMethodData* md = method()->method_data(); assert(md != NULL, "expected valid ciMethodData"); ciProfileData* data = md->bci_to_data(cur_bci); assert(data->is_JumpData(), "need JumpData for taken branch"); increment_md_counter_at(md, data, JumpData::taken_offset()); } // In the new tiered system this is all we need to do. In the old // (c2 based) tiered sytem we must do the code below. #ifndef TIERED if (method_data_update()) { ciMethodData* md = method()->method_data(); if (osr_site) { ciProfileData* data = md->bci_to_data(cur_bci); int limit = (CompileThreshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100; test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit); } } else { // With method data update off, use the invocation counter to trigger an // OSR compilation, as done in the interpreter. if (osr_site) { int limit = (CompileThreshold * OnStackReplacePercentage) / 100; increment_and_test_invocation_counter(limit); } } #endif // TIERED // Restore the original bytecode. set_bci(cur_bci); }
//------------------------------do_new----------------------------------------- void Parse::do_new() { kill_dead_locals(); bool will_link; ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass(); assert(will_link, "_new: typeflow responsibility"); // Should initialize, or throw an InstantiationError? if (!klass->is_initialized() && !klass->is_being_initialized() || klass->is_abstract() || klass->is_interface() || klass->name() == ciSymbol::java_lang_Class() || iter().is_unresolved_klass()) { uncommon_trap(Deoptimization::Reason_uninitialized, Deoptimization::Action_reinterpret, klass); return; } if (klass->is_being_initialized()) { emit_guard_for_new(klass); } Node* kls_node = makecon(TypeKlassPtr::make(klass)); Node* mth_node = makecon(TypeMetadataPtr::make(method())); Node* bci_node = intcon(bci()); Node* obj = new_instance(kls_node, mth_node, bci_node); // Push resultant oop onto stack push(obj); // Keep track of whether opportunities exist for StringBuilder // optimizations. if (OptimizeStringConcat && (klass == C->env()->StringBuilder_klass() || klass == C->env()->StringBuffer_klass())) { C->set_has_stringbuilder(true); } // Keep track of boxed values for EliminateAutoBox optimizations. if (C->eliminate_boxing() && klass->is_box_klass()) { C->set_has_boxed_value(true); } }
void check_static(int index) { AllocationDisabler no_allocation_should_happen; ConstantPool::Raw cp = method()->constants(); if (cp().tag_at(index).is_resolved_static_method()) { Method::Raw callee = cp().resolved_static_method_at(index); _owner->try_inline(method(), &callee, bci()); } else { // This could be an element we failed to resolve // when ROMizing an application. if (!PostponeErrorsUntilRuntime) { SHOULD_NOT_REACH_HERE(); } else { GUARANTEE(cp().tag_at(index).is_method(), "Sanity"); // The class must be marked as unverified or non-optimizable, // since it contains an unresolved entry at this point. #ifdef AZZERT InstanceClass::Raw klass = method()->holder(); GUARANTEE(!klass().is_verified() || !klass().is_optimizable(), "Sanity"); #endif } } }
// Dump this frame on tty: method, bci, locals, expression stack, and
// every monitor in the frame, annotating scalar-replaced owners and
// eliminated locks.
void javaVFrame::print() {
  ResourceMark rm;
  vframe::print();
  tty->print("\t");
  method()->print_value();
  tty->cr();
  tty->print_cr("\tbci: %d", bci());

  print_stack_values("locals", locals());
  print_stack_values("expressions", expressions());

  GrowableArray<MonitorInfo*>* list = monitors();
  if (list->is_empty()) return;
  tty->print_cr("\tmonitor list:");
  // Walk monitors from last to first.
  for (int index = (list->length()-1); index >= 0; index--) {
    MonitorInfo* monitor = list->at(index);
    tty->print("\t obj\t");
    if (monitor->owner_is_scalar_replaced()) {
      // The owner object was scalar-replaced; only its class is known.
      Klass* k = java_lang_Class::as_Klass(monitor->owner_klass());
      tty->print("( is scalar replaced %s)", k->external_name());
    } else if (monitor->owner() == NULL) {
      tty->print("( null )");
    } else {
      monitor->owner()->print_value();
      tty->print("(owner=" INTPTR_FORMAT ")", (address)monitor->owner());
    }
    if (monitor->eliminated()) {
      if(is_compiled_frame()) {
        tty->print(" ( lock is eliminated in compiled frame )");
      } else {
        tty->print(" ( lock is eliminated, frame not compiled )");
      }
    }
    tty->cr();
    tty->print("\t ");
    monitor->lock()->print_on(tty);
    tty->cr();
  }
}
// Record a write to local `index` of this compiled (deoptimized) frame.
// The value is not written into the frame directly; it is queued on the
// thread's deferred-locals list and applied when the frame is
// reconstituted during deoptimization.  An existing deferred entry for
// the same local is overwritten in place.
void compiledVFrame::update_local(BasicType type, int index, jvalue value) {
#ifdef ASSERT
  assert(fr().is_deoptimized_frame(), "frame must be scheduled for deoptimization");
#endif /* ASSERT */
  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = thread()->deferred_locals();
  if (deferred != NULL ) {
    // See if this vframe has already had locals with deferred writes
    int f;
    for ( f = 0 ; f < deferred->length() ; f++ ) {
      if (deferred->at(f)->matches(this)) {
        // Matching vframe; now see if the local already had a deferred write
        GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
        int l;
        for (l = 0 ; l < locals->length() ; l++ ) {
          if (locals->at(l)->index() == index) {
            locals->at(l)->set_value(value);
            return;
          }
        }
        // No matching local already present. Push a new value onto the deferred collection
        locals->push(new jvmtiDeferredLocalVariable(index, type, value));
        return;
      }
    }
    // No matching vframe; must push a new vframe set below
  } else {
    // No deferred updates pending for this thread.
    // allocate in C heap -- the list must survive this resource scope
    deferred = new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
    thread()->set_deferred_locals(deferred);
  }
  // Key the new set by this frame's id so later writes find it again.
  deferred->push(new jvmtiDeferredLocalVariableSet(method(), bci(), fr().id()));
  assert(deferred->top()->id() == fr().id(), "Huh? Must match");
  deferred->top()->set_local_at(index, type, value);
}
// Record x as the canonical replacement for the current instruction,
// propagating a printable bci in debug builds and optionally tracing the
// substitution.
void Canonicalizer::set_canonical(Value x) {
  assert(x != NULL, "value must exist");
  // Note: we can not currently substitute root nodes which show up in
  // the instruction stream (because the instruction list is embedded
  // in the instructions).
  if (canonical() == x) {
    return;
  }
#ifndef PRODUCT
  if (!x->has_printable_bci()) {
    x->set_printable_bci(bci());
  }
#endif
  if (PrintCanonicalization) {
    PrintValueVisitor do_print_value;
    canonical()->input_values_do(&do_print_value);
    canonical()->print_line();
    tty->print_cr("canonicalized to:");
    x->input_values_do(&do_print_value);
    x->print_line();
    tty->cr();
  }
  assert(_canonical->type()->tag() == x->type()->tag(), "types must match");
  _canonical = x;
}
void javaVFrame::print_value() const { methodOop m = method(); klassOop k = m->method_holder(); tty->print("%s.%s", Klass::cast(k)->internal_name(), m->name()->as_C_string()); if (!m->is_native()) { symbolOop source_name = instanceKlass::cast(k)->source_file_name(); int line_number = m->line_number_from_bci(bci()); if (source_name != NULL && (line_number != -1)) { tty->print("(%s:%d)", source_name->as_C_string(), line_number); } } else { tty->print("(Native Method)"); } // Check frame size and print warning if it looks suspiciously large if (fr().sp() != NULL) { int size = fr().frame_size(); #ifdef _LP64 if (size > 8*K) warning("SUSPICIOUSLY LARGE FRAME (%d)", size); #else if (size > 4*K) warning("SUSPICIOUSLY LARGE FRAME (%d)", size); #endif } }
void javaVFrame::print() { ResourceMark rm; vframe::print(); tty->print("\t"); method()->print_value(); tty->cr(); tty->print_cr("\tbci: %d", bci()); print_stack_values("locals", locals()); print_stack_values("expressions", expressions()); GrowableArray<MonitorInfo*>* list = monitors(); if (list->is_empty()) return; tty->print_cr("\tmonitor list:"); for (int index = (list->length()-1); index >= 0; index--) { MonitorInfo* monitor = list->at(index); tty->print("\t obj\t"); monitor->owner()->print_value(); tty->print("(" INTPTR_FORMAT ")", monitor->owner()); tty->cr(); tty->print("\t "); monitor->lock()->print_on(tty); tty->cr(); } }