void ciMethodData::set_parameter_type(int i, ciKlass* k) {
  VM_ENTRY_MARK;
  MethodData* mdo = get_MethodData();
  if (mdo != NULL) {
    mdo->parameters_type_data()->set_type(i, k->get_Klass());
  }
}
void ciMethodData::set_would_profile(bool p) {
  VM_ENTRY_MARK;
  MethodData* mdo = get_MethodData();
  if (mdo != NULL) {
    mdo->set_would_profile(p);
  }
}
void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
  if (TraceInvocationCounterOverflow) {
    MethodCounters* mcs = m->method_counters();
    assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
    InvocationCounter* ic = mcs->invocation_counter();
    InvocationCounter* bc = mcs->backedge_counter();
    ResourceMark rm;
    if (bci == InvocationEntryBci) {
      tty->print("comp-policy cntr ovfl @ %d in entry of ", bci);
    } else {
      tty->print("comp-policy cntr ovfl @ %d in loop of ", bci);
    }
    m->print_value();
    tty->cr();
    ic->print();
    bc->print();
    if (ProfileInterpreter) {
      if (bci != InvocationEntryBci) {
        MethodData* mdo = m->method_data();
        if (mdo != NULL) {
          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
          tty->print_cr("back branch count = %d", count);
        }
      }
    }
  }
}
void ciMethodData::set_compilation_stats(short loops, short blocks) {
  VM_ENTRY_MARK;
  MethodData* mdo = get_MethodData();
  if (mdo != NULL) {
    mdo->set_num_loops(loops);
    mdo->set_num_blocks(blocks);
  }
}
// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  }
  return false;
}
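// For reference, a sketch of the predicate used above (assuming the
// definition in simpleThresholdPolicy.hpp; not reproduced verbatim):
// call_predicate_helper<CompLevel_full_profile> checks the counters
// against the tier-4 thresholds, roughly
//
//   i >= Tier4InvocationThreshold * scale
//   || (i >= Tier4MinInvocationThreshold * scale
//       && i + b >= Tier4CompileThreshold * scale)
//
// so a scale of 1 means the MDO's invocation/backedge deltas must clear
// the unscaled tier-4 thresholds for the method to count as profiled.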
void BytecodePrinter::bytecode_epilog(int bci, outputStream* st) {
  MethodData* mdo = method()->method_data();
  if (mdo != NULL) {
    ProfileData* data = mdo->bci_to_data(bci);
    if (data != NULL) {
      st->print(" %d", mdo->dp_to_di(data->dp()));
      st->fill_to(6);
      data->print_data_on(st);
    }
  }
}
// Determine if a method is mature.
bool SimpleThresholdPolicy::is_mature(Method* method) {
  if (is_trivial(method)) return true;
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count();
    int b = mdo->backedge_count();
    double k = ProfileMaturityPercentage / 100.0;
    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) ||
           loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  return false;
}
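// Worked example (assuming the default ProfileMaturityPercentage of 20):
// k = 20 / 100.0 = 0.2, so the profile is treated as mature once the
// counters reach 20% of the tier-4 thresholds used by the predicates
// above, i.e. well before the method would be compiled at full opt.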
// Set carry flags on the counters if necessary
void SimpleThresholdPolicy::handle_counter_overflow(Method* method) {
  MethodCounters *mcs = method->method_counters();
  if (mcs != NULL) {
    set_carry_if_necessary(mcs->invocation_counter());
    set_carry_if_necessary(mcs->backedge_counter());
  }
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    set_carry_if_necessary(mdo->invocation_counter());
    set_carry_if_necessary(mdo->backedge_counter());
  }
}
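// A sketch of the helper used above, assuming the usual definition of
// set_carry_if_necessary: once a counter has climbed past half of
// InvocationCounter::count_limit it sets the sticky carry bit, so that
// later counter decay or resets cannot hide the fact that the counter
// had already reached the threshold region.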
void ciMethodData::clear_escape_info() {
  VM_ENTRY_MARK;
  MethodData* mdo = get_MethodData();
  if (mdo != NULL) {
    mdo->clear_escape_info();
    ArgInfoData *aid = arg_info();
    int arg_count = (aid == NULL) ? 0 : aid->number_of_args();
    for (int i = 0; i < arg_count; i++) {
      set_arg_modified(i, 0);
    }
  }
  _eflags = _arg_local = _arg_stack = _arg_returned = 0;
}
void ciMethodData::set_return_type(int bci, ciKlass* k) {
  VM_ENTRY_MARK;
  MethodData* mdo = get_MethodData();
  if (mdo != NULL) {
    ProfileData* data = mdo->bci_to_data(bci);
    if (data->is_CallTypeData()) {
      data->as_CallTypeData()->set_return_type(k->get_Klass());
    } else {
      assert(data->is_VirtualCallTypeData(), "no arguments!");
      data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
    }
  }
}
void SimpleThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
    if (PrintTieredEvents) {
      methodHandle mh(sd->method());
      print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none);
    }
    MethodData* mdo = sd->method()->method_data();
    if (mdo != NULL) {
      mdo->reset_start_counters();
    }
    if (sd->is_top()) break;
  }
}
bool NonTieredCompPolicy::is_mature(Method* method) {
  MethodData* mdo = method->method_data();
  assert(mdo != NULL, "Should be");
  uint current = mdo->mileage_of(method);
  uint initial = mdo->creation_mileage();
  if (current < initial)
    return true;  // some sort of overflow
  uint target;
  if (ProfileMaturityPercentage <= 0)
    target = (uint) -ProfileMaturityPercentage;  // absolute value
  else
    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
  return (current >= initial + target);
}
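// Worked example (assuming the server defaults CompileThreshold = 10000
// and ProfileMaturityPercentage = 20):
//   target = (20 * 10000) / 100 = 2000
// so the profile is mature once the method has accumulated 2000 mileage
// units beyond the mileage recorded when the MDO was created. A negative
// ProfileMaturityPercentage is instead taken as an absolute count.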
void ciMethodData::load_extra_data() {
  MethodData* mdo = get_MethodData();
  // The locker must be named: an unnamed temporary "MutexLocker(...)" is
  // destroyed immediately, so the lock would not be held for this scope.
  MutexLocker ml(mdo->extra_data_lock());

  // speculative trap entries also hold a pointer to a Method so need to be translated
  DataLayout* dp_src  = mdo->extra_data_base();
  DataLayout* end_src = mdo->args_data_limit();
  DataLayout* dp_dst  = extra_data_base();
  for (;; dp_src = MethodData::next_extra(dp_src), dp_dst = MethodData::next_extra(dp_dst)) {
    assert(dp_src < end_src, "moved past end of extra data");
    assert(((intptr_t)dp_dst) - ((intptr_t)extra_data_base()) == ((intptr_t)dp_src) - ((intptr_t)mdo->extra_data_base()), "source and destination don't match");

    // New traps in the MDO may have been added since we copied the
    // data (concurrent deoptimizations before we acquired
    // extra_data_lock above) or can be removed (a safepoint may occur
    // in the translate_from call below) as we translate the copy:
    // update the copy as we go.
    int tag = dp_src->tag();
    if (tag != DataLayout::arg_info_data_tag) {
      memcpy(dp_dst, dp_src, ((intptr_t)MethodData::next_extra(dp_src)) - ((intptr_t)dp_src));
    }

    switch(tag) {
    case DataLayout::speculative_trap_data_tag: {
      ciSpeculativeTrapData* data_dst = new ciSpeculativeTrapData(dp_dst);
      SpeculativeTrapData* data_src = new SpeculativeTrapData(dp_src);
      data_dst->translate_from(data_src);
#ifdef ASSERT
      SpeculativeTrapData* data_src2 = new SpeculativeTrapData(dp_src);
      assert(data_src2->method() == data_src->method() && data_src2->bci() == data_src->bci(), "entries changed while translating");
#endif
      break;
    }
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // An empty slot or ArgInfoData entry marks the end of the trap data
      return;
    default:
      fatal(err_msg("bad tag = %d", dp_dst->tag()));
    }
  }
}
// Common transition function. Given a predicate determines if a method should transition to another level.
CompLevel SimpleThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method) && cur_level != CompLevel_aot) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_aot: {
      if ((this->*p)(i, b, cur_level, method)) {
        next_level = CompLevel_full_profile;
      }
    }
    break;
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level, method)) {
        next_level = CompLevel_full_profile;
      }
      break;
    case CompLevel_limited_profile:
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    default:
      // Other levels never transition from here.
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}
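// For orientation (a reference note, not authoritative): the CompLevel
// values used throughout these policies are
//   CompLevel_none              = 0  (interpreter only)
//   CompLevel_simple            = 1  (C1, no profiling)
//   CompLevel_limited_profile   = 2  (C1 with invocation/backedge counters)
//   CompLevel_full_profile      = 3  (C1 with full MDO profiling)
//   CompLevel_full_optimization = 4  (C2 or JVMCI)
// with CompLevel_aot marking AOT-compiled code. common() above only ever
// moves a method toward level 4 and caps the result at TieredStopAtLevel.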
// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}
void SimpleThresholdPolicy::print_counters(const char* prefix, methodHandle mh) {
  int invocation_count = mh->invocation_count();
  int backedge_count = mh->backedge_count();
  MethodData* mdh = mh->method_data();
  int mdo_invocations = 0, mdo_backedges = 0;
  int mdo_invocations_start = 0, mdo_backedges_start = 0;
  if (mdh != NULL) {
    mdo_invocations = mdh->invocation_count();
    mdo_backedges = mdh->backedge_count();
    mdo_invocations_start = mdh->invocation_count_start();
    mdo_backedges_start = mdh->backedge_count_start();
  }
  tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
             invocation_count, backedge_count, prefix,
             mdo_invocations, mdo_invocations_start,
             mdo_backedges, mdo_backedges_start);
  tty->print(" %smax levels=%d,%d", prefix,
             mh->highest_comp_level(), mh->highest_osr_comp_level());
}
// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel SimpleThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread* thread) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&SimpleThresholdPolicy::loop_predicate, method, cur_level));
  CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler) {
    next_level = JVMCIRuntime::adjust_comp_level(method, false, next_level, thread);
  }
#endif
  return next_level;
}
// copy our escape info to the MethodData* if it exists
void ciMethodData::update_escape_info() {
  VM_ENTRY_MARK;
  MethodData* mdo = get_MethodData();
  if (mdo != NULL) {
    mdo->set_eflags(_eflags);
    mdo->set_arg_local(_arg_local);
    mdo->set_arg_stack(_arg_stack);
    mdo->set_arg_returned(_arg_returned);
    int arg_count = mdo->method()->size_of_parameters();
    for (int i = 0; i < arg_count; i++) {
      mdo->set_arg_modified(i, arg_modified(i));
    }
  }
}
// Common transition function. Given a predicate determines if a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level, method)) {
#if INCLUDE_JVMCI
        if (UseJVMCICompiler) {
          // Since JVMCI takes a while to warm up, its queue inevitably backs up during
          // early VM execution.
          next_level = CompLevel_full_profile;
          break;
        }
#endif
        // C1-generated fully profiled code is about 30% slower than the limited profile
        // code that has only invocation and backedge counters. The observation is that
        // if C2 queue is large enough we can spend too much time in the fully profiled code
        // while waiting for C2 to pick the method from the queue. To alleviate this problem
        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
        // we choose to compile a limited profiled version and then recompile with full profiling
        // when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
            Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level, method))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    default:
      // Other levels never transition from here.
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}
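// Note on the feedback above: Tier3DelayOn and Tier3DelayOff form a
// hysteresis band (defaults of roughly 5 and 2 buffer lengths per C2
// compiler thread, if memory serves; treat the exact values as an
// assumption). Feedback switches to limited profiling once the C2 queue
// grows past Tier3DelayOn * compiler_count and allows full profiling
// again only after it drains below Tier3DelayOff * compiler_count, which
// keeps the policy from flapping between the two profile levels.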
void ciMethodData::dump_replay_data(outputStream* out) {
  ResourceMark rm;
  MethodData* mdo = get_MethodData();
  Method* method = mdo->method();
  Klass* holder = method->method_holder();
  out->print("ciMethodData %s %s %s %d %d",
             holder->name()->as_quoted_ascii(),
             method->name()->as_quoted_ascii(),
             method->signature()->as_quoted_ascii(),
             _state,
             current_mileage());

  // dump the contents of the MDO header as raw data
  unsigned char* orig = (unsigned char*)&_orig;
  int length = sizeof(_orig);
  out->print(" orig %d", length);
  for (int i = 0; i < length; i++) {
    out->print(" %d", orig[i]);
  }

  // dump the MDO data as raw data
  int elements = (data_size() + extra_data_size()) / sizeof(intptr_t);
  out->print(" data %d", elements);
  for (int i = 0; i < elements; i++) {
    // We could use INTPTR_FORMAT here but that's zero-justified, which
    // makes comparing it with the SA version of this output harder.
#ifdef _LP64
    out->print(" 0x%" FORMAT64_MODIFIER "x", data()[i]);
#else
    out->print(" 0x%x", data()[i]);
#endif
  }

  // The MDO contained oop references as ciObjects, so scan for those
  // and emit pairs of offset and klass name so that they can be
  // reconstructed at runtime. The first round counts the number of
  // oop references and the second actually emits them.
  ciParametersTypeData* parameters = parameters_type_data();
  for (int count = 0, round = 0; round < 2; round++) {
    if (round == 1) out->print(" oops %d", count);
    ProfileData* pdata = first_data();
    for ( ; is_valid(pdata); pdata = next_data(pdata)) {
      if (pdata->is_VirtualCallData()) {
        ciVirtualCallData* vdata = (ciVirtualCallData*)pdata;
        dump_replay_data_receiver_type_helper<ciVirtualCallData>(out, round, count, vdata);
        if (pdata->is_VirtualCallTypeData()) {
          ciVirtualCallTypeData* call_type_data = (ciVirtualCallTypeData*)pdata;
          dump_replay_data_call_type_helper<ciVirtualCallTypeData>(out, round, count, call_type_data);
        }
      } else if (pdata->is_ReceiverTypeData()) {
        ciReceiverTypeData* vdata = (ciReceiverTypeData*)pdata;
        dump_replay_data_receiver_type_helper<ciReceiverTypeData>(out, round, count, vdata);
      } else if (pdata->is_CallTypeData()) {
        ciCallTypeData* call_type_data = (ciCallTypeData*)pdata;
        dump_replay_data_call_type_helper<ciCallTypeData>(out, round, count, call_type_data);
      }
    }
    if (parameters != NULL) {
      for (int i = 0; i < parameters->number_of_parameters(); i++) {
        dump_replay_data_type_helper(out, round, count, parameters,
                                     ParametersTypeData::type_offset(i),
                                     parameters->valid_parameter_type(i));
      }
    }
  }
  for (int count = 0, round = 0; round < 2; round++) {
    if (round == 1) out->print(" methods %d", count);
    dump_replay_data_extra_data_helper(out, round, count);
  }
  out->cr();
}
uint trap_reason_limit() const { return _orig.trap_reason_limit(); }
uint overflow_trap_count() const { return _orig.overflow_trap_count(); }
uint overflow_recompile_count() const { return _orig.overflow_recompile_count(); }
void ciMethodData::load_data() {
  MethodData* mdo = get_MethodData();
  if (mdo == NULL) {
    return;
  }

  // To do: don't copy the data if it is not "ripe" -- require a minimum #
  // of invocations.

  // Snapshot the data -- actually, take an approximate snapshot of
  // the data. Any concurrently executing threads may be changing the
  // data as we copy it.
  Copy::disjoint_words((HeapWord*) mdo,
                       (HeapWord*) &_orig,
                       sizeof(_orig) / HeapWordSize);
  Arena* arena = CURRENT_ENV->arena();
  _data_size = mdo->data_size();
  _extra_data_size = mdo->extra_data_size();
  int total_size = _data_size + _extra_data_size;
  _data = (intptr_t *) arena->Amalloc(total_size);
  Copy::disjoint_words((HeapWord*) mdo->data_base(),
                       (HeapWord*) _data,
                       total_size / HeapWordSize);

  // Traverse the profile data, translating any oops into their
  // ci equivalents.
  ResourceMark rm;
  ciProfileData* ci_data = first_data();
  ProfileData* data = mdo->first_data();
  while (is_valid(ci_data)) {
    ci_data->translate_from(data);
    ci_data = next_data(ci_data);
    data = mdo->next_data(data);
  }
  if (mdo->parameters_type_data() != NULL) {
    _parameters = data_layout_at(mdo->parameters_type_data_di());
    ciParametersTypeData* parameters = new ciParametersTypeData(_parameters);
    parameters->translate_from(mdo->parameters_type_data());
  }

  load_extra_data();

  // Note: Extra data are all BitData, and do not need translation.
  _current_mileage = MethodData::mileage_of(mdo->method());
  _invocation_counter = mdo->invocation_count();
  _backedge_counter = mdo->backedge_count();
  _state = mdo->is_mature() ? mature_state : immature_state;

  _eflags = mdo->eflags();
  _arg_local = mdo->arg_local();
  _arg_stack = mdo->arg_stack();
  _arg_returned = mdo->arg_returned();
#ifndef PRODUCT
  if (ReplayCompiles) {
    ciReplay::initialize(this);
  }
#endif
}
void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
                                         int callee_parameters,
                                         int callee_locals,
                                         frame* caller,
                                         bool is_top_frame,
                                         bool is_bottom_frame,
                                         int exec_mode) {
  JavaThread* thread = (JavaThread*) Thread::current();

  bool realloc_failure_exception = thread->frames_to_pop_failed_realloc() > 0;

  // Look at bci and decide on bcp and continuation pc
  address bcp;
  // C++ interpreter doesn't need a pc since it will figure out what to do when it
  // begins execution
  address pc;
  bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
                             // rather than the one associated with bcp
  if (raw_bci() == SynchronizationEntryBCI) {
    // We are deoptimizing while hanging in prologue code for synchronized method
    bcp = method()->bcp_from(0); // first byte code
    pc  = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
  } else if (should_reexecute()) { // reexecute this bytecode
    assert(is_top_frame, "reexecute allowed only for the top frame");
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_reexecute_entry(method(), bcp);
  } else {
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
    use_next_mdp = true;
  }
  assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");

  // Monitorenter and pending exceptions:
  //
  // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
  // because there is no safepoint at the null pointer check (it is either handled explicitly
  // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
  // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER). If an asynchronous
  // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
  // the monitorenter to place it in the proper exception range.
  //
  // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
  // in which case bcp should point to the monitorenter since it is within the exception's range.
  //
  // For realloc failure exception we just pop frames, skip the guarantee.

  assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
  assert(thread->deopt_compiled_method() != NULL, "compiled method should be known");
  guarantee(realloc_failure_exception ||
            !(thread->deopt_compiled_method()->is_compiled_by_c2() &&
              *bcp == Bytecodes::_monitorenter &&
              exec_mode == Deoptimization::Unpack_exception),
            "shouldn't get exception during monitorenter");

  int popframe_preserved_args_size_in_bytes = 0;
  int popframe_preserved_args_size_in_words = 0;
  if (is_top_frame) {
    JvmtiThreadState *state = thread->jvmti_thread_state();
    if (JvmtiExport::can_pop_frame() &&
        (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
      if (thread->has_pending_popframe()) {
        // Pop top frame after deoptimization
#ifndef CC_INTERP
        pc = Interpreter::remove_activation_preserving_args_entry();
#else
        // Do an uncommon trap type entry. c++ interpreter will know
        // to pop frame and preserve the args
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
#endif
      } else {
        // Reexecute invoke in top frame
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
        // Note: the PopFrame-related extension of the expression stack size is done in
        // Deoptimization::fetch_unroll_info_helper
        popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
      }
    } else if (!realloc_failure_exception && JvmtiExport::can_force_early_return() &&
               state != NULL && state->is_earlyret_pending()) {
      // Force early return from top frame after deoptimization
#ifndef CC_INTERP
      pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
#endif
    } else {
      if (realloc_failure_exception && JvmtiExport::can_force_early_return() &&
          state != NULL && state->is_earlyret_pending()) {
        state->clr_earlyret_pending();
        state->set_earlyret_oop(NULL);
        state->clr_earlyret_value();
      }
      // Possibly override the previous pc computation of the top (youngest) frame
      switch (exec_mode) {
      case Deoptimization::Unpack_deopt:
        // use what we've got
        break;
      case Deoptimization::Unpack_exception:
        // exception is pending
        pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
        // [phh] We're going to end up in some handler or other, so it doesn't
        // matter what mdp we point to. See exception_handler_for_exception()
        // in interpreterRuntime.cpp.
        break;
      case Deoptimization::Unpack_uncommon_trap:
      case Deoptimization::Unpack_reexecute:
        // redo last byte code
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        break;
      default:
        ShouldNotReachHere();
      }
    }
  }

  // Setup the interpreter frame
  assert(method() != NULL, "method must exist");
  int temps = expressions()->size();

  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();

  Interpreter::layout_activation(method(),
                                 temps + callee_parameters,
                                 popframe_preserved_args_size_in_words,
                                 locks,
                                 caller_actual_parameters,
                                 callee_parameters,
                                 callee_locals,
                                 caller,
                                 iframe(),
                                 is_top_frame,
                                 is_bottom_frame);

  // Update the pc in the frame object and overwrite the temporary pc
  // we placed in the skeletal frame now that we finally know the
  // exact interpreter address we should use.
  _frame.patch_pc(thread, pc);

  assert(!method()->is_synchronized() || locks > 0 || _removed_monitors || raw_bci() == SynchronizationEntryBCI,
         "synchronized methods must have monitors");

  BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
  for (int index = 0; index < locks; index++) {
    top = iframe()->previous_monitor_in_interpreter_frame(top);
    BasicObjectLock* src = _monitors->at(index);
    top->set_obj(src->obj());
    src->lock()->move_to(src->obj(), top->lock());
  }
  if (ProfileInterpreter) {
    iframe()->interpreter_frame_set_mdp(0); // clear out the mdp.
  }
  iframe()->interpreter_frame_set_bcp(bcp);
  if (ProfileInterpreter) {
    MethodData* mdo = method()->method_data();
    if (mdo != NULL) {
      int bci = iframe()->interpreter_frame_bci();
      if (use_next_mdp) ++bci;
      address mdp = mdo->bci_to_dp(bci);
      iframe()->interpreter_frame_set_mdp(mdp);
    }
  }

  if (PrintDeoptimizationDetails) {
    tty->print_cr("Expressions size: %d", expressions()->size());
  }

  // Unpack expression stack
  // If this is an intermediate frame (i.e. not top frame) then this
  // only unpacks the part of the expression stack not used by callee
  // as parameters. The callee parameters are unpacked as part of the
  // callee locals.
  int i;
  for (i = 0; i < expressions()->size(); i++) {
    StackValue *value = expressions()->at(i);
    intptr_t*   addr  = iframe()->interpreter_frame_expression_stack_at(i);
    switch(value->type()) {
    case T_INT:
      *addr = value->get_int();
#ifndef PRODUCT
      if (PrintDeoptimizationDetails) {
        tty->print_cr("Reconstructed expression %d (INT): %d", i, (int)(*addr));
      }
#endif
      break;
    case T_OBJECT:
      *addr = value->get_int(T_OBJECT);
#ifndef PRODUCT
      if (PrintDeoptimizationDetails) {
        tty->print("Reconstructed expression %d (OBJECT): ", i);
        oop o = (oop)(address)(*addr);
        if (o == NULL) {
          tty->print_cr("NULL");
        } else {
          ResourceMark rm;
          tty->print_raw_cr(o->klass()->name()->as_C_string());
        }
      }
#endif
      break;
    case T_CONFLICT:
      // A dead stack slot. Initialize to null in case it is an oop.
      *addr = NULL_WORD;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // Unpack the locals
  for (i = 0; i < locals()->size(); i++) {
    StackValue *value = locals()->at(i);
    intptr_t*   addr  = iframe()->interpreter_frame_local_at(i);
    switch(value->type()) {
    case T_INT:
      *addr = value->get_int();
#ifndef PRODUCT
      if (PrintDeoptimizationDetails) {
        tty->print_cr("Reconstructed local %d (INT): %d", i, (int)(*addr));
      }
#endif
      break;
    case T_OBJECT:
      *addr = value->get_int(T_OBJECT);
#ifndef PRODUCT
      if (PrintDeoptimizationDetails) {
        tty->print("Reconstructed local %d (OBJECT): ", i);
        oop o = (oop)(address)(*addr);
        if (o == NULL) {
          tty->print_cr("NULL");
        } else {
          ResourceMark rm;
          tty->print_raw_cr(o->klass()->name()->as_C_string());
        }
      }
#endif
      break;
    case T_CONFLICT:
      // A dead location. If it is an oop then we need a NULL to prevent GC from following it
      *addr = NULL_WORD;
      break;
    default:
      ShouldNotReachHere();
    }
  }

  if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    // An interpreted frame was popped but it returns to a deoptimized
    // frame. The incoming arguments to the interpreted activation
    // were preserved in thread-local storage by the
    // remove_activation_preserving_args_entry in the interpreter; now
    // we put them back into the just-unpacked interpreter frame.
    // Note that this assumes that the locals arena grows toward lower
    // addresses.
    if (popframe_preserved_args_size_in_words != 0) {
      void* saved_args = thread->popframe_preserved_args();
      assert(saved_args != NULL, "must have been saved by interpreter");
#ifdef ASSERT
      assert(popframe_preserved_args_size_in_words <=
             iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
             "expression stack size should have been extended");
#endif // ASSERT
      int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
      intptr_t* base;
      if (frame::interpreter_frame_expression_stack_direction() < 0) {
        base = iframe()->interpreter_frame_expression_stack_at(top_element);
      } else {
        base = iframe()->interpreter_frame_expression_stack();
      }
      Copy::conjoint_jbytes(saved_args,
                            base,
                            popframe_preserved_args_size_in_bytes);
      thread->popframe_free_preserved_args();
    }
  }

#ifndef PRODUCT
  if (PrintDeoptimizationDetails) {
    ttyLocker ttyl;
    tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
    iframe()->print_on(tty);
    RegisterMap map(thread);
    vframe* f = vframe::new_vframe(iframe(), &map, thread);
    f->print();

    tty->print_cr("locals size %d", locals()->size());
    tty->print_cr("expression size %d", expressions()->size());

    method()->print_value();
    tty->cr();
    // method()->print_codes();
  } else if (TraceDeoptimization) {
    tty->print("     ");
    method()->print_value();
    Bytecodes::Code code = Bytecodes::java_code_at(method(), bcp);
    int bci = method()->bci_from(bcp);
    tty->print(" - %s", Bytecodes::name(code));
    tty->print(" @ bci %d ", bci);
    tty->print_cr("sp = " PTR_FORMAT, p2i(iframe()->sp()));
  }
#endif // PRODUCT

  // The expression stack and locals are in the resource area don't leave
  // a dangling pointer in the vframeArray we leave around for debug
  // purposes

  _locals = _expressions = NULL;
}
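// Context note (an assumption about the caller, based on how deoptimization
// proceeds): unpack_on_stack() is invoked once per vframeArrayElement by
// vframeArray::unpack_to_stack(), which walks the array from the oldest
// frame to the youngest, so each skeletal interpreter frame is filled in
// before its callee is unpacked on top of it and is_top_frame is true
// only for the last element unpacked.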
uint decompile_count() const { return _orig.decompile_count(); }
int creation_mileage() { return _orig.creation_mileage(); }
uint trap_count_limit() const { return _orig.trap_count_limit(); }
uint trap_count(int reason) const { return _orig.trap_count(reason); }