void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size  = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
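// Illustrative example of the cost model above (values are hypothetical, not
// taken from any profile): for a callee with code_size = 30 bytecodes, the
// estimates work out to
//   _work = 1.0 + 30 / 3.0 = 11.0 call-overheads, and
//   _size = NODES_OVERHEAD_PER_METHOD + NODES_PER_BYTECODE * 30 Ideal-graph nodes,
// while _count is the profiled call count rescaled by the caller's own
// invocation scaling (or 1 when no count is available).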
//-----------------------------try_to_inline-----------------------------------
// return true if ok
// Relocated from "InliningClosure::try_to_inline"
bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
                               int caller_bci, JVMState* jvms, ciCallProfile& profile,
                               WarmCallInfo* wci_result, bool& should_delay) {

  if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) {
    if (!callee_method->force_inline() || !IncrementalInline) {
      set_msg("size > DesiredMethodLimit");
      return false;
    } else if (!C->inlining_incrementally()) {
      should_delay = true;
    }
  }

  _forced_inline = false; // Reset

  if (!should_inline(callee_method, caller_method, caller_bci, profile, wci_result)) {
    return false;
  }
  if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
    return false;
  }

  if (InlineAccessors && callee_method->is_accessor()) {
    // accessor methods are not subject to any of the following limits.
    set_msg("accessor");
    return true;
  }

  // suppress a few checks for accessors and trivial methods
  if (callee_method->code_size() > MaxTrivialSize) {

    // don't inline into giant methods
    if (C->over_inlining_cutoff()) {
      if ((!callee_method->force_inline() && !caller_method->is_compiled_lambda_form())
          || !IncrementalInline) {
        set_msg("NodeCountInliningCutoff");
        return false;
      } else {
        should_delay = true;
      }
    }

    if ((!UseInterpreter || CompileTheWorld) &&
        is_init_with_ea(callee_method, caller_method, C)) {
      // Escape Analysis stress testing when running Xcomp or CTW:
      // inline constructors even if they are not reached.
    } else if (forced_inline()) {
      // Inlining was forced by CompilerOracle, ciReplay or annotation
    } else if (profile.count() == 0) {
      // don't inline unreached call sites
      set_msg("call site not reached");
      return false;
    }
  }

  if (!C->do_inlining() && InlineAccessors) {
    set_msg("not an accessor");
    return false;
  }

  // Limit inlining depth in case inlining is forced or
  // _max_inline_level was increased to compensate for lambda forms.
  if (inline_level() > MaxForceInlineLevel) {
    set_msg("MaxForceInlineLevel");
    return false;
  }
  if (inline_level() > _max_inline_level) {
    if (!callee_method->force_inline() || !IncrementalInline) {
      set_msg("inlining too deep");
      return false;
    } else if (!C->inlining_incrementally()) {
      should_delay = true;
    }
  }

  // detect direct and indirect recursive inlining
  {
    // count the current method and the callee
    const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
    int inline_level = 0;
    if (!is_compiled_lambda_form) {
      if (method() == callee_method) {
        inline_level++;
      }
    }
    // count callers of current method and callee
    Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
    for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
      if (j->method() == callee_method) {
        if (is_compiled_lambda_form) {
          // Since compiled lambda forms are heavily reused we allow recursive inlining.  If it is truly
          // a recursion (using the same "receiver") we limit inlining otherwise we can easily blow the
          // compiler stack.
          Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
          if (caller_argument0 == callee_argument0) {
            inline_level++;
          }
        } else {
          inline_level++;
        }
      }
    }
    if (inline_level > MaxRecursiveInlineLevel) {
      set_msg("recursive inlining is too deep");
      return false;
    }
  }

  int size = callee_method->code_size_for_inlining();

  if (ClipInlining && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
    if (!callee_method->force_inline() || !IncrementalInline) {
      set_msg("size > DesiredMethodLimit");
      return false;
    } else if (!C->inlining_incrementally()) {
      should_delay = true;
    }
  }

  // ok, inline this method
  return true;
}
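// Illustrative note on the recursion check above (editorial, not from the
// original source): the block walks the JVMState caller chain and counts frames
// that already correspond to callee_method.  For ordinary methods every matching
// frame counts toward MaxRecursiveInlineLevel; for compiled lambda forms only
// frames whose first argument (the "receiver" node) is identical count, so heavy
// reuse of one lambda-form shape at unrelated call sites is not mistaken for
// recursion.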
// positive filter: should callee be inlined?
bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
                               int caller_bci, ciCallProfile& profile,
                               WarmCallInfo* wci_result) {
  // Allows targeted inlining
  if (C->directive()->should_inline(callee_method)) {
    *wci_result = *(WarmCallInfo::always_hot());
    if (C->print_inlining() && Verbose) {
      CompileTask::print_inline_indent(inline_level());
      tty->print_cr("Inlined method is hot: ");
    }
    set_msg("force inline by CompileCommand");
    _forced_inline = true;
    return true;
  }

  if (callee_method->force_inline()) {
    set_msg("force inline by annotation");
    _forced_inline = true;
    return true;
  }

  int inline_depth = inline_level() + 1;

  if (CacheProfiles &&
      ciCacheReplay::should_inline(CompilerThread::current()->get_cache_replay_state(),
                                   callee_method, caller_bci, inline_depth)) {
    //tty->print_cr("force inline by ciCacheReplay");
    set_msg("force inline by ciCacheReplay");
    _forced_inline = true;
    return true;
  }

#ifndef PRODUCT
  if (ciReplay::should_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) {
    set_msg("force inline by ciReplay");
    _forced_inline = true;
    return true;
  }
#endif

  int size = callee_method->code_size_for_inlining();

  // Check for too many throws (and not too huge)
  if (callee_method->interpreter_throwout_count() > InlineThrowCount &&
      size < InlineThrowMaxSize) {
    wci_result->set_profit(wci_result->profit() * 100);
    if (C->print_inlining() && Verbose) {
      CompileTask::print_inline_indent(inline_level());
      tty->print_cr("Inlined method with many throws (throws=%d):",
                    callee_method->interpreter_throwout_count());
    }
    set_msg("many throws");
    return true;
  }

  int default_max_inline_size = C->max_inline_size();
  int inline_small_code_size  = InlineSmallCode / 4;
  int max_inline_size         = default_max_inline_size;

  int call_site_count = method()->scale_count(profile.count());
  int invoke_count    = method()->interpreter_invocation_count();

  assert(invoke_count != 0, "require invocation count greater than zero");
  int freq = call_site_count / invoke_count;

  // bump the max size if the call is frequent
  if ((freq >= InlineFrequencyRatio) ||
      (call_site_count >= InlineFrequencyCount) ||
      is_unboxing_method(callee_method, C) ||
      is_init_with_ea(callee_method, caller_method, C)) {

    max_inline_size = C->freq_inline_size();
    if (size <= max_inline_size && TraceFrequencyInlining) {
      CompileTask::print_inline_indent(inline_level());
      tty->print_cr("Inlined frequent method (freq=%d count=%d):", freq, call_site_count);
      CompileTask::print_inline_indent(inline_level());
      callee_method->print();
      tty->cr();
    }
  } else {
    // Not hot.  Check for medium-sized pre-existing nmethod at cold sites.
    if (callee_method->has_compiled_code() &&
        callee_method->instructions_size() > inline_small_code_size) {
      // we force inlining when the caller is cached (to make sure that we replay correctly)
      // if (ciCacheProfiles::is_cached(caller_method->get_Method())) {
      //   set_msg("force inline by ciCacheProfiles (over compiled into medium method)");
      //   _forced_inline = true;
      //   return true;
      // }
      set_msg("already compiled into a medium method");
      return false;
    }
  }

  if (size > max_inline_size) {
    if (max_inline_size > default_max_inline_size) {
      set_msg("hot method too big");
    } else {
      set_msg("too big");
    }
    return false;
  }
  return true;
}
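// Illustrative note on the frequency test above (editorial, example numbers are
// hypothetical): freq is computed with integer division, so the ratio test only
// fires when the call site is reached at least InlineFrequencyRatio times per
// caller invocation on average.  E.g. with call_site_count = 500 and
// invoke_count = 1000, freq is 0, and the site is treated as frequent only if
// call_site_count >= InlineFrequencyCount or one of the unboxing /
// EA-constructor special cases applies.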