// Counter-overflow event for the tiered policy: dispatches to the invocation-
// or back-branch handler and, for loop events, reports any newly available
// higher-level OSR nmethod back to the caller so it can perform OSR.
nmethod* SimpleThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
  // A thread forced to stay interpreted by JVMTI must not trigger compilations.
  if (comp_level == CompLevel_none &&
      JvmtiExport::can_post_interpreter_events() &&
      thread->is_interp_only_mode()) {
    return NULL;
  }

  // Testing modes drive compilation themselves; suppress policy-driven compiles.
  if (CompileTheWorld || ReplayCompiles) {
    return NULL;
  }

  handle_counter_overflow(method());
  if (method() != inlinee()) {
    handle_counter_overflow(inlinee());
  }

  if (PrintTieredEvents) {
    print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
  }

  if (bci == InvocationEntryBci) {
    method_invocation_event(method, inlinee, comp_level, nm, thread);
    return NULL;
  }

  // Back-branch overflow. method == inlinee if the event originated in the
  // main method.
  method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);

  // If the event resulted in a higher-level OSR compilation, hand the new
  // nmethod back so the caller can switch to it.
  nmethod* osr_candidate = inlinee->lookup_osr_nmethod_for(bci, comp_level, false);
  if (osr_candidate != NULL && osr_candidate->comp_level() > comp_level) {
    return osr_candidate;
  }
  return NULL;
}
// Counter-overflow event for the non-tiered policy. Called only from the
// interpreter (comp_level is always CompLevel_none). Returns an OSR nmethod
// when a loop event can be serviced by on-stack replacement, NULL otherwise.
nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));

  const bool is_entry = (bci == InvocationEntryBci);

  if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
    // If certain JVMTI events (e.g. frame pop event) are requested then the
    // thread is forced to remain in interpreted code. This is implemented
    // partly by a check in the run_compiled_code section of the interpreter
    // (whether to skip running compiled code), and partly by skipping OSR
    // compiles for interpreted-only threads here. Note that invocation
    // events deliberately fall through: only OSR is suppressed.
    if (!is_entry) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
  }

  if (CompileTheWorld || ReplayCompiles) {
    // Testing modes drive compilation themselves; just reset the counter
    // that overflowed and don't trigger any other compiles.
    if (is_entry) {
      reset_counter_for_invocation_event(method);
    } else {
      reset_counter_for_back_branch_event(method);
    }
    return NULL;
  }

  if (is_entry) {
    // When the code cache is full, compilation gets switched off and
    // UseCompiler is set to false.
    if (!method->has_compiled_code() && UseCompiler) {
      method_invocation_event(method, thread);
    } else {
      // Force counter overflow on method entry, even if no compilation
      // happened. (The method_invocation_event call does this also.)
      reset_counter_for_invocation_event(method);
    }
    // An invocation overflow no longer goes back and retries the test for a
    // compiled method: the loser of a compile race always runs interpreted.
    return NULL;
  }

  // Counter overflow in a loop => try on-stack replacement.
  nmethod* osr_method = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
  NOT_PRODUCT(trace_osr_request(method, osr_method, bci));

  // When the code cache is full (UseCompiler false) we must not compile more.
  if (osr_method == NULL && UseCompiler) {
    method_back_branch_event(method, bci, thread);
    osr_method = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
  }

  if (osr_method == NULL) {
    reset_counter_for_back_branch_event(method);
    return NULL;
  }
  return osr_method;
}