// Compute the destination entry point (and any cached value) needed to bind a
// monomorphic call site to 'method'. The result is returned through 'info'.
//
// is_optimized: Compiler has generated an optimized call (i.e., no inline
//               cache)
// static_bound: The call can be statically bound (i.e., no need to use an
//               inline cache)
void CompiledIC::compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass, bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS) {
  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    // Call to compiled code. Optimized/static-bound calls need no receiver
    // check, so they can target the verified entry point directly.
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code. A receiver klass is only cached when an inline
    // cache check is actually required (neither static-bound nor optimized).
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
  } else {
    // Call to the interpreter.
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a Method* to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However in that case we will now notice it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.

    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (statically-bindable method is called via
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // Can't check the assert because we don't have the CompiledIC with which
    // to find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry: an optimized call was emitted as a static call, so
      // the c2i adapter entry plus the Method* suffices to reach the
      // interpreter.
      info.set_interpreter_entry(method()->get_c2i_entry(), method());
    } else {
      // Use icholder entry: the unverified c2i entry performs the receiver
      // check against the (method, receiver klass) pair held by the icholder.
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}
// Transition this inline cache to the monomorphic state described by 'info'
// (either a call into the interpreter or a call to compiled code). Must be
// called with the CompiledIC_lock held or at a safepoint.
void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that an monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe
  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (Method*)info.cached_metadata());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
           p2i(instruction_address()),
           method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      // Fix: cached_metadata() is legitimately NULL for optimized/static-bound
      // transitions (see the assert above and the static_bound computation),
      // so it must not be dereferenced unconditionally when formatting the
      // trace line.
      const char* rcvr_klass_string = (info.cached_metadata() != NULL)
          ? ((Klass*)info.cached_metadata())->print_value_string()
          : "NULL";
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
        p2i(instruction_address()),
        rcvr_klass_string,
        (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}