//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon Trap when class that array contains is not loaded
  // we need the loaded class for the rest of graph; do not
  // initialize the container class (see Java spec)!!!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}
//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }
  if (klass->is_being_initialized()) {
    emit_guard_for_new(klass);
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }
}
//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  // The allocator will coalesce int->oop copies away.  See comment in
  // coalesce.cpp about how this works.  It depends critically on the exact
  // code shape produced here, so if you are changing this code shape
  // make sure the GC info for the heap-top is correct in and around the
  // slow-path call.

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if (!klass->is_initialized() ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }

  Node* obj = new_instance(klass);

  // Push resultant oop onto stack
  push(obj);
}
void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}
//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}
//------------------------------make_jvmpi_method_exit-------------------------
// JVMPI -- record exit from a method if compiled while JVMPI is turned on
void GraphKit::make_jvmpi_method_exit(ciMethod* method) {
  const TypeFunc *call_type    = OptoRuntime::jvmpi_method_exit_Type();
  address         call_address = OptoRuntime::jvmpi_method_exit_Java();
  // CAST_FROM_FN_PTR(address, SharedRuntime::jvmpi_method_exit);
  // OptoRuntime::jvmpi_method_exit_Java();
  const char     *call_name    = "jvmpi_method_exit";
  // OptoRuntime::stub_name( call_address );

  // assert triggers on exception exits with other BCIs
  // assert(bci() == InvocationEntryBci, "must be outside all blocks");

  const TypeInstPtr* method_type = TypeInstPtr::make(TypePtr::Constant, method->klass(), true, method, 0);
  Node *method_node = _gvn.transform( new ConPNode(method_type) );

  kill_dead_locals();
  make_slow_call( call_type, call_address, NULL, control(), method_node, null() );
}
//------------------------------make_jvmpi_method_entry------------------------
// JVMPI -- record entry to a method if compiled while JVMPI is turned on
void GraphKit::make_jvmpi_method_entry() {
  const TypeFunc *call_type    = OptoRuntime::jvmpi_method_entry_Type();
  address         call_address = OptoRuntime::jvmpi_method_entry_Java();
  const char     *call_name    = OptoRuntime::stub_name( call_address );
  assert(bci() == InvocationEntryBci, "must be outside all blocks");

  const TypeInstPtr *method_type = TypeInstPtr::make(TypePtr::Constant, method()->klass(), true, method(), 0);
  Node *methodOop_node = _gvn.transform( new ConPNode(method_type) );
  Node *receiver_node  = (method() && !method()->is_static())  // IF  (virtual call)
    ? map()->in(TypeFunc::Parms)                               // THEN 'this' pointer, receiver,
    : null();                                                  // ELSE NULL

  kill_dead_locals();
  make_slow_call( call_type, call_address, NULL, control(), methodOop_node, receiver_node );
}
//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  Node *obj = do_null_check(peek(), T_OBJECT);
  // Check for locking null object
  if (stopped()) return;

  // the monitor object is not part of debug info expression stack
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}
//------------------------------make_dtrace_method_entry_exit------------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
  address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C) ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform( ConNode::make(C, method_type) );

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}
void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  enum { MAX_DIMENSION = 5 };
  if (ndimensions > MAX_DIMENSION || ndimensions <= 0) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node* length[MAX_DIMENSION+1];
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--) length[j] = pop();

  // The original expression was of this form:  new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use multianewarray instead of [a]newarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    {
      PreserveReexecuteState preexecs(this);
      _sp += ndimensions;
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  //case 1: Actually, there is no case 1.  It's handled by new_array.
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  default: ShouldNotReachHere();
  };

  Node* c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                              OptoRuntime::multianewarray_Type(ndimensions),
                              fun, NULL, TypeRawPtr::BOTTOM,
                              makecon(TypeKlassPtr::make(array_klass)),
                              length[0], length[1], length[2],
                              length[3], length[4]);
  Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}
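
// Worked example of the expansion heuristic above (the default value of
// MultiArrayExpandLimit is assumed to be 6 here): for "new T[2][n]" with an
// unknown inner length n, the loop visits only the non-final dimension j == 0,
// giving dim_con == 2, expand_fanout == 2 and expand_count == 1 + 2 == 3.
// Since 3 <= 6, the bytecode is expanded into three fast 1-d allocations (one
// outer length-2 array plus two inner length-n arrays) by
// expand_multianewarray().  A shape like "new T[4][4][n]" reaches
// expand_count == 1 + 4 + 16 == 21, which exceeds the limit, so it falls back
// to the OptoRuntime::multianewarray3_Java() runtime call instead.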
//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses, or ones that are not loaded
// at compile-time.  We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  const TypeOopPtr *toop = ex_node->bottom_type()->is_oopptr();
  const TypeKlassPtr *tkid = TypeKlassPtr::make_kid(toop->klass(), toop->klass_is_exact());
  Node *ex_kid_node = _gvn.transform(new (C, 2) GetKIDNode(control(), ex_node, tkid));

  // Have handlers and the exception klass is not exact?  It might be the
  // merging of many exact exception klasses (happens a lot with nested inlined
  // throw/catch blocks).
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a GetKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a GetKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the GetKlass with the
    // klass constant for the exception oop.
    if (ex_node->is_Phi()) {
      ex_kid_node = new (C, ex_node->req()) PhiNode(ex_node->in(0), TypeKlassPtr::KID);
      for (uint i = 1; i < ex_node->req(); i++) {
        const TypeOopPtr *toopi = ex_node->in(i)->bottom_type()->is_oopptr();
        const TypeKlassPtr *tkidi = TypeKlassPtr::make_kid(toop->klass(), toop->klass_is_exact());
        Node *kid = _gvn.transform(new (C, 2) GetKIDNode(ex_node->in(0)->in(i), ex_node->in(i), tkidi));
        ex_kid_node->init_req(i, kid);
      }
      _gvn.set_type(ex_kid_node, TypeKlassPtr::KID);
    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  ciInstanceKlass *handler_catch_klass = NULL;
  for (; !handlers.is_done(); handlers.next()) {
    // Do nothing if turned off
    if (!DeutschShiffmanExceptions) break;
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);         // Push exception oop for handler
      merge_exception(handler_bci); // jump to handler
      return;                       // No more handling to be done here!
    }

    handler_catch_klass = handler->catch_klass();
    if (!handler_catch_klass->is_loaded()) // klass is not loaded?
      break;                               // Must call Rethrow!

    // Sharpen handler klass.  Some klasses cannot have any oops
    // (e.g. interface with no implementations).
    const TypePtr* tpx = TypeOopPtr::make_from_klass_unique(handler_catch_klass);
    const TypeOopPtr *tp = tpx->isa_oopptr(); // Oop of this klass is possible?
    Node *handler_klass = tp ? _gvn.makecon( TypeKlassPtr::make_kid(tp->klass(), true) ) : NULL;

    Node *failure = gen_subtype_check( ex_kid_node, handler_klass, _gvn.type(ex_node) );
    {
      PreserveJVMState pjvms(this);
      Node *ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tpx));
      push_ex_oop(ex_oop);          // Push exception oop for handler
      merge_exception(handler_bci);
    }

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    set_control(failure);
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  if (remaining == 1) {
    // Further checks do not matter.
  }

  if (can_rerun_bytecode()) {
    // Do not push_ex_oop here!
    // Re-executing the bytecode will reproduce the throwing condition.
    bool must_throw = true;
    uncommon_trap(Deoptimization::Reason_unloaded, handler_catch_klass,
                  "matching handler klass not loaded", must_throw);
    return;
  }

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    false /* !must_callruntimenode */,
                    OptoRuntime::forward_exception2_Type(),
                    StubRoutines::forward_exception_entry2(),
                    "forward_exception2",
                    TypeRawPtr::BOTTOM, // sets the exception oop back into thr->_pending_ex
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}
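
// Illustration of the handler loop above (a summary of the existing code, no
// new behavior assumed): with two applicable handlers whose catch klasses are
// both loaded and DeutschShiffmanExceptions enabled, the first iteration emits
// gen_subtype_check() of the exception's KID against the first handler's klass
// constant; the matching arm CheckCastPPs the exception oop and merges into
// that handler's bci, while the failing arm (set_control(failure)) falls
// through to the second handler, which, once remaining == 1, is entered
// unconditionally.  An unloaded catch klass instead breaks out of the loop and
// either re-runs the throwing bytecode via an uncommon trap or forwards the
// exception through the forward_exception2 stub.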
//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;

  // Find target being called
  bool will_link;
  ciMethod*        dest_method  = iter().get_method(will_link);
  ciInstanceKlass* holder_klass = dest_method->holder();
  ciKlass*         holder       = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int nargs = dest_method->arg_size();

  // See if the receiver (if any) is NULL; if so we always throw BEFORE
  // attempting to resolve the call or initialize the holder class.  Doing so
  // out of order opens a window where we can endlessly deopt because the call
  // holder is not initialized, but the call never actually happens (forcing
  // class initialization) because we only see NULL receivers.
  CPData_Invoke *caller_cpdi = cpdata()->as_Invoke(bc());
  debug_only( assert(caller_cpdi->is_Invoke(), "Not invoke!") );
  if (is_virtual_or_interface &&
      _gvn.type(stack(sp() - nargs))->higher_equal(TypePtr::NULL_PTR)) {
    builtin_throw( Deoptimization::Reason_null_check, "null receiver", caller_cpdi,
                   caller_cpdi->saw_null(), /*must_throw=*/true );
    return;
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
    return;
  }
  assert(FAM || holder_klass->is_loaded(), "");
  assert(dest_method->is_static() == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool      call_is_virtual = is_virtual_or_interface;
  int       vtable_index    = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method     = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node* receiver_node = stack(sp() - nargs);
    const TypeInstPtr* inst_type = _gvn.type(receiver_node)->isa_instptr();
    if (inst_type) {
      ciInstanceKlass* ikl = inst_type->klass()->as_instance_klass();
      // If the receiver is not yet linked then: (1) we never can make this
      // call because no objects can be created until linkage, and (2) CHA
      // reports incorrect answers... so do not bother with making the call
      // until after the klass gets linked.
      ciInstanceKlass *ikl2 = ikl->is_subtype_of(klass) ? ikl : klass;
      if (!ikl->is_linked()) {
        uncommon_trap(Deoptimization::Reason_uninitialized, klass,
                      "call site where receiver is not linked", false);
        return;
      }
    }
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);
    // Has the call been sufficiently improved such that it is no longer a virtual?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (false) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors) &&
                    (!C->method()->should_disable_inlining()) &&
                    (call_method->number_of_breakpoints() == 0);

  // Get profile data for the *callee*.  First see if we have a precise
  // CodeProfile for this exact inline because C1 inlined it already.
  CodeProfile *callee_cp;
  int callee_cp_inloff;

  if (caller_cpdi->inlined_method_oid() == call_method->objectId()) {
    callee_cp = c1_cp();                          // Use same CodeProfile as current
    callee_cp_inloff = caller_cpdi->cpd_offset(); // But use inlined portion
  } else {
    // If callee has a cp, clone it and use
    callee_cp = call_method->codeprofile(true);
    callee_cp_inloff = 0;

    if (callee_cp || FAM) {
      // The cloned cp needs to be freed later
      Compile* C = Compile::current();
      C->record_cloned_cp(callee_cp);
    } else {
      // Had profile info at top level, but not for this call site?
      // callee_cp will hold the just created cp, or whatever cp allocated by
      // other thread which wins the race in set_codeprofile
      callee_cp = call_method->set_codeprofile(CodeProfile::make(call_method));
    }
  }

  CPData_Invoke *c2_caller_cpdi = UseC1 ? c2cpdata()->as_Invoke(bc()) : NULL;

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms,
                                        try_inline, prof_factor(), callee_cp, callee_cp_inloff,
                                        c2_caller_cpdi, caller_cpdi);

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  JVMState* new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
  if (new_jvms == NULL) {       // Did it work?
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
    if (PrintOpto || PrintInlining || PrintC2Inlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        C2OUT->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(C2OUT);
        C2OUT->cr();
      }
    }
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;         // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms,
                           try_inline, prof_factor(), c1_cp(), c1_cp_inloff(),
                           c2_caller_cpdi, caller_cpdi);
    new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
    assert(new_jvms != NULL, "call failed to generate:  calls should work");
    if (c2_caller_cpdi) c2_caller_cpdi->_inlining_failure_id = IF_GENERALFAILURE;
  }

  if (cg->is_inline()) {
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver, true);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    } else {
      assert0( call_method->return_type()->is_loaded() );
      BasicType result_type = dest_method->return_type()->basic_type();
      if (result_type == T_OBJECT || result_type == T_ARRAY) {
        const Type *t = peek()->bottom_type();
        assert0( t == TypePtr::NULL_PTR || t->is_oopptr()->klass()->is_loaded() );
      }
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}
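
// Summary of the call-strategy choice above (restating the existing code, no
// new behavior assumed): optimize_inlining() is the piece that consults CHA
// and the receiver-type profile.  If it returns a method, the site is treated
// as a static call (call_is_virtual == false) and becomes an ordinary inlining
// candidate for the CallGenerator; otherwise the site stays a true
// v-call/i-call, and C->call_generator() decides whether to emit a
// profile-guided receiver check with a virtual fallback or a plain virtual
// dispatch.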