address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
#ifdef COMPILER2
  // Emit the out-of-line stub used to re-route a compiled static call to the
  // interpreter. The stub is fixed up (inline-cache metadata and branch target
  // patched) when the corresponding call is converted from calling compiled
  // code to calling interpreted code.
  //
  // Returns the start address of the stub, or NULL if the code cache is full.
  if (mark == NULL) {
    // Get the mark within main instrs section which is set to the address of the call.
    mark = cbuf.insts_mark();
  }
  assert(mark != NULL, "mark must not be NULL");

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address stub = __ start_a_stub(Compile::MAX_stubs_size);
  if (stub == NULL) {
    return NULL;  // CodeBuffer::expand failed.
  }

  // Relate this stub to its call site so the patching code can locate it.
  __ relocate(static_stub_Relocation::spec(mark));

  // Load the inline-cache metadata (placeholder NULL, patched later) ...
  AddressLiteral meta = __ allocate_metadata_address(NULL);
  bool success = __ load_const_from_toc(as_Register(Matcher::inline_cache_reg_encode()), meta);

  // ... then the branch destination (placeholder -1, also patched later),
  // and jump to it.
  __ set_inst_mark();
  AddressLiteral a((address)-1);
  success = success && __ load_const_from_toc(Z_R1, a);
  if (!success) {
    return NULL;  // CodeCache is full.
  }
  __ z_br(Z_R1);

  __ end_a_stub();  // Update current stubs pointer and restore insts_end.
  return stub;
#else
  ShouldNotReachHere();
  // Keep this non-void function well-formed on every path; matches the
  // PowerPC variant of this emitter, which also returns NULL here.
  return NULL;
#endif
}
inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
  // Convenience overload: place 'obj' into the constant pool and delegate to
  // the AddressLiteral-based variant to materialize it in register 'd'.
  AddressLiteral md = allocate_metadata_address(obj);
  set_metadata(md, d);
}
// Emit the out-of-line stub that re-routes a compiled static call to the
// interpreter (PowerPC variant). The inline-cache metadata and the call
// target are loaded from the constant pool (TOC) so they can be patched
// later, when the call site is converted between compiled and interpreted
// calling. Returns the stub's start address, or NULL if the code cache is
// full.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
#ifdef COMPILER2
  if (mark == NULL) {
    // Get the mark within main instrs section which is set to the address of the call.
    mark = cbuf.insts_mark();
  }

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  // Start the stub.
  address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
  if (stub == NULL) {
    return NULL; // CodeCache is full
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  __ relocate(static_stub_Relocation::spec(mark));
  // Remember where the stub's code begins so its size can be checked below.
  const int stub_start_offset = __ offset();

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  // Placeholder NULL metadata; patched when the call site is bound.
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  // fixed_size keeps the load sequence length constant so it stays patchable.
  bool success = __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()), ic, reg_scratch, /*fixed_size*/ true);
  if (!success) {
    return NULL; // CodeCache is full
  }

  if (ReoptimizeCallSequences) {
    // Single patchable 64-bit branch to the (yet unknown) target.
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    // Load the (yet unknown) target from the TOC and branch via CTR.
    AddressLiteral a((address)-1);
    success = __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    if (!success) {
      return NULL; // CodeCache is full
    }
    __ mtctr(reg_scratch);
    __ bctr();
  }

  // FIXME: Assert that the stub can be identified and patched.

  // Java_to_interp_stub_size should be good.
  assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(), "should be good size");
  // Must not be mistakable for a trampoline stub (see scratch-register note above).
  assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)), "must not confuse java_to_interp with trampoline stubs");

  // End the stub.
  __ end_a_stub();
  return stub;
#else
  ShouldNotReachHere();
  return NULL;
#endif
}