// Slow-path signature handler for native calls.  In this variant all
// register arguments are integer registers, so after the runtime call
// packs the arguments into the frame we simply reload each register
// argument; no float/double handling is needed here.
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame corresponding to the
  // outer interpreter frame
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  // Call the runtime helper: (thread, method, locals, argv) -> packs the
  // outgoing native arguments into the frame as varargs.
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);  // third argument passed in the delay slot
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();

  // load the register arguments (the C code packed them as varargs)
  for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
    __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
  }
  __ ret();
  __ delayed()-> restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
  return entry;
}
// Abstract method entry. // address InterpreterGenerator::generate_abstract_entry(void) { address entry = __ pc(); // // Registers alive // R16_thread - JavaThread* // R19_method - callee's method (method to be invoked) // R1_SP - SP prepared such that caller's outgoing args are near top // LR - return address to caller // // Stack layout at this point: // // 0 [TOP_IJAVA_FRAME_ABI] <-- R1_SP // alignment (optional) // [outgoing Java arguments] // ... // PARENT [PARENT_IJAVA_FRAME_ABI] // ... // // Can't use call_VM here because we have not set up a new // interpreter state. Make the call to the vm and make it look like // our caller set up the JavaFrameAnchor. __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/); // Push a new C frame and save LR. __ save_LR_CR(R0); __ push_frame_reg_args(0, R11_scratch1); // This is not a leaf but we have a JavaFrameAnchor now and we will // check (create) exceptions afterward so this is ok. __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError), R16_thread); // Pop the C frame and restore LR. __ pop_frame(); __ restore_LR_CR(R0); // Reset JavaFrameAnchor from call_VM_leaf above. __ reset_last_Java_frame(); #ifdef CC_INTERP // Return to frame manager, it will handle the pending exception. __ blr(); #else // We don't know our caller, so jump to the general forward exception stub, // which will also pop our full frame off. Satisfy the interface of // SharedRuntime::generate_forward_exception() __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0); __ mtctr(R11_scratch1); __ bctr(); #endif return entry; }
// Generates the C2 exception blob: stashes the in-flight exception
// oop (rax) and pc (rdx) in the JavaThread, calls
// OptoRuntime::handle_exception_C to find a handler, then jumps to
// that handler with the exception oop/pc reloaded into rax/rdx.
void OptoRuntime::generate_exception_blob() {
  // Capture info about frame layout
  enum layout {
    thread_off,   // last_java_sp
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off,
    return_off,   // slot for return address
    framesize
  };

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  CodeBuffer buffer("exception_blob", 512, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  OopMapSet *oop_maps = new OopMapSet();

  address start = __ pc();

  __ push(rdx);                              // save the throwing pc
  __ subptr(rsp, return_off * wordSize);     // Prolog!

  // rbp, location is implicitly known
  __ movptr(Address(rsp, rbp_off * wordSize), rbp);

  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame where the exception happened in.
  __ get_thread(rcx);
  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), rax);
  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), rdx);

  // This call does all the hard work. It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  __ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument
  __ set_last_Java_frame(rcx, noreg, noreg, NULL);

  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));

  // No registers to map, rbp is known implicitly
  oop_maps->add_gc_map( __ pc() - start, new OopMap( framesize, 0 ));
  __ get_thread(rcx);
  __ reset_last_Java_frame(rcx, false, false);

  // Restore callee-saved registers
  __ movptr(rbp, Address(rsp, rbp_off * wordSize));

  __ addptr(rsp, return_off * wordSize);   // Epilog!
  __ pop(rdx); // Exception pc

  // rax: exception handler for given <exception oop/exception pc>

  // Restore SP from BP if the exception PC is a MethodHandle call.
  __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp);

  // We have a handler in rax, (could be deopt blob)
  // rdx - throwing pc, deopt blob will need it.

  __ push(rax);

  // Get the exception
  __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), NULL_WORD);
  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);

  __ pop(rcx);

  // rax: exception oop
  // rcx: exception handler
  // rdx: exception pc
  __ jmp (rcx);

  // -------------
  // make sure all code is generated
  masm->flush();

  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize);
}
// Calls a VM runtime entry point from a C1 stub.  Sets up/tears down the
// last_Java_frame around the call, records and returns the offset of the
// return address (used for the stub's oop map), forwards any pending
// exception, and transfers the oop / metadata results from the thread's
// vm_result slots into the requested registers.
//
// oop_result1         - register to receive the thread's vm_result, or noreg
// metadata_result     - register to receive the thread's vm_result_2, or noreg
// entry_point         - runtime function to call
// number_of_arguments - debugging only; frame size is unaffected (see below)
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  // for sparc changing the number of arguments doesn't change
  // anything about the frame size so we'll always lie and claim that
  // we are only passing 1 argument.
  set_num_rt_args(1);

  assert_not_delayed();

  // bang stack before going to runtime
  set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);

  // debugging support
  assert(number_of_arguments >= 0 , "cannot have negative number of arguments");

  set_last_Java_frame(SP, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(L7_thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread) {
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  } else {
    delayed()->nop();               // (thread already passed)
  }
  int call_offset = offset();  // offset of return address
  restore_thread(L7_thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    ld_ptr(exception_addr, Gtemp);
    br_null_short(Gtemp, pt, L);
    // An exception is pending: clear both result slots so GC does not
    // see stale values, then forward the exception.
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);

    if (frame_size() == no_frame_size) {
      // we use O7 linkage so that forward_exception_entry has the issuing PC
      call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->restore();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
      jump_to(exc, G4);
      delayed()->nop();
    }
    bind(L);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result1->is_valid()) {   // get oop result if there is one and reset it in the thread
    get_vm_result (oop_result1);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
    st_ptr(G0, vm_result_addr);
  }

  // get second result if there is one and reset the value in the thread
  if (metadata_result->is_valid()) {
    get_vm_result_2 (metadata_result);
  } else {
    // be a little paranoid and clear the result
    Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
    st_ptr(G0, vm_result_addr_2);
  }

  return call_offset;
}
//------------------------------ generate_exception_blob ---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in sparc.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee save registers) unwind the frame and jump to the
// exception handler for the nmethod if there is no Java level handler
// for the nmethod.
//
// This code is entered with a jmp.
//
// Arguments:
//   O0: exception oop
//   O1: exception pc
//
// Results:
//   O0: exception oop
//   O1: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//
void OptoRuntime::generate_exception_blob() {
  // allocate space for code
  ResourceMark rm;
  int pad = VerifyThread ? 256 : 0; // Extra slop space for more verify code

  // setup code generation tools
  // Measured 8/7/03 at 256 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 528 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("exception_blob", 600+pad, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int framesize_in_bytes = __ total_frame_size_in_bytes(0);
  int framesize_in_words = framesize_in_bytes / wordSize;
  int framesize_in_slots = framesize_in_bytes / sizeof(jint);

  Label L;

  int start = __ offset();

  __ verify_thread();
  // Stash the incoming exception oop and issuing pc in the thread so the
  // runtime call below needs no explicit exception arguments.
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
  __ st_ptr(Oissuing_pc, G2_thread, JavaThread::exception_pc_offset());

  // This call does all the hard work. It checks if an exception catch
  // exists in the method.
  // If so, it returns the handler address.
  // If the nmethod has been deoptimized and it had a handler the handler
  // address is the deopt blob unpack_with_exception entry.
  //
  // If no handler exists it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  __ save_frame(0);

  __ mov(G2_thread, O0);
  __ set_last_Java_frame(SP, noreg);
  __ save_thread(L7_thread_cache);

  // This call can block at exit and nmethod can be deoptimized at that
  // point. If the nmethod had a catch point we would jump to the
  // now deoptimized catch point and fall thru the vanilla deopt
  // path and lose the exception
  // Sure would be simpler if this call didn't block!
  __ call(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C), relocInfo::runtime_call_type);
  __ delayed()->mov(L7_thread_cache, O0);  // thread argument passed in delay slot

  // Set an oopmap for the call site. This oopmap will only be used if we
  // are unwinding the stack. Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.
  OopMapSet *oop_maps = new OopMapSet();
  oop_maps->add_gc_map( __ offset()-start, new OopMap(framesize_in_slots, 0));

  __ bind(L);
  __ restore_thread(L7_thread_cache);
  __ reset_last_Java_frame();

  __ mov(O0, G3_scratch);             // Move handler address to temp
  __ restore();

  // Restore SP from L7 if the exception PC is a MethodHandle call site.
  __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), O7);
  __ tst(O7);
  __ movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);

  // G3_scratch contains handler address
  // Since this may be the deopt blob we must set O7 to look like we returned
  // from the original pc that threw the exception
  __ ld_ptr(G2_thread, JavaThread::exception_pc_offset(), O7);
  __ sub(O7, frame::pc_return_offset, O7);

  assert(Assembler::is_simm13(in_bytes(JavaThread::exception_oop_offset())), "exception offset overflows simm13, following ld instruction cannot be in delay slot");
  __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception); // O0
#ifdef ASSERT
  __ st_ptr(G0, G2_thread, JavaThread::exception_handler_pc_offset());
  __ st_ptr(G0, G2_thread, JavaThread::exception_pc_offset());
#endif
  __ JMP(G3_scratch, 0);
  // Clear the exception oop so GC no longer processes it as a root.
  __ delayed()->st_ptr(G0, G2_thread, JavaThread::exception_oop_offset());

  // -------------
  // make sure all code is generated
  masm->flush();

  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize_in_words);
}
// Entry point for normal (bytecode) methods in the C++ interpreter.
// The same generated code is used for synchronized and non-synchronized
// methods; the first call generates it and caches it in normal_entry.
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor);
  Label re_dispatch;
  Label call_interpreter;
  Label call_method;
  Label call_non_interpreted_method;
  Label return_with_exception;
  Label return_from_method;
  Label resume_interpreter;
  Label return_to_initial_caller;
  Label more_monitors;
  Label throwing_exception;

  // We use the same code for synchronized and not
  if (normal_entry)
    return normal_entry;

  address start = __ pc();

  // There are two ways in which we can arrive at this entry.
  // There is the special case where a normal interpreted method
  // calls another normal interpreted method, and there is the
  // general case of when we enter from somewhere else: from
  // call_stub, from C1 or C2, or from a fast accessor which
  // deferred. In the special case we're already in frame manager
  // code: we arrive at re_dispatch with Rstate containing the
  // previous interpreter state. In the general case we arrive
  // at start with no previous interpreter state so we set Rstate
  // to NULL to indicate this.
  __ bind (fast_accessor_slow_entry_path);
  __ load (Rstate, 0);
  __ bind (re_dispatch);

  // Adjust the caller's stack frame to accommodate any additional
  // local variables we have contiguously with our parameters.
  generate_adjust_callers_stack();

  // Allocate and initialize our stack frame.
  generate_compute_interpreter_state(false);

  // Call the interpreter ==============================================
  __ bind (call_interpreter);

  // We can setup the frame anchor with everything we want at
  // this point as we are thread_in_Java and no safepoints can
  // occur until we go to vm mode. We do have to clear flags
  // on return from vm but that is it
  __ set_last_Java_frame ();

  // Call interpreter
  address interpreter = JvmtiExport::can_post_interpreter_events() ?
    CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks) :
    CAST_FROM_FN_PTR(address, BytecodeInterpreter::run);

  __ mr (r3, Rstate);
  __ call (interpreter);
  __ fixup_after_potential_safepoint ();

  // Clear the frame anchor
  __ reset_last_Java_frame ();

  // Examine the message from the interpreter to decide what to do
  __ lwz (r4, STATE(_msg));
  __ compare (r4, BytecodeInterpreter::call_method);
  __ beq (call_method);
  __ compare (r4, BytecodeInterpreter::return_from_method);
  __ beq (return_from_method);
  __ compare (r4, BytecodeInterpreter::more_monitors);
  __ beq (more_monitors);
  __ compare (r4, BytecodeInterpreter::throwing_exception);
  __ beq (throwing_exception);

  // Unknown message: print an error and abort.
  __ load (r3, (intptr_t) "error: bad message from interpreter: %d\n");
  __ call (CAST_FROM_FN_PTR(address, printf));
  __ should_not_reach_here (__FILE__, __LINE__);

  // Handle a call_method message ======================================
  __ bind (call_method);
  __ load (Rmethod, STATE(_result._to_call._callee));
  __ verify_oop(Rmethod);

  // Point Rlocals at the callee's parameters on the stack.
  __ load (Rlocals, STATE(_stack));
  __ lhz (r0, Address(Rmethod, methodOopDesc::size_of_parameters_offset()));
  __ shift_left (r0, r0, LogBytesPerWord);
  __ add (Rlocals, Rlocals, r0);

  __ load (r0, STATE(_result._to_call._callee_entry_point));
  __ load (r3, (intptr_t) start);
  __ compare (r0, r3);
  __ bne (call_non_interpreted_method);

  // Interpreted methods are intercepted and re-dispatched -----------
  __ load (r0, CAST_FROM_FN_PTR(intptr_t, RecursiveInterpreterActivation));
  __ mtlr (r0);
  __ b (re_dispatch);

  // Non-interpreted methods are dispatched normally -----------------
  __ bind (call_non_interpreted_method);
  __ mtctr (r0);
  __ bctrl ();

  // Restore Rstate
  __ load (Rstate, Address(r1, StackFrame::back_chain_offset * wordSize));
  __ subi (Rstate, Rstate, sizeof(BytecodeInterpreter));

  // Check for pending exceptions
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_with_exception);

  // Convert the result and resume
  generate_convert_result(CppInterpreter::_tosca_to_stack);
  __ b (resume_interpreter);

  // Handle a return_from_method message ===============================
  __ bind (return_from_method);
  __ load (r0, STATE(_prev_link));
  __ compare (r0, 0);
  __ beq (return_to_initial_caller);

  // "Return" from a re-dispatch -------------------------------------
  generate_convert_result(CppInterpreter::_stack_to_stack);
  generate_unwind_interpreter_state();

  // Resume the interpreter
  __ bind (resume_interpreter);
  __ store (Rlocals, STATE(_stack));
  __ load (Rlocals, STATE(_locals));
  __ load (Rmethod, STATE(_method));
  __ verify_oop(Rmethod);
  __ load (r0, BytecodeInterpreter::method_resume);
  __ stw (r0, STATE(_msg));
  __ b (call_interpreter);

  // Return to the initial caller (call_stub etc) --------------------
  __ bind (return_to_initial_caller);
  generate_convert_result(CppInterpreter::_stack_to_native_abi);
  generate_unwind_interpreter_state();
  __ blr ();

  // Handle a more_monitors message ====================================
  __ bind (more_monitors);
  generate_more_monitors();
  __ load (r0, BytecodeInterpreter::got_monitors);
  __ stw (r0, STATE(_msg));
  __ b (call_interpreter);

  // Handle a throwing_exception message ===============================
  __ bind (throwing_exception);

  // Check we actually have an exception
#ifdef ASSERT
  {
    Label ok;
    __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
    __ compare (r0, 0);
    __ bne (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Return to wherever
  generate_unwind_interpreter_state();
  __ bind (return_with_exception);
  __ compare (Rstate, 0);
  __ bne (resume_interpreter);
  __ blr ();

  normal_entry = start;
  return start;
}
// Entry point for native (JNI) methods in the C++ interpreter.
// The same generated code is used for synchronized and non-synchronized
// methods; the first call generates it and caches it in native_entry.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  const Register handler  = r14;
  const Register function = r15;

  assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor, handler, function);

  // We use the same code for synchronized and not
  if (native_entry)
    return native_entry;

  address start = __ pc();

  // Allocate and initialize our stack frame.
  __ load (Rstate, 0);
  generate_compute_interpreter_state(true);

  // Make sure method is native and not abstract
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, Address(Rmethod, methodOopDesc::access_flags_offset()));
    __ andi_ (r0, r0, JVM_ACC_NATIVE | JVM_ACC_ABSTRACT);
    __ compare (r0, JVM_ACC_NATIVE);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Lock if necessary
  Label not_synchronized_1;

  __ bne (CRsync, not_synchronized_1);
  __ lock_object (Rmonitor);
  __ bind (not_synchronized_1);

  // Get signature handler
  const Address signature_handler_addr(
    Rmethod, methodOopDesc::signature_handler_offset());

  Label return_to_caller, got_signature_handler;

  __ load (handler, signature_handler_addr);
  __ compare (handler, 0);
  __ bne (got_signature_handler);
  __ call_VM (noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Rmethod, CALL_VM_NO_EXCEPTION_CHECKS);
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);
  __ load (handler, signature_handler_addr);
  __ bind (got_signature_handler);

  // Get the native function entry point
  const Address native_function_addr(
    Rmethod, methodOopDesc::native_function_offset());

  Label got_function;

  __ load (function, native_function_addr);
#ifdef ASSERT
  {
    // InterpreterRuntime::prepare_native_call() sets the mirror
    // handle and native function address first and the signature
    // handler last, so function should always be set here.
    Label ok;
    __ compare (function, 0);
    __ bne (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Call signature handler; it returns the result handler in r0.
  __ mtctr (handler);
  __ bctrl ();
  __ mr (handler, r0);

  // Pass JNIEnv
  __ la (r3, Address(Rthread, JavaThread::jni_environment_offset()));

  // Pass mirror handle if static
  const Address oop_temp_addr = STATE(_oop_temp);

  Label not_static;

  __ bne (CRstatic, not_static);
  __ get_mirror_handle (r4);
  __ store (r4, oop_temp_addr);
  __ la (r4, oop_temp_addr);
  __ bind (not_static);

  // Set up the Java frame anchor
  __ set_last_Java_frame ();

  // Change the thread state to native
  const Address thread_state_addr(Rthread, JavaThread::thread_state_offset());
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, thread_state_addr);
    __ compare (r0, _thread_in_Java);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif
  __ load (r0, _thread_in_native);
  __ stw (r0, thread_state_addr);

  // Make the call
  __ call (function);
  __ fixup_after_potential_safepoint ();

  // The result will be in r3 (and maybe r4 on 32-bit) or f1.
  // Wherever it is, we need to store it before calling anything
  const Register r3_save = r16;
#ifdef PPC32
  const Register r4_save = r17;
#endif
  const FloatRegister f1_save = f14;

  __ mr (r3_save, r3);
#ifdef PPC32
  __ mr (r4_save, r4);
#endif
  __ fmr (f1_save, f1);

  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic with respect to garbage collection.
  __ load (r0, _thread_in_native_trans);
  __ stw (r0, thread_state_addr);

  // Ensure the new state is visible to the VM thread.
  if(os::is_MP()) {
    if (UseMembar)
      __ sync ();
    else
      __ serialize_memory (r3, r4);
  }

  // Check for safepoint operation in progress and/or pending
  // suspend requests. We use a leaf call in order to leave
  // the last_Java_frame setup undisturbed.
  Label block, no_block;

  __ load (r3, (intptr_t) SafepointSynchronize::address_of_state());
  __ lwz (r0, Address(r3, 0));
  __ compare (r0, SafepointSynchronize::_not_synchronized);
  __ bne (block);
  __ lwz (r0, Address(Rthread, JavaThread::suspend_flags_offset()));
  __ compare (r0, 0);
  __ beq (no_block);
  __ bind (block);
  __ call_VM_leaf ( CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
  __ fixup_after_potential_safepoint ();
  __ bind (no_block);

  // Change the thread state
  __ load (r0, _thread_in_Java);
  __ stw (r0, thread_state_addr);

  // Reset the frame anchor
  __ reset_last_Java_frame ();

  // If the result was an OOP then unbox it and store it in the frame
  // (where it will be safe from garbage collection) before we release
  // the handle it might be protected by
  Label non_oop, store_oop;

  __ load (r0, (intptr_t) AbstractInterpreter::result_handler(T_OBJECT));
  __ compare (r0, handler);
  __ bne (non_oop);
  __ compare (r3_save, 0);
  __ beq (store_oop);      // NULL results are stored as-is
  __ load (r3_save, Address(r3_save, 0));
  __ bind (store_oop);
  __ store (r3_save, STATE(_oop_temp));
  __ bind (non_oop);

  // Reset handle block
  __ load (r3, Address(Rthread, JavaThread::active_handles_offset()));
  __ load (r0, 0);
  __ stw (r0, Address(r3, JNIHandleBlock::top_offset_in_bytes()));

  // If there is an exception we skip the result handler and return.
  // Note that this also skips unlocking which seems totally wrong,
  // but apparently this is what the asm interpreter does so we do
  // too.
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);

  // Unlock if necessary
  Label not_synchronized_2;

  __ bne (CRsync, not_synchronized_2);
  __ unlock_object (Rmonitor);
  __ bind (not_synchronized_2);

  // Restore saved result and call the result handler
  __ mr (r3, r3_save);
#ifdef PPC32
  __ mr (r4, r4_save);
#endif
  __ fmr (f1, f1_save);
  __ mtctr (handler);
  __ bctrl ();

  // Unwind the current activation and return
  __ bind (return_to_caller);
  generate_unwind_interpreter_state();
  __ blr ();

  native_entry = start;
  return start;
}
//------------------------------------------------------------------------------------------------------------------------ // Continuation point for throwing of implicit exceptions that are not handled in // the current activation. Fabricates an exception oop and initiates normal // exception dispatching in this frame. Since we need to preserve callee-saved values // (currently only for C2, but done for C1 as well) we need a callee-saved oop map and // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs. // If the compiler needs all registers to be preserved between the fault // point and the exception handler then it must assume responsibility for that in // AbstractCompiler::continuation_for_implicit_null_exception or // continuation_for_implicit_division_by_zero_exception. All other implicit // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are // either at call sites or otherwise assume that stack unwinding will be initiated, // so caller saved registers were assumed volatile in the compiler. // // Note: the routine set_pc_not_at_call_for_caller in SharedRuntime.cpp requires // that this code be generated into a RuntimeStub. 
// Generates a RuntimeStub that calls into the VM (runtime_entry) to raise
// an implicit exception and then forwards the now-pending exception to the
// caller.  If restore_saved_exception_pc is true, the faulting pc saved in
// the thread is pushed first so that the stub appears to have been called
// from the faulting instruction.
address StubGenerator::generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {
  int insts_size = 256;
  int locs_size  = 32;
  CodeBuffer* code = new CodeBuffer(insts_size, locs_size, 0, 0, 0, false, NULL, NULL, NULL, false, NULL, name, false);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(code);
  address start = __ pc();

  // This is an inlined and slightly modified version of call_VM
  // which has the ability to fetch the return PC out of
  // thread-local storage and also sets up last_Java_sp slightly
  // differently than the real call_VM
  Register java_thread = ebx;
  __ get_thread(java_thread);
  if (restore_saved_exception_pc) {
    __ movl(eax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset())));
    __ pushl(eax);
  }
#ifndef COMPILER2
  __ enter();    // required for proper stackwalking of RuntimeStub frame
#endif // COMPILER2
  __ subl(esp, framesize * wordSize);   // prolog
#ifdef COMPILER2
  if( OptoRuntimeCalleeSavedFloats ) {
    if( UseSSE == 1 ) {
      __ movss(Address(esp,xmm6_off*wordSize),xmm6);
      __ movss(Address(esp,xmm7_off*wordSize),xmm7);
    } else if( UseSSE == 2 ) {
      __ movsd(Address(esp,xmm6_off*wordSize),xmm6);
      __ movsd(Address(esp,xmm7_off*wordSize),xmm7);
    }
  }
#endif /* COMPILER2 */
  // Save the integer callee-saved registers into the frame.
  __ movl(Address(esp, ebp_off * wordSize), ebp);
  __ movl(Address(esp, edi_off * wordSize), edi);
  __ movl(Address(esp, esi_off * wordSize), esi);
  // push java thread (becomes first argument of C function)
  __ movl(Address(esp, thread_off * wordSize), java_thread);

  // Set up last_Java_sp and last_Java_fp
  __ set_last_Java_frame(java_thread, esp, ebp, NULL);

  // Call runtime
  __ call(runtime_entry, relocInfo::runtime_call_type);
  // Generate oop map
  OopMap* map = new OopMap(framesize, 0);
#ifdef COMPILER2
  // SharedInfo is apparently not initialized if -Xint is specified
  if (UseCompiler) {
    map->set_callee_saved(SharedInfo::stack2reg(ebp_off), framesize, 0, OptoReg::Name(EBP_num));
    map->set_callee_saved(SharedInfo::stack2reg(edi_off), framesize, 0, OptoReg::Name(EDI_num));
    map->set_callee_saved(SharedInfo::stack2reg(esi_off), framesize, 0, OptoReg::Name(ESI_num));
    if( OptoRuntimeCalleeSavedFloats ) {
      map->set_callee_saved(SharedInfo::stack2reg(xmm6_off ), framesize, 0, OptoReg::Name(XMM6a_num));
      map->set_callee_saved(SharedInfo::stack2reg(xmm6_off+1), framesize, 0, OptoReg::Name(XMM6b_num));
      map->set_callee_saved(SharedInfo::stack2reg(xmm7_off ), framesize, 0, OptoReg::Name(XMM7a_num));
      map->set_callee_saved(SharedInfo::stack2reg(xmm7_off+1), framesize, 0, OptoReg::Name(XMM7b_num));
    }
  }
#endif
#ifdef COMPILER1
  map->set_callee_saved(OptoReg::Name(SharedInfo::stack0+ebp_off), framesize, 0, OptoReg::Name(ebp->encoding()));
  map->set_callee_saved(OptoReg::Name(SharedInfo::stack0+esi_off), framesize, 0, OptoReg::Name(esi->encoding()));
  map->set_callee_saved(OptoReg::Name(SharedInfo::stack0+edi_off), framesize, 0, OptoReg::Name(edi->encoding()));
#endif
  oop_maps->add_gc_map(__ pc() - start, true, map);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  __ get_thread(java_thread);

  __ reset_last_Java_frame(java_thread, false);

  // Restore callee save registers.  This must be done after resetting
  // the Java frame.
#ifdef COMPILER2
  if( OptoRuntimeCalleeSavedFloats ) {
    if( UseSSE == 1 ) {
      __ movss(xmm6,Address(esp,xmm6_off*wordSize));
      __ movss(xmm7,Address(esp,xmm7_off*wordSize));
    } else if( UseSSE == 2 ) {
      __ movsd(xmm6,Address(esp,xmm6_off*wordSize));
      __ movsd(xmm7,Address(esp,xmm7_off*wordSize));
    }
  }
#endif /* COMPILER2 */
  __ movl(ebp,Address(esp, ebp_off * wordSize));
  __ movl(edi,Address(esp, edi_off * wordSize));
  __ movl(esi,Address(esp, esi_off * wordSize));

  // discard arguments
  __ addl(esp, framesize * wordSize);   // epilog
#ifndef COMPILER2
  __ leave(); // required for proper stackwalking of RuntimeStub frame
#endif // COMPILER2
  // check for pending exceptions
#ifdef ASSERT
  Label L;
  __ cmpl(Address(java_thread, Thread::pending_exception_offset()), (int)NULL);
  __ jcc(Assembler::notEqual, L);
  __ should_not_reach_here();
  __ bind(L);
#endif // ASSERT
  __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);

  // Note: it seems the frame size reported to the RuntimeStub has
  // to be incremented by 1 to account for the return PC. It
  // definitely must be one more than the amount by which SP was
  // decremented.
  int extra_words = 1;
#ifdef COMPILER1
  ++extra_words; // Not strictly necessary since C1 ignores frame size and uses link
#endif // COMPILER1
  RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, code, framesize + extra_words, oop_maps, false);
  return stub->entry_point();
}
// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2 etc..
// Doubles are passed in D0, D2, D4
// We store the signature of the first 16 arguments in the first argument
// slot because it will be overwritten prior to calling the native
// function, with the pointer to the JNIEnv.
// If LP64 there can be up to 16 floating point arguments in registers
// or 6 integer registers.
address AbstractInterpreterGenerator::generate_slow_signature_handler() {

  // Two-bit type codes packed into the signature word held in argument
  // slot 0 (one code per register argument).
  enum {
    non_float  = 0,
    float_sig  = 1,
    double_sig = 2,
    sig_mask   = 3
  };

  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame corresponding to the
  // outer interpreter frame
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  // Runtime helper packs the outgoing native arguments into the frame
  // and writes the register-argument signature word into slot 0.
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);  // third argument passed in the delay slot
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();

  // load the register arguments (the C code packed them as varargs)
  Address Sig = argv.address_in_frame();        // Argument 0 holds the signature
  __ ld_ptr( Sig, G3_scratch );                 // Get register argument signature word into G3_scratch
  __ mov( G3_scratch, G4_scratch);
  __ srl( G4_scratch, 2, G4_scratch);           // Skip Arg 0
  Label done;
  for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
    Label NonFloatArg;
    Label LoadFloatArg;
    Label LoadDoubleArg;
    Label NextArg;
    Address a = ldarg.address_in_frame();
    // Decode the 2-bit type code for this argument from the signature word.
    __ andcc(G4_scratch, sig_mask, G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, float_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, double_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
    __ delayed()->nop();

    __ bind(NonFloatArg);
    // There are only 6 integer register arguments!
    if ( ldarg.is_register() )
      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
    else {
      // Optimization, see if there are any more args and get out prior to checking
      // all 16 float registers. My guess is that this is rare.
      // If is_register is false, then we are done the first six integer args.
      __ br_null_short(G4_scratch, Assembler::pt, done);
    }
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );  // consume this code in the delay slot

    __ bind(LoadFloatArg);
    __ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadDoubleArg);
    __ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(NextArg);
  }

  __ bind(done);
  __ ret();
  __ delayed()-> restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
  return entry;
}
// Convenience wrapper: delegates to the boolean-parameterized
// reset_last_Java_frame overload with 'false' — presumably selecting the
// "static" reset variant; confirm semantics against the overload's definition.
inline void MacroAssembler::reset_last_Java_frame_static(void) {
  reset_last_Java_frame(false);
}
// Convenience wrapper: delegates to the boolean-parameterized
// reset_last_Java_frame overload with 'true' (the default/non-static
// behavior — see the overload's definition for the flag's meaning).
inline void MacroAssembler::reset_last_Java_frame(void) {
  reset_last_Java_frame(true);
}