address generate_call_stub(address& return_address) { assert (!TaggedStackInterpreter, "not supported"); StubCodeMark mark(this, "StubRoutines", "call_stub"); address start = __ enter(); const Register call_wrapper = r3; const Register result = r4; const Register result_type = r5; const Register method = r6; const Register entry_point = r7; const Register parameters = r8; const Register parameter_words = r9; const Register thread = r10; #ifdef ASSERT // Make sure we have no pending exceptions { StackFrame frame; Label label; __ load (r0, Address(thread, Thread::pending_exception_offset())); __ compare (r0, 0); __ beq (label); __ prolog (frame); __ should_not_reach_here (__FILE__, __LINE__); __ epilog (frame); __ blr (); __ bind (label); } #endif // ASSERT // Calculate the frame size StackFrame frame; for (int i = 0; i < StackFrame::max_crfs; i++) frame.get_cr_field(); for (int i = 0; i < StackFrame::max_gprs; i++) frame.get_register(); StubRoutines::set_call_stub_base_size(frame.unaligned_size() + 3*wordSize); // the 3 extra words are for call_wrapper, result and result_type const Register parameter_bytes = parameter_words; __ shift_left (parameter_bytes, parameter_words, LogBytesPerWord); const Register frame_size = r11; const Register padding = r12; __ addi (frame_size, parameter_bytes, StubRoutines::call_stub_base_size()); __ calc_padding_for_alignment (padding, frame_size, StackAlignmentInBytes); __ add (frame_size, frame_size, padding); // Save the link register and create the new frame __ mflr (r0); __ store (r0, Address(r1, StackFrame::lr_save_offset * wordSize)); __ neg (r0, frame_size); __ store_update_indexed (r1, r1, r0); #ifdef PPC64 __ mfcr (r0); __ store (r0, Address(r1, StackFrame::cr_save_offset * wordSize)); #endif // PPC64 // Calculate the address of the interpreter's local variables const Register locals = frame_size; __ addi (locals, r1, frame.start_of_locals() - wordSize); __ add (locals, locals, padding); __ add (locals, locals, parameter_bytes); // Store the call wrapper address and the result stuff const int initial_offset = 1; int offset = initial_offset; __ store (call_wrapper, Address(locals, offset++ * wordSize)); __ store (result, Address(locals, offset++ * wordSize)); __ store (result_type, Address(locals, offset++ * wordSize)); // Store the registers #ifdef PPC32 __ mfcr (r0); __ store (r0, Address(locals, offset++ * wordSize)); #endif // PPC32 for (int i = 14; i < 32; i++) { __ store (as_Register(i), Address(locals, offset++ * wordSize)); } const int final_offset = offset; // Store the location of call_wrapper frame::set_call_wrapper_offset((final_offset - initial_offset) * wordSize); #ifdef ASSERT // Check that we wrote all the way to the end of the frame. // The frame may have been resized when we return from the // interpreter, so the start of the frame may have moved // but the end will be where we left it and we rely on this // to find our stuff. 
{ StackFrame frame; Label label; __ load (r3, Address(r1, 0)); __ subi (r3, r3, final_offset * wordSize); __ compare (r3, locals); __ beq (label); __ prolog (frame); __ should_not_reach_here (__FILE__, __LINE__); __ epilog (frame); __ blr (); __ bind (label); } #endif // ASSERT // Pass parameters if any { Label loop, done; __ compare (parameter_bytes, 0); __ ble (done); const Register src = parameters; const Register dst = padding; __ mr (dst, locals); __ shift_right (r0, parameter_bytes, LogBytesPerWord); __ mtctr (r0); __ bind (loop); __ load (r0, Address(src, 0)); __ store (r0, Address(dst, 0)); __ addi (src, src, wordSize); __ subi (dst, dst, wordSize); __ bdnz (loop); __ bind (done); } // Make the call __ mr (Rmethod, method); __ mr (Rlocals, locals); __ mr (Rthread, thread); __ mtctr (entry_point); __ bctrl(); // This is used to identify call_stub stack frames return_address = __ pc(); // Figure out where our stuff is stored __ load (locals, Address(r1, 0)); __ subi (locals, locals, final_offset * wordSize); #ifdef ASSERT // Rlocals should contain the address we just calculated. { StackFrame frame; Label label; __ compare (Rlocals, locals); __ beq (label); __ prolog (frame); __ should_not_reach_here (__FILE__, __LINE__); __ epilog (frame); __ blr (); __ bind (label); } #endif // ASSERT // Is an exception being thrown? Label exit; __ load (r0, Address(Rthread, Thread::pending_exception_offset())); __ compare (r0, 0); __ bne (exit); // Store result depending on type const Register result_addr = r6; Label is_int, is_long, is_object; offset = initial_offset + 1; // skip call_wrapper __ load (result_addr, Address(locals, offset++ * wordSize)); __ load (result_type, Address(locals, offset++ * wordSize)); __ compare (result_type, T_INT); __ beq (is_int); __ compare (result_type, T_LONG); __ beq (is_long); __ compare (result_type, T_OBJECT); __ beq (is_object); __ should_not_reach_here (__FILE__, __LINE__); __ bind (is_int); __ stw (r3, Address(result_addr, 0)); __ b (exit); __ bind (is_long); #ifdef PPC32 __ store (r4, Address(result_addr, wordSize)); #endif __ store (r3, Address(result_addr, 0)); __ b (exit); __ bind (is_object); __ store (r3, Address(result_addr, 0)); //__ b (exit); // Restore the registers __ bind (exit); #ifdef PPC32 __ load (r0, Address(locals, offset++ * wordSize)); __ mtcr (r0); #endif // PPC32 for (int i = 14; i < 32; i++) { __ load (as_Register(i), Address(locals, offset++ * wordSize)); } #ifdef PPC64 __ load (r0, Address(r1, StackFrame::cr_save_offset * wordSize)); __ mtcr (r0); #endif // PPC64 assert (offset == final_offset, "save and restore must match"); // Unwind and return __ load (r1, Address(r1, StackFrame::back_chain_offset * wordSize)); __ load (r0, Address(r1, StackFrame::lr_save_offset * wordSize)); __ mtlr (r0); __ blr (); return start; }
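// The frame-size arithmetic above (shift_left / addi / calc_padding_for_alignment / add)
// just rounds "base size + parameter bytes" up to the stack alignment. A minimal
// standalone sketch of that computation, assuming 64-bit words, a 16-byte
// StackAlignmentInBytes and a placeholder base size (not the real StackFrame layout):

#include <cstdint>
#include <cstdio>

static const int kWordSize         = 8;              // assumption: PPC64
static const int kStackAlignment   = 16;             // assumption: StackAlignmentInBytes
static const int kCallStubBaseSize = 14 * kWordSize; // placeholder for call_stub_base_size()

// Mirror of: frame_size = parameter_bytes + base; compute padding; frame_size += padding.
static intptr_t call_stub_frame_size(int parameter_words) {
  intptr_t parameter_bytes = (intptr_t)parameter_words << 3;      // LogBytesPerWord == 3
  intptr_t raw             = parameter_bytes + kCallStubBaseSize;
  intptr_t padding         = (-raw) & (kStackAlignment - 1);      // calc_padding_for_alignment
  return raw + padding;
}

int main() {
  for (int words = 0; words <= 4; words++)
    printf("%d parameter words -> %ld byte frame\n", words, (long)call_stub_frame_size(words));
  return 0;
}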
address InterpreterGenerator::generate_normal_entry(bool synchronized) { assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor); Label re_dispatch; Label call_interpreter; Label call_method; Label call_non_interpreted_method; Label return_with_exception; Label return_from_method; Label resume_interpreter; Label return_to_initial_caller; Label more_monitors; Label throwing_exception; // We use the same code for synchronized and not if (normal_entry) return normal_entry; address start = __ pc(); // There are two ways in which we can arrive at this entry. // There is the special case where a normal interpreted method // calls another normal interpreted method, and there is the // general case of when we enter from somewhere else: from // call_stub, from C1 or C2, or from a fast accessor which // deferred. In the special case we're already in frame manager // code: we arrive at re_dispatch with Rstate containing the // previous interpreter state. In the general case we arrive // at start with no previous interpreter state so we set Rstate // to NULL to indicate this. __ bind (fast_accessor_slow_entry_path); __ load (Rstate, 0); __ bind (re_dispatch); // Adjust the caller's stack frame to accomodate any additional // local variables we have contiguously with our parameters. generate_adjust_callers_stack(); // Allocate and initialize our stack frame. generate_compute_interpreter_state(false); // Call the interpreter ============================================== __ bind (call_interpreter); // We can setup the frame anchor with everything we want at // this point as we are thread_in_Java and no safepoints can // occur until we go to vm mode. We do have to clear flags // on return from vm but that is it __ set_last_Java_frame (); // Call interpreter address interpreter = JvmtiExport::can_post_interpreter_events() ? 
CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks) : CAST_FROM_FN_PTR(address, BytecodeInterpreter::run); __ mr (r3, Rstate); __ call (interpreter); __ fixup_after_potential_safepoint (); // Clear the frame anchor __ reset_last_Java_frame (); // Examine the message from the interpreter to decide what to do __ lwz (r4, STATE(_msg)); __ compare (r4, BytecodeInterpreter::call_method); __ beq (call_method); __ compare (r4, BytecodeInterpreter::return_from_method); __ beq (return_from_method); __ compare (r4, BytecodeInterpreter::more_monitors); __ beq (more_monitors); __ compare (r4, BytecodeInterpreter::throwing_exception); __ beq (throwing_exception); __ load (r3, (intptr_t) "error: bad message from interpreter: %d\n"); __ call (CAST_FROM_FN_PTR(address, printf)); __ should_not_reach_here (__FILE__, __LINE__); // Handle a call_method message ====================================== __ bind (call_method); __ load (Rmethod, STATE(_result._to_call._callee)); __ verify_oop(Rmethod); __ load (Rlocals, STATE(_stack)); __ lhz (r0, Address(Rmethod, methodOopDesc::size_of_parameters_offset())); __ shift_left (r0, r0, LogBytesPerWord); __ add (Rlocals, Rlocals, r0); __ load (r0, STATE(_result._to_call._callee_entry_point)); __ load (r3, (intptr_t) start); __ compare (r0, r3); __ bne (call_non_interpreted_method); // Interpreted methods are intercepted and re-dispatched ----------- __ load (r0, CAST_FROM_FN_PTR(intptr_t, RecursiveInterpreterActivation)); __ mtlr (r0); __ b (re_dispatch); // Non-interpreted methods are dispatched normally ----------------- __ bind (call_non_interpreted_method); __ mtctr (r0); __ bctrl (); // Restore Rstate __ load (Rstate, Address(r1, StackFrame::back_chain_offset * wordSize)); __ subi (Rstate, Rstate, sizeof(BytecodeInterpreter)); // Check for pending exceptions __ load (r0, Address(Rthread, Thread::pending_exception_offset())); __ compare (r0, 0); __ bne (return_with_exception); // Convert the result and resume generate_convert_result(CppInterpreter::_tosca_to_stack); __ b (resume_interpreter); // Handle a return_from_method message =============================== __ bind (return_from_method); __ load (r0, STATE(_prev_link)); __ compare (r0, 0); __ beq (return_to_initial_caller); // "Return" from a re-dispatch ------------------------------------- generate_convert_result(CppInterpreter::_stack_to_stack); generate_unwind_interpreter_state(); // Resume the interpreter __ bind (resume_interpreter); __ store (Rlocals, STATE(_stack)); __ load (Rlocals, STATE(_locals)); __ load (Rmethod, STATE(_method)); __ verify_oop(Rmethod); __ load (r0, BytecodeInterpreter::method_resume); __ stw (r0, STATE(_msg)); __ b (call_interpreter); // Return to the initial caller (call_stub etc) -------------------- __ bind (return_to_initial_caller); generate_convert_result(CppInterpreter::_stack_to_native_abi); generate_unwind_interpreter_state(); __ blr (); // Handle a more_monitors message ==================================== __ bind (more_monitors); generate_more_monitors(); __ load (r0, BytecodeInterpreter::got_monitors); __ stw (r0, STATE(_msg)); __ b (call_interpreter); // Handle a throwing_exception message =============================== __ bind (throwing_exception); // Check we actually have an exception #ifdef ASSERT { Label ok; __ load (r0, Address(Rthread, Thread::pending_exception_offset())); __ compare (r0, 0); __ bne (ok); __ should_not_reach_here (__FILE__, __LINE__); __ bind (ok); } #endif // Return to wherever generate_unwind_interpreter_state(); __ bind 
(return_with_exception); __ compare (Rstate, 0); __ bne (resume_interpreter); __ blr (); normal_entry = start; return start; }
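// The normal entry above is driven by a message protocol: each call into
// BytecodeInterpreter::run() comes back with a message in the interpreter state,
// and the frame manager branches on it. A hedged, toy-sized model of that loop
// (Message, InterpreterState and run_interpreter are illustrative names, not the
// real BytecodeInterpreter API):

#include <cstdio>

enum Message { call_method, return_from_method, more_monitors, throwing_exception };

struct InterpreterState { Message msg; };

// Stand-in for BytecodeInterpreter::run(): leaves a message and returns.
static void run_interpreter(InterpreterState* state, int step) {
  state->msg = (step == 0) ? more_monitors : return_from_method;
}

int main() {
  InterpreterState state;
  for (int step = 0; ; step++) {
    run_interpreter(&state, step);
    switch (state.msg) {
      case call_method:        printf("dispatch the callee, then resume\n");     break;
      case more_monitors:      printf("grow monitor area, set got_monitors\n");  break;
      case throwing_exception: printf("unwind and propagate the exception\n");   return 1;
      case return_from_method: printf("convert the result, return to caller\n"); return 0;
    }
  }
}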
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, vmIntrinsics::ID iid, Register receiver_reg, Register member_reg, bool for_compiler_entry) { assert(is_signature_polymorphic(iid), "expected invoke iid"); Register temp1 = (for_compiler_entry ? R25_tmp5 : R7); Register temp2 = (for_compiler_entry ? R22_tmp2 : R8); Register temp3 = (for_compiler_entry ? R23_tmp3 : R9); Register temp4 = (for_compiler_entry ? R24_tmp4 : R10); if (receiver_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg); if (member_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, member_reg); if (iid == vmIntrinsics::_invokeBasic) { // indirect through MH.form.vmentry.vmtarget jump_to_lambda_form(_masm, receiver_reg, R19_method, temp1, temp2, for_compiler_entry); } else { // The method is a member invoker used by direct method handles. if (VerifyMethodHandles) { // make sure the trailing argument really is a MemberName (caller responsibility) verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(MemberName_klass), temp1, temp2, "MemberName required for invokeVirtual etc."); } Register temp1_recv_klass = temp1; if (iid != vmIntrinsics::_linkToStatic) { __ verify_oop(receiver_reg); if (iid == vmIntrinsics::_linkToSpecial) { // Don't actually load the klass; just null-check the receiver. __ null_check_throw(receiver_reg, -1, temp1, EXCEPTION_ENTRY); } else { // load receiver klass itself __ null_check_throw(receiver_reg, oopDesc::klass_offset_in_bytes(), temp1, EXCEPTION_ENTRY); __ load_klass(temp1_recv_klass, receiver_reg); __ verify_klass_ptr(temp1_recv_klass); } BLOCK_COMMENT("check_receiver {"); // The receiver for the MemberName must be in receiver_reg. // Check the receiver against the MemberName.clazz if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) { // Did not load it above... __ load_klass(temp1_recv_klass, receiver_reg); __ verify_klass_ptr(temp1_recv_klass); } if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { Label L_ok; Register temp2_defc = temp2; __ load_heap_oop_not_null(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg, temp3); load_klass_from_Class(_masm, temp2_defc, temp3, temp4); __ verify_klass_ptr(temp2_defc); __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok); // If we get here, the type check failed! 
__ stop("receiver class disagrees with MemberName.clazz"); __ BIND(L_ok); } BLOCK_COMMENT("} check_receiver"); } if (iid == vmIntrinsics::_linkToSpecial || iid == vmIntrinsics::_linkToStatic) { DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass } // Live registers at this point: // member_reg - MemberName that was the trailing argument // temp1_recv_klass - klass of stacked receiver, if needed // O5_savedSP - interpreter linkage (if interpreted) // O0..O5 - compiler arguments (if compiled) Label L_incompatible_class_change_error; switch (iid) { case vmIntrinsics::_linkToSpecial: if (VerifyMethodHandles) { verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2); } __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg); break; case vmIntrinsics::_linkToStatic: if (VerifyMethodHandles) { verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2); } __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg); break; case vmIntrinsics::_linkToVirtual: { // same as TemplateTable::invokevirtual, // minus the CP setup and profiling: if (VerifyMethodHandles) { verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp2); } // pick out the vtable index from the MemberName, and then we can discard it: Register temp2_index = temp2; __ ld(temp2_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg); if (VerifyMethodHandles) { Label L_index_ok; __ cmpdi(CCR1, temp2_index, 0); __ bge(CCR1, L_index_ok); __ stop("no virtual index"); __ BIND(L_index_ok); } // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget // at this point. And VerifyMethodHandles has already checked clazz, if needed. // get target Method* & entry point __ lookup_virtual_method(temp1_recv_klass, temp2_index, R19_method); break; } case vmIntrinsics::_linkToInterface: { // same as TemplateTable::invokeinterface // (minus the CP setup and profiling, with different argument motion) if (VerifyMethodHandles) { verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp2); } Register temp2_intf = temp2; __ load_heap_oop_not_null(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg, temp3); load_klass_from_Class(_masm, temp2_intf, temp3, temp4); __ verify_klass_ptr(temp2_intf); Register vtable_index = R19_method; __ ld(vtable_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg); if (VerifyMethodHandles) { Label L_index_ok; __ cmpdi(CCR1, vtable_index, 0); __ bge(CCR1, L_index_ok); __ stop("invalid vtable index for MH.invokeInterface"); __ BIND(L_index_ok); } // given intf, index, and recv klass, dispatch to the implementation method __ lookup_interface_method(temp1_recv_klass, temp2_intf, // note: next two args must be the same: vtable_index, R19_method, temp3, temp4, L_incompatible_class_change_error); break; } default: fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); break; } // Live at this point: // R19_method // O5_savedSP (if interpreted) // After figuring out which concrete method to call, jump into it. // Note that this works in the interpreter with no data motion. // But the compiled version will require that rcx_recv be shifted out. 
__ verify_method_ptr(R19_method); jump_from_method_handle(_masm, R19_method, temp1, temp2, for_compiler_entry); if (iid == vmIntrinsics::_linkToInterface) { __ BIND(L_incompatible_class_change_error); __ load_const_optimized(temp1, StubRoutines::throw_IncompatibleClassChangeError_entry()); __ mtctr(temp1); __ bctr(); } } }
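// The four linkTo* intrinsics above differ mainly in which MemberName field feeds
// the dispatch: vmtarget for linkToSpecial/linkToStatic, vmindex as a vtable index
// for linkToVirtual, and clazz plus vmindex for the itable walk of linkToInterface.
// A toy, illustrative-only model of that choice (these structs are stand-ins, not
// HotSpot types):

#include <cstdio>

struct Method     { const char* name; };
struct Klass      { Method* vtable[8]; };
struct MemberName { Method* vmtarget; long vmindex; Klass* clazz; };

enum Iid { linkToSpecial, linkToStatic, linkToVirtual, linkToInterface };

static Method* select_target(Iid iid, const MemberName& mn, Klass* recv_klass) {
  switch (iid) {
    case linkToSpecial:
    case linkToStatic:
      return mn.vmtarget;                    // direct target, receiver klass not needed
    case linkToVirtual:
      return recv_klass->vtable[mn.vmindex]; // vmindex is a vtable index
    case linkToInterface:
      // The real code resolves mn.clazz in the receiver's itable first; this
      // placeholder reuses the vtable only to keep the sketch short.
      return recv_klass->vtable[mn.vmindex];
  }
  return nullptr;
}

int main() {
  Method m = { "callee" };
  Klass k = { { &m, &m, &m, &m, &m, &m, &m, &m } };
  MemberName mn = { &m, 2, &k };
  printf("linkToVirtual -> %s\n", select_target(linkToVirtual, mn, &k)->name);
  return 0;
}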
inline void MacroAssembler::call_stub_and_return_to(Register function_entry, Register return_pc) {
  assert_different_registers(function_entry, return_pc);
  mtlr(return_pc);
  mtctr(function_entry);
  bctr();
}
address InterpreterGenerator::generate_native_entry(bool synchronized) { const Register handler = r14; const Register function = r15; assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor, handler, function); // We use the same code for synchronized and not if (native_entry) return native_entry; address start = __ pc(); // Allocate and initialize our stack frame. __ load (Rstate, 0); generate_compute_interpreter_state(true); // Make sure method is native and not abstract #ifdef ASSERT { Label ok; __ lwz (r0, Address(Rmethod, methodOopDesc::access_flags_offset())); __ andi_ (r0, r0, JVM_ACC_NATIVE | JVM_ACC_ABSTRACT); __ compare (r0, JVM_ACC_NATIVE); __ beq (ok); __ should_not_reach_here (__FILE__, __LINE__); __ bind (ok); } #endif // Lock if necessary Label not_synchronized_1; __ bne (CRsync, not_synchronized_1); __ lock_object (Rmonitor); __ bind (not_synchronized_1); // Get signature handler const Address signature_handler_addr( Rmethod, methodOopDesc::signature_handler_offset()); Label return_to_caller, got_signature_handler; __ load (handler, signature_handler_addr); __ compare (handler, 0); __ bne (got_signature_handler); __ call_VM (noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Rmethod, CALL_VM_NO_EXCEPTION_CHECKS); __ load (r0, Address(Rthread, Thread::pending_exception_offset())); __ compare (r0, 0); __ bne (return_to_caller); __ load (handler, signature_handler_addr); __ bind (got_signature_handler); // Get the native function entry point const Address native_function_addr( Rmethod, methodOopDesc::native_function_offset()); Label got_function; __ load (function, native_function_addr); #ifdef ASSERT { // InterpreterRuntime::prepare_native_call() sets the mirror // handle and native function address first and the signature // handler last, so function should always be set here. Label ok; __ compare (function, 0); __ bne (ok); __ should_not_reach_here (__FILE__, __LINE__); __ bind (ok); } #endif // Call signature handler __ mtctr (handler); __ bctrl (); __ mr (handler, r0); // Pass JNIEnv __ la (r3, Address(Rthread, JavaThread::jni_environment_offset())); // Pass mirror handle if static const Address oop_temp_addr = STATE(_oop_temp); Label not_static; __ bne (CRstatic, not_static); __ get_mirror_handle (r4); __ store (r4, oop_temp_addr); __ la (r4, oop_temp_addr); __ bind (not_static); // Set up the Java frame anchor __ set_last_Java_frame (); // Change the thread state to native const Address thread_state_addr(Rthread, JavaThread::thread_state_offset()); #ifdef ASSERT { Label ok; __ lwz (r0, thread_state_addr); __ compare (r0, _thread_in_Java); __ beq (ok); __ should_not_reach_here (__FILE__, __LINE__); __ bind (ok); } #endif __ load (r0, _thread_in_native); __ stw (r0, thread_state_addr); // Make the call __ call (function); __ fixup_after_potential_safepoint (); // The result will be in r3 (and maybe r4 on 32-bit) or f1. // Wherever it is, we need to store it before calling anything const Register r3_save = r16; #ifdef PPC32 const Register r4_save = r17; #endif const FloatRegister f1_save = f14; __ mr (r3_save, r3); #ifdef PPC32 __ mr (r4_save, r4); #endif __ fmr (f1_save, f1); // Switch thread to "native transition" state before reading the // synchronization state. This additional state is necessary // because reading and testing the synchronization state is not // atomic with respect to garbage collection. __ load (r0, _thread_in_native_trans); __ stw (r0, thread_state_addr); // Ensure the new state is visible to the VM thread. 
if(os::is_MP()) { if (UseMembar) __ sync (); else __ serialize_memory (r3, r4); } // Check for safepoint operation in progress and/or pending // suspend requests. We use a leaf call in order to leave // the last_Java_frame setup undisturbed. Label block, no_block; __ load (r3, (intptr_t) SafepointSynchronize::address_of_state()); __ lwz (r0, Address(r3, 0)); __ compare (r0, SafepointSynchronize::_not_synchronized); __ bne (block); __ lwz (r0, Address(Rthread, JavaThread::suspend_flags_offset())); __ compare (r0, 0); __ beq (no_block); __ bind (block); __ call_VM_leaf ( CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)); __ fixup_after_potential_safepoint (); __ bind (no_block); // Change the thread state __ load (r0, _thread_in_Java); __ stw (r0, thread_state_addr); // Reset the frame anchor __ reset_last_Java_frame (); // If the result was an OOP then unbox it and store it in the frame // (where it will be safe from garbage collection) before we release // the handle it might be protected by Label non_oop, store_oop; __ load (r0, (intptr_t) AbstractInterpreter::result_handler(T_OBJECT)); __ compare (r0, handler); __ bne (non_oop); __ compare (r3_save, 0); __ beq (store_oop); __ load (r3_save, Address(r3_save, 0)); __ bind (store_oop); __ store (r3_save, STATE(_oop_temp)); __ bind (non_oop); // Reset handle block __ load (r3, Address(Rthread, JavaThread::active_handles_offset())); __ load (r0, 0); __ stw (r0, Address(r3, JNIHandleBlock::top_offset_in_bytes())); // If there is an exception we skip the result handler and return. // Note that this also skips unlocking which seems totally wrong, // but apparently this is what the asm interpreter does so we do // too. __ load (r0, Address(Rthread, Thread::pending_exception_offset())); __ compare (r0, 0); __ bne (return_to_caller); // Unlock if necessary Label not_synchronized_2; __ bne (CRsync, not_synchronized_2); __ unlock_object (Rmonitor); __ bind (not_synchronized_2); // Restore saved result and call the result handler __ mr (r3, r3_save); #ifdef PPC32 __ mr (r4, r4_save); #endif __ fmr (f1, f1_save); __ mtctr (handler); __ bctrl (); // Unwind the current activation and return __ bind (return_to_caller); generate_unwind_interpreter_state(); __ blr (); native_entry = start; return start; }
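// The native entry above follows the standard thread-state protocol: run the JNI
// call in _thread_in_native, then pass through _thread_in_native_trans and poll
// the safepoint/suspend state before going back to _thread_in_Java. A minimal
// sketch of that ordering, with placeholder names for the states and the
// safepoint poll (not HotSpot's real types):

#include <atomic>
#include <cstdio>

enum ThreadState { thread_in_Java, thread_in_native, thread_in_native_trans };

static std::atomic<int>         safepoint_in_progress(0);
static std::atomic<ThreadState> thread_state(thread_in_Java);

static void block_for_safepoint() { printf("blocked at safepoint\n"); }

static long call_native(long (*fn)()) {
  thread_state.store(thread_in_native, std::memory_order_release);
  long result = fn();                                                    // the JNI call
  thread_state.store(thread_in_native_trans, std::memory_order_seq_cst); // like stw + sync
  if (safepoint_in_progress.load() != 0) {
    block_for_safepoint();  // check_special_condition_for_native_trans()
  }
  thread_state.store(thread_in_Java, std::memory_order_release);
  return result;
}

int main() {
  printf("native returned %ld\n", call_native([]() -> long { return 42; }));
  return 0;
}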
// Used by compiler only; may use only caller saved, non-argument // registers. VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { // PPC port: use fixed size. const int code_length = VtableStub::pd_code_size_limit(true); VtableStub* s = new (code_length) VtableStub(true, vtable_index); // Can be NULL if there is no free space in the code cache. if (s == NULL) { return NULL; } ResourceMark rm; CodeBuffer cb(s->entry_point(), code_length); MacroAssembler* masm = new MacroAssembler(&cb); #ifndef PRODUCT if (CountCompiledCalls) { int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true); __ lwz(R12_scratch2, offs, R11_scratch1); __ addi(R12_scratch2, R12_scratch2, 1); __ stw(R12_scratch2, offs, R11_scratch1); } #endif assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1"); // Get receiver klass. const Register rcvr_klass = R11_scratch1; // We might implicit NULL fault here. address npe_addr = __ pc(); // npe = null pointer exception __ load_klass_with_trap_null_check(rcvr_klass, R3); // Set method (in case of interpreted method), and destination address. int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); #ifndef PRODUCT if (DebugVtables) { Label L; // Check offset vs vtable length. const Register vtable_len = R12_scratch2; __ lwz(vtable_len, InstanceKlass::vtable_length_offset()*wordSize, rcvr_klass); __ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size()); __ bge(CCR0, L); __ li(R12_scratch2, vtable_index); __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false); __ bind(L); } #endif int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); __ ld(R19_method, v_off, rcvr_klass); #ifndef PRODUCT if (DebugVtables) { Label L; __ cmpdi(CCR0, R19_method, 0); __ bne(CCR0, L); __ stop("Vtable entry is ZERO", 102); __ bind(L); } #endif // If the vtable entry is null, the method is abstract. address ame_addr = __ pc(); // ame = abstract method error __ load_with_trap_null_check(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method); __ mtctr(R12_scratch2); __ bctr(); masm->flush(); guarantee(__ pc() <= s->code_end(), "overflowed buffer"); s->set_exception_points(npe_addr, ame_addr); return s; }
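// The vtable stub above boils down to one offset computation: find the Method*
// slot for vtable_index relative to the receiver's klass, load it, and jump
// through Method::from_compiled_offset(). A sketch of just the offset arithmetic,
// with placeholder layout constants (the real values come from
// InstanceKlass::vtable_start_offset() and vtableEntry):

#include <cstdio>

static const int kWordSize            = 8;  // assumption: 64-bit words
static const int kVtableStartWords    = 22; // placeholder for InstanceKlass::vtable_start_offset()
static const int kVtableEntryWords    = 1;  // placeholder for vtableEntry::size()
static const int kMethodOffsetInEntry = 0;  // placeholder for vtableEntry::method_offset_in_bytes()

// Byte offset of the Method* for a given vtable index, relative to the klass,
// i.e. the v_off the stub adds before the ld of R19_method.
static long vtable_method_offset(int vtable_index) {
  long entry_offset = kVtableStartWords + (long)vtable_index * kVtableEntryWords;
  return entry_offset * kWordSize + kMethodOffsetInEntry;
}

int main() {
  for (int i = 0; i < 3; i++)
    printf("vtable index %d -> byte offset %ld\n", i, vtable_method_offset(i));
  return 0;
}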
inline address MacroAssembler::call_stub(Register function_entry) {
  mtctr(function_entry);
  bctrl();
  return pc();
}
VtableStub* VtableStubs::create_itable_stub(int itable_index) { // PPC port: use fixed size. const int code_length = VtableStub::pd_code_size_limit(false); VtableStub* s = new (code_length) VtableStub(false, itable_index); // Can be NULL if there is no free space in the code cache. if (s == NULL) { return NULL; } ResourceMark rm; CodeBuffer cb(s->entry_point(), code_length); MacroAssembler* masm = new MacroAssembler(&cb); address start_pc; #ifndef PRODUCT if (CountCompiledCalls) { int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true); __ lwz(R12_scratch2, offs, R11_scratch1); __ addi(R12_scratch2, R12_scratch2, 1); __ stw(R12_scratch2, offs, R11_scratch1); } #endif assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1"); // Entry arguments: // R19_method: Interface // R3_ARG1: Receiver Label L_no_such_interface; const Register rcvr_klass = R11_scratch1, interface = R12_scratch2, tmp1 = R21_tmp1, tmp2 = R22_tmp2; address npe_addr = __ pc(); // npe = null pointer exception __ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1); // Receiver subtype check against REFC. __ ld(interface, CompiledICHolder::holder_klass_offset(), R19_method); __ lookup_interface_method(rcvr_klass, interface, noreg, R0, tmp1, tmp2, L_no_such_interface, /*return_method=*/ false); // Get Method* and entrypoint for compiler __ ld(interface, CompiledICHolder::holder_metadata_offset(), R19_method); __ lookup_interface_method(rcvr_klass, interface, itable_index, R19_method, tmp1, tmp2, L_no_such_interface, /*return_method=*/ true); #ifndef PRODUCT if (DebugVtables) { Label ok; __ cmpd(CCR0, R19_method, 0); __ bne(CCR0, ok); __ stop("method is null", 103); __ bind(ok); } #endif // If the vtable entry is null, the method is abstract. address ame_addr = __ pc(); // ame = abstract method error // Must do an explicit check if implicit checks are disabled. assert(!MacroAssembler::needs_explicit_null_check(in_bytes(Method::from_compiled_offset())), "sanity"); if (!ImplicitNullChecks || !os::zero_page_read_protected()) { if (TrapBasedNullChecks) { __ trap_null_check(R19_method); } else { __ cmpdi(CCR0, R19_method, 0); __ beq(CCR0, L_no_such_interface); } } __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method); __ mtctr(R12_scratch2); __ bctr(); // Handle IncompatibleClassChangeError in itable stubs. // More detailed error message. // We force resolving of the call site by jumping to the "handle // wrong method" stub, and so let the interpreter runtime do all the // dirty work. __ bind(L_no_such_interface); __ load_const_optimized(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub(), R12_scratch2); __ mtctr(R11_scratch1); __ bctr(); masm->flush(); guarantee(__ pc() <= s->code_end(), "overflowed buffer"); s->set_exception_points(npe_addr, ame_addr); return s; }
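// Conceptually, lookup_interface_method is used twice above: once as a pure
// subtype check against the REFC (return_method == false) and once to fetch the
// Method* by itable_index. A simplified stand-in for that scan (the data
// structures below are toy types, not the real InstanceKlass itable layout):

#include <cstdio>

struct Method      { const char* name; };
struct ItableEntry { const void* interface_id; Method** methods; };
struct Klass       { ItableEntry* itable; int itable_len; };

// Returns the method block for `interface_id`, or nullptr, which corresponds to
// branching to L_no_such_interface (IncompatibleClassChangeError).
static Method** find_itable_block(const Klass* k, const void* interface_id) {
  for (int i = 0; i < k->itable_len; i++)
    if (k->itable[i].interface_id == interface_id)
      return k->itable[i].methods;
  return nullptr;
}

int main() {
  Method m0 = { "foo" }, m1 = { "bar" };
  Method* block[] = { &m0, &m1 };
  int iface_tag = 0;                                 // any unique address serves as the id
  ItableEntry entries[] = { { &iface_tag, block } };
  Klass k = { entries, 1 };

  Method** methods = find_itable_block(&k, &iface_tag);
  if (methods != nullptr)
    printf("itable index 1 -> %s\n", methods[1]->name);
  return 0;
}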
address AbstractInterpreterGenerator::generate_slow_signature_handler() { // Slow_signature handler that respects the PPC C calling conventions. // // We get called by the native entry code with our output register // area == 8. First we call InterpreterRuntime::get_result_handler // to copy the pointer to the signature string temporarily to the // first C-argument and to return the result_handler in // R3_RET. Since native_entry will copy the jni-pointer to the // first C-argument slot later on, it is OK to occupy this slot // temporarilly. Then we copy the argument list on the java // expression stack into native varargs format on the native stack // and load arguments into argument registers. Integer arguments in // the varargs vector will be sign-extended to 8 bytes. // // On entry: // R3_ARG1 - intptr_t* Address of java argument list in memory. // R15_prev_state - BytecodeInterpreter* Address of interpreter state for // this method // R19_method // // On exit (just before return instruction): // R3_RET - contains the address of the result_handler. // R4_ARG2 - is not updated for static methods and contains "this" otherwise. // R5_ARG3-R10_ARG8: - When the (i-2)th Java argument is not of type float or double, // ARGi contains this argument. Otherwise, ARGi is not updated. // F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double. const int LogSizeOfTwoInstructions = 3; // FIXME: use Argument:: GL: Argument names different numbers! const int max_fp_register_arguments = 13; const int max_int_register_arguments = 6; // first 2 are reserved const Register arg_java = R21_tmp1; const Register arg_c = R22_tmp2; const Register signature = R23_tmp3; // is string const Register sig_byte = R24_tmp4; const Register fpcnt = R25_tmp5; const Register argcnt = R26_tmp6; const Register intSlot = R27_tmp7; const Register target_sp = R28_tmp8; const FloatRegister floatSlot = F0; address entry = __ function_entry(); __ save_LR_CR(R0); __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14)); // We use target_sp for storing arguments in the C frame. __ mr(target_sp, R1_SP); __ push_frame_reg_args_nonvolatiles(0, R11_scratch1); __ mr(arg_java, R3_ARG1); __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method); // Signature is in R3_RET. Signature is callee saved. __ mr(signature, R3_RET); // Get the result handler. __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method); { Label L; // test if static // _access_flags._flags must be at offset 0. // TODO PPC port: requires change in shared code. //assert(in_bytes(AccessFlags::flags_offset()) == 0, // "MethodDesc._access_flags == MethodDesc._access_flags._flags"); // _access_flags must be a 32 bit value. assert(sizeof(AccessFlags) == 4, "wrong size"); __ lwa(R11_scratch1/*access_flags*/, method_(access_flags)); // testbit with condition register. __ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT); __ btrue(CCR0, L); // For non-static functions, pass "this" in R4_ARG2 and copy it // to 2nd C-arg slot. // We need to box the Java object here, so we use arg_java // (address of current Java stack slot) as argument and don't // dereference it as in case of ints, floats, etc. __ mr(R4_ARG2, arg_java); __ addi(arg_java, arg_java, -BytesPerWord); __ std(R4_ARG2, _abi(carg_2), target_sp); __ bind(L); } // Will be incremented directly after loop_start. argcnt=0 // corresponds to 3rd C argument. 
__ li(argcnt, -1); // arg_c points to 3rd C argument __ addi(arg_c, target_sp, _abi(carg_3)); // no floating-point args parsed so far __ li(fpcnt, 0); Label move_intSlot_to_ARG, move_floatSlot_to_FARG; Label loop_start, loop_end; Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed; // signature points to '(' at entry #ifdef ASSERT __ lbz(sig_byte, 0, signature); __ cmplwi(CCR0, sig_byte, '('); __ bne(CCR0, do_dontreachhere); #endif __ bind(loop_start); __ addi(argcnt, argcnt, 1); __ lbzu(sig_byte, 1, signature); __ cmplwi(CCR0, sig_byte, ')'); // end of signature __ beq(CCR0, loop_end); __ cmplwi(CCR0, sig_byte, 'B'); // byte __ beq(CCR0, do_int); __ cmplwi(CCR0, sig_byte, 'C'); // char __ beq(CCR0, do_int); __ cmplwi(CCR0, sig_byte, 'D'); // double __ beq(CCR0, do_double); __ cmplwi(CCR0, sig_byte, 'F'); // float __ beq(CCR0, do_float); __ cmplwi(CCR0, sig_byte, 'I'); // int __ beq(CCR0, do_int); __ cmplwi(CCR0, sig_byte, 'J'); // long __ beq(CCR0, do_long); __ cmplwi(CCR0, sig_byte, 'S'); // short __ beq(CCR0, do_int); __ cmplwi(CCR0, sig_byte, 'Z'); // boolean __ beq(CCR0, do_int); __ cmplwi(CCR0, sig_byte, 'L'); // object __ beq(CCR0, do_object); __ cmplwi(CCR0, sig_byte, '['); // array __ beq(CCR0, do_array); // __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type // __ beq(CCR0, do_void); __ bind(do_dontreachhere); __ unimplemented("ShouldNotReachHere in slow_signature_handler", 120); __ bind(do_array); { Label start_skip, end_skip; __ bind(start_skip); __ lbzu(sig_byte, 1, signature); __ cmplwi(CCR0, sig_byte, '['); __ beq(CCR0, start_skip); // skip further brackets __ cmplwi(CCR0, sig_byte, '9'); __ bgt(CCR0, end_skip); // no optional size __ cmplwi(CCR0, sig_byte, '0'); __ bge(CCR0, start_skip); // skip optional size __ bind(end_skip); __ cmplwi(CCR0, sig_byte, 'L'); __ beq(CCR0, do_object); // for arrays of objects, the name of the object must be skipped __ b(do_boxed); // otherwise, go directly to do_boxed } __ bind(do_object); { Label L; __ bind(L); __ lbzu(sig_byte, 1, signature); __ cmplwi(CCR0, sig_byte, ';'); __ bne(CCR0, L); } // Need to box the Java object here, so we use arg_java (address of // current Java stack slot) as argument and don't dereference it as // in case of ints, floats, etc. Label do_null; __ bind(do_boxed); __ ld(R0,0, arg_java); __ cmpdi(CCR0, R0, 0); __ li(intSlot,0); __ beq(CCR0, do_null); __ mr(intSlot, arg_java); __ bind(do_null); __ std(intSlot, 0, arg_c); __ addi(arg_java, arg_java, -BytesPerWord); __ addi(arg_c, arg_c, BytesPerWord); __ cmplwi(CCR0, argcnt, max_int_register_arguments); __ blt(CCR0, move_intSlot_to_ARG); __ b(loop_start); __ bind(do_int); __ lwa(intSlot, 0, arg_java); __ std(intSlot, 0, arg_c); __ addi(arg_java, arg_java, -BytesPerWord); __ addi(arg_c, arg_c, BytesPerWord); __ cmplwi(CCR0, argcnt, max_int_register_arguments); __ blt(CCR0, move_intSlot_to_ARG); __ b(loop_start); __ bind(do_long); __ ld(intSlot, -BytesPerWord, arg_java); __ std(intSlot, 0, arg_c); __ addi(arg_java, arg_java, - 2 * BytesPerWord); __ addi(arg_c, arg_c, BytesPerWord); __ cmplwi(CCR0, argcnt, max_int_register_arguments); __ blt(CCR0, move_intSlot_to_ARG); __ b(loop_start); __ bind(do_float); __ lfs(floatSlot, 0, arg_java); #if defined(LINUX) // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float // in the least significant word of an argument slot. 
#if defined(VM_LITTLE_ENDIAN) __ stfs(floatSlot, 0, arg_c); #else __ stfs(floatSlot, 4, arg_c); #endif #elif defined(AIX) // Although AIX runs on big endian CPU, float is in most significant // word of an argument slot. __ stfs(floatSlot, 0, arg_c); #else #error "unknown OS" #endif __ addi(arg_java, arg_java, -BytesPerWord); __ addi(arg_c, arg_c, BytesPerWord); __ cmplwi(CCR0, fpcnt, max_fp_register_arguments); __ blt(CCR0, move_floatSlot_to_FARG); __ b(loop_start); __ bind(do_double); __ lfd(floatSlot, - BytesPerWord, arg_java); __ stfd(floatSlot, 0, arg_c); __ addi(arg_java, arg_java, - 2 * BytesPerWord); __ addi(arg_c, arg_c, BytesPerWord); __ cmplwi(CCR0, fpcnt, max_fp_register_arguments); __ blt(CCR0, move_floatSlot_to_FARG); __ b(loop_start); __ bind(loop_end); __ pop_frame(); __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14)); __ restore_LR_CR(R0); __ blr(); Label move_int_arg, move_float_arg; __ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions) __ mr(R5_ARG3, intSlot); __ b(loop_start); __ mr(R6_ARG4, intSlot); __ b(loop_start); __ mr(R7_ARG5, intSlot); __ b(loop_start); __ mr(R8_ARG6, intSlot); __ b(loop_start); __ mr(R9_ARG7, intSlot); __ b(loop_start); __ mr(R10_ARG8, intSlot); __ b(loop_start); __ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions) __ fmr(F1_ARG1, floatSlot); __ b(loop_start); __ fmr(F2_ARG2, floatSlot); __ b(loop_start); __ fmr(F3_ARG3, floatSlot); __ b(loop_start); __ fmr(F4_ARG4, floatSlot); __ b(loop_start); __ fmr(F5_ARG5, floatSlot); __ b(loop_start); __ fmr(F6_ARG6, floatSlot); __ b(loop_start); __ fmr(F7_ARG7, floatSlot); __ b(loop_start); __ fmr(F8_ARG8, floatSlot); __ b(loop_start); __ fmr(F9_ARG9, floatSlot); __ b(loop_start); __ fmr(F10_ARG10, floatSlot); __ b(loop_start); __ fmr(F11_ARG11, floatSlot); __ b(loop_start); __ fmr(F12_ARG12, floatSlot); __ b(loop_start); __ fmr(F13_ARG13, floatSlot); __ b(loop_start); __ bind(move_intSlot_to_ARG); __ sldi(R0, argcnt, LogSizeOfTwoInstructions); __ load_const(R11_scratch1, move_int_arg); // Label must be bound here. __ add(R11_scratch1, R0, R11_scratch1); __ mtctr(R11_scratch1/*branch_target*/); __ bctr(); __ bind(move_floatSlot_to_FARG); __ sldi(R0, fpcnt, LogSizeOfTwoInstructions); __ addi(fpcnt, fpcnt, 1); __ load_const(R11_scratch1, move_float_arg); // Label must be bound here. __ add(R11_scratch1, R0, R11_scratch1); __ mtctr(R11_scratch1/*branch_target*/); __ bctr(); return entry; }
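// The signature handler above is, at heart, one pass over the characters between
// '(' and ')': classify each parameter, skip '[' prefixes and 'L...;' class names,
// and route the value to an integer or FP register or to the varargs area. A
// plain-C++ sketch of just the classification loop (the real handler also copies
// the values into the C frame):

#include <cstdio>
#include <cstring>

static void classify_parameters(const char* sig) {
  const char* p = strchr(sig, '(') + 1;
  while (*p != ')') {
    if (*p == '[') {                                // do_array: the array itself is one reference
      while (*p == '[') p++;                        // skip nested brackets
      if (*p == 'L') { while (*p != ';') p++; }     // skip the element class name
      p++;
      printf("reference slot (array)\n");
      continue;
    }
    switch (*p++) {
      case 'B': case 'C': case 'I': case 'S': case 'Z':
        printf("int-like slot\n");    break;        // do_int
      case 'J': printf("long slot\n");   break;     // do_long
      case 'F': printf("float slot\n");  break;     // do_float
      case 'D': printf("double slot\n"); break;     // do_double
      case 'L':                                     // do_object: skip to ';'
        while (*p != ';') p++;
        p++;
        printf("reference slot\n");   break;
      default:
        printf("unexpected signature character\n"); return;
    }
  }
}

int main() {
  classify_parameters("(I[JLjava/lang/String;D)V");
  return 0;
}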
// Call an accessor method (assuming it is resolved, otherwise drop into // vanilla (slow path) entry. address InterpreterGenerator::generate_accessor_entry(void) { if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) { return NULL; } Label Lslow_path, Lacquire; const Register Rclass_or_obj = R3_ARG1, Rconst_method = R4_ARG2, Rcodes = Rconst_method, Rcpool_cache = R5_ARG3, Rscratch = R11_scratch1, Rjvmti_mode = Rscratch, Roffset = R12_scratch2, Rflags = R6_ARG4, Rbtable = R7_ARG5; static address branch_table[number_of_states]; address entry = __ pc(); // Check for safepoint: // Ditch this, real man don't need safepoint checks. // Also check for JVMTI mode // Check for null obj, take slow path if so. __ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp)); __ lwz(Rjvmti_mode, thread_(interp_only_mode)); __ cmpdi(CCR1, Rclass_or_obj, 0); __ cmpwi(CCR0, Rjvmti_mode, 0); __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2); __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0 // Do 2 things in parallel: // 1. Load the index out of the first instruction word, which looks like this: // <0x2a><0xb4><index (2 byte, native endianess)>. // 2. Load constant pool cache base. __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method); __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method); __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field. __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache); // Get the const pool entry by means of <index>. const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord); __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift __ add(Rcpool_cache, Rscratch, Rcpool_cache); // Check if cpool cache entry is resolved. // We are resolved if the indices offset contains the current bytecode. ByteSize cp_base_offset = ConstantPoolCache::base_offset(); // Big Endian: __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache); __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield); __ bne(CCR0, Lslow_path); __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache. // Finally, start loading the value: Get cp cache entry into regs. __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache); __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache); // Following code is from templateTable::getfield_or_static // Load pointer to branch table __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); // Get volatile flag __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit // note: sync is needed before volatile load on PPC64 // Check field type __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); #ifdef ASSERT Label LFlagInvalid; __ cmpldi(CCR0, Rflags, number_of_states); __ bge(CCR0, LFlagInvalid); __ ld(R9_ARG7, 0, R1_SP); __ ld(R10_ARG8, 0, R21_sender_SP); __ cmpd(CCR0, R9_ARG7, R10_ARG8); __ asm_assert_eq("backlink", 0x543); #endif // ASSERT __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started. // Load from branch table and dispatch (volatile case: one instruction ahead) __ sldi(Rflags, Rflags, LogBytesPerWord); __ cmpwi(CCR6, Rscratch, 1); // volatile? 
if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0 } __ ldx(Rbtable, Rbtable, Rflags); if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point } __ mtctr(Rbtable); __ bctr(); #ifdef ASSERT __ bind(LFlagInvalid); __ stop("got invalid flag", 0x6541); bool all_uninitialized = true, all_initialized = true; for (int i = 0; i<number_of_states; ++i) { all_uninitialized = all_uninitialized && (branch_table[i] == NULL); all_initialized = all_initialized && (branch_table[i] != NULL); } assert(all_uninitialized != all_initialized, "consistency"); // either or __ fence(); // volatile entry point (one instruction before non-volatile_entry point) if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point __ stop("unexpected type", 0x6551); #endif if (branch_table[itos] == 0) { // generate only once __ align(32, 28, 28); // align load __ fence(); // volatile entry point (one instruction before non-volatile_entry point) branch_table[itos] = __ pc(); // non-volatile_entry point __ lwax(R3_RET, Rclass_or_obj, Roffset); __ beq(CCR6, Lacquire); __ blr(); } if (branch_table[ltos] == 0) { // generate only once __ align(32, 28, 28); // align load __ fence(); // volatile entry point (one instruction before non-volatile_entry point) branch_table[ltos] = __ pc(); // non-volatile_entry point __ ldx(R3_RET, Rclass_or_obj, Roffset); __ beq(CCR6, Lacquire); __ blr(); } if (branch_table[btos] == 0) { // generate only once __ align(32, 28, 28); // align load __ fence(); // volatile entry point (one instruction before non-volatile_entry point) branch_table[btos] = __ pc(); // non-volatile_entry point __ lbzx(R3_RET, Rclass_or_obj, Roffset); __ extsb(R3_RET, R3_RET); __ beq(CCR6, Lacquire); __ blr(); } if (branch_table[ctos] == 0) { // generate only once __ align(32, 28, 28); // align load __ fence(); // volatile entry point (one instruction before non-volatile_entry point) branch_table[ctos] = __ pc(); // non-volatile_entry point __ lhzx(R3_RET, Rclass_or_obj, Roffset); __ beq(CCR6, Lacquire); __ blr(); } if (branch_table[stos] == 0) { // generate only once __ align(32, 28, 28); // align load __ fence(); // volatile entry point (one instruction before non-volatile_entry point) branch_table[stos] = __ pc(); // non-volatile_entry point __ lhax(R3_RET, Rclass_or_obj, Roffset); __ beq(CCR6, Lacquire); __ blr(); } if (branch_table[atos] == 0) { // generate only once __ align(32, 28, 28); // align load __ fence(); // volatile entry point (one instruction before non-volatile_entry point) branch_table[atos] = __ pc(); // non-volatile_entry point __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj); __ verify_oop(R3_RET); //__ dcbt(R3_RET); // prefetch __ beq(CCR6, Lacquire); __ blr(); } __ align(32, 12); __ bind(Lacquire); __ twi_0(R3_RET); __ isync(); // acquire __ blr(); #ifdef ASSERT for (int i = 0; i<number_of_states; ++i) { assert(branch_table[i], "accessor_entry initialization"); //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i])); } #endif __ bind(Lslow_path); __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch); __ 
flush(); return entry; }
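// The accessor entry dispatches through a branch table indexed by the tos state
// taken from the cp-cache flags; the volatile bit selects an entry point that adds
// an acquire barrier after the load. A simplified model using a table of function
// pointers (a 2-D table here, where the real code places the volatile entry one
// instruction before the non-volatile one):

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <cstring>

enum TosState { btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos, number_of_states };

typedef long (*Loader)(const void* obj, long offset);

struct Obj { long header; int field; };

static long load_int(const void* obj, long offset) {
  int v;
  memcpy(&v, (const char*)obj + offset, sizeof(v));
  return v;
}

static long load_int_acquire(const void* obj, long offset) {
  long v = load_int(obj, offset);
  std::atomic_thread_fence(std::memory_order_acquire);  // like twi_0 + isync at Lacquire
  return v;
}

static Loader branch_table[number_of_states][2];         // [tos_state][is_volatile]

int main() {
  branch_table[itos][0] = load_int;
  branch_table[itos][1] = load_int_acquire;

  Obj obj = { 0, 42 };
  long offset = (long)offsetof(Obj, field);              // stand-in for the cp-cache f2 offset
  printf("field = %ld\n", branch_table[itos][1](&obj, offset));
  return 0;
}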
void C1_MacroAssembler::initialize_body(Register obj, Register tmp1, Register tmp2,
                                        int obj_size_in_bytes, int hdr_size_in_bytes) {
  const int index = (obj_size_in_bytes - hdr_size_in_bytes) / HeapWordSize;

  const int cl_size         = VM_Version::L1_data_cache_line_size(),
            cl_dwords       = cl_size >> 3,
            cl_dw_addr_bits = exact_log2(cl_dwords);

  const Register tmp        = R0,
                 base_ptr   = tmp1,
                 cnt_dwords = tmp2;

  if (index <= 6) {
    // Use explicit NULL stores.
    if (index > 0) { li(tmp, 0); }
    for (int i = 0; i < index; ++i) { std(tmp, hdr_size_in_bytes + i * HeapWordSize, obj); }

  } else if (index < (2 << cl_dw_addr_bits) - 1) {
    // simple loop
    Label loop;

    li(cnt_dwords, index);
    addi(base_ptr, obj, hdr_size_in_bytes); // Compute address of first element.
    li(tmp, 0);
    mtctr(cnt_dwords);                      // Load counter.
    bind(loop);
    std(tmp, 0, base_ptr);                  // Clear 8byte aligned block.
    addi(base_ptr, base_ptr, 8);
    bdnz(loop);

  } else {
    // like clear_memory_doubleword
    Label startloop, fast, fastloop, restloop, done;

    addi(base_ptr, obj, hdr_size_in_bytes);           // Compute address of first element.
    load_const_optimized(cnt_dwords, index);
    rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits); // Extract dword offset within first cache line.
    beq(CCR0, fast);                                  // Already 128byte aligned.

    subfic(tmp, tmp, cl_dwords);
    mtctr(tmp);                        // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
    subf(cnt_dwords, tmp, cnt_dwords); // rest.
    li(tmp, 0);

    bind(startloop);                   // Clear at the beginning to reach 128byte boundary.
    std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
    addi(base_ptr, base_ptr, 8);
    bdnz(startloop);

    bind(fast);                                  // Clear 128byte blocks.
    srdi(tmp, cnt_dwords, cl_dw_addr_bits);      // Loop count for 128byte loop (>0).
    andi(cnt_dwords, cnt_dwords, cl_dwords - 1); // Rest in dwords.
    mtctr(tmp);                                  // Load counter.

    bind(fastloop);
    dcbz(base_ptr);                    // Clear 128byte aligned block.
    addi(base_ptr, base_ptr, cl_size);
    bdnz(fastloop);

    cmpdi(CCR0, cnt_dwords, 0);        // size 0?
    beq(CCR0, done);                   // rest == 0
    li(tmp, 0);
    mtctr(cnt_dwords);                 // Load counter.

    bind(restloop);                    // Clear rest.
    std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
    addi(base_ptr, base_ptr, 8);
    bdnz(restloop);

    bind(done);
  }
}
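// A portable sketch of the three clearing strategies selected above by body size:
// a few explicit stores, a simple doubleword loop, or an aligned cache-line loop
// (dcbz in the real code, plain stores here). The 128-byte line size and the
// thresholds are assumptions mirroring the constants above:

#include <cstdint>
#include <cstdio>

static const int kLineBytes  = 128;            // assumption: L1 data cache line size
static const int kLineDwords = kLineBytes / 8;

static void clear_body(uint64_t* body, int dwords) {
  if (dwords <= 6) {
    // Small: explicit NULL stores (unrolled in the generated code).
    for (int i = 0; i < dwords; i++) body[i] = 0;
  } else if (dwords < 2 * kLineDwords - 1) {
    // Medium: one simple counted loop, no alignment work.
    for (int i = 0; i < dwords; i++) body[i] = 0;
  } else {
    // Large: store up to a cache-line boundary, clear whole lines, then the rest.
    uint64_t* p = body;
    while (((uintptr_t)p & (kLineBytes - 1)) != 0 && dwords > 0) { *p++ = 0; dwords--; }
    while (dwords >= kLineDwords) {            // dcbz would clear the whole line at once
      for (int i = 0; i < kLineDwords; i++) p[i] = 0;
      p += kLineDwords;
      dwords -= kLineDwords;
    }
    while (dwords-- > 0) *p++ = 0;             // remaining doublewords
  }
}

int main() {
  uint64_t buf[64];
  for (int i = 0; i < 64; i++) buf[i] = 0xdeadbeefULL;
  clear_body(buf + 1, 40);                     // header at buf[0] stays untouched
  printf("buf[0]=%llx buf[1]=%llx buf[40]=%llx buf[41]=%llx\n",
         (unsigned long long)buf[0], (unsigned long long)buf[1],
         (unsigned long long)buf[40], (unsigned long long)buf[41]);
  return 0;
}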
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) { #ifdef COMPILER2 if (mark == NULL) { // Get the mark within main instrs section which is set to the address of the call. mark = cbuf.insts_mark(); } // Note that the code buffer's insts_mark is always relative to insts. // That's why we must use the macroassembler to generate a stub. MacroAssembler _masm(&cbuf); // Start the stub. address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size()); if (stub == NULL) { return NULL; // CodeCache is full } // For java_to_interp stubs we use R11_scratch1 as scratch register // and in call trampoline stubs we use R12_scratch2. This way we // can distinguish them (see is_NativeCallTrampolineStub_at()). Register reg_scratch = R11_scratch1; // Create a static stub relocation which relates this stub // with the call instruction at insts_call_instruction_offset in the // instructions code-section. __ relocate(static_stub_Relocation::spec(mark)); const int stub_start_offset = __ offset(); // Now, create the stub's code: // - load the TOC // - load the inline cache oop from the constant pool // - load the call target from the constant pool // - call __ calculate_address_from_global_toc(reg_scratch, __ method_toc()); AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL); bool success = __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()), ic, reg_scratch, /*fixed_size*/ true); if (!success) { return NULL; // CodeCache is full } if (ReoptimizeCallSequences) { __ b64_patchable((address)-1, relocInfo::none); } else { AddressLiteral a((address)-1); success = __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true); if (!success) { return NULL; // CodeCache is full } __ mtctr(reg_scratch); __ bctr(); } // FIXME: Assert that the stub can be identified and patched. // Java_to_interp_stub_size should be good. assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(), "should be good size"); assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)), "must not confuse java_to_interp with trampoline stubs"); // End the stub. __ end_a_stub(); return stub; #else ShouldNotReachHere(); return NULL; #endif }