void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles) return;

  BLOCK_COMMENT("trace_method_handle {");

  int nbytes_save = 10 * 8;             // 10 volatile gprs
  __ save_LR_CR(R0);
  __ mr(R0, R1_SP);                     // saved_sp
  assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
  // push_frame_reg_args only uses R0 if nbytes_save is wider than 16 bits.
  __ push_frame_reg_args(nbytes_save, R0);
  __ save_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0.

  __ load_const(R3_ARG1, (address)adaptername);
  __ mr(R4_ARG2, R23_method_handle);
  __ mr(R5_ARG3, R0);                   // saved_sp
  __ mr(R6_ARG4, R1_SP);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ restore_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0; must match the save above.
  __ pop_frame();
  __ restore_LR_CR(R0);

  BLOCK_COMMENT("} trace_method_handle");
}
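// For context: a hedged sketch of the C callee the stub above branches to.
// The real trace_method_handle_stub is defined elsewhere in the method handle
// support code; this shape is only inferred from the four argument registers
// loaded above (R3_ARG1..R6_ARG4), so the name and parameter types here are
// illustrative, not the actual declaration.
#include <cstdio>
#include <cstdint>

static void trace_method_handle_stub_sketch(const char* adaptername,
                                            void*     mh,         // R4_ARG2: the MethodHandle oop
                                            intptr_t* saved_sp,   // R5_ARG3: SP at stub entry
                                            intptr_t* entry_sp) { // R6_ARG4: SP after the frame push
  std::printf("MH %s mh=%p sp=%p esp=%p\n",
              adaptername, mh, (void*)saved_sp, (void*)entry_sp);
}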
address generate_d2i_wrapper(address fcn) {
  StubCodeMark mark(this, "StubRoutines", "d2i_wrapper");
  address start = __ pc();

  // Capture info about frame layout
  enum layout { FPUState_off         = 0,
                ebp_off              = FPUStateSizeInWords,
                edi_off,
                esi_off,
                ecx_off,
                ebx_off,
                saved_argument_off,
                saved_argument_off2, // 2nd half of double
                framesize
  };

  assert(FPUStateSizeInWords == 27, "update stack layout");

  // Save outgoing argument to stack across push_FPU_state()
  __ subl(esp, wordSize * 2);
  __ fstp_d(Address(esp));

  // Save CPU & FPU state
  __ pushl(ebx);
  __ pushl(ecx);
  __ pushl(esi);
  __ pushl(edi);
  __ pushl(ebp);
  __ push_FPU_state();

  // push_FPU_state() resets the FP top of stack
  // Load original double into FP top of stack
  __ fld_d(Address(esp, saved_argument_off * wordSize));
  // Store double into stack as outgoing argument
  __ subl(esp, wordSize * 2);
  __ fst_d(Address(esp));

  // Prepare FPU for doing math in C-land
  __ empty_FPU_stack();
  // Call the C code to massage the double. Result in EAX
  __ call_VM_leaf(fcn, 2);

  // Restore CPU & FPU state
  __ pop_FPU_state();
  __ popl(ebp);
  __ popl(edi);
  __ popl(esi);
  __ popl(ecx);
  __ popl(ebx);
  __ addl(esp, wordSize * 2);

  __ ret(0);

  return start;
}
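// The wrapper above only marshals the double and saves state; the actual
// conversion is done by the C function 'fcn' (SharedRuntime::d2i in HotSpot).
// A minimal stand-alone sketch of Java's d2i semantics, which that routine
// must implement: NaN converts to 0 and out-of-range values saturate.
#include <climits>
#include <cmath>

static int d2i_sketch(double x) {
  if (std::isnan(x))        return 0;        // NaN -> 0
  if (x >= (double)INT_MAX) return INT_MAX;  // saturate high
  if (x <= (double)INT_MIN) return INT_MIN;  // saturate low
  return (int)x;                             // in range: truncate toward zero
}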
// Abstract method entry.
//
address InterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();

  //
  // Registers alive
  //   R16_thread - JavaThread*
  //   R19_method - callee's method (method to be invoked)
  //   R1_SP      - SP prepared such that caller's outgoing args are near top
  //   LR         - return address to caller
  //
  // Stack layout at this point:
  //
  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
  //           alignment (optional)
  //           [outgoing Java arguments]
  //           ...
  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
  //           ...
  //

  // Can't use call_VM here because we have not set up a new
  // interpreter state. Make the call to the vm and make it look like
  // our caller set up the JavaFrameAnchor.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Push a new C frame and save LR.
  __ save_LR_CR(R0);
  __ push_frame_reg_args(0, R11_scratch1);

  // This is not a leaf but we have a JavaFrameAnchor now and we will
  // check (create) exceptions afterward so this is ok.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError), R16_thread);

  // Pop the C frame and restore LR.
  __ pop_frame();
  __ restore_LR_CR(R0);

  // Reset JavaFrameAnchor from call_VM_leaf above.
  __ reset_last_Java_frame();

#ifdef CC_INTERP
  // Return to frame manager, it will handle the pending exception.
  __ blr();
#else
  // We don't know our caller, so jump to the general forward exception stub,
  // which will also pop our full frame off. Satisfy the interface of
  // SharedRuntime::generate_forward_exception().
  __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
  __ mtctr(R11_scratch1);
  __ bctr();
#endif

  return entry;
}
address generate_forward_exception() {
  StubCodeMark mark(this, "StubRoutines", "forward exception");
  address start = __ pc();

  // Upon entry, the sp points to the return address returning into Java
  // (interpreted or compiled) code; i.e., the return address becomes the
  // throwing pc.
  //
  // Arguments pushed before the runtime call are still on the stack but
  // the exception handler will reset the stack pointer -> ignore them.
  // A potential result in registers can be ignored as well.

#ifdef ASSERT
  // make sure this code is only executed if there is a pending exception
  { Label L;
    __ get_thread(ecx);
    __ cmpl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);
    __ jcc(Assembler::notEqual, L);
    __ stop("StubRoutines::forward exception: no pending exception (1)");
    __ bind(L);
  }
#endif

  // compute exception handler into ebx
  __ movl(eax, Address(esp));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), eax);
  __ movl(ebx, eax);

  // setup eax & edx, remove return address & clear pending exception
  __ get_thread(ecx);
  __ popl(edx);
  __ movl(eax, Address(ecx, Thread::pending_exception_offset()));
  __ movl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);

#ifdef ASSERT
  // make sure exception is set
  { Label L;
    __ testl(eax, eax);
    __ jcc(Assembler::notEqual, L);
    __ stop("StubRoutines::forward exception: no pending exception (2)");
    __ bind(L);
  }
#endif

  // continue at exception handler (return address removed)
  // eax: exception
  // ebx: exception handler
  // edx: throwing pc
  __ verify_oop(eax);
  __ jmp(ebx);

  return start;
}
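// A hedged, stand-alone sketch of the contract the stub above implements.
// All names here are illustrative; only the flow (the popped return address
// becomes the throwing pc, the pending exception is fetched and cleared, and
// control transfers to the computed handler) mirrors the generated code.
struct ForwardState {
  void* exception;    // eax on exit
  void* throwing_pc;  // edx on exit: the popped return address
  void* handler;      // ebx on exit: where execution continues
};

// 'lookup_handler' stands in for SharedRuntime::exception_handler_for_return_address.
static ForwardState forward_exception_sketch(void* return_address,
                                             void** pending_exception_slot,
                                             void* (*lookup_handler)(void*)) {
  ForwardState s;
  s.handler     = lookup_handler(return_address); // compute the handler first
  s.throwing_pc = return_address;                 // return address becomes throwing pc
  s.exception   = *pending_exception_slot;        // fetch...
  *pending_exception_slot = nullptr;              // ...and clear the pending exception
  return s;
}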
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles) return;
  BLOCK_COMMENT("trace_method_handle {");
  __ push(rax);
  __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
  __ pusha();
  // arguments:
  __ push(rbp);               // interpreter frame pointer
  __ push(rsi);               // saved_sp
  __ push(rax);               // entry_sp
  __ push(rcx);               // mh
  __ push(rcx);               // reserve a slot, overwritten below with adaptername
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
  __ popa();
  __ pop(rax);
  BLOCK_COMMENT("} trace_method_handle");
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles) return;

  BLOCK_COMMENT("trace_method_handle {");

  const Register tmp = R11; // Will be preserved.
  const int nbytes_save = 11*8; // volatile gprs except R0
  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ save_LR_CR(tmp); // save in old frame

  __ mr(R5_ARG3, R1_SP);     // saved_sp
  __ push_frame_reg_args(nbytes_save, tmp);

  __ load_const_optimized(R3_ARG1, (address)adaptername, tmp);
  __ mr(R4_ARG2, R23_method_handle);
  __ mr(R6_ARG4, R1_SP);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ pop_frame();
  __ restore_LR_CR(tmp);
  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0

  BLOCK_COMMENT("} trace_method_handle");
}
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  OopMapSet* oop_maps = NULL;
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // stub code & info for the different stubs
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register G5_klass = G5; // Incoming
        Register O0_obj   = O0; // Outgoing

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_obj_size = G1;
          Register G3_t1 = G3;
          Register G4_t2 = G4;
          assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);

          // Push a frame since we may do dtrace notification for the
          // allocation which requires calling out and we don't want
          // to stomp the real return address.
          __ save_frame(0);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);
            __ cmp_and_br_short(G3_t1, InstanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
            // make sure it's an instance (LH > 0)
            __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
            __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
            __ br(Assembler::zero, false, Assembler::pn, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT
          // If we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass

          __ bind(retry_tlab);

          // get the instance size
          __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);

          __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(try_eden);
          // get the instance size
          __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
          __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
          __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(slow_path);

          // pop this frame so generate_stub_call can push its own
          __ restore();
        }

        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
        // I0->O0: new instance
      }
      break;

    case counter_overflow_id:
      // G4 contains bci, G5 contains method
      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register G5_klass  = G5; // Incoming
        Register G4_length = G4; // Incoming
        Register O0_obj    = O0; // Outgoing

        Address klass_lh(G5_klass, Klass::layout_helper_offset());
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
        // Use this offset to pick out an individual byte of the layout_helper:
        const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
                                                 - Klass::_lh_header_size_shift / BitsPerByte);

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register G3_t1 = G3;
          __ ld(klass_lh, G3_t1);
          __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_arr_size = G1;
          Register G3_t1 = G3;
          Register O1_t2 = O1;
          assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);

          // check that array length is small enough for fast path
          __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
          __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path);

          // If we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass

          __ bind(retry_tlab);

          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);  // align up
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path);  // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(try_eden);
          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size
          __ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(slow_path);
        }

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
        } else {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
        }
        // I0 -> O0: new array
      }
      break;

    case new_multi_array_id:
      { // O0: klass
        // O1: rank
        // O2: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
        // I0 -> O0: new multi array
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = O1;
        __ load_klass(O0, t);
        __ ld(t, in_bytes(Klass::access_flags_offset()), t);
        __ set(JVM_ACC_HAS_FINALIZER, G3);
        __ andcc(G3, t, G0);
        __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
        __ delayed()->nop();

        // do a leaf return
        __ retl();
        __ delayed()->nop();

        __ bind(register_finalizer);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        // O0: exception
        // I7: address of call to this method

        __ set_info("unwind_exception", dont_gc_arguments);
        __ mov(Oexception, Oexception->after_save());
        __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());

        __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());
        __ verify_not_null_oop(Oexception->after_save());

        // Restore SP from L7 if the exception PC is a method handle call site.
        __ mov(O0, G5);  // Save the target address.
        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
        __ tst(L0);  // Condition codes are preserved over the restore.
        __ restore();

        __ jmp(G5, 0);
        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        // G4: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        // Arguments :
        //
        //      ret  : G3
        //      sub  : G3, argument, destroyed
        //      super: G1, argument, not changed
        //      raddr: O7, blown by call
        Label miss;

        __ save_frame(0);               // Blow no registers!

        __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);

        __ mov(1, G3);
        __ ret();                       // Result in G3 is 'true'
        __ delayed()->restore();        // free copy or add can go here

        __ bind(miss);
        __ mov(0, G3);
        __ ret();                       // Result in G3 is 'false'
        __ delayed()->restore();        // free copy or add can go here
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // G4: object
        // G5: lock address
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // G4: lock address
        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ jump_to(dest, O0);
        __ delayed()->restore();
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm);

        __ save_thread(L7_thread_cache);
        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
                relocInfo::runtime_call_type);
        __ delayed()->mov(I0, O0);
        __ restore_thread(L7_thread_cache);

        restore_live_registers(sasm);
        __ ret();
        __ delayed()->restore();
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      { // G4: previous value of memory
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        Register pre_val = G4;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;

        Label refill, restart;
        bool with_frame = false; // I don't know if we can do with-frame.
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_buf());

        __ bind(restart);
        // Load the index into the SATB buffer. PtrQueue::_index is a
        // size_t so ld_ptr is appropriate.
        __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);

        // index == 0?
        __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);

        __ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
        __ sub(tmp, oopSize, tmp);

        __ st_ptr(pre_val, tmp2, tmp);  // [_buf + index] := pre_val
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(pre_val, L0);
        __ mov(tmp,     L1);
        __ mov(tmp2,    L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         SATBMarkQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, pre_val);
        __ mov(L1, tmp);
        __ mov(L2, tmp2);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr = G4;
        Register cardtable = G5;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label not_already_dirty, restart, refill;

#ifdef _LP64
        __ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
        __ srl(addr, CardTableModRefBS::card_shift, addr);
#endif

        AddressLiteral rs(byte_map_base);
        __ set(rs, cardtable);         // cardtable := <card table base>
        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]

        assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
        __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

        // We didn't take the branch, so we're already dirty: return.
        // Use return-from-leaf
        __ retl();
        __ delayed()->nop();

        // Not dirty.
        __ bind(not_already_dirty);

        // Get cardtable + tmp into a reg by itself
        __ add(addr, cardtable, tmp2);

        // First, dirty it.
        __ stb(G0, tmp2, 0);  // [cardPtr] := 0 (i.e., dirty).

        Register tmp3 = cardtable;
        Register tmp4 = tmp;

        // these registers are now dead
        addr = cardtable = tmp = noreg;

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_buf());

        __ bind(restart);

        // Get the index into the update buffer. PtrQueue::_index is
        // a size_t so ld_ptr is appropriate here.
        __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);

        // index == 0?
        __ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);

        __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
        __ sub(tmp3, oopSize, tmp3);

        __ st_ptr(tmp2, tmp4, tmp3);  // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(tmp2, L0);
        __ mov(tmp3, L1);
        __ mov(tmp4, L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         DirtyCardQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, tmp2);
        __ mov(L1, tmp3);
        __ mov(L2, tmp4);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm);

        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ jump_to(dest, O0);
        __ delayed()->restore();
      }
      break;

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}
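// A hedged, stand-alone sketch of the allocation-size computation that the
// new_type_array/new_object_array fast paths above express with sll/srl/and3:
//   size = (length << (layout_helper & 0x1F)) + header_size, aligned up.
// The header-size shift and the 8-byte alignment below are assumptions
// standing in for Klass::_lh_header_size_shift and MinObjAlignmentInBytesMask.
#include <cstddef>

static size_t array_allocation_size_sketch(int layout_helper, int length) {
  const int    lh_header_size_shift = 8;     // assumed Klass::_lh_header_size_shift
  const int    lh_header_size_mask  = 0xFF;  // matches the assert in the stub
  const size_t align_mask           = 8 - 1; // assumed MinObjAlignmentInBytesMask

  size_t size = (size_t)length << (layout_helper & 0x1F);  // element bytes
  size += (size_t)((layout_helper >> lh_header_size_shift) & lh_header_size_mask);
  return (size + align_mask) & ~align_mask;                // align up
}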
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  const Register handler  = r14;
  const Register function = r15;

  assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor,
                             handler, function);

  // We use the same code for synchronized and not
  if (native_entry)
    return native_entry;

  address start = __ pc();

  // Allocate and initialize our stack frame.
  __ load (Rstate, 0);
  generate_compute_interpreter_state(true);

  // Make sure method is native and not abstract
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, Address(Rmethod, methodOopDesc::access_flags_offset()));
    __ andi_ (r0, r0, JVM_ACC_NATIVE | JVM_ACC_ABSTRACT);
    __ compare (r0, JVM_ACC_NATIVE);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Lock if necessary
  Label not_synchronized_1;

  __ bne (CRsync, not_synchronized_1);
  __ lock_object (Rmonitor);
  __ bind (not_synchronized_1);

  // Get signature handler
  const Address signature_handler_addr(
    Rmethod, methodOopDesc::signature_handler_offset());

  Label return_to_caller, got_signature_handler;

  __ load (handler, signature_handler_addr);
  __ compare (handler, 0);
  __ bne (got_signature_handler);
  __ call_VM (noreg,
              CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::prepare_native_call),
              Rmethod,
              CALL_VM_NO_EXCEPTION_CHECKS);
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);
  __ load (handler, signature_handler_addr);
  __ bind (got_signature_handler);

  // Get the native function entry point
  const Address native_function_addr(
    Rmethod, methodOopDesc::native_function_offset());

  Label got_function;

  __ load (function, native_function_addr);
#ifdef ASSERT
  {
    // InterpreterRuntime::prepare_native_call() sets the mirror
    // handle and native function address first and the signature
    // handler last, so function should always be set here.
    Label ok;
    __ compare (function, 0);
    __ bne (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Call signature handler
  __ mtctr (handler);
  __ bctrl ();
  __ mr (handler, r0);

  // Pass JNIEnv
  __ la (r3, Address(Rthread, JavaThread::jni_environment_offset()));

  // Pass mirror handle if static
  const Address oop_temp_addr = STATE(_oop_temp);

  Label not_static;

  __ bne (CRstatic, not_static);
  __ get_mirror_handle (r4);
  __ store (r4, oop_temp_addr);
  __ la (r4, oop_temp_addr);
  __ bind (not_static);

  // Set up the Java frame anchor
  __ set_last_Java_frame ();

  // Change the thread state to native
  const Address thread_state_addr(Rthread, JavaThread::thread_state_offset());
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, thread_state_addr);
    __ compare (r0, _thread_in_Java);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif
  __ load (r0, _thread_in_native);
  __ stw (r0, thread_state_addr);

  // Make the call
  __ call (function);
  __ fixup_after_potential_safepoint ();

  // The result will be in r3 (and maybe r4 on 32-bit) or f1.
  // Wherever it is, we need to store it before calling anything
  const Register r3_save = r16;
#ifdef PPC32
  const Register r4_save = r17;
#endif
  const FloatRegister f1_save = f14;

  __ mr (r3_save, r3);
#ifdef PPC32
  __ mr (r4_save, r4);
#endif
  __ fmr (f1_save, f1);

  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic with respect to garbage collection.
  __ load (r0, _thread_in_native_trans);
  __ stw (r0, thread_state_addr);

  // Ensure the new state is visible to the VM thread.
  if (os::is_MP()) {
    if (UseMembar)
      __ sync ();
    else
      __ serialize_memory (r3, r4);
  }

  // Check for safepoint operation in progress and/or pending
  // suspend requests. We use a leaf call in order to leave
  // the last_Java_frame setup undisturbed.
  Label block, no_block;

  __ load (r3, (intptr_t) SafepointSynchronize::address_of_state());
  __ lwz (r0, Address(r3, 0));
  __ compare (r0, SafepointSynchronize::_not_synchronized);
  __ bne (block);
  __ lwz (r0, Address(Rthread, JavaThread::suspend_flags_offset()));
  __ compare (r0, 0);
  __ beq (no_block);
  __ bind (block);
  __ call_VM_leaf (
       CAST_FROM_FN_PTR(address,
                        JavaThread::check_special_condition_for_native_trans));
  __ fixup_after_potential_safepoint ();
  __ bind (no_block);

  // Change the thread state
  __ load (r0, _thread_in_Java);
  __ stw (r0, thread_state_addr);

  // Reset the frame anchor
  __ reset_last_Java_frame ();

  // If the result was an OOP then unbox it and store it in the frame
  // (where it will be safe from garbage collection) before we release
  // the handle it might be protected by
  Label non_oop, store_oop;

  __ load (r0, (intptr_t) AbstractInterpreter::result_handler(T_OBJECT));
  __ compare (r0, handler);
  __ bne (non_oop);
  __ compare (r3_save, 0);
  __ beq (store_oop);
  __ load (r3_save, Address(r3_save, 0));
  __ bind (store_oop);
  __ store (r3_save, STATE(_oop_temp));
  __ bind (non_oop);

  // Reset handle block
  __ load (r3, Address(Rthread, JavaThread::active_handles_offset()));
  __ load (r0, 0);
  __ stw (r0, Address(r3, JNIHandleBlock::top_offset_in_bytes()));

  // If there is an exception we skip the result handler and return.
  // Note that this also skips unlocking which seems totally wrong,
  // but apparently this is what the asm interpreter does so we do
  // too.
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);

  // Unlock if necessary
  Label not_synchronized_2;

  __ bne (CRsync, not_synchronized_2);
  __ unlock_object (Rmonitor);
  __ bind (not_synchronized_2);

  // Restore saved result and call the result handler
  __ mr (r3, r3_save);
#ifdef PPC32
  __ mr (r4, r4_save);
#endif
  __ fmr (f1, f1_save);
  __ mtctr (handler);
  __ bctrl ();

  // Unwind the current activation and return
  __ bind (return_to_caller);

  generate_unwind_interpreter_state();
  __ blr ();

  native_entry = start;
  return start;
}
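// A hedged sketch of the thread-state protocol the native entry above walks
// through. Every helper parameter here is hypothetical; only the ordering
// (store the transition state, fence, check for a safepoint or suspend
// request, then return to in_Java) mirrors the generated code.
enum ThreadStateSketch { in_Java, in_native, in_native_trans };

static void native_call_sketch(void (*native_function)(),
                               ThreadStateSketch* state,
                               bool (*safepoint_or_suspend_pending)(),
                               void (*block_for_vm)(),
                               void (*memory_barrier)()) {
  *state = in_native;        // release the thread to the VM during the call
  native_function();         // the actual JNI call
  *state = in_native_trans;  // transition: not yet safe to touch the heap
  memory_barrier();          // make the store visible (the sync/serialize above)
  if (safepoint_or_suspend_pending())
    block_for_vm();          // JavaThread::check_special_condition_for_native_trans
  *state = in_Java;          // back in Java; the frame anchor is reset after this
}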
//------------------------------------------------------------------------------------------------------------------------
// Continuation point for runtime calls returning with a pending exception.
// The pending exception check happened in the runtime or native call stub.
// The pending exception in Thread is converted into a Java-level exception.
//
// Contract with Java-level exception handlers:
//
address generate_forward_exception() {
  StubCodeMark mark(this, "StubRoutines", "forward exception");
  address start = __ pc();

  // Upon entry, GR_Lsave_RP has the return address returning into Java
  // compiled code; i.e. the return address becomes the throwing pc.

  const Register pending_exception_addr = GR31_SCRATCH;
  const Register handler                = GR30_SCRATCH;

  const PredicateRegister is_not_null   = PR15_SCRATCH;
  const BranchRegister    handler_br    = BR6_SCRATCH;

  // Allocate abi scratch, since the compiler didn't allocate a memory frame.
  // pop_dummy_thin_frame will restore the caller's SP.
  __ sub(SP, SP, 16);

#ifdef ASSERT
  // Get pending exception oop.
  __ add(pending_exception_addr, thread_(pending_exception));
  __ ld8(GR8_exception, pending_exception_addr);

  // Make sure that this code is only executed if there is a pending exception.
  {
    Label not_null;
    __ cmp(is_not_null, PR0, 0, GR8_exception, Assembler::notEqual);
    __ br(is_not_null, not_null);
    __ stop("StubRoutines::forward exception: no pending exception (1)");
    __ bind(not_null);
  }
//  __ verify_oop(GR8_exception, "generate_forward_exception");
#endif

  // Find exception handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                  GR_Lsave_RP);
  __ mov(handler, GR_RET);

  // Load pending exception oop.
  __ add(pending_exception_addr, thread_(pending_exception));
  __ ld8(GR8_exception, pending_exception_addr);

  // The exception pc is the return address in the caller.
  __ mov(GR9_issuing_pc, GR_Lsave_RP);

  // Uses GR2, BR6.
  __ pop_dummy_thin_frame();
  // Now in caller of native/stub register frame.

#ifdef ASSERT
  // Make sure exception is set.
  {
    Label not_null;
    __ cmp(is_not_null, PR0, 0, GR8_exception, Assembler::notEqual);
    __ br(is_not_null, not_null);
    __ stop("StubRoutines::forward exception: no pending exception (2)");
    __ bind(not_null);
  }
#endif

  // Clear pending exception.
  __ st8(pending_exception_addr, GR0);

  // Jump to exception handler.
  __ mov(handler_br, handler);
  __ br(handler_br);
  __ flush_bundle();

  return start;
}
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  // Slow signature handler that respects the PPC C calling conventions.
  //
  // We get called by the native entry code with our output register
  // area == 8. First we call InterpreterRuntime::get_result_handler
  // to copy the pointer to the signature string temporarily to the
  // first C-argument and to return the result_handler in
  // R3_RET. Since native_entry will copy the jni-pointer to the
  // first C-argument slot later on, it is OK to occupy this slot
  // temporarily. Then we copy the argument list on the java
  // expression stack into native varargs format on the native stack
  // and load arguments into argument registers. Integer arguments in
  // the varargs vector will be sign-extended to 8 bytes.
  //
  // On entry:
  //   R3_ARG1        - intptr_t*            Address of java argument list in memory.
  //   R15_prev_state - BytecodeInterpreter* Address of interpreter state for
  //                                         this method
  //   R19_method
  //
  // On exit (just before return instruction):
  //   R3_RET            - contains the address of the result_handler.
  //   R4_ARG2           - is not updated for static methods and contains "this" otherwise.
  //   R5_ARG3-R10_ARG8  - When the (i-2)th Java argument is not of type float or double,
  //                       ARGi contains this argument. Otherwise, ARGi is not updated.
  //   F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.

  const int LogSizeOfTwoInstructions = 3;

  // FIXME: use Argument:: GL: Argument names different numbers!
  const int max_fp_register_arguments  = 13;
  const int max_int_register_arguments = 6;  // first 2 are reserved

  const Register arg_java       = R21_tmp1;
  const Register arg_c          = R22_tmp2;
  const Register signature      = R23_tmp3;  // is string
  const Register sig_byte       = R24_tmp4;
  const Register fpcnt          = R25_tmp5;
  const Register argcnt         = R26_tmp6;
  const Register intSlot        = R27_tmp7;
  const Register target_sp      = R28_tmp8;
  const FloatRegister floatSlot = F0;

  address entry = __ function_entry();

  __ save_LR_CR(R0);
  __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
  // We use target_sp for storing arguments in the C frame.
  __ mr(target_sp, R1_SP);
  __ push_frame_reg_args_nonvolatiles(0, R11_scratch1);

  __ mr(arg_java, R3_ARG1);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);

  // Signature is in R3_RET. Signature is callee saved.
  __ mr(signature, R3_RET);

  // Get the result handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);

  {
    Label L;
    // test if static
    // _access_flags._flags must be at offset 0.
    // TODO PPC port: requires change in shared code.
    //assert(in_bytes(AccessFlags::flags_offset()) == 0,
    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
    // _access_flags must be a 32 bit value.
    assert(sizeof(AccessFlags) == 4, "wrong size");

    // testbit with condition register.
    __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
    __ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, L);
    // For non-static functions, pass "this" in R4_ARG2 and copy it
    // to 2nd C-arg slot.
    // We need to box the Java object here, so we use arg_java
    // (address of current Java stack slot) as argument and don't
    // dereference it as in case of ints, floats, etc.
    __ mr(R4_ARG2, arg_java);
    __ addi(arg_java, arg_java, -BytesPerWord);
    __ std(R4_ARG2, _abi(carg_2), target_sp);
    __ bind(L);
  }

  // Will be incremented directly after loop_start. argcnt=0
  // corresponds to 3rd C argument.
  __ li(argcnt, -1);
  // arg_c points to 3rd C argument
  __ addi(arg_c, target_sp, _abi(carg_3));
  // no floating-point args parsed so far
  __ li(fpcnt, 0);

  Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
  Label loop_start, loop_end;
  Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;

  // signature points to '(' at entry
#ifdef ASSERT
  __ lbz(sig_byte, 0, signature);
  __ cmplwi(CCR0, sig_byte, '(');
  __ bne(CCR0, do_dontreachhere);
#endif

  __ bind(loop_start);

  __ addi(argcnt, argcnt, 1);
  __ lbzu(sig_byte, 1, signature);

  __ cmplwi(CCR0, sig_byte, ')'); // end of signature
  __ beq(CCR0, loop_end);

  __ cmplwi(CCR0, sig_byte, 'B'); // byte
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'C'); // char
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'D'); // double
  __ beq(CCR0, do_double);

  __ cmplwi(CCR0, sig_byte, 'F'); // float
  __ beq(CCR0, do_float);

  __ cmplwi(CCR0, sig_byte, 'I'); // int
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'J'); // long
  __ beq(CCR0, do_long);

  __ cmplwi(CCR0, sig_byte, 'S'); // short
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'Z'); // boolean
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'L'); // object
  __ beq(CCR0, do_object);

  __ cmplwi(CCR0, sig_byte, '['); // array
  __ beq(CCR0, do_array);

  //  __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
  //  __ beq(CCR0, do_void);

  __ bind(do_dontreachhere);

  __ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);

  __ bind(do_array);

  {
    Label start_skip, end_skip;

    __ bind(start_skip);
    __ lbzu(sig_byte, 1, signature);
    __ cmplwi(CCR0, sig_byte, '[');
    __ beq(CCR0, start_skip); // skip further brackets
    __ cmplwi(CCR0, sig_byte, '9');
    __ bgt(CCR0, end_skip);   // no optional size
    __ cmplwi(CCR0, sig_byte, '0');
    __ bge(CCR0, start_skip); // skip optional size
    __ bind(end_skip);

    __ cmplwi(CCR0, sig_byte, 'L');
    __ beq(CCR0, do_object);  // for arrays of objects, the name of the object must be skipped
    __ b(do_boxed);           // otherwise, go directly to do_boxed
  }

  __ bind(do_object);
  {
    Label L;
    __ bind(L);
    __ lbzu(sig_byte, 1, signature);
    __ cmplwi(CCR0, sig_byte, ';');
    __ bne(CCR0, L);
  }
  // Need to box the Java object here, so we use arg_java (address of
  // current Java stack slot) as argument and don't dereference it as
  // in case of ints, floats, etc.
  Label do_null;
  __ bind(do_boxed);
  __ ld(R0, 0, arg_java);
  __ cmpdi(CCR0, R0, 0);
  __ li(intSlot, 0);
  __ beq(CCR0, do_null);
  __ mr(intSlot, arg_java);
  __ bind(do_null);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
  __ blt(CCR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_int);
  __ lwa(intSlot, 0, arg_java);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
  __ blt(CCR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_long);
  __ ld(intSlot, -BytesPerWord, arg_java);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -2 * BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
  __ blt(CCR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_float);
  __ lfs(floatSlot, 0, arg_java);
#if defined(LINUX)
  // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
  // in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
  __ stfs(floatSlot, 0, arg_c);
#else
  __ stfs(floatSlot, 4, arg_c);
#endif
#elif defined(AIX)
  // Although AIX runs on big endian CPU, float is in most significant
  // word of an argument slot.
  __ stfs(floatSlot, 0, arg_c);
#else
#error "unknown OS"
#endif
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
  __ blt(CCR0, move_floatSlot_to_FARG);
  __ b(loop_start);

  __ bind(do_double);
  __ lfd(floatSlot, -BytesPerWord, arg_java);
  __ stfd(floatSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -2 * BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
  __ blt(CCR0, move_floatSlot_to_FARG);
  __ b(loop_start);

  __ bind(loop_end);

  __ pop_frame();
  __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
  __ restore_LR_CR(R0);

  __ blr();

  Label move_int_arg, move_float_arg;
  __ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
  __ mr(R5_ARG3, intSlot);  __ b(loop_start);
  __ mr(R6_ARG4, intSlot);  __ b(loop_start);
  __ mr(R7_ARG5, intSlot);  __ b(loop_start);
  __ mr(R8_ARG6, intSlot);  __ b(loop_start);
  __ mr(R9_ARG7, intSlot);  __ b(loop_start);
  __ mr(R10_ARG8, intSlot); __ b(loop_start);

  __ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
  __ fmr(F1_ARG1, floatSlot);   __ b(loop_start);
  __ fmr(F2_ARG2, floatSlot);   __ b(loop_start);
  __ fmr(F3_ARG3, floatSlot);   __ b(loop_start);
  __ fmr(F4_ARG4, floatSlot);   __ b(loop_start);
  __ fmr(F5_ARG5, floatSlot);   __ b(loop_start);
  __ fmr(F6_ARG6, floatSlot);   __ b(loop_start);
  __ fmr(F7_ARG7, floatSlot);   __ b(loop_start);
  __ fmr(F8_ARG8, floatSlot);   __ b(loop_start);
  __ fmr(F9_ARG9, floatSlot);   __ b(loop_start);
  __ fmr(F10_ARG10, floatSlot); __ b(loop_start);
  __ fmr(F11_ARG11, floatSlot); __ b(loop_start);
  __ fmr(F12_ARG12, floatSlot); __ b(loop_start);
  __ fmr(F13_ARG13, floatSlot); __ b(loop_start);

  __ bind(move_intSlot_to_ARG);
  __ sldi(R0, argcnt, LogSizeOfTwoInstructions);
  __ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
  __ add(R11_scratch1, R0, R11_scratch1);
  __ mtctr(R11_scratch1/*branch_target*/);
  __ bctr();

  __ bind(move_floatSlot_to_FARG);
  __ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
  __ addi(fpcnt, fpcnt, 1);
  __ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
  __ add(R11_scratch1, R0, R11_scratch1);
  __ mtctr(R11_scratch1/*branch_target*/);
  __ bctr();

  return entry;
}
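// A hedged sketch of the computed-branch trick used at move_intSlot_to_ARG
// and move_floatSlot_to_FARG above: every dispatch case is exactly two 4-byte
// instructions, so the n-th case starts at table_base + (n << 3). In C this
// reduces to plain index arithmetic; the function name is illustrative.
#include <cstdint>

static uintptr_t dispatch_target_sketch(uintptr_t table_base, int n) {
  const int LogSizeOfTwoInstructions = 3; // log2(2 instructions * 4 bytes each)
  return table_base + ((uintptr_t)n << LogSizeOfTwoInstructions);
}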