void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                            bool for_compiler_entry) {
  assert(method == rmethod, "interpreter calling convention");

  Label L_no_such_method;
  __ cbz(rmethod, L_no_such_method);

  __ verify_method_ptr(method);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    __ ldrb(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    // Run compiled code only when interp_only_mode is NOT set; otherwise fall
    // through to the interpreter entry.
    __ cbz(rscratch1, run_compiled_code);
    __ ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
    __ br(rscratch1);
    __ BIND(run_compiled_code);
  }

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ ldr(rscratch1, Address(method, entry_offset));
  __ br(rscratch1);

  __ bind(L_no_such_method);
  __ far_jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
}
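
A minimal C++ sketch (not HotSpot code; the struct and field names below are hypothetical stand-ins for the Method entry points and the thread's interp_only_mode flag) of the dispatch decision the stub above encodes in assembly:

struct MethodEntries {
  void* from_compiled_entry;     // Method::from_compiled_offset()
  void* from_interpreted_entry;  // Method::from_interpreted_offset()
  void* interpreter_entry;       // Method::interpreter_entry_offset()
};

static void* select_entry(const MethodEntries* m, bool for_compiler_entry, int interp_only_mode) {
  if (m == nullptr) {
    return nullptr;  // the stub branches to throw_AbstractMethodError_entry() instead
  }
  // JVMTI (e.g. single-stepping) forces interpreted execution when interp_only_mode != 0.
  if (!for_compiler_entry && interp_only_mode != 0) {
    return m->interpreter_entry;
  }
  return for_compiler_entry ? m->from_compiled_entry : m->from_interpreted_entry;
}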
void Assembler::cbnz(const Register& rt, Label* label) {
  // Flush the instruction buffer if necessary before getting an offset.
  BufferOffset branch = b(0);
  Instruction* ins = getInstructionAt(branch);

  // Encode the relative offset.
  cbnz(ins, rt, LinkAndGetInstructionOffsetTo(branch, label));
}
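
A hedged usage sketch for the label-based cbnz above, assuming the surrounding assembler also exposes bind(Label*); the placeholder branch emitted first is resolved once the label is bound:

static void emit_nonzero_guard(Assembler& masm, const Register& reg) {
  Label not_zero;
  masm.cbnz(reg, &not_zero);   // links this branch site to the (still unbound) label
  // ... instructions executed only when reg is zero ...
  masm.bind(&not_zero);        // binding back-patches every branch linked to the label
}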
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int aarch64_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(aarch64_code_length) VtableStub(true, vtable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), aarch64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ lea(r19, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ incrementw(Address(r19));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(r19, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ ldrw(rscratch1, Address(r19, Klass::vtable_length_offset()));
    __ cmpw(rscratch1, vtable_index * vtableEntry::size());
    __ br(Assembler::GT, L);
    __ enter();
    __ mov(r2, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, r2);
    __ leave();
    __ bind(L);
  }
#endif // PRODUCT

  __ lookup_virtual_method(r19, vtable_index, rmethod);

  if (DebugVtables) {
    Label L;
    __ cbz(rmethod, L);
    __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
    __ cbnz(rscratch1, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }

  // r0: receiver klass
  // rmethod: Method*
  // r2: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
  __ br(rscratch1);

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
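
A minimal C++ sketch (not HotSpot code; struct names and the flat vtable layout are illustrative assumptions) of what this stub does at runtime: load the receiver's klass, index its embedded vtable, and dispatch through the Method's compiled entry:

#include <cstdint>

struct MethodSketch { void* from_compiled_entry; };
struct KlassSketch {
  int32_t       vtable_length;   // Klass::vtable_length_offset()
  MethodSketch* vtable[1];       // conceptually a variable-length table after the Klass header
};

static void* resolve_vtable_call(KlassSketch* k, int vtable_index) {
  // DebugVtables-style bounds check; the stub calls bad_compiled_vtable_index() on failure.
  if (vtable_index >= k->vtable_length) return nullptr;
  MethodSketch* m = k->vtable[vtable_index];                  // lookup_virtual_method
  return (m != nullptr) ? m->from_compiled_entry : nullptr;   // ame_addr: load entry and branch
}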
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Note well: pd_code_size_limit is the absolute minimum we can get
  // away with.  If you add code here, bump the code stub size
  // returned by pd_code_size_limit!
  const int code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(code_length) VtableStub(false, itable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ incrementw(Address(r10));
  }
#endif

  // Entry arguments:
  //  rscratch2: Interface
  //  j_rarg0: Receiver

  // Free registers (non-args) are r0 (interface), rmethod

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();

  // Most registers are in use; we'll use r0, rmethod, r10, r11
  __ load_klass(r10, j_rarg0);

  Label throw_icce;

  // Get Method* and entrypoint for compiler
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             r10, rscratch2, itable_index,
                             // outputs: method, scan temp. reg
                             rmethod, r11,
                             throw_icce);

  // method (rmethod): Method*
  // j_rarg0: receiver

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cbz(rmethod, L2);
    __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
    __ cbnz(rscratch1, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // rmethod: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
  __ br(rscratch1);

  __ bind(throw_icce);
  __ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
                  itable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
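
A hedged sketch (hypothetical types, not the HotSpot API) of what set_exception_points() conceptually enables: the stub records npe_addr (the implicit null check in load_klass) and ame_addr (the load of the compiled entry) so a fault at either PC can be mapped back to the right implicit exception:

#include <cstdint>

struct StubExceptionPoints {
  uintptr_t npe_addr;  // faulting here => NullPointerException (receiver was null)
  uintptr_t ame_addr;  // faulting here => AbstractMethodError (no usable Method* entry)
};

enum class ImplicitException { None, NullPointer, AbstractMethod };

static ImplicitException classify_fault(const StubExceptionPoints& s, uintptr_t fault_pc) {
  if (fault_pc == s.npe_addr) return ImplicitException::NullPointer;
  if (fault_pc == s.ame_addr) return ImplicitException::AbstractMethod;
  return ImplicitException::None;
}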
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  const int code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  // R0-R3 / R0-R7 registers hold the arguments and cannot be spoiled
  const Register Rclass  = AARCH64_ONLY(R9)  NOT_AARCH64(R4);
  const Register Rlength = AARCH64_ONLY(R10) NOT_AARCH64(R5);
  const Register Rscan   = AARCH64_ONLY(R11) NOT_AARCH64(R6);
  const Register tmp     = Rtemp;

  assert_different_registers(Ricklass, Rclass, Rlength, Rscan, tmp);

  // Calculate the start of itable (itable goes after vtable)
  const int scale = exact_log2(vtableEntry::size_in_bytes());
  address npe_addr = __ pc();
  __ load_klass(Rclass, R0);
  __ ldr_s32(Rlength, Address(Rclass, Klass::vtable_length_offset()));
  __ add(Rscan, Rclass, in_bytes(Klass::vtable_start_offset()));
  __ add(Rscan, Rscan, AsmOperand(Rlength, lsl, scale));

  // Search through the itable for an interface equal to incoming Ricklass
  // itable looks like [intface][offset][intface][offset][intface][offset]
  const int entry_size = itableOffsetEntry::size() * HeapWordSize;
  assert(itableOffsetEntry::interface_offset_in_bytes() == 0, "not added for convenience");

  Label loop;
  __ bind(loop);
  __ ldr(tmp, Address(Rscan, entry_size, post_indexed));
#ifdef AARCH64
  Label found;
  __ cmp(tmp, Ricklass);
  __ b(found, eq);
  __ cbnz(tmp, loop);
#else
  __ cmp(tmp, Ricklass);  // set ZF and CF if interface is found
  __ cmn(tmp, 0, ne);     // check if tmp == 0 and clear CF if it is
  __ b(loop, ne);
#endif // AARCH64

  assert(StubRoutines::throw_IncompatibleClassChangeError_entry() != NULL, "Check initialization order");
#ifdef AARCH64
  __ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, tmp);
  __ bind(found);
#else
  // CF == 0 means we reached the end of itable without finding icklass
  __ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, noreg, cc);
#endif // !AARCH64

  // Interface found at previous position of Rscan, now load the method oop
  __ ldr_s32(tmp, Address(Rscan, itableOffsetEntry::offset_offset_in_bytes() - entry_size));
  {
    const int method_offset = itableMethodEntry::size() * HeapWordSize * itable_index +
      itableMethodEntry::method_offset_in_bytes();
    __ add_slow(Rmethod, Rclass, method_offset);
  }
  __ ldr(Rmethod, Address(Rmethod, tmp));

  address ame_addr = __ pc();

#ifdef AARCH64
  __ ldr(tmp, Address(Rmethod, Method::from_compiled_offset()));
  __ br(tmp);
#else
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
#endif // AARCH64

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
                  itable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");

  // FIXME ARM: need correct 'slop' - below is x86 code
  // shut the door on sizing bugs
  //int slop = 8;  // 32-bit offset is this much larger than a 13-bit one
  //assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
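
A minimal C++ sketch (not HotSpot code; struct names and the byte-offset arithmetic are illustrative assumptions) of the itable search this stub performs: walk the (interface, offset) pairs until the incoming interface klass is found, then use the recorded offset plus itable_index to locate the Method*:

#include <cstddef>

struct ItableOffsetEntry { const void* interface; int offset; };  // [intface][offset] pairs
struct MethodRef          { void* from_compiled_entry; };

static MethodRef* lookup_itable_method(char* klass_base, ItableOffsetEntry* scan,
                                       const void* icklass, int itable_index) {
  for (; scan->interface != nullptr; ++scan) {          // the ldr/cmp/branch loop above
    if (scan->interface == icklass) {
      // Matching interface: its offset gives the start of this interface's method table.
      MethodRef** methods = reinterpret_cast<MethodRef**>(klass_base + scan->offset);
      return methods[itable_index];                     // __ ldr(Rmethod, Address(Rmethod, tmp))
    }
  }
  return nullptr;  // end of itable: the stub jumps to throw_IncompatibleClassChangeError_entry()
}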