// Generate a fast-path JNI Get<Type>Field accessor stub for the given
// primitive type.  The stub speculatively loads the field while sampling the
// safepoint counter before and after the load; if the counter is "busy" on
// entry or has changed by the time the load completes, it falls back to the
// ordinary slow-path JNI accessor for that type.
// Returns the entry address of the generated stub.
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
  // Pick the stub's name from the requested result type.
  const char *name;
  switch (type) {
    case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
    case T_BYTE:    name = "jni_fast_GetByteField";    break;
    case T_CHAR:    name = "jni_fast_GetCharField";    break;
    case T_SHORT:   name = "jni_fast_GetShortField";   break;
    case T_INT:     name = "jni_fast_GetIntField";     break;
    case T_LONG:    name = "jni_fast_GetLongField";    break;
    case T_FLOAT:   name = "jni_fast_GetFloatField";   break;
    case T_DOUBLE:  name = "jni_fast_GetDoubleField";  break;
    default:        ShouldNotReachHere();
  }

  // Allocate a code buffer for the stub and an assembler to fill it.
  ResourceMark rm;
  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
  CodeBuffer cbuf(blob);
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  address fast_entry = __ pc();

  Label slow;

  // Load the safepoint counter.  If its low bit is set, take the slow path.
  // NOTE(review): presumably the counter is odd while a safepoint is in
  // progress — confirm against SafepointSynchronize.
  unsigned long offset;
  __ adrp(rcounter_addr, SafepointSynchronize::safepoint_counter_addr(), offset);
  Address safepoint_counter_addr(rcounter_addr, offset);
  __ ldrw(rcounter, safepoint_counter_addr);
  __ andw(rscratch1, rcounter, 1);
  __ cbnzw(rscratch1, slow);

  // Mix the counter into the object handle and cancel it out again.  The
  // value is unchanged, but the handle load's address now carries a data
  // dependency on the counter load, ordering the two loads.
  __ eor(robj, c_rarg1, rcounter);
  __ eor(robj, robj, rcounter);                // obj, since
                                               // robj ^ rcounter ^ rcounter == robj
                                               // robj is address dependent on rcounter.
  __ ldr(robj, Address(robj, 0));              // *obj
  __ lsr(roffset, c_rarg2, 2);                 // offset

  // Record the PC of the speculative field load so the segfault handler can
  // recognize and recover from a fault at exactly this instruction.
  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  speculative_load_pclist[count] = __ pc();    // Used by the segfault handler

  // The speculative load itself, using the width/signedness for the type.
  switch (type) {
    case T_BOOLEAN: __ ldrb (result, Address(robj, roffset)); break;
    case T_BYTE:    __ ldrsb (result, Address(robj, roffset)); break;
    case T_CHAR:    __ ldrh (result, Address(robj, roffset)); break;
    case T_SHORT:   __ ldrsh (result, Address(robj, roffset)); break;
    case T_FLOAT:   __ ldrw (result, Address(robj, roffset)); break;
    case T_INT:     __ ldrsw (result, Address(robj, roffset)); break;
    case T_DOUBLE:
    case T_LONG:    __ ldr (result, Address(robj, roffset)); break;
    default:        ShouldNotReachHere();
  }

  // counter_addr is address dependent on result.
  // Same double-eor trick as above: order the counter re-load after the
  // field load by making its address data-dependent on the loaded value.
  __ eor(rcounter_addr, rcounter_addr, result);
  __ eor(rcounter_addr, rcounter_addr, result);

  // Re-read the safepoint counter; if it changed while we were loading, the
  // speculatively loaded value may be stale — fall back to the slow path.
  __ ldrw(rscratch1, safepoint_counter_addr);
  __ cmpw(rcounter, rscratch1);
  __ br (Assembler::NE, slow);

  // Move the loaded value into the ABI result register (v0 for FP types,
  // r0 otherwise) and return.
  switch (type) {
    case T_FLOAT:   __ fmovs(v0, result); break;
    case T_DOUBLE:  __ fmovd(v0, result); break;
    default:        __ mov(r0, result);   break;
  }
  __ ret(lr);

  // Slow path: record its entry PC (indexed in parallel with the
  // speculative-load PC above) and call the ordinary JNI accessor.
  slowcase_entry_pclist[count++] = __ pc();
  __ bind(slow);
  address slow_case_addr;
  switch (type) {
    case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
    case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
    case T_CHAR:    slow_case_addr = jni_GetCharField_addr();    break;
    case T_SHORT:   slow_case_addr = jni_GetShortField_addr();   break;
    case T_INT:     slow_case_addr = jni_GetIntField_addr();     break;
    case T_LONG:    slow_case_addr = jni_GetLongField_addr();    break;
    case T_FLOAT:   slow_case_addr = jni_GetFloatField_addr();   break;
    case T_DOUBLE:  slow_case_addr = jni_GetDoubleField_addr();  break;
    default:        ShouldNotReachHere();
  }

  {
    // Set up a frame, call the slow-path accessor, and return its result.
    __ enter();
    __ lea(rscratch1, ExternalAddress(slow_case_addr));
    __ blr(rscratch1);
    __ maybe_isb();
    __ leave();
    __ ret(lr);
  }
  __ flush ();

  return fast_entry;
}
// Generate two local interpreter routines that write type tags for a
// callee's arguments onto the Java stack:
//   - "interpreter_fill_in_tags": tags come from single bits in the call
//     info word (obj_tag vs int_tag).
//   - "interpreter_fill_in_extended_tags": tags come from 4-bit nybbles in
//     an extended call-info area read relative to lr.
// Both routines loop from the last argument down to index 0 and return via
// a jump through the `locals` register.
void InterpreterStubs::generate_interpreter_fill_in_tags() {
  Segment seg(this, code_segment, "Interpreter fill in tags");
  Register param_size = tmp0; // must be preserved
  Register callinfo   = tmp1;
  {
    bind_local("interpreter_fill_in_tags");
    comment("%s: size of parameters", reg_name(tmp0));
    comment("%s: call info from call size", reg_name(tmp1));
    comment("");
    comment("Must preserve lr, %s (method), and %s (parameter size)", reg_name(r0), reg_name(tmp0));
    Label loop_entry;
    // tos_val = r0 must be preserved
    Register arg_index   = tmp2;
    Register one_reg     = tmp3;
    // When the Java stack grows down, jsp itself already points at the tag
    // slots, so no extra register is needed for the tag base.
    Register tag_address = JavaStackDirection < 0 ? tmp4 : jsp;

    // one_reg holds the call-info bit for argument 0; shifting it left by
    // the argument index selects that argument's tag bit.
    mov_imm(one_reg, 1 << CallInfo::format1_tag_start);
    // arg_index = param_size - 1; flags tell us if there were no arguments.
    sub(arg_index, param_size, one, set_CC);
    report_fatal("shouldn't be called on no arguments", lt);
    if (JavaStackDirection < 0) {
      comment("Tag address of last argument");
      add(tag_address, jsp, imm(BytesPerWord));
    } else {
      comment("jsp points to tag address of last argument");
    }

    // Loop over arguments from last index down to 0, storing obj_tag when
    // the argument's call-info bit is set, int_tag otherwise.
    bind(loop_entry);
    comment("test the bit in the call info");
    tst(callinfo, reg_shift(one_reg, lsl, arg_index));
    mov(tos_tag, imm(obj_tag), ne);
    mov(tos_tag, imm(int_tag), eq);
    // Tag slots are 8 bytes apart (index << 3); direction depends on stack
    // growth direction.
    if (JavaStackDirection < 0) {
      str(tos_tag, add_index(tag_address, arg_index, lsl, 3));
    } else {
      str(tos_tag, sub_index(tag_address, arg_index, lsl, 3));
    }
    sub(arg_index, arg_index, one, set_CC);
    b(loop_entry, ge);
    // Return to the caller via the address held in `locals`.
    mov(pc, reg(locals));
  }
  {
    Register bit_offset  = tmp1; // callinfo not needed
    Register one_reg     = tmp2;
    Register tag_address = tmp3;
    Register x1          = tmp4;
    Register x2          = tmp5;
    Register index       = tos_tag;
    Label loop;
    bind_local("interpreter_fill_in_extended_tags");

    comment("Total number of tags");
    // The tag count is a halfword embedded before the return address; the
    // in-word offset depends on endianness.
    if (HARDWARE_LITTLE_ENDIAN) {
      ldrh(bit_offset, imm_index3(lr, -2 * BytesPerWord));
    } else {
      ldrh(bit_offset, imm_index3(lr, -2 * BytesPerWord + 2));
    }

    comment("Tag address of first argument");
    if (JavaStackDirection < 0) {
      add(tag_address, jsp, imm_shift(param_size, lsl, 3));
    } else {
      sub(tag_address, jsp, imm_shift(param_size, lsl, 3));
    }
    // tag_address points to the last address of the previous stack
    add_imm(tag_address, tag_address, JavaFrame::arg_offset_from_sp(-1) + BytesPerWord);

    comment("Index of last argument");
    sub(index, param_size, one);

    comment("Bit number of first argument");
    // Each tag is a 4-bit nybble; 32 + 32 + 16 skips the words/halfword of
    // header preceding the nybble array (relative to lr).
    sub(bit_offset, bit_offset, reg(param_size));
    mov(bit_offset, imm_shift(bit_offset, lsl, 2));
    add(bit_offset, bit_offset, imm(32 + 32 + 16));

    comment("A useful constant");
    mov(one_reg, one);

    // Loop over arguments from last index down to 0, decoding each 4-bit
    // nybble into a stack-type tag and storing it.
    bind(loop);
    comment("Get the bit offset for this argument");
    add(x1, bit_offset, imm_shift(index, lsl, 2));
    comment("Get the appropriate word");
    // Word index = bit offset / 32; the nybble array lives below lr.
    mov(x2, imm_shift(x1, lsr, 5));
    ldr(x2, sub_index(lr, x2, lsl, 2));
    comment("Pick out the nybble");
    andr(x1, x1, imm(31));
    mov(x2, reg_shift(x2, lsr, x1));
    andr(x2, x2, imm(15), set_CC);
    comment("Convert the nybble into a stack type");
    // Non-zero nybble n becomes the tag bit 1 << (n - 1); zero stays zero
    // (both instructions below are skipped when the EQ flag is set).
    sub(x2, x2, one, ne);
    mov(x2, reg_shift(one_reg, lsl, x2), ne);
    if (JavaStackDirection < 0) {
      str(x2, sub_index(tag_address, index, lsl, 3));
    } else {
      str(x2, add_index(tag_address, index, lsl, 3));
    }
    comment("Update the info");
    sub(index, index, one, set_CC);
    b(loop, ge);
    // Return to the caller via the address held in `locals`.
    mov(pc, reg(locals));
  }
}
// Code generation
// Generate the interpreter entry stub for a signature-polymorphic method
// handle intrinsic (invokeBasic or one of the linkTo* intrinsics).
// _masm: assembler to emit into; iid: the intrinsic being entered.
// Returns the stub's entry point, or NULL for the intrinsics that are never
// entered directly (_invokeGeneric, _compiledLambdaForm).
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ hlt(0);           // empty stubs make SG sick
    return NULL;
  }

  // Register conventions on entry:
  // r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rmethod: Method*
  // r3: argument locator (parameter slot count, added to rsp)
  // r1: used as temp to hold mh or receiver
  // r0, r11: garbage temps, blown away
  Register argp = r3;   // argument list ptr, live on error paths
  Register temp = r0;
  Register mh   = r1;   // MH receiver; dies quickly and is recycled

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    // Debug check: the incoming Method*'s intrinsic id must match iid;
    // otherwise halt (optionally tracing first for the linkTo kinds).
    assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ ldrh(rscratch1, Address(rmethod, Method::intrinsic_id_offset_in_bytes()));
    __ cmp(rscratch1, (int) iid);
    __ br(Assembler::EQ, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ hlt(0);
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task: Find out how big the argument list is.
  Address r3_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    // Read the parameter slot count from the ConstMethod and compute the
    // interpreter-stack address of the first (receiver) argument.
    __ ldr(argp, Address(rmethod, Method::const_offset()));
    __ load_sized_value(argp,
                        Address(argp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    r3_first_arg_addr = __ argument_address(argp, -1);
  } else {
    DEBUG_ONLY(argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    // Non-static intrinsics take the method handle as the first argument.
    __ ldr(mh, r3_first_arg_addr);
    DEBUG_ONLY(argp = noreg);
  }

  // r3_first_arg_addr is live!
  trace_method_handle_interpreter_entry(_masm, iid);
  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, mh, noreg, not_for_compiler_entry);
  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ ldr(recv = r2, r3_first_arg_addr);
    }
    DEBUG_ONLY(argp = noreg);
    Register rmember = rmethod;  // MemberName ptr; incoming method ptr is dead now
    __ pop(rmember);             // extract last argument
    generate_method_handle_dispatch(_masm, iid, recv, rmember, not_for_compiler_entry);
  }

  return entry_point;
}