void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj_reg, SystemDictionary::WKID klass_id,
                                 Register temp_reg, Register temp2_reg,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj_reg);
  __ cmpdi(CCR0, obj_reg, 0);
  __ beq(CCR0, L_bad);                  // A null reference fails verification.
  __ load_klass(temp_reg, obj_reg);
  __ load_const_optimized(temp2_reg, (address) klass_addr);
  __ ld(temp2_reg, 0, temp2_reg);
  __ cmpd(CCR0, temp_reg, temp2_reg);
  __ beq(CCR0, L_ok);                   // Exact match with the well-known klass.
  __ ld(temp_reg, klass->super_check_offset(), temp_reg);
  __ cmpd(CCR0, temp_reg, temp2_reg);
  __ beq(CCR0, L_ok);                   // Fast subtype check via super_check_offset.
  __ BIND(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  BLOCK_COMMENT("} verify_klass");
}
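// For context, a hedged sketch: verify_klass is typically reached through a
// thin wrapper like verify_method_handle, modeled here on the equivalent
// helper in the other ports (the exact PPC declaration is an assumption):
static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
                                 Register temp_reg, Register temp2_reg) {
  verify_klass(_masm, mh_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MethodHandle),
               temp_reg, temp2_reg, "reference is a MH");
}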
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  const Register temp_reg = R12_scratch2;
  verify_oop(receiver);
  load_klass(temp_reg, receiver);
  // The receiver klass must match the cached klass; otherwise jump to the IC miss stub.
  if (TrapBasedICMissChecks) {
    trap_ic_miss_check(temp_reg, iCache);
  } else {
    Label L;
    cmpd(CCR0, temp_reg, iCache);
    beq(CCR0, L);
    //load_const_optimized(temp_reg, SharedRuntime::get_ic_miss_stub(), R0);
    calculate_address_from_global_toc(temp_reg, SharedRuntime::get_ic_miss_stub(), true, true, false);
    mtctr(temp_reg);
    bctr();
    align(32, 12);
    bind(L);
  }
}
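// A conceptual model of the check emitted above, with hypothetical names
// (CachedCallSite and resolve_ic_miss are illustrations, not VM symbols):
// the call site caches the expected klass; on a mismatch, control transfers
// to the IC miss stub, which re-resolves the call.
struct CachedCallSite {
  const void* expected_klass; // what inline_cache_check compares against (iCache)
  void*       target;         // verified entry point of the cached callee
};

void* dispatch(const void* receiver_klass, CachedCallSite* site,
               void* (*resolve_ic_miss)()) {
  if (receiver_klass == site->expected_klass) {
    return site->target;      // fast path: klass matches, call straight through
  }
  return resolve_ic_miss();   // slow path: SharedRuntime::get_ic_miss_stub() equivalent
}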
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2, Register temp3,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2); // temp3 is only passed on
  assert(method_temp == R19_method, "required register for loading method");

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv, temp2);
  __ verify_oop(method_temp);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
  __ verify_oop(method_temp);
  // The following assumes that a Method* is normally compressed in the vmtarget field:
  __ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);

  if (VerifyMethodHandles && !for_compiler_entry) {
    // Make sure recv is already on stack.
    __ ld(temp2, in_bytes(Method::const_offset()), method_temp);
    __ load_sized_value(temp2, in_bytes(ConstMethod::size_of_parameters_offset()), temp2,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
    Label L;
    __ ld(temp2, __ argument_offset(temp2, temp2, 0), CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
    __ cmpd(CCR1, temp2, recv);
    __ beq(CCR1, L);
    __ stop("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}
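// A minimal sketch of the pointer chain the three loads above walk, with
// hypothetical struct names standing in for the Java objects (field names
// mirror java.lang.invoke; this is illustration, not VM layout):
struct Method;                                // metaspace method
struct MemberName   { Method* vmtarget; };    // java.lang.invoke.MemberName
struct LambdaForm   { MemberName* vmentry; }; // java.lang.invoke.LambdaForm
struct MethodHandle { LambdaForm* form; };    // java.lang.invoke.MethodHandle

inline Method* invoker_of(MethodHandle* mh) {
  return mh->form->vmentry->vmtarget;         // MH -> MH.form -> LF.vmentry -> vmtarget
}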
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // PPC port: use fixed size.
  const int code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new (code_length) VtableStub(false, itable_index);

  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
  address start_pc;

#ifndef PRODUCT
  if (CountCompiledCalls) {
    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
    __ lwz(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ stw(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  // Entry arguments:
  //  R19_method: Interface
  //  R3_ARG1:    Receiver

  Label L_no_such_interface;
  const Register rcvr_klass = R11_scratch1,
                 interface  = R12_scratch2,
                 tmp1       = R21_tmp1,
                 tmp2       = R22_tmp2;

  address npe_addr = __ pc(); // npe = null pointer exception
  __ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1);

  // Receiver subtype check against REFC.
  __ ld(interface, CompiledICHolder::holder_klass_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, noreg,
                             R0, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ false);

  // Get Method* and entrypoint for compiler
  __ ld(interface, CompiledICHolder::holder_metadata_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, itable_index,
                             R19_method, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ true);

#ifndef PRODUCT
  if (DebugVtables) {
    Label ok;
    __ cmpd(CCR0, R19_method, 0);
    __ bne(CCR0, ok);
    __ stop("method is null", 103);
    __ bind(ok);
  }
#endif

  // If the vtable entry is null, the method is abstract.
  address ame_addr = __ pc(); // ame = abstract method error

  // Must do an explicit check if implicit checks are disabled.
  assert(!MacroAssembler::needs_explicit_null_check(in_bytes(Method::from_compiled_offset())), "sanity");
  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R19_method);
    } else {
      __ cmpdi(CCR0, R19_method, 0);
      __ beq(CCR0, L_no_such_interface);
    }
  }
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  __ bind(L_no_such_interface);
  __ load_const_optimized(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  masm->flush();
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
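// A conceptual sketch of the itable lookup the stub performs, with
// hypothetical types (ItableSlot, Klass) standing in for the real layout:
// scan the receiver klass's itable for the holder interface, then pick the
// method at itable_index; a miss raises IncompatibleClassChangeError.
struct Method;
struct Klass;
struct ItableSlot { Klass* interface_klass; Method** method_table; };

Method* itable_dispatch(ItableSlot* itable, Klass* holder_interface, int itable_index) {
  for (ItableSlot* s = itable; s->interface_klass != NULL; ++s) {
    if (s->interface_klass == holder_interface) {
      return s->method_table[itable_index]; // may be null for an abstract method (AME)
    }
  }
  return NULL; // no such interface -> handle-wrong-method stub
}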
// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full-blown frame and go on dispatching, but fetch the value
//    into R3_RET and return quickly.
// 2. If G1 is active we *must* execute this intrinsic for correctness:
//    It contains a GC barrier which puts the reference into the SATB buffer
//    to indicate that someone holds a strong reference to the object the
//    weak ref points to!
address InterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. the "intrinsified" code for G1 (or any SATB based GC),
  //    2. the slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // If the receiver is null then it is OK to jump to the slow path.
    __ ld(R3_RET, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp)); // get receiver

    // Check if receiver == NULL and go the slow path.
    __ cmpdi(CCR0, R3_RET, 0);
    __ beq(CCR0, slow_path);

    // Load the value of the referent field.
    __ load_heap_oop(R3_RET, referent_offset, R3_RET);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value.

    // Restore caller sp for c2i case.
#ifdef ASSERT
    __ ld(R9_ARG7, 0, R1_SP);
    __ ld(R10_ARG8, 0, R21_sender_SP);
    __ cmpd(CCR0, R9_ARG7, R10_ARG8);
    __ asm_assert_eq("backlink", 0x544);
#endif // ASSERT
    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

    __ g1_write_barrier_pre(noreg,         // obj
                            noreg,         // offset
                            R3_RET,        // pre_val
                            R11_scratch1,  // tmp
                            R12_scratch2,  // tmp
                            true);         // needs_frame

    __ blr();

    // Generate regular method entry.
    __ bind(slow_path);
    __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
    __ flush();
    return entry;
  } else {
    return generate_accessor_entry();
  }
}
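// A conceptual sketch of what the generated g1_write_barrier_pre does at
// runtime (hypothetical helper and field names; the real barrier is emitted
// inline): if concurrent marking is active, the referent just loaded is
// logged in the thread's SATB queue so the concurrent marker treats it as live.
struct SatbQueue { void** buf; int index; bool active; };

inline void satb_log_pre_val(SatbQueue* q, void* pre_val) {
  if (q->active && pre_val != NULL) {
    q->buf[--q->index] = pre_val; // enqueue the loaded value for the marker
  }
}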
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_accessor_entry(void) {
  if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
    return NULL;
  }

  Label Lslow_path, Lacquire;

  const Register
         Rclass_or_obj = R3_ARG1,
         Rconst_method = R4_ARG2,
         Rcodes        = Rconst_method,
         Rcpool_cache  = R5_ARG3,
         Rscratch      = R11_scratch1,
         Rjvmti_mode   = Rscratch,
         Roffset       = R12_scratch2,
         Rflags        = R6_ARG4,
         Rbtable       = R7_ARG5;

  static address branch_table[number_of_states];

  address entry = __ pc();

  // Check for safepoint:
  // Ditch this, real men don't need safepoint checks.

  // Also check for JVMTI mode
  // Check for null obj, take slow path if so.
  __ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
  __ cmpdi(CCR1, Rclass_or_obj, 0);
  __ cmpwi(CCR0, Rjvmti_mode, 0);
  __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
  __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0

  // Do 2 things in parallel:
  // 1. Load the index out of the first instruction word, which looks like this:
  //    <0x2a><0xb4><index (2 byte, native endianness)>.
  // 2. Load constant pool cache base.
  __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
  __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);

  __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
  __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);

  // Get the const pool entry by means of <index>.
  const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
  __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
  __ add(Rcpool_cache, Rscratch, Rcpool_cache);

  // Check if cpool cache entry is resolved.
  // We are resolved if the indices offset contains the current bytecode.
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Big Endian:
  __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
  __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
  __ bne(CCR0, Lslow_path);
  __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.

  // Finally, start loading the value: Get cp cache entry into regs.
  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);

  // Following code is from templateTable::getfield_or_static
  // Load pointer to branch table
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
  // Note: sync is needed before volatile load on PPC64.

  // Check field type
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);

  __ ld(R9_ARG7, 0, R1_SP);
  __ ld(R10_ARG8, 0, R21_sender_SP);
  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
  __ asm_assert_eq("backlink", 0x543);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

  // Load from branch table and dispatch (volatile case: one instruction ahead)
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  __ cmpwi(CCR6, Rscratch, 1); // volatile?
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
  }
  __ ldx(Rbtable, Rbtable, Rflags);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
  }
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x6541);

  bool all_uninitialized = true, all_initialized = true;
  for (int i = 0; i < number_of_states; ++i) {
    all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
    all_initialized   = all_initialized   && (branch_table[i] != NULL);
  }
  assert(all_uninitialized != all_initialized, "consistency"); // either or

  __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
  if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
  if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
  if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ stop("unexpected type", 0x6551);
#endif

  if (branch_table[itos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[itos] = __ pc(); // non-volatile_entry point
    __ lwax(R3_RET, Rclass_or_obj, Roffset);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[ltos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[ltos] = __ pc(); // non-volatile_entry point
    __ ldx(R3_RET, Rclass_or_obj, Roffset);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[btos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[btos] = __ pc(); // non-volatile_entry point
    __ lbzx(R3_RET, Rclass_or_obj, Roffset);
    __ extsb(R3_RET, R3_RET);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[ctos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[ctos] = __ pc(); // non-volatile_entry point
    __ lhzx(R3_RET, Rclass_or_obj, Roffset);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[stos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[stos] = __ pc(); // non-volatile_entry point
    __ lhax(R3_RET, Rclass_or_obj, Roffset);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[atos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[atos] = __ pc(); // non-volatile_entry point
    __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
    __ verify_oop(R3_RET);
    //__ dcbt(R3_RET); // prefetch
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  __ align(32, 12);
  __ bind(Lacquire);
  __ twi_0(R3_RET);
  __ isync(); // acquire
  __ blr();

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "accessor_entry initialization");
    //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif

  __ bind(Lslow_path);
  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
  __ flush();

  return entry;
}
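// A conceptual model of the dispatch above (hypothetical C++, not the stub):
// branch_table holds one generated entry per tos state, each laid out as
// [fence][load; ...], so a volatile access enters one instruction earlier
// than a plain one and thereby picks up the leading fence.
typedef void (*EntryPoint)();

EntryPoint select_entry(EntryPoint non_volatile_entry, bool is_volatile,
                        int bytes_per_inst /* BytesPerInstWord */) {
  char* p = (char*)non_volatile_entry;
  if (is_volatile) {
    p -= bytes_per_inst; // back up onto the fence that precedes the load
  }
  return (EntryPoint)p;
}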