void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
    address start = __ pc();
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp  = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    __ load_heap_oop(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes.
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ a_byte(0xB8);
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);   break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
  }
}
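// ---------------------------------------------------------------------------
// Illustrative sketch only (not HotSpot code): how the 5-byte patch record
// emitted above can be read back.  The record is disguised as a
// "movl reg, imm32" -- opcode 0xB8, one zero pad byte, then the three data
// bytes -- so, given an address just past the record (patch_info_pc above),
// the fields sit at small negative offsets.  The struct and helper names
// below are assumptions made up for this example.
struct PatchRecordSketch {
  int being_initialized_entry_offset;  // distance back to the start of the copied template
  int bytes_to_skip;                   // extra check code plus the record itself
  int bytes_to_copy;                   // instruction bytes saved from the patch site
};

static PatchRecordSketch read_patch_record_sketch(const unsigned char* stub_location) {
  PatchRecordSketch r;
  r.bytes_to_copy                  = stub_location[-1];
  r.bytes_to_skip                  = stub_location[-2];
  r.being_initialized_entry_offset = stub_location[-3];
  // Roughly: the copied code ends at stub_location - bytes_to_skip, so it starts
  // at stub_location - bytes_to_skip - bytes_to_copy, and the template entry is
  // at stub_location - being_initialized_entry_offset.
  return r;
}
// ---------------------------------------------------------------------------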
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the callers
    // exception handler.
    oop_map = generate_oop_map(sasm, true);

    // transfer the pending exception to the exception_oop
    __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);
    __ ld_ptr(Oexception, 0, G0);
    __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));
    __ add(I7, frame::pc_return_offset, Oissuing_pc);
    break;
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm);
    __ mov(Oexception->after_save(),  Oexception);
    __ mov(Oissuing_pc->after_save(), Oissuing_pc);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except exception oop (Oexception)
    // and exception pc (Oissuing_pc) are dead.
    oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
    sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
    __ save_frame_c1(frame_size_in_bytes);
    __ mov(Oexception->after_save(),  Oexception);
    __ mov(Oissuing_pc->after_save(), Oissuing_pc);
    break;
  default:  ShouldNotReachHere();
  }

  __ verify_not_null_oop(Oexception);

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // use the throwing pc as the return address to lookup (has bci & oop map)
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Note: if nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_id:
    restore_live_registers(sasm);
    __ jmp(O0, 0);
    __ delayed()->restore();
    break;
  case handle_exception_from_callee_id:
    // Restore SP from L7 if the exception PC is a method handle call site.
    __ mov(O0, G5);  // Save the target address.
    __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
    __ tst(L0);  // Condition codes are preserved over the restore.
    __ restore();

    __ jmp(G5, 0);  // jump to the exception handler
    __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}
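// ---------------------------------------------------------------------------
// Illustrative sketch only (not HotSpot code): the thread-state handoff that
// the forward_exception_id path above performs in SPARC assembly, spelled out
// in plain C++ against a mock thread.  MockThread and the helper below are
// stand-ins invented for this sketch; the real stub touches the JavaThread
// slots through the offsets shown above, then calls exception_handler_for_pc()
// and jumps to the handler address it returns (in O0).
#include <cstddef>  // for NULL in this standalone sketch

struct MockThread {
  void*          pending_exception;  // set by the runtime when an exception is raised
  void*          exception_oop;      // published for the handler lookup
  unsigned char* exception_pc;       // throwing pc; its call site carries the bci and oop map
};

static void forward_exception_handoff_sketch(MockThread* thread, unsigned char* issuing_pc) {
  void* exception = thread->pending_exception;  // ld_ptr(... pending_exception_offset ...)
  thread->pending_exception = NULL;             // st_ptr(G0, ... pending_exception_offset ...)
  thread->exception_oop = exception;            // st_ptr(Oexception, ... exception_oop_offset ...)
  thread->exception_pc  = issuing_pc;           // st_ptr(Oissuing_pc, ... exception_pc_offset ...)
}
// ---------------------------------------------------------------------------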
void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_oop_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_oop_index >= 0, "must have oop index");
    __ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
    __ cmp(G2_thread, G3);
    __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
    __ delayed()->nop();

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);   break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, relocInfo::oop_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, relocInfo::oop_type, relocInfo::none);
  }
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being initialized case.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being initialized case.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes patch record data.
  __ emit_int8((int8_t)(A_ZOPC >> 24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:
      target = Runtime1::entry_for (Runtime1::access_field_patching_id);
      break;
    case load_klass_id:
      target = Runtime1::entry_for (Runtime1::load_klass_patching_id);
      reloc_type = relocInfo::metadata_type;
      break;
    case load_mirror_id:
      target = Runtime1::entry_for (Runtime1::load_mirror_patching_id);
      reloc_type = relocInfo::oop_type;
      break;
    case load_appendix_id:
      target = Runtime1::entry_for (Runtime1::load_appendix_patching_id);
      reloc_type = relocInfo::oop_type;
      break;
    default: ShouldNotReachHere();
  }

  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
  __ load_const(Z_R1_scratch, target); // Must not optimize in order to keep _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}