// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.  The layout in the destination
// CodeBuffer is different to the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = NULL;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_size_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated.  Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == NULL && dest_filled != NULL) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());
  }
}
int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}
void linker::add_code_section (code_section& sect_in)
{
  if (this->out->find_section (sect_in.get_name ()))
    throw std::runtime_error (
        "linker::add_code_section: attempting to add section with same name twice");

  this->out->add_section (code_section (sect_in));
  code_section& sect = static_cast<code_section&> (
      *this->out->find_section (sect_in.get_name ()));

  // handle relocations
  for (auto& reloc : sect.get_relocations ())
    {
      // move relocation into linker's store
      reloc.sym = this->rstore->get (reloc.sym.store->get_name (reloc.sym.id));

      auto& sym_name = reloc.sym.store->get_name (reloc.sym.id);
      auto& mod = this->find_module_containing_export (sym_name);
      if (mod.get_type () != module_type::shared)
        throw std::runtime_error (
            "linker::add_code_section: relocations from non-shared objects not handled yet");

      module_import_id mod_id;
      if (this->out->has_import (mod.get_export_name ()))
        mod_id = this->out->get_import (mod.get_export_name ());
      else
        mod_id = this->out->add_import (mod.get_export_name ());

      auto& exp_sym = mod.get_export_symbol (sym_name);
      this->out->add_import_symbol (reloc.sym.id, mod_id, exp_sym.version);
    }
}
csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();             // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;        // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;      // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
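      // Hedged sketch of the remainder (an assumption, not verbatim source):
      // an empty secondary section gets no speculative growth; each section's
      // new capacity is then recorded and added to the running total.
      exp = 0;
    }
    csize_t new_cap = sect->size() + exp;
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}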
void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert((dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (padding != 0) {
        buf_offset += padding;
        assert(prev_dest_cs != NULL, "sanity");
        prev_dest_cs->_limit += padding;
      }
#ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < (SECT_LIMIT - 1)) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record.  See Label::patch_instructions.
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
#endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  dest->verify_section_allocation();
}
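// Illustrative sketch (not verbatim HotSpot source): relocate_code_to() and
// compute_final_layout() are meant to be driven in order by a caller that
// owns the destination storage -- first assign final section addresses, then
// copy the bytes and repair relocations.  The helper name and the assumption
// that both members are reachable from the caller are ours.
static void copy_buffer_sketch(CodeBuffer& src, CodeBuffer& dest) {
  // 1. Give each destination section its final (compacted) address range.
  src.compute_final_layout(&dest);
  // 2. Copy section contents word by word and fix pc-relative information.
  src.relocate_code_to(&dest);
}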
int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}
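// For context, a locator packs a section-relative offset and a section index
// into a single int.  The helpers below are a hedged sketch of that encoding
// (the real declarations live in codeBuffer.hpp; the bit width here is an
// assumption sized to hold SECT_LIMIT):
static const int sect_bits_sketch = 2;
static const int sect_mask_sketch = (1 << sect_bits_sketch) - 1;
static int make_locator_sketch(int pos, int sect) { return (pos << sect_bits_sketch) | sect; }
static int locator_pos_sketch(int locator)        { return locator >> sect_bits_sketch; }
static int locator_sect_sketch(int locator)       { return locator &  sect_mask_sketch; }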
void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark();  // new assembly into this section kills old mark
  _code_section = cs;
  _code_begin   = cs->start();
  _code_limit   = cs->limit();
  _code_pos     = cs->end();
}
csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}
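// Worked example of the align-then-accumulate pattern used above, with a
// stand-in for align_at_start (real sections align to their own required
// alignment; the fixed 16-byte figure below is only for illustration):
static size_t align_up_sketch(size_t off, size_t alignment) {
  return (off + alignment - 1) & ~(alignment - 1);
}
// Sections of 100, 37 and 20 bytes, each aligned to 16, pack to
// [0,100), [112,149), [160,180), so the total content size would be 180.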
csize_t CodeBuffer::total_offset_of(CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}
void CodeBuffer::freeze_section(CodeSection* cs) {
  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
  csize_t frozen_size = cs->size();
  if (next_cs != NULL) {
    frozen_size = next_cs->align_at_start(frozen_size);
  }
  address old_limit = cs->limit();
  address new_limit = cs->start() + frozen_size;
  relocInfo* old_locs_limit = cs->locs_limit();
  relocInfo* new_locs_limit = cs->locs_end();
  // Patch the limits.
  cs->_limit = new_limit;
  cs->_locs_limit = new_locs_limit;
  cs->_frozen = true;
  if (next_cs != NULL && !next_cs->is_allocated() && !next_cs->is_frozen()) {
    // Give remaining buffer space to the following section.
    next_cs->initialize(new_limit, old_limit - new_limit);
    next_cs->initialize_shared_locs(new_locs_limit,
                                    old_locs_limit - new_locs_limit);
  }
}
inline void AbstractAssembler::clear_inst_mark() { code_section()->clear_mark(); }
inline void AbstractAssembler::set_inst_mark() { code_section()->set_mark(); }
inline address AbstractAssembler::inst_mark() const { return code_section()->mark(); }
inline void AbstractAssembler::sync() {
  CodeSection* cs = code_section();
  guarantee(cs->start() == _code_begin, "must not shift code buffer");
  cs->set_end(_code_pos);
}
inline address AbstractAssembler::target(Label& L) { return code_section()->target(L, pc()); }
address CodeBuffer::locator_address(int locator) const {
  if (locator < 0)  return NULL;
  address start = code_section(locator_sect(locator))->start();
  return start + locator_pos(locator);
}
inline int AbstractAssembler::sect() const { return code_section()->index(); }
void MacroAssembler::advance(int bytes) {
  code_section()->set_end(code_section()->end() + bytes);
}
const char* AbstractAssembler::code_string(const char* str) {
  if (sect() == CodeBuffer::SECT_INSTS || sect() == CodeBuffer::SECT_STUBS) {
    return code_section()->outer()->code_string(str);
  }
  return NULL;
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_oop_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_oop_index >= 0, "must have oop index");
    __ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
    __ cmp(G2_thread, G3);
    __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
    __ delayed()->nop();

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);   break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc,
                                             relocInfo::oop_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc,
                                             relocInfo::oop_type, relocInfo::none);
  }
}
csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;
  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
    assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
    assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");
  }
  // if dest == NULL, this is just the sizing pass

  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(code_end_so_far == total_content_size(), "sanity");

  // Account for index:
  if (buf != NULL) {
    RelocIterator::create_index(dest->relocation_begin(),
                                buf_offset / sizeof(relocInfo),
                                dest->relocation_end());
  }

  return buf_offset;
}
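// A small sketch of the filler logic above: a gap between the running
// relocation point and the start of the next section is spanned by as many
// maximal filler relocInfo entries as needed, plus one shrunk filler for the
// remainder.  The max_filler_jump parameter is an assumption standing in for
// filler_relocInfo().addr_offset().
static int count_fillers_sketch(int gap, int max_filler_jump) {
  int fillers = 0;
  while (gap > 0) {
    int jump = (gap >= max_filler_jump) ? max_filler_jump : gap;
    gap -= jump;
    fillers++;
  }
  return fillers;  // e.g. a gap of 70 with a max jump of 32 needs 3 fillers
}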
void CodeBuffer::finalize_oop_references(methodHandle mh) {
  No_Safepoint_Verifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDO which don't
              // have a backpointer.  This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }
  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}
inline void AbstractAssembler::relocate(RelocationHolder const& rspec, int format) {
  assert(!pd_check_instruction_mark()
         || inst_mark() == NULL || inst_mark() == _code_pos,
         "call relocate() between instructions");
  code_section()->relocate(_code_pos, rspec, format);
}
inline CodeBuffer* AbstractAssembler::code() const { return code_section()->outer(); }
void MacroAssembler::store_oop(jobject obj) {
  code_section()->relocate(pc(), oop_Relocation::spec_for_immediate());
  emit_address((address) obj);
}
void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}
void MacroAssembler::store_Metadata(Metadata* md) {
  code_section()->relocate(pc(), metadata_Relocation::spec_for_immediate());
  emit_address((address) md);
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
    address start = __ pc();
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte (a_byte);
      *ptr = 0x90;  // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    __ load_heap_oop(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ a_byte(0xB8);
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);   break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address)_pc_start,
                                             relocInfo::oop_type, relocInfo::none);
  }
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes patch record data.
  __ emit_int8((int8_t)(A_ZOPC >> 24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);

  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:
      target = Runtime1::entry_for (Runtime1::access_field_patching_id);
      break;
    case load_klass_id:
      target = Runtime1::entry_for (Runtime1::load_klass_patching_id);
      reloc_type = relocInfo::metadata_type;
      break;
    case load_mirror_id:
      target = Runtime1::entry_for (Runtime1::load_mirror_patching_id);
      reloc_type = relocInfo::oop_type;
      break;
    case load_appendix_id:
      target = Runtime1::entry_for (Runtime1::load_appendix_patching_id);
      reloc_type = relocInfo::oop_type;
      break;
    default: ShouldNotReachHere();
  }

  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
  __ load_const(Z_R1_scratch, target);  // Must not optimize in order to keep _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);

  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address)pc, reloc_type, relocInfo::none);
  }
}
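// The PatchingStub::emit_code variants above all end the patch template with
// the same three bookkeeping bytes, preceded by one or two platform-specific
// filler/opcode bytes (a zero byte on SPARC, 0xB8 plus a padding byte to
// mimic a movl on x86, the 'A' opcode byte on s390).  A hedged sketch of the
// common trailing fields (the struct and its names are ours, not HotSpot's):
struct PatchRecordSketch {
  unsigned char being_initialized_entry_offset;  // distance back to the template entry
  unsigned char bytes_to_skip;                   // template + record size to skip over
  unsigned char bytes_to_copy;                   // size of the original code saved in the stub
};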