void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) { address pc = _instructions->start() + pc_offset; NativeInstruction* inst = nativeInstruction_at(pc); NativeInstruction* inst1 = nativeInstruction_at(pc + 4); if(inst->is_sethi() && inst1->is_nop()) { address const_start = _constants->start(); address dest = _constants->start() + data_offset; if(_constants_size > 0) { _instructions->relocate(pc + NativeMovConstReg::sethi_offset, internal_word_Relocation::spec((address) dest)); _instructions->relocate(pc + NativeMovConstReg::add_offset, internal_word_Relocation::spec((address) dest)); } TRACE_jvmci_3("relocating at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset); }else { int const_size = align_size_up(_constants->end()-_constants->start(), CodeEntryAlignment); NativeMovRegMem* load = nativeMovRegMem_at(pc); // This offset must match with SPARCLoadConstantTableBaseOp.emitCode load->set_offset(- (const_size - data_offset + Assembler::min_simm13())); TRACE_jvmci_3("relocating ld at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset); } }
// Code for unit testing implementation of NativeMovRegMem class
//
// Emits one instance of every load/store form NativeMovRegMem must
// decode — each in both the immediate (simm13 displacement) shape and
// the sethi/add register-displacement shape — then walks the emitted
// stream verifying set_offset()/offset() round-trip for a table of
// boundary offsets. Compiled under ASSERT builds only.
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;   // counts emitted test instructions; drives the verification loop below
  uint idx1;
  // Offsets exercised per instruction, including sign/width boundary values.
  int offsets[] = { 0x0, 0xffffffff, 0x7fffffff, 0x80000000, 4096, 4097, 0x20, 0x4000, };

  // Allow all instruction forms regardless of the actual CPU features.
  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);

  // Loads: each pair is (immediate-displacement form, then
  // sethi/add + register-displacement form).
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  // Floating-point loads.
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Stores: same immediate/register-displacement pairing as the loads.
  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  // Floating-point stores.
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  // Exercise the first emitted instruction directly.
  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Walk the remaining idx-1 test instructions; for each, verify that
  // set_offset()/offset() round-trips every value in the offsets table
  // (truncated through low10() for immediate-form instructions).
  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]), "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  // Restore the real CPU feature set.
  VM_Version::revert();
#endif // ASSERT
}
// Install a DataPatch site: either inline the constant into the
// instruction stream at pc_offset, or append it to the constants
// section and rewrite the load's displacement to reach it.
//
// pc_offset - offset of the patch site within the instructions section
// site      - CompilationResult.DataPatch object describing the constant,
//             its alignment requirement, and whether it is inlined
inline void CodeInstaller::pd_site_DataPatch(int pc_offset, oop site) {
  oop constant = CompilationResult_DataPatch::constant(site);
  int alignment = CompilationResult_DataPatch::alignment(site);
  bool inlined = CompilationResult_DataPatch::inlined(site) == JNI_TRUE;

  // Dispatch on the JVMCI Kind's type character.
  oop kind = Constant::kind(constant);
  char typeChar = Kind::typeChar(kind);

  address pc = _instructions->start() + pc_offset;

  switch (typeChar) {
    case 'z':
    case 'b':
    case 's':
    case 'c':
    case 'i':
      // Sub-long primitives should have been handled elsewhere.
      fatal("int-sized values not expected in DataPatch");
      break;
    case 'f':
    case 'j':
    case 'd': {
      // 8-byte primitive constants (float is widened into the same
      // 64-bit primitive field).
      if (inlined) {
        // Constant is materialized directly by a sethi/add pair.
        NativeMovConstReg* move = nativeMovConstReg_at(pc);
        uint64_t value = Constant::primitive(constant);
        move->set_data(value);
      } else {
        // Append the constant to the constants section, honoring any
        // requested alignment (bounded by the section's own alignment).
        int size = _constants->size();
        if (alignment > 0) {
          guarantee(alignment <= _constants->alignment(), "Alignment inside constants section is restricted by alignment of section begin");
          size = align_size_up(size, alignment);
        }
        // we don't care if this is a long/double/etc., the primitive field contains the right bits
        address dest = _constants->start() + size;
        _constants->set_end(dest);   // advance past any alignment padding
        uint64_t value = Constant::primitive(constant);
        _constants->emit_int64(value);

        // Point the load at the new entry. The displacement appears to
        // be relative to the end of the load instruction spanning the
        // remainder of the code and the constants emitted so far —
        // NOTE(review): must stay consistent with
        // pd_patch_DataSectionReference's offset scheme; confirm.
        NativeMovRegMem* load = nativeMovRegMem_at(pc);
        int disp = _constants_size + pc_offset - size - BytesPerInstWord;
        load->set_offset(-disp);
      }
      break;
    }
    case 'a': {
      // Object constant: emit a JNI handle into the constants section
      // and record an oop relocation so the GC can update it.
      int size = _constants->size();
      if (alignment > 0) {
        guarantee(alignment <= _constants->alignment(), "Alignment inside constants section is restricted by alignment of section begin");
        size = align_size_up(size, alignment);
      }
      address dest = _constants->start() + size;
      _constants->set_end(dest);   // advance past any alignment padding
      Handle obj = Constant::object(constant);
      jobject value = JNIHandles::make_local(obj());
      _constants->emit_address((address) value);

      // Same displacement computation as the non-inlined primitive case.
      NativeMovRegMem* load = nativeMovRegMem_at(pc);
      int disp = _constants_size + pc_offset - size - BytesPerInstWord;
      load->set_offset(-disp);

      // Register the handle with the oop recorder and relocate the slot.
      int oop_index = _oop_recorder->find_index(value);
      _constants->relocate(dest, oop_Relocation::spec(oop_index));
      break;
    }
    default:
      fatal(err_msg("unexpected Kind (%d) in DataPatch", typeChar));
      break;
  }
}