bool InstWidget::generate(const codeGen &, const RelocBlock *, CodeBuffer &buffer) {
   // We should work baseTramp code generation into the CodeBuffer
   // system, but that's for future work...
   InstWidgetPatch *patch = new InstWidgetPatch(point_->tramp());
   buffer.addPatch(patch, tracker());
   return true;
}
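// Note: addPatch above defers the actual emission -- the patch object is
// applied later, once the CodeBuffer knows final addresses. A minimal sketch
// of such a deferred-patch scheme (names and interfaces hypothetical, not
// Dyninst's actual API):
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical deferred-work interface: apply() runs once layout is final.
struct SketchPatch {
   virtual ~SketchPatch() {}
   virtual bool apply(std::uintptr_t finalAddr) = 0;
};

// Hypothetical stand-in for the CodeBuffer side of the protocol.
class SketchBuffer {
   std::vector<SketchPatch*> patches_;
 public:
   void addPatch(SketchPatch* p) { patches_.push_back(p); }
   bool finalize(std::uintptr_t base) {   // called after addresses stabilize
      for (std::size_t i = 0; i < patches_.size(); ++i)
         if (!patches_[i]->apply(base)) return false;
      return true;
   }
};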
// Inform CodeBuffer that incoming code and relocation will be for the
// constants section.
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  address end = cs->end();
  // Round the current end of the constants section up to the requested
  // power-of-two alignment.
  int pad = -(intptr_t)end & (required_align-1);
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL)  return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}
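// The pad computation above uses a two's-complement identity: for a
// power-of-two alignment A, (-p) & (A-1) is the byte count needed to round
// p up to the next multiple of A. A standalone check of just that arithmetic
// (plain C++, no HotSpot types assumed):
#include <cassert>
#include <cstdint>

static int align_pad(std::uintptr_t p, int align) {  // align: power of two
  return static_cast<int>(-static_cast<std::intptr_t>(p) & (align - 1));
}

static void align_pad_check() {
  assert(align_pad(0x1000, 8) == 0);  // already aligned: no padding
  assert(align_pad(0x1001, 8) == 7);  // one byte past: pad up to 0x1008
  assert(align_pad(0x1007, 8) == 1);
}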
/*---------------------------------------------------Routines::startInterpret-+
|                                                                             |
+----------------------------------------------------------------------------*/
void Routines::startInterpret(
   CodeBuffer & cb,
   int top,
   int base,
   bool isInteractiveTrace
) {
   save();

   // set the new values
   m_isProcedure = false;
   m_ct = CT_INTERPRET;
   m_posCur = cb.getRunPos();
   m_posEnd = cb.getEndPos();
   m_top = top;
   m_base = base;
   if (isInteractiveTrace) {
      m_signals |= SIG_DISABLED;    // trap everything
   }
}
bool CFWidget::generateConditionalBranch(CodeBuffer &buffer,
                                         TargetInt *to,
                                         const RelocBlock *trace,
                                         Instruction::Ptr insn) {
   assert(to);
   CFPatch *newPatch = new CFPatch(CFPatch::JCC, insn, to, trace->func(), addr_);
   buffer.addPatch(newPatch, tracker(trace));
   return true;
}
void Label::print_instructions(MacroAssembler* masm) const {
  CodeBuffer* cb = masm->code();
  for (int i = 0; i < _patch_index; ++i) {
    int branch_loc;
    if (i >= PatchCacheSize) {
      branch_loc = _patch_overflow->at(i - PatchCacheSize);
    } else {
      branch_loc = _patches[i];
    }
    int branch_pos  = CodeBuffer::locator_pos(branch_loc);
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch  = cb->locator_address(branch_loc);
    tty->print_cr("unbound label");
    tty->print("@ %d|%d ", branch_pos, branch_sect);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      tty->print_cr(PTR_FORMAT, *(address*)branch);
      continue;
    }
    masm->pd_print_patched_instruction(branch);
    tty->cr();
  }
}
void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target  = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch  = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }
#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT
    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target);
  }
}
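// The locator values cached above pack a section index and an intra-section
// position into a single int, which locator_pos/locator_sect unpack. A sketch
// of such an encoding; the exact bit split is an assumption for illustration,
// not necessarily HotSpot's:
#include <cassert>

enum { sketch_sect_bits = 2, sketch_sect_mask = (1 << sketch_sect_bits) - 1 };

static int sketch_locator(int pos, int sect) { return (pos << sketch_sect_bits) | sect; }
static int sketch_locator_pos(int loc)       { return loc >> sketch_sect_bits; }
static int sketch_locator_sect(int loc)      { return loc & sketch_sect_mask; }

static void locator_roundtrip_check() {
  int loc = sketch_locator(/*pos*/ 0x40, /*sect*/ 1);
  assert(sketch_locator_pos(loc)  == 0x40);
  assert(sketch_locator_sect(loc) == 1);
}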
void SVPCodeGen::getCurrentSixSeconds( CodeBuffer& pcb )
{
      // Compute appropriate X2A offset
   long X2count;
   long X1count = currentZTime.GPSzcount();

      /*
         Trivial, but special, case for beginning of week. This can't be
         simplified into the general case due to the beginning of week chip
         delays that are equivalent to the PRNID. These chips are stored at
         the beginning of the X2 chips sequence. This is the only time the
         X2count should be "negative". The offset is handled within the
         X2Sequence::operator[] method.
      */
   if (X1count==0) X2count = -PRNID;

      /*
         At the beginning of an X1 epoch, the previous X2 epoch will still
         be unfinished due to delays. The accumulated delay is based on the
         PRNID and the delay per X1 epoch. Subtract this delay from the max
         length of the X2 sequence to determine the current chip within the
         X2 sequence.
      */
   else
   {
      long cumulativeX2Delay = X1count * X2A_EPOCH_DELAY + PRNID;
      X2count = MAX_X2_TEST - cumulativeX2Delay;
      if (X2count<0) X2count += MAX_X2_TEST;
   }

      /*
         If this is the final six-second interval of the week, signal the
         X2 bit sequence generator to use the "end of week" sequence.
         Otherwise, use the "regular" sequence.
      */
   if (X1count==LAST_6SEC_ZCOUNT_OF_WEEK) X2Seq.setEOWX2Epoch(true);
   else X2Seq.setEOWX2Epoch(false);

      // Update the time and code state in the CodeBuffer object
   pcb.updateBufferStatus( currentZTime, P_CODE );

      // Starting at the beginning of the interval, step through
      // the six second period loading the code buffer as we go.
   for ( long i=0; i<NUM_6SEC_WORDS; ++i )
   {
      pcb[i] = X1Seq[i] ^ X2Seq[X2count];
      X2count += MAX_BIT;
      if (X2count>=MAX_X2_TEST) X2count -= MAX_X2_TEST;
   }
}
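// The loop above advances through the X2 sequence modulo its length without
// a divide: each step adds the word width and wraps with one conditional
// subtraction, which is valid because the step is smaller than the modulus.
// The same pattern in isolation (constants illustrative, not the GPS values):
static long mod_step(long count, long step, long modulus) {
   // Precondition: 0 <= count < modulus and 0 <= step < modulus.
   count += step;
   if (count >= modulus) count -= modulus;
   return count;
}
// e.g. mod_step(10, 4, 12) == 2: wrapped once past the end of the sequence.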
bool CFWidget::generateCall(CodeBuffer &buffer,
                            TargetInt *to,
                            const RelocBlock *trace,
                            Instruction::Ptr insn) {
   if (!to) {
      // This can mean an inter-module branch...
      return true;
   }
   CFPatch *newPatch = new CFPatch(CFPatch::Call, insn, to, trace->func(), addr_);
   buffer.addPatch(newPatch, tracker(trace));
   return true;
}
bool CFWidget::generateIndirectCall(CodeBuffer &buffer,
                                    Register /*reg*/,
                                    Instruction insn,
                                    const RelocBlock *trace,
                                    Address /*origAddr*/) {
   NS_power::instruction ugly_insn(insn.ptr());
   IFORM_LK_SET(ugly_insn, 1);
   codeGen gen(4);
   insnCodeGen::generate(gen, ugly_insn);
   // TODO: don't ignore the register parameter
   buffer.addPIC(gen, tracker(trace));
   return true;
}
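// On Power, the low-order LK bit of a branch instruction selects whether the
// branch records a return address in the link register (LK=1: call-like,
// e.g. bctrl) or not (LK=0: plain jump, e.g. bctr). IFORM_LK_SET above flips
// exactly that bit. A minimal illustration on a raw 32-bit instruction word
// (encodings per the Power ISA; the helper name here is hypothetical):
#include <cassert>
#include <cstdint>

static std::uint32_t set_lk(std::uint32_t insn, bool link) {
   return link ? (insn | 1u) : (insn & ~1u);
}

static void lk_bit_check() {
   const std::uint32_t bctr  = 0x4E800420;  // branch to CTR, LK=0
   const std::uint32_t bctrl = 0x4E800421;  // branch to CTR and link, LK=1
   assert(set_lk(bctr, true)   == bctrl);
   assert(set_lk(bctrl, false) == bctr);
}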
/*-----------------------------------------------------Routines::startRoutine-+
|                                                                             |
+----------------------------------------------------------------------------*/
void Routines::startRoutine(
   CallType ct,
   CodeBuffer & cb,
   int top,
   RexxString ** stack,
   Arguments::PresenceBits presenceBits
) {
   save();
   m_base = m_top = top;  // don't change the stack top.
   m_args.~Arguments();
   new(&m_args) Arguments(stack, presenceBits, m_base);
   if (ct == CT_SUBROUTINE) {
      --m_base;
   }
   m_isProcedure = false;
   m_ct = ct;
   m_posCur = cb.getRunPos();
}
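// The explicit destructor call followed by placement new above re-initializes
// m_args in place with new constructor arguments, rather than assigning to
// it. The idiom in isolation (toy type, not the real Arguments class):
#include <new>

struct Counter {
   int n;
   explicit Counter(int n_) : n(n_) {}
};

static void reinit_in_place() {
   Counter c(1);
   c.~Counter();          // end the old object's lifetime...
   new (&c) Counter(42);  // ...and construct a fresh one in the same storage
}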
bool CFWidget::generateIndirect(CodeBuffer &buffer,
                                Register,
                                const RelocBlock *trace,
                                Instruction insn) {
   // Copying an indirect jump; unlike x86 we don't do
   // call -> indirect conversion yet.
   // ... though that would be really freaking easy.
   NS_power::instruction ugly_insn(insn.ptr());
   IFORM_LK_SET(ugly_insn, 0);
   codeGen gen(4);
   insnCodeGen::generate(gen, ugly_insn);
   // TODO: don't ignore the register parameter
   buffer.addPIC(gen, tracker(trace));
   return true;
}
// this updates the unpacked info for a new code buffer
address BoundRelocation::update_addrs(address old_addr,
                                      const CodeBuffer& new_cb,
                                      const CodeBuffer& old_cb) {
  // Point to the relocation info
  Relocation *r = reloc();
  // Transform the addr for the new code buffer
  address new_addr = old_cb.transform_address(new_cb, old_addr);
  // Set the transformed address
  set_addr(new_addr);
  // Update any internal addresses
  r->update_addrs(new_cb, old_cb);
  // Apply a delta to internal pointers
  r->fix_relocation_at_move(new_addr - old_addr);
  // Return the new address
  return new_addr;
}
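// transform_address presumably maps an address out of the old buffer into
// the corresponding spot in the new one by preserving its offset within the
// (possibly moved) section. A sketch of that mapping under the assumption
// that each section moved as a unit (helper name hypothetical):
#include <cstddef>
#include <cstdint>

static std::uint8_t* transform_address_sketch(std::uint8_t* old_start,
                                              std::uint8_t* new_start,
                                              std::uint8_t* old_addr) {
  std::ptrdiff_t offset = old_addr - old_start;  // position within old section
  return new_start + offset;                     // same position, new section
}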
bool RelDataWidget::generate(const codeGen &, const RelocBlock *t, CodeBuffer &buffer) {
   // We want to take the original instruction and emulate
   // it at whatever our new address is.
   // Fortunately, we can reuse old code to handle the
   // translation.

   // Find the original target of the instruction
   relocation_cerr << "  Generating a PC-relative data access ("
                   << insn_.format() << ","
                   << std::hex << addr_ << "," << target_ << std::dec
                   << ")" << endl;

   RelDataPatch *newPatch = new RelDataPatch(insn_, target_, addr_);
   newPatch->setBlock(t->block());
   newPatch->setFunc(t->func());
   buffer.addPatch(newPatch, tracker(t));
   return true;
}
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
#ifdef COMPILER2
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  if (mark == NULL) {
    // Get the mark within main instrs section which is set to the address of the call.
    mark = cbuf.insts_mark();
  }
  assert(mark != NULL, "mark must not be NULL");

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address stub = __ start_a_stub(Compile::MAX_stubs_size);
  if (stub == NULL) {
    return NULL;  // CodeBuffer::expand failed.
  }

  __ relocate(static_stub_Relocation::spec(mark));

  AddressLiteral meta = __ allocate_metadata_address(NULL);
  bool success = __ load_const_from_toc(as_Register(Matcher::inline_cache_reg_encode()), meta);

  __ set_inst_mark();
  AddressLiteral a((address)-1);
  success = success && __ load_const_from_toc(Z_R1, a);
  if (!success) {
    return NULL;  // CodeCache is full.
  }

  __ z_br(Z_R1);
  __ end_a_stub();  // Update current stubs pointer and restore insts_end.
  return stub;
#else
  ShouldNotReachHere();
  return NULL;
#endif
}
// this updates the unpacked info for a new code buffer
void breakpoint_Relocation::update_addrs(const CodeBuffer& new_cb, const CodeBuffer& old_cb) {
  _target = old_cb.transform_address(new_cb, _target);
}
// this updates the unpacked info for a new code buffer
void virtual_call_Relocation::update_addrs(const CodeBuffer& new_cb, const CodeBuffer& old_cb) {
  _first_oop = old_cb.transform_address(new_cb, _first_oop);
  _oop_limit = old_cb.transform_address(new_cb, _oop_limit);
}
// this updates the unpacked info for a new code buffer
void static_stub_Relocation::update_addrs(const CodeBuffer& new_cb, const CodeBuffer& old_cb) {
  _static_call = old_cb.transform_address(new_cb, _static_call);
}
// this updates the unpacked info for a new code buffer
void internal_word_Relocation::update_addrs(const CodeBuffer& new_cb, const CodeBuffer& old_cb) {
  _target = old_cb.transform_address(new_cb, _target);
}
bool CFWidget::generate(const codeGen &templ, const RelocBlock *trace, CodeBuffer &buffer) {
   // We need to create jumps to wherever our successors are.
   // We can assume the addresses returned by our Targets
   // are valid, since we'll fixpoint until those stabilize.
   //
   // There are the following cases:
   //
   // No explicit control flow/unconditional direct branch:
   //   1) One target
   //   2) Generate a branch unless it's unnecessary
   // Conditional branch:
   //   1) Two targets
   //   2) Use stored instruction to generate correct condition
   //   3) Generate a fallthrough "branch" if necessary
   // Call:
   //   1) Two targets (call and natural successor)
   //   2) As above, except make sure call bit is flipped on
   // Indirect branch:
   //   1) Just go for it... we have no control, really

   relocation_cerr << "CFWidget generation for " << trace->id() << endl;

   if (destMap_.empty() && !isIndirect_) {
      // No successors at all? Well, it happens if we hit a halt...
      relocation_cerr << "CFWidget w/ no successors, ret true" << endl;
      return true;
   }

   typedef enum { Illegal, Single, Taken_FT, Indirect } Options;
   Options opt = Illegal;

   if (isIndirect_) {
      opt = Indirect;
      relocation_cerr << "  generating CFWidget as indirect branch" << endl;
   }
   else if (isConditional_ || isCall_) {
      opt = Taken_FT;
      relocation_cerr << "  generating CFWidget as call or conditional branch" << endl;
   }
   else {
      opt = Single;
      relocation_cerr << "  generating CFWidget as direct branch" << endl;
   }

   switch (opt) {
      case Single: {
         assert(!isIndirect_);
         assert(!isConditional_);
         assert(!isCall_);

         // Check for a taken destination first.
         bool fallthrough = false;
         DestinationMap::iterator iter = destMap_.find(Taken);
         if (iter == destMap_.end()) {
            iter = destMap_.find(Fallthrough);
            fallthrough = true;
         }
         if (iter == destMap_.end()) {
            cerr << "Error in CFWidget from trace " << trace->id()
                 << ", could not find target for single control transfer" << endl;
            cerr << "\t DestMap dump:" << endl;
            for (DestinationMap::iterator d = destMap_.begin();
                 d != destMap_.end(); ++d) {
               cerr << "\t\t " << d->first << " : " << d->second->format() << endl;
            }
         }
         assert(iter != destMap_.end());

         TargetInt *target = iter->second;
         assert(target);

         if (target->necessary()) {
            if (!generateBranch(buffer, target, insn_, trace, fallthrough)) {
               return false;
            }
         }
         else {
            relocation_cerr << "  target reported unnecessary" << endl;
         }
         break;
      }
      case Taken_FT: {
         // This can be either a call (with an implicit fallthrough as shown by
         // the FUNLINK) or a conditional branch.
         if (isCall_) {
            // Well, that kinda explains things
            assert(!isConditional_);
            relocation_cerr << "  ... generating call" << endl;

            if (!generateCall(buffer, destMap_[Taken], trace, insn_))
               return false;
         }
         else {
            assert(!isCall_);
            relocation_cerr << "  ... generating conditional branch" << endl;

            if (!generateConditionalBranch(buffer, destMap_[Taken], trace, insn_))
               return false;
         }

         // Not necessary by design - fallthroughs are always to the next generated.
         // We can have calls that don't return and thus don't have funlink edges.
         if (destMap_.find(Fallthrough) != destMap_.end()) {
            TargetInt *ft = destMap_[Fallthrough];
            if (ft->necessary()) {
               if (!generateBranch(buffer, ft, insn_, trace, true)) {
                  return false;
               }
            }
         }
         break;
      }
      case Indirect: {
         Register reg = Null_Register; /* = originalRegister... */
         // Originally for use in helping with jump tables, I'm taking
         // this for the memory emulation effort. Huzzah!
         if (!generateAddressTranslator(buffer, templ, reg, trace))
            return false;

         if (isCall_) {
            if (!generateIndirectCall(buffer, reg, insn_, trace, addr_))
               return false;
            // We may be putting another block in between this
            // one and its fallthrough due to edge instrumentation.
            // So if there's the possibility for a return, put in
            // a fallthrough branch.
            if (destMap_.find(Fallthrough) != destMap_.end()) {
               if (!generateBranch(buffer,
                                   destMap_[Fallthrough],
                                   Instruction::Ptr(),
                                   trace,
                                   true))
                  return false;
            }
         }
         else {
            if (!generateIndirect(buffer, reg, trace, insn_))
               return false;
         }
         break;
      }
      default:
         assert(0);
   }

   if (gap_) {
      // We don't know what the callee does to the return addr,
      // so we'll catch it at runtime.
      buffer.addPatch(new PaddingPatch(gap_, true, false, trace->block()),
                      padTracker(addr_, gap_, trace));
   }
   return true;
}
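// The "fixpoint until those stabilize" comment at the top of this function
// describes the classic branch-relaxation loop: lay code out, re-resolve
// targets, repeat until nothing moves. A toy version of that loop (data
// model entirely hypothetical; real layouts relax whole instruction streams):
#include <cstddef>
#include <vector>

// Toy model: each branch is 2 bytes if its displacement fits in a signed
// byte, else 5. Offsets depend on sizes and vice versa, so start optimistic
// (all sizes 2) and only ever grow, which guarantees termination.
struct ToyBranch {
   std::size_t target_index;  // index of the branch it jumps to
   int size;                  // current size estimate (start at 2)
   int offset;                // current offset estimate
};

static void relax(std::vector<ToyBranch>& code) {
   bool changed = true;
   while (changed) {
      changed = false;
      int off = 0;
      for (std::size_t i = 0; i < code.size(); ++i) {  // recompute layout
         code[i].offset = off;
         off += code[i].size;
      }
      for (std::size_t i = 0; i < code.size(); ++i) {  // widen what no longer fits
         int disp = code[code[i].target_index].offset - code[i].offset;
         if ((disp < -128 || disp >= 128) && code[i].size < 5) {
            code[i].size = 5;
            changed = true;
         }
      }
   }
}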
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
#ifdef COMPILER2
  if (mark == NULL) {
    // Get the mark within main instrs section which is set to the address of the call.
    mark = cbuf.insts_mark();
  }

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  // Start the stub.
  address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
  if (stub == NULL) {
    return NULL;  // CodeCache is full
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  __ relocate(static_stub_Relocation::spec(mark));
  const int stub_start_offset = __ offset();

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  bool success = __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()),
                                               ic, reg_scratch, /*fixed_size*/ true);
  if (!success) {
    return NULL;  // CodeCache is full
  }

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    success = __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    if (!success) {
      return NULL;  // CodeCache is full
    }
    __ mtctr(reg_scratch);
    __ bctr();
  }

  // FIXME: Assert that the stub can be identified and patched.

  // Java_to_interp_stub_size should be good.
  assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
         "should be good size");
  assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
         "must not confuse java_to_interp with trampoline stubs");

  // End the stub.
  __ end_a_stub();

  return stub;
#else
  ShouldNotReachHere();
  return NULL;
#endif
}