MachineBasicBlock::iterator XCoreInstrInfo::loadImmediate(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Reg,
    uint64_t Value) const {
  DebugLoc dl;
  if (MI != MBB.end() && !MI->isDebugValue())
    dl = MI->getDebugLoc();
  if (isImmMskBitp(Value)) {
    int N = Log2_32(Value) + 1;
    return BuildMI(MBB, MI, dl, get(XCore::MKMSK_rus), Reg)
        .addImm(N)
        .getInstr();
  }
  if (isImmU16(Value)) {
    int Opcode = isImmU6(Value) ? XCore::LDC_ru6 : XCore::LDC_lru6;
    return BuildMI(MBB, MI, dl, get(Opcode), Reg).addImm(Value).getInstr();
  }
  MachineConstantPool *ConstantPool = MBB.getParent()->getConstantPool();
  const Constant *C = ConstantInt::get(
      Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Value);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
  return BuildMI(MBB, MI, dl, get(XCore::LDWCP_lru6), Reg)
      .addConstantPoolIndex(Idx)
      .getInstr();
}
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding. IIOpNum and II are used for
/// assertions only.
void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
                              unsigned IIOpNum,
                              const TargetInstrDesc *II,
                              DenseMap<SDValue, unsigned> &VRBaseMap,
                              bool IsDebug, bool IsClone, bool IsCloned) {
  if (Op.isMachineOpcode()) {
    AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateImm(C->getSExtValue()));
  } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
    const ConstantFP *CFP = F->getConstantFPValue();
    MI->addOperand(MachineOperand::CreateFPImm(CFP));
  } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateReg(R->getReg(), false));
  } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateGA(TGA->getGlobal(), TGA->getOffset(),
                                            TGA->getTargetFlags()));
  } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateMBB(BBNode->getBasicBlock()));
  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateFI(FI->getIndex()));
  } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateJTI(JT->getIndex(),
                                             JT->getTargetFlags()));
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
    int Offset = CP->getOffset();
    unsigned Align = CP->getAlignment();
    const Type *Type = CP->getType();
    // MachineConstantPool wants an explicit alignment.
    if (Align == 0) {
      Align = TM->getTargetData()->getPrefTypeAlignment(Type);
      if (Align == 0) {
        // Alignment of vector types. FIXME!
        Align = TM->getTargetData()->getTypeAllocSize(Type);
      }
    }

    unsigned Idx;
    MachineConstantPool *MCP = MF->getConstantPool();
    if (CP->isMachineConstantPoolEntry())
      Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Align);
    else
      Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Align);
    MI->addOperand(MachineOperand::CreateCPI(Idx, Offset,
                                             CP->getTargetFlags()));
  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateES(ES->getSymbol(),
                                            ES->getTargetFlags()));
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
    MI->addOperand(MachineOperand::CreateBA(BA->getBlockAddress(),
                                            BA->getTargetFlags()));
  } else {
    assert(Op.getValueType() != MVT::Other &&
           Op.getValueType() != MVT::Glue &&
           "Chain and glue operands should occur at end of operand list!");
    AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  }
}
bool MIRParserImpl::initializeConstantPool(PerFunctionMIParsingState &PFS,
    MachineConstantPool &ConstantPool, const yaml::MachineFunction &YamlMF) {
  DenseMap<unsigned, unsigned> &ConstantPoolSlots = PFS.ConstantPoolSlots;
  const MachineFunction &MF = PFS.MF;
  const auto &M = *MF.getFunction().getParent();
  SMDiagnostic Error;
  for (const auto &YamlConstant : YamlMF.Constants) {
    if (YamlConstant.IsTargetSpecific)
      // FIXME: Support target-specific constant pools
      return error(YamlConstant.Value.SourceRange.Start,
                   "Can't parse target-specific constant pool entries yet");
    const Constant *Value = dyn_cast_or_null<Constant>(
        parseConstantValue(YamlConstant.Value.Value, Error, M));
    if (!Value)
      return error(Error, YamlConstant.Value.SourceRange);
    unsigned Alignment =
        YamlConstant.Alignment
            ? YamlConstant.Alignment
            : M.getDataLayout().getPrefTypeAlignment(Value->getType());
    unsigned Index = ConstantPool.getConstantPoolIndex(Value, Alignment);
    if (!ConstantPoolSlots.insert(std::make_pair(YamlConstant.ID.Value, Index))
             .second)
      return error(YamlConstant.ID.SourceRange.Start,
                   Twine("redefinition of constant pool item '%const.") +
                       Twine(YamlConstant.ID.Value) + "'");
  }
  return false;
}
void JITEmitter::startFunction(MachineFunction &F) {
  uintptr_t ActualSize = 0;
  if (MemMgr->NeedsExactSize()) {
    const TargetInstrInfo* TII = F.getTarget().getInstrInfo();
    MachineJumpTableInfo *MJTI = F.getJumpTableInfo();
    MachineConstantPool *MCP = F.getConstantPool();

    // Ensure the constant pool/jump table info is at least 4-byte aligned.
    ActualSize = RoundUpToAlign(ActualSize, 16);

    // Add the alignment of the constant pool
    ActualSize = RoundUpToAlign(ActualSize,
                                1 << MCP->getConstantPoolAlignment());

    // Add the constant pool size
    ActualSize += GetConstantPoolSizeInBytes(MCP);

    // Add the alignment of the jump table info
    ActualSize = RoundUpToAlign(ActualSize, MJTI->getAlignment());

    // Add the jump table size
    ActualSize += GetJumpTableSizeInBytes(MJTI);

    // Add the alignment for the function
    ActualSize = RoundUpToAlign(ActualSize,
                                std::max(F.getFunction()->getAlignment(), 8U));

    // Add the function size
    ActualSize += TII->GetFunctionSizeInBytes(F);
  }

  BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
                                                         ActualSize);
  BufferEnd = BufferBegin + ActualSize;

  // Ensure the constant pool/jump table info is at least 4-byte aligned.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  initJumpTableInfo(F.getJumpTableInfo());

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));

  TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);

  MBBLocations.clear();
}
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .addImm(Pred)
      .addReg(PredReg)
      .setMIFlags(MIFlags);
}
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void Thumb2RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator &MBBI,
                                           DebugLoc dl, unsigned DestReg,
                                           unsigned SubIdx, int Val,
                                           ARMCC::CondCodes Pred,
                                           unsigned PredReg) const {
  MachineFunction &MF = *MBB.getParent();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  Constant *C = ConstantInt::get(
      Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm((int64_t)ARMCC::AL)
      .addReg(0);
}
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void Thumb1RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator &MBBI,
                                           DebugLoc dl, unsigned DestReg,
                                           unsigned SubIdx, int Val,
                                           ARMCC::CondCodes Pred,
                                           unsigned PredReg,
                                           unsigned MIFlags) const {
  assert((isARMLowRegister(DestReg) || isVirtualRegister(DestReg)) &&
         "Thumb1 does not have ldr to high register");

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C = ConstantInt::get(
      Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(Pred)
      .addReg(PredReg)
      .setMIFlags(MIFlags);
}
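// The XCore and ARM helpers above all follow the same idiom for immediates
// that cannot be encoded directly: wrap the value in a ConstantInt, ask the
// function's MachineConstantPool for an index (creating or reusing an entry),
// and emit a target load that references that index. What follows is only a
// minimal, target-neutral sketch of that idiom, not code from any backend;
// LoadCPDesc and loadImmViaConstantPool are hypothetical placeholders, and
// getFunction() is used as a pointer as in the LLVM revisions shown above.
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/MC/MCInstrDesc.h"

static void loadImmViaConstantPool(llvm::MachineBasicBlock &MBB,
                                   llvm::MachineBasicBlock::iterator MBBI,
                                   const llvm::MCInstrDesc &LoadCPDesc,
                                   unsigned DestReg, int64_t Val,
                                   const llvm::DebugLoc &DL) {
  using namespace llvm;
  MachineFunction &MF = *MBB.getParent();
  MachineConstantPool *MCP = MF.getConstantPool();

  // Wrap the immediate in an IR constant of the target's word type.
  LLVMContext &Ctx = MF.getFunction()->getContext();
  const Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), Val);

  // Create, or reuse, a 4-byte-aligned constant pool entry for it.
  unsigned Idx = MCP->getConstantPoolIndex(C, 4);

  // Emit the (hypothetical) load that reads the entry back into DestReg.
  BuildMI(MBB, MBBI, DL, LoadCPDesc, DestReg).addConstantPoolIndex(Idx);
}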
void MIRPrinter::convert(yaml::MachineFunction &MF,
                         const MachineConstantPool &ConstantPool) {
  unsigned ID = 0;
  for (const MachineConstantPoolEntry &Constant :
       ConstantPool.getConstants()) {
    // TODO: Serialize target specific constant pool entries.
    if (Constant.isMachineConstantPoolEntry())
      llvm_unreachable("Can't print target specific constant pool entries yet");

    yaml::MachineConstantPoolValue YamlConstant;
    std::string Str;
    raw_string_ostream StrOS(Str);
    Constant.Val.ConstVal->printAsOperand(StrOS);
    YamlConstant.ID = ID++;
    YamlConstant.Value = StrOS.str();
    YamlConstant.Alignment = Constant.getAlignment();
    MF.Constants.push_back(YamlConstant);
  }
}
bool MIRParserImpl::initializeConstantPool(
    MachineConstantPool &ConstantPool, const yaml::MachineFunction &YamlMF,
    const MachineFunction &MF,
    DenseMap<unsigned, unsigned> &ConstantPoolSlots) {
  const auto &M = *MF.getFunction()->getParent();
  SMDiagnostic Error;
  for (const auto &YamlConstant : YamlMF.Constants) {
    const Constant *Value = dyn_cast_or_null<Constant>(
        parseConstantValue(YamlConstant.Value.Value, Error, M));
    if (!Value)
      return error(Error, YamlConstant.Value.SourceRange);
    unsigned Alignment =
        YamlConstant.Alignment
            ? YamlConstant.Alignment
            : M.getDataLayout().getPrefTypeAlignment(Value->getType());
    // TODO: Report an error when the same constant pool value ID is redefined.
    ConstantPoolSlots.insert(std::make_pair(
        YamlConstant.ID, ConstantPool.getConstantPoolIndex(Value, Alignment)));
  }
  return false;
}
void MIRPrinter::convert(yaml::MachineFunction &MF,
                         const MachineConstantPool &ConstantPool) {
  unsigned ID = 0;
  for (const MachineConstantPoolEntry &Constant :
       ConstantPool.getConstants()) {
    std::string Str;
    raw_string_ostream StrOS(Str);
    if (Constant.isMachineConstantPoolEntry()) {
      Constant.Val.MachineCPVal->print(StrOS);
    } else {
      Constant.Val.ConstVal->printAsOperand(StrOS);
    }

    yaml::MachineConstantPoolValue YamlConstant;
    YamlConstant.ID = ID++;
    YamlConstant.Value = StrOS.str();
    YamlConstant.Alignment = Constant.getAlignment();
    YamlConstant.IsTargetSpecific = Constant.isMachineConstantPoolEntry();
    MF.Constants.push_back(YamlConstant);
  }
}
bool MIRParserImpl::initializeConstantPool(
    MachineConstantPool &ConstantPool, const yaml::MachineFunction &YamlMF,
    const MachineFunction &MF,
    DenseMap<unsigned, unsigned> &ConstantPoolSlots) {
  const auto &M = *MF.getFunction()->getParent();
  SMDiagnostic Error;
  for (const auto &YamlConstant : YamlMF.Constants) {
    const Constant *Value = dyn_cast_or_null<Constant>(
        parseConstantValue(YamlConstant.Value.Value, Error, M));
    if (!Value)
      return error(Error, YamlConstant.Value.SourceRange);
    unsigned Alignment =
        YamlConstant.Alignment
            ? YamlConstant.Alignment
            : M.getDataLayout().getPrefTypeAlignment(Value->getType());
    unsigned Index = ConstantPool.getConstantPoolIndex(Value, Alignment);
    if (!ConstantPoolSlots.insert(std::make_pair(YamlConstant.ID.Value, Index))
             .second)
      return error(YamlConstant.ID.SourceRange.Start,
                   Twine("redefinition of constant pool item '%const.") +
                       Twine(YamlConstant.ID.Value) + "'");
  }
  return false;
}
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding.
void InstrEmitter::AddOperand(MachineInstrBuilder &MIB, SDValue Op,
                              unsigned IIOpNum, const MCInstrDesc *II,
                              DenseMap<SDValue, unsigned> &VRBaseMap,
                              bool IsDebug, bool IsClone, bool IsCloned) {
  if (Op.isMachineOpcode()) {
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    MIB.addImm(C->getSExtValue());
  } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
    MIB.addFPImm(F->getConstantFPValue());
  } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
    unsigned VReg = R->getReg();
    MVT OpVT = Op.getSimpleValueType();
    const TargetRegisterClass *OpRC =
        TLI->isTypeLegal(OpVT) ? TLI->getRegClassFor(OpVT) : nullptr;
    const TargetRegisterClass *IIRC =
        II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI, *MF))
           : nullptr;

    if (OpRC && IIRC && OpRC != IIRC &&
        TargetRegisterInfo::isVirtualRegister(VReg)) {
      unsigned NewVReg = MRI->createVirtualRegister(IIRC);
      BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
              TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
      VReg = NewVReg;
    }
    // Turn additional physreg operands into implicit uses on non-variadic
    // instructions. This is used by call and return instructions passing
    // arguments in registers.
    bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
    MIB.addReg(VReg, getImplRegState(Imp));
  } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
    MIB.addRegMask(RM->getRegMask());
  } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
    MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
                         TGA->getTargetFlags());
  } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
    MIB.addMBB(BBNode->getBasicBlock());
  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
    MIB.addFrameIndex(FI->getIndex());
  } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
    MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
    int Offset = CP->getOffset();
    unsigned Align = CP->getAlignment();
    Type *Type = CP->getType();
    // MachineConstantPool wants an explicit alignment.
    if (Align == 0) {
      Align = MF->getDataLayout().getPrefTypeAlignment(Type);
      if (Align == 0) {
        // Alignment of vector types. FIXME!
        Align = MF->getDataLayout().getTypeAllocSize(Type);
      }
    }

    unsigned Idx;
    MachineConstantPool *MCP = MF->getConstantPool();
    if (CP->isMachineConstantPoolEntry())
      Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Align);
    else
      Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Align);
    MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
    MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
  } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
    MIB.addSym(SymNode->getMCSymbol());
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
    MIB.addBlockAddress(BA->getBlockAddress(), BA->getOffset(),
                        BA->getTargetFlags());
  } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
    MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
  } else {
    assert(Op.getValueType() != MVT::Other &&
           Op.getValueType() != MVT::Glue &&
           "Chain and glue operands should occur at end of operand list!");
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  }
}
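// Note on the getConstantPoolIndex calls above: MachineConstantPool
// de-duplicates entries, so requesting the same (non-target-specific)
// constant again with a compatible alignment returns the index of the
// existing slot rather than appending a new one, which is why the emitters
// can call it unconditionally. A small, hedged illustration of that
// behaviour, assuming a MachineFunction &MF as in the register-info helpers
// earlier (where getFunction() returns a pointer):
{
  MachineConstantPool *MCP = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), 42);
  unsigned IdxA = MCP->getConstantPoolIndex(C, 4);
  unsigned IdxB = MCP->getConstantPoolIndex(C, 4);
  assert(IdxA == IdxB && "identical constants share a single pool entry");
  (void)IdxA;
  (void)IdxB;
}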