// Return true if Compare is a comparison against zero.
static bool isCompareZero(MachineInstr &Compare) {
  switch (Compare.getOpcode()) {
  case SystemZ::LTEBRCompare:
  case SystemZ::LTDBRCompare:
  case SystemZ::LTXBRCompare:
    return true;

  default:
    if (isLoadAndTestAsCmp(Compare))
      return true;
    return Compare.getNumExplicitOperands() == 2 &&
           Compare.getOperand(1).isImm() &&
           Compare.getOperand(1).getImm() == 0;
  }
}
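// Illustration (not part of the original source): a compare-halfword-immediate
// such as CHI %r2, 0 has exactly two explicit operands, a register and an
// immediate, and the immediate is zero, so the fallback check above treats it
// as a comparison against zero.  The LT*BRCompare opcodes are the
// floating-point load-and-test forms, which compare against zero by
// definition.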
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    unsigned Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (TRI.isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
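// Usage sketch (hypothetical, not taken from the source): a GlobalISel
// instruction selector typically builds the target instruction, erases the
// generic one, and propagates the boolean result of
// constrainSelectedInstRegOperands() as the outcome of selection.  The helper
// name selectBinOp, the opcode NewOpc, and the one-def/two-use operand shape
// are assumptions made for illustration only.
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

static bool selectBinOp(llvm::MachineInstr &I, unsigned NewOpc,
                        const llvm::TargetInstrInfo &TII,
                        const llvm::TargetRegisterInfo &TRI,
                        const llvm::RegisterBankInfo &RBI) {
  using namespace llvm;
  // Rebuild I as the already-selected target opcode NewOpc, reusing its
  // registers: operand 0 is assumed to be the def, operands 1 and 2 the uses.
  MachineInstr &NewI =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpc))
           .addDef(I.getOperand(0).getReg())
           .addUse(I.getOperand(1).getReg())
           .addUse(I.getOperand(2).getReg());
  I.eraseFromParent();
  // Let the helper above assign register classes (inserting COPYs if needed).
  return constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);
}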
// Fixup callee-save register save/restore instructions to take into account
// combined SP bump by adding the local stack size to the stack offsets.
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              unsigned LocalStackSize) {
  unsigned Opc = MI.getOpcode();
  (void)Opc;
  assert((Opc == AArch64::STPXi || Opc == AArch64::STPDi ||
          Opc == AArch64::STRXui || Opc == AArch64::STRDui ||
          Opc == AArch64::LDPXi || Opc == AArch64::LDPDi ||
          Opc == AArch64::LDRXui || Opc == AArch64::LDRDui) &&
         "Unexpected callee-save save/restore opcode!");

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
  assert(LocalStackSize % 8 == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / 8);
}
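// Worked example (assumed numbers, not from the source): with a combined SP
// bump and LocalStackSize = 64, a callee-save STPXi of x19/x20 at scaled
// offset 2 (byte offset 16, since all of the listed opcodes scale by 8) is
// rewritten to scaled offset 2 + 64/8 = 10, i.e. byte offset 80 from the
// adjusted SP.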
// Return the LLT to print for register operand OpIdx of MI, or an empty LLT
// if no type should be printed: either the operand is not a register, or its
// generic type index has already been printed (as recorded in PrintedTypes).
static LLT getTypeToPrint(const MachineInstr &MI, unsigned OpIdx,
                          SmallBitVector &PrintedTypes,
                          const MachineRegisterInfo &MRI) {
  const MachineOperand &Op = MI.getOperand(OpIdx);
  if (!Op.isReg())
    return LLT{};

  if (MI.isVariadic() || OpIdx >= MI.getNumExplicitOperands())
    return MRI.getType(Op.getReg());

  auto &OpInfo = MI.getDesc().OpInfo[OpIdx];
  if (!OpInfo.isGenericType())
    return MRI.getType(Op.getReg());

  if (PrintedTypes[OpInfo.getGenericTypeIndex()])
    return LLT{};

  PrintedTypes.set(OpInfo.getGenericTypeIndex());
  return MRI.getType(Op.getReg());
}
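// Usage sketch (hypothetical, not taken from the source; assumed to live next
// to getTypeToPrint in MachineInstr.cpp, where the required headers are
// already included): one SmallBitVector per printed instruction records which
// generic type indices have been emitted, so a type shared by several operands
// is printed only once.
static void printOperandTypes(const MachineInstr &MI,
                              const MachineRegisterInfo &MRI,
                              raw_ostream &OS) {
  SmallBitVector PrintedTypes(8); // one bit per generic type index
  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
    LLT Ty = getTypeToPrint(MI, I, PrintedTypes, MRI);
    if (!Ty.isValid())
      continue; // nothing to print for this operand
    OS << '(';
    Ty.print(OS);
    OS << ") ";
  }
}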
/// Whether \p MI really requires the exec state computed during analysis.
///
/// Scalar instructions must occasionally be marked WQM for correct propagation
/// (e.g. thread masks leading up to branches), but when it comes to actual
/// execution, they don't care about EXEC.
bool SIWholeQuadMode::requiresCorrectState(const MachineInstr &MI) const {
  if (MI.isTerminator())
    return true;

  // Skip instructions that are not affected by EXEC
  if (TII->isScalarUnit(MI))
    return false;

  // Generic instructions such as COPY will either disappear by register
  // coalescing or be lowered to SALU or VALU instructions.
  if (MI.isTransient()) {
    if (MI.getNumExplicitOperands() >= 1) {
      const MachineOperand &Op = MI.getOperand(0);
      if (Op.isReg()) {
        if (TRI->isSGPRReg(*MRI, Op.getReg())) {
          // SGPR instructions are not affected by EXEC
          return false;
        }
      }
    }
  }

  return true;
}
bool InstructionSelector::constrainSelectedInstRegOperands(
    MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI,
    const RegisterBankInfo &RBI) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    // Physical registers don't need to be constrained.
    if (TRI.isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC = TII.getRegClass(I.getDesc(), OpI, &TRI, MF);
    assert(RC && "Selected inst should have regclass operand");

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // If the operand is a physreg, we only insert COPYs if the register class
    // doesn't contain the register.
    if (RBI.constrainGenericRegister(MO.getReg(), *RC, MRI))
      continue;

    DEBUG(dbgs() << "Constraining with COPYs isn't implemented yet");
    return false;
  }
  return true;
}
// The CC users in CCUsers are testing the result of a comparison of some
// value X against zero and we know that any CC value produced by MI
// would also reflect the value of X.  Try to adjust CCUsers so that
// they test the result of MI directly, returning true on success.
// Leave everything unchanged on failure.
bool SystemZElimCompare::
adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
                      SmallVectorImpl<MachineInstr *> &CCUsers) {
  int Opcode = MI->getOpcode();
  const MCInstrDesc &Desc = TII->get(Opcode);
  unsigned MIFlags = Desc.TSFlags;

  // See which compare-style condition codes are available.
  unsigned ReusableCCMask = SystemZII::getCompareZeroCCMask(MIFlags);

  // For unsigned comparisons with zero, only equality makes sense.
  unsigned CompareFlags = Compare->getDesc().TSFlags;
  if (CompareFlags & SystemZII::IsLogical)
    ReusableCCMask &= SystemZ::CCMASK_CMP_EQ;

  if (ReusableCCMask == 0)
    return false;

  unsigned CCValues = SystemZII::getCCValues(MIFlags);
  assert((ReusableCCMask & ~CCValues) == 0 && "Invalid CCValues");

  // Now check whether these flags are enough for all users.
  SmallVector<MachineOperand *, 4> AlterMasks;
  for (unsigned int I = 0, E = CCUsers.size(); I != E; ++I) {
    MachineInstr *MI = CCUsers[I];

    // Fail if this isn't a use of CC that we understand.
    unsigned Flags = MI->getDesc().TSFlags;
    unsigned FirstOpNum;
    if (Flags & SystemZII::CCMaskFirst)
      FirstOpNum = 0;
    else if (Flags & SystemZII::CCMaskLast)
      FirstOpNum = MI->getNumExplicitOperands() - 2;
    else
      return false;

    // Check whether the instruction predicate treats all CC values
    // outside of ReusableCCMask in the same way.  In that case it
    // doesn't matter what those CC values mean.
    unsigned CCValid = MI->getOperand(FirstOpNum).getImm();
    unsigned CCMask = MI->getOperand(FirstOpNum + 1).getImm();
    unsigned OutValid = ~ReusableCCMask & CCValid;
    unsigned OutMask = ~ReusableCCMask & CCMask;
    if (OutMask != 0 && OutMask != OutValid)
      return false;

    AlterMasks.push_back(&MI->getOperand(FirstOpNum));
    AlterMasks.push_back(&MI->getOperand(FirstOpNum + 1));
  }

  // All users are OK.  Adjust the masks for MI.
  for (unsigned I = 0, E = AlterMasks.size(); I != E; I += 2) {
    AlterMasks[I]->setImm(CCValues);
    unsigned CCMask = AlterMasks[I + 1]->getImm();
    if (CCMask & ~ReusableCCMask)
      AlterMasks[I + 1]->setImm((CCMask & ReusableCCMask) |
                                (CCValues & ~ReusableCCMask));
  }

  // CC is now live after MI.
  int CCDef = MI->findRegisterDefOperandIdx(SystemZ::CC, false, true, TRI);
  assert(CCDef >= 0 && "Couldn't find CC set");
  MI->getOperand(CCDef).setIsDead(false);

  // Clear any intervening kills of CC.
  MachineBasicBlock::iterator MBBI = MI, MBBE = Compare;
  for (++MBBI; MBBI != MBBE; ++MBBI)
    MBBI->clearRegisterKills(SystemZ::CC, TRI);

  return true;
}