void MIPrinter::print(const MachineMemOperand &Op) { OS << '('; // TODO: Print operand's target specific flags. if (Op.isVolatile()) OS << "volatile "; if (Op.isNonTemporal()) OS << "non-temporal "; if (Op.isInvariant()) OS << "invariant "; if (Op.isLoad()) OS << "load "; else { assert(Op.isStore() && "Non load machine operand must be a store"); OS << "store "; } OS << Op.getSize() << (Op.isLoad() ? " from " : " into "); if (const Value *Val = Op.getValue()) printIRValueReference(*Val); // TODO: Print PseudoSourceValue. printOffset(Op.getOffset()); if (Op.getBaseAlignment() != Op.getSize()) OS << ", align " << Op.getBaseAlignment(); // TODO: Print the metadata attributes. OS << ')'; }
/// This returns true if the two MIs need a chain edge betwee them. /// If these are not even memory operations, we still may need /// chain deps between them. The question really is - could /// these two MIs be reordered during scheduling from memory dependency /// point of view. static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI, MachineInstr *MIa, MachineInstr *MIb) { // Cover a trivial case - no edge is need to itself. if (MIa == MIb) return false; if (isUnsafeMemoryObject(MIa, MFI) || isUnsafeMemoryObject(MIb, MFI)) return true; // If we are dealing with two "normal" loads, we do not need an edge // between them - they could be reordered. if (!MIa->mayStore() && !MIb->mayStore()) return false; // To this point analysis is generic. From here on we do need AA. if (!AA) return true; MachineMemOperand *MMOa = *MIa->memoperands_begin(); MachineMemOperand *MMOb = *MIb->memoperands_begin(); // FIXME: Need to handle multiple memory operands to support all targets. if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand()) llvm_unreachable("Multiple memory operands."); // The following interface to AA is fashioned after DAGCombiner::isAlias // and operates with MachineMemOperand offset with some important // assumptions: // - LLVM fundamentally assumes flat address spaces. // - MachineOperand offset can *only* result from legalization and // cannot affect queries other than the trivial case of overlap // checking. // - These offsets never wrap and never step outside // of allocated objects. // - There should never be any negative offsets here. // // FIXME: Modify API to hide this math from "user" // FIXME: Even before we go to AA we can reason locally about some // memory objects. It can save compile time, and possibly catch some // corner cases not currently covered. 
assert ((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset"); assert ((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset"); int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset()); int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset; int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset; AliasAnalysis::AliasResult AAResult = AA->alias( AliasAnalysis::Location(MMOa->getValue(), Overlapa, MMOa->getTBAAInfo()), AliasAnalysis::Location(MMOb->getValue(), Overlapb, MMOb->getTBAAInfo())); return (AAResult != AliasAnalysis::NoAlias); }
/// getHazardType - We return hazard for any non-branch instruction that would /// terminate the dispatch group. We turn NoopHazard for any /// instructions that wouldn't terminate the dispatch group that would cause a /// pipeline flush. ScheduleHazardRecognizer::HazardType PPCHazardRecognizer970:: getHazardType(SUnit *SU, int Stalls) { assert(Stalls == 0 && "PPC hazards don't support scoreboard lookahead"); MachineInstr *MI = SU->getInstr(); if (MI->isDebugValue()) return NoHazard; unsigned Opcode = MI->getOpcode(); bool isFirst, isSingle, isCracked, isLoad, isStore; PPCII::PPC970_Unit InstrType = GetInstrType(Opcode, isFirst, isSingle, isCracked, isLoad, isStore); if (InstrType == PPCII::PPC970_Pseudo) return NoHazard; // We can only issue a PPC970_First/PPC970_Single instruction (such as // crand/mtspr/etc) if this is the first cycle of the dispatch group. if (NumIssued != 0 && (isFirst || isSingle)) return Hazard; // If this instruction is cracked into two ops by the decoder, we know that // it is not a branch and that it cannot issue if 3 other instructions are // already in the dispatch group. if (isCracked && NumIssued > 2) return Hazard; switch (InstrType) { default: llvm_unreachable("Unknown instruction type!"); case PPCII::PPC970_FXU: case PPCII::PPC970_LSU: case PPCII::PPC970_FPU: case PPCII::PPC970_VALU: case PPCII::PPC970_VPERM: // We can only issue a branch as the last instruction in a group. if (NumIssued == 4) return Hazard; break; case PPCII::PPC970_CRU: // We can only issue a CR instruction in the first two slots. if (NumIssued >= 2) return Hazard; break; case PPCII::PPC970_BRU: break; } // Do not allow MTCTR and BCTRL to be in the same dispatch group. if (HasCTRSet && Opcode == PPC::BCTRL) return NoopHazard; // If this is a load following a store, make sure it's not to the same or // overlapping address. 
if (isLoad && NumStores && !MI->memoperands_empty()) { MachineMemOperand *MO = *MI->memoperands_begin(); if (isLoadOfStoredAddress(MO->getSize(), MO->getOffset(), MO->getValue())) return NoopHazard; } return NoHazard; }
// Check if the machine memory operand MMO is aliased with any of the // stores in the store group Stores. bool HexagonStoreWidening::instrAliased(InstrGroup &Stores, const MachineMemOperand &MMO) { if (!MMO.getValue()) return true; MemoryLocation L(MMO.getValue(), MMO.getSize(), MMO.getAAInfo()); for (auto SI : Stores) { const MachineMemOperand &SMO = getStoreTarget(SI); if (!SMO.getValue()) return true; MemoryLocation SL(SMO.getValue(), SMO.getSize(), SMO.getAAInfo()); if (AA->alias(L, SL)) return true; } return false; }
/// EmitInstruction - Record the effects of issuing an instruction on the
/// current dispatch group: structural hazards, tracked store addresses, and
/// the number of slots consumed.
void PPCHazardRecognizer970::EmitInstruction(SUnit *SU) {
  MachineInstr *MI = SU->getInstr();

  // Debug values do not consume a dispatch slot.
  if (MI->isDebugValue())
    return;

  unsigned Opcode = MI->getOpcode();
  bool IsFirst, IsSingle, IsCracked, IsLoad, IsStore;
  PPCII::PPC970_Unit Unit =
      GetInstrType(Opcode, IsFirst, IsSingle, IsCracked, IsLoad, IsStore);
  if (Unit == PPCII::PPC970_Pseudo)
    return;

  // Update structural hazard information.
  if (Opcode == PPC::MTCTR || Opcode == PPC::MTCTR8)
    HasCTRSet = true;

  // Track the address stored to (up to four outstanding stores).
  if (IsStore && NumStores < 4 && !MI->memoperands_empty()) {
    MachineMemOperand *MO = *MI->memoperands_begin();
    StoreSize[NumStores] = MO->getSize();
    StoreOffset[NumStores] = MO->getOffset();
    StoreValue[NumStores] = MO->getValue();
    ++NumStores;
  }

  // Branches and "single" instructions terminate the dispatch group.
  if (Unit == PPCII::PPC970_BRU || IsSingle)
    NumIssued = 4;  // Terminate a d-group.
  ++NumIssued;

  // If this instruction is cracked into two ops by the decoder, remember that
  // we issued two pieces.
  if (IsCracked)
    ++NumIssued;

  if (NumIssued == 5)
    EndDispatchGroup();
}
/// Print a machine memory operand in MIR syntax:
///   "(<flags> load|store <size> from|into <value-or-pseudo-value>
///    [+ offset][, align N][, !tbaa ...][, !alias.scope ...]
///    [, !noalias ...][, !range ...])".
void MIPrinter::print(const MachineMemOperand &Op) {
  OS << '(';
  // TODO: Print operand's target specific flags.
  if (Op.isVolatile())
    OS << "volatile ";
  if (Op.isNonTemporal())
    OS << "non-temporal ";
  if (Op.isInvariant())
    OS << "invariant ";
  if (Op.isLoad())
    OS << "load ";
  else {
    assert(Op.isStore() && "Non load machine operand must be a store");
    OS << "store ";
  }
  // Size in bytes, then the direction of the access.
  OS << Op.getSize() << (Op.isLoad() ? " from " : " into ");
  if (const Value *Val = Op.getValue()) {
    // The operand references an IR value.
    printIRValueReference(*Val);
  } else {
    // No IR value: the operand must carry a pseudo source value describing a
    // target-abstracted location (stack, GOT, jump table, etc.).
    const PseudoSourceValue *PVal = Op.getPseudoValue();
    assert(PVal && "Expected a pseudo source value");
    switch (PVal->kind()) {
    case PseudoSourceValue::Stack:
      OS << "stack";
      break;
    case PseudoSourceValue::GOT:
      OS << "got";
      break;
    case PseudoSourceValue::JumpTable:
      OS << "jump-table";
      break;
    case PseudoSourceValue::ConstantPool:
      OS << "constant-pool";
      break;
    case PseudoSourceValue::FixedStack:
      // Fixed stack objects are printed as frame-index references.
      printStackObjectReference(
          cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex());
      break;
    case PseudoSourceValue::GlobalValueCallEntry:
      OS << "call-entry ";
      cast<GlobalValuePseudoSourceValue>(PVal)->getValue()->printAsOperand(
          OS, /*PrintType=*/false, MST);
      break;
    case PseudoSourceValue::ExternalSymbolCallEntry:
      OS << "call-entry $";
      printLLVMNameWithoutPrefix(
          OS, cast<ExternalSymbolPseudoSourceValue>(PVal)->getSymbol());
      break;
    }
  }
  printOffset(Op.getOffset());
  // The alignment is only printed when it differs from the access size.
  if (Op.getBaseAlignment() != Op.getSize())
    OS << ", align " << Op.getBaseAlignment();
  // Emit any attached alias-analysis metadata nodes.
  auto AAInfo = Op.getAAInfo();
  if (AAInfo.TBAA) {
    OS << ", !tbaa ";
    AAInfo.TBAA->printAsOperand(OS, MST);
  }
  if (AAInfo.Scope) {
    OS << ", !alias.scope ";
    AAInfo.Scope->printAsOperand(OS, MST);
  }
  if (AAInfo.NoAlias) {
    OS << ", !noalias ";
    AAInfo.NoAlias->printAsOperand(OS, MST);
  }
  if (Op.getRanges()) {
    OS << ", !range ";
    Op.getRanges()->printAsOperand(OS, MST);
  }
  OS << ')';
}
/// Print a machine memory operand in MIR syntax, including target-specific
/// MMO flags, atomic ordering/sync-scope information, the memory location
/// (IR value or pseudo source value), offset, alignment, and attached
/// alias-analysis metadata.
void MIPrinter::print(const LLVMContext &Context, const TargetInstrInfo &TII,
                      const MachineMemOperand &Op) {
  OS << '(';
  if (Op.isVolatile())
    OS << "volatile ";
  if (Op.isNonTemporal())
    OS << "non-temporal ";
  if (Op.isDereferenceable())
    OS << "dereferenceable ";
  if (Op.isInvariant())
    OS << "invariant ";
  // Target-specific flags are printed as quoted names resolved via TII.
  if (Op.getFlags() & MachineMemOperand::MOTargetFlag1)
    OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag1)
       << "\" ";
  if (Op.getFlags() & MachineMemOperand::MOTargetFlag2)
    OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag2)
       << "\" ";
  if (Op.getFlags() & MachineMemOperand::MOTargetFlag3)
    OS << '"' << getTargetMMOFlagName(TII, MachineMemOperand::MOTargetFlag3)
       << "\" ";
  // Unlike the older printer, an operand may be both a load and a store
  // (e.g. an atomic read-modify-write); both words are then printed.
  assert((Op.isLoad() || Op.isStore()) &&
         "machine memory operand must be a load or store (or both)");
  if (Op.isLoad())
    OS << "load ";
  if (Op.isStore())
    OS << "store ";
  // Atomic information: sync scope, then success/failure orderings.
  printSyncScope(Context, Op.getSyncScopeID());
  if (Op.getOrdering() != AtomicOrdering::NotAtomic)
    OS << toIRString(Op.getOrdering()) << ' ';
  if (Op.getFailureOrdering() != AtomicOrdering::NotAtomic)
    OS << toIRString(Op.getFailureOrdering()) << ' ';
  // Size in bytes.
  OS << Op.getSize();
  if (const Value *Val = Op.getValue()) {
    // "on" for a combined load/store, otherwise direction-specific wording.
    OS << ((Op.isLoad() && Op.isStore()) ? " on "
                                         : Op.isLoad() ? " from " : " into ");
    printIRValueReference(*Val);
  } else if (const PseudoSourceValue *PVal = Op.getPseudoValue()) {
    OS << ((Op.isLoad() && Op.isStore()) ? " on "
                                         : Op.isLoad() ? " from " : " into ");
    assert(PVal && "Expected a pseudo source value");
    switch (PVal->kind()) {
    case PseudoSourceValue::Stack:
      OS << "stack";
      break;
    case PseudoSourceValue::GOT:
      OS << "got";
      break;
    case PseudoSourceValue::JumpTable:
      OS << "jump-table";
      break;
    case PseudoSourceValue::ConstantPool:
      OS << "constant-pool";
      break;
    case PseudoSourceValue::FixedStack:
      // Fixed stack objects are printed as frame-index references.
      printStackObjectReference(
          cast<FixedStackPseudoSourceValue>(PVal)->getFrameIndex());
      break;
    case PseudoSourceValue::GlobalValueCallEntry:
      OS << "call-entry ";
      cast<GlobalValuePseudoSourceValue>(PVal)->getValue()->printAsOperand(
          OS, /*PrintType=*/false, MST);
      break;
    case PseudoSourceValue::ExternalSymbolCallEntry:
      OS << "call-entry $";
      printLLVMNameWithoutPrefix(
          OS, cast<ExternalSymbolPseudoSourceValue>(PVal)->getSymbol());
      break;
    case PseudoSourceValue::TargetCustom:
      // NOTE(review): target-custom pseudo source values have no MIR syntax
      // here, so encountering one aborts — confirm no in-tree target can
      // reach this printer with a TargetCustom MMO.
      llvm_unreachable("TargetCustom pseudo source values are not supported");
      break;
    }
  }
  MachineOperand::printOperandOffset(OS, Op.getOffset());
  // The alignment is only printed when it differs from the access size.
  if (Op.getBaseAlignment() != Op.getSize())
    OS << ", align " << Op.getBaseAlignment();
  // Emit any attached alias-analysis metadata nodes.
  auto AAInfo = Op.getAAInfo();
  if (AAInfo.TBAA) {
    OS << ", !tbaa ";
    AAInfo.TBAA->printAsOperand(OS, MST);
  }
  if (AAInfo.Scope) {
    OS << ", !alias.scope ";
    AAInfo.Scope->printAsOperand(OS, MST);
  }
  if (AAInfo.NoAlias) {
    OS << ", !noalias ";
    AAInfo.NoAlias->printAsOperand(OS, MST);
  }
  if (Op.getRanges()) {
    OS << ", !range ";
    Op.getRanges()->printAsOperand(OS, MST);
  }
  OS << ')';
}
/// handleMemOp - Process one memory-accessing instruction, eliminating a
/// store that is overwritten by a later must-alias store of the same size,
/// or forwarding a just-stored value into a load (replacing the load by a
/// copy).  Returns the iterator to continue from (the copy that replaced
/// an eliminated load, otherwise the original iterator).
DeadMemOpElimination::instr_iterator
DeadMemOpElimination::handleMemOp(instr_iterator I, DefMapTy &Defs,
                                  AliasSetTracker &AST) {
  MachineInstr *MI = I;

  // NOTE(review): the first memory operand is dereferenced unconditionally —
  // presumably callers only pass instructions with at least one memoperand;
  // confirm, since *memoperands_begin() on an empty list is UB.
  MachineMemOperand *MO = *MI->memoperands_begin();
  // AliasAnalysis cannot handle offset right now, so we pretend to write
  // a big enough size to the location pointed by the base pointer.
  uint64_t Size = MO->getSize() + MO->getOffset();
  AliasSet *ASet = &AST.getAliasSetForPointer(const_cast<Value*>(MO->getValue()),
                                              Size, 0);

  // Last definition (store) seen for this alias set.
  MachineInstr *&LastMI = Defs[ASet];

  // The previous store is a candidate for elimination/forwarding only when
  // the alias set is must-alias, it is not an opaque call, and both
  // instructions execute under the same predicate.
  bool canHandleLastStore = LastMI && ASet->isMustAlias()
                            && LastMI->getOpcode() != VTM::VOpInternalCall
                            // FIXME: We may need to remember the last
                            // definition for all predicates.
                            && isPredIdentical(LastMI, MI);

  if (canHandleLastStore) {
    MachineMemOperand *LastMO = *LastMI->memoperands_begin();
    // We can only handle last store if and only if their memory operand have
    // the must-alias address and the same size.
    canHandleLastStore = LastMO->getSize() == MO->getSize()
                         && !LastMO->isVolatile()
                         && MachineMemOperandAlias(MO, LastMO, AA, SE)
                            == AliasAnalysis::MustAlias;
  }

  // FIXME: These elimination is only valid if we are in single-thread mode!
  if (VInstrInfo::mayStore(MI)) {
    if (canHandleLastStore) {
      // Dead store found: the previous store is fully overwritten by MI,
      // so remove it.
      LastMI->eraseFromParent();
      ++DeadStoreEliminated;
    }

    // Update the definition.
    LastMI = MI;
    return I;
  }

  // Now MI is a load.
  if (!canHandleLastStore) return I;

  // Loading the value that was just stored: the load is not necessary.
  // NOTE(review): assumes operand 0 of the load is its result and operand 2
  // of the store is the stored value — TODO confirm against the VTM
  // instruction definitions.
  MachineOperand LoadedMO = MI->getOperand(0);
  MachineOperand StoredMO = LastMI->getOperand(2);

  // Simply replace the load by a copy, preserving the load's predicate and
  // trace operands.
  DebugLoc dl = MI->getDebugLoc();
  I = *BuildMI(*MI->getParent(), I, dl, VInstrInfo::getDesc(VTM::VOpMove))
        .addOperand(LoadedMO).addOperand(StoredMO).
        addOperand(*VInstrInfo::getPredOperand(MI)).
        addOperand(*VInstrInfo::getTraceOperand(MI));

  // The original load is now dead.
  MI->eraseFromParent();
  ++DeadLoadEliminated;
  return I;
}