Example #1
/// CloneDominatorInfo - Clone the basic block's dominator tree and, if available,
/// dominance info. It is expected that the basic block is already cloned.
static void CloneDominatorInfo(BasicBlock *BB, 
                               DenseMap<const Value *, Value *> &ValueMap,
                               DominatorTree *DT,
                               DominanceFrontier *DF) {

  assert(DT && "DominatorTree is not available");
  DenseMap<const Value *, Value*>::iterator BI = ValueMap.find(BB);
  assert(BI != ValueMap.end() && "BasicBlock clone is missing");
  BasicBlock *NewBB = cast<BasicBlock>(BI->second);

  // NewBB already has dominator info.
  if (DT->getNode(NewBB))
    return;

  assert(DT->getNode(BB) && "BasicBlock does not have dominator info");
  // Entry block is not expected here. Infinite loops are not to be cloned.
  assert(DT->getNode(BB)->getIDom() && "BasicBlock does not have immediate dominator");
  BasicBlock *BBDom = DT->getNode(BB)->getIDom()->getBlock();

  // NewBB's dominator is either BB's dominator or BB's dominator's clone.
  BasicBlock *NewBBDom = BBDom;
  DenseMap<const Value *, Value*>::iterator BBDomI = ValueMap.find(BBDom);
  if (BBDomI != ValueMap.end()) {
    NewBBDom = cast<BasicBlock>(BBDomI->second);
    if (!DT->getNode(NewBBDom))
      CloneDominatorInfo(BBDom, ValueMap, DT, DF);
  }
  DT->addNewBlock(NewBB, NewBBDom);

  // Copy the cloned dominance frontier set.
  if (DF) {
    DominanceFrontier::DomSetType NewDFSet;
    DominanceFrontier::iterator DFI = DF->find(BB);
    if (DFI != DF->end()) {
      DominanceFrontier::DomSetType S = DFI->second;
      for (DominanceFrontier::DomSetType::iterator I = S.begin(), E = S.end();
           I != E; ++I) {
        BasicBlock *DB = *I;
        DenseMap<const Value*, Value*>::iterator IDM = ValueMap.find(DB);
        if (IDM != ValueMap.end())
          NewDFSet.insert(cast<BasicBlock>(IDM->second));
        else
          NewDFSet.insert(DB);
      }
    }
    DF->addBasicBlock(NewBB, NewDFSet);
  }
}
Example #2
// Replace a sequence of branches leading to a ret with a clone of that ret
// instruction. A suspend instruction is represented by a switch; track the PHI
// values and select the correct case successor when possible.
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  DenseMap<Value *, Value *> ResolvedValues;

  Instruction *I = InitialInst;
  while (isa<TerminatorInst>(I)) {
    if (isa<ReturnInst>(I)) {
      if (I != InitialInst)
        ReplaceInstWithInst(InitialInst, I->clone());
      return true;
    }
    if (auto *BR = dyn_cast<BranchInst>(I)) {
      if (BR->isUnconditional()) {
        BasicBlock *BB = BR->getSuccessor(0);
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
      Value *V = SI->getCondition();
      auto it = ResolvedValues.find(V);
      if (it != ResolvedValues.end())
        V = it->second;
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
        BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    }
    return false;
  }
  return false;
}
Example #3
/// DuplicateInstruction - Duplicate a TailBB instruction to PredBB and update
/// the source operands due to earlier PHI translation.
void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
                                     MachineBasicBlock *TailBB,
                                     MachineBasicBlock *PredBB,
                                     MachineFunction &MF,
                                     DenseMap<unsigned, unsigned> &LocalVRMap,
                                     const DenseSet<unsigned> &UsedByPhi) {
  MachineInstr *NewMI = TII->duplicate(MI, MF);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (MO.isDef()) {
      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
      unsigned NewReg = MRI->createVirtualRegister(RC);
      MO.setReg(NewReg);
      LocalVRMap.insert(std::make_pair(Reg, NewReg));
      if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg))
        AddSSAUpdateEntry(Reg, NewReg, PredBB);
    } else {
      DenseMap<unsigned, unsigned>::iterator VI = LocalVRMap.find(Reg);
      if (VI != LocalVRMap.end()) {
        MO.setReg(VI->second);
        // Clear any kill flags from this operand.  The new register could have
        // uses after this one, so kills are not valid here.
        MO.setIsKill(false);
        MRI->constrainRegClass(VI->second, MRI->getRegClass(Reg));
      }
    }
  }
  PredBB->insert(PredBB->instr_end(), NewMI);
}
Example #4
void LineNumberAnnotatedWriter::emitInstructionAnnot(
      const Instruction *I, formatted_raw_ostream &Out)
{
    DILocation *NewInstrLoc = I->getDebugLoc();
    if (!NewInstrLoc) {
        auto Loc = DebugLoc.find(I);
        if (Loc != DebugLoc.end())
            NewInstrLoc = Loc->second;
    }
    if (!NewInstrLoc || NewInstrLoc == InstrLoc)
        return;
    InstrLoc = NewInstrLoc;
    std::vector<DILineInfo> DIvec;
    do {
        DIvec.emplace_back();
        DILineInfo &DI = DIvec.back();
        DIScope *scope = NewInstrLoc->getScope();
        if (scope)
            DI.FunctionName = scope->getName();
        DI.FileName = NewInstrLoc->getFilename();
        DI.Line = NewInstrLoc->getLine();
        NewInstrLoc = NewInstrLoc->getInlinedAt();
    } while (NewInstrLoc);
    LinePrinter.emit_lineinfo(Out, DIvec);
}
Example #5
void ScheduleDAG::EmitPhysRegCopy(SUnit *SU,
                                  DenseMap<SUnit*, unsigned> &VRBaseMap) {
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->CopyDstRC) {
      // Copy to physical register.
      DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->getSUnit());
      assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
      // Find the destination physical register.
      unsigned Reg = 0;
      for (SUnit::const_succ_iterator II = SU->Succs.begin(),
             EE = SU->Succs.end(); II != EE; ++II) {
        if (II->getReg()) {
          Reg = II->getReg();
          break;
        }
      }
      BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
        .addReg(VRI->second);
    } else {
      // Copy from physical register.
      assert(I->getReg() && "Unknown physical register!");
      unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
      bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
      (void)isNew; // Silence compiler warning.
      assert(isNew && "Node emitted out of order - early");
      BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
        .addReg(I->getReg());
    }
    break;
  }
}
Example #6
/// Build a bit set for BitSet using the object layouts in
/// GlobalLayout.
BitSetInfo LowerBitSets::buildBitSet(
    Metadata *BitSet,
    const DenseMap<GlobalObject *, uint64_t> &GlobalLayout) {
  BitSetBuilder BSB;

  // Compute the byte offset of each element of this bitset.
  if (BitSetNM) {
    for (MDNode *Op : BitSetNM->operands()) {
      if (Op->getOperand(0) != BitSet || !Op->getOperand(1))
        continue;
      Constant *OpConst =
          cast<ConstantAsMetadata>(Op->getOperand(1))->getValue();
      if (auto GA = dyn_cast<GlobalAlias>(OpConst))
        OpConst = GA->getAliasee();
      auto OpGlobal = dyn_cast<GlobalObject>(OpConst);
      if (!OpGlobal)
        continue;
      uint64_t Offset =
          cast<ConstantInt>(cast<ConstantAsMetadata>(Op->getOperand(2))
                                ->getValue())->getZExtValue();

      Offset += GlobalLayout.find(OpGlobal)->second;

      BSB.addOffset(Offset);
    }
  }

  return BSB.build();
}
Example #7
// Calculate the intersection of all the InitialStates for a BasicBlock's
// successors.
static int getSuccState(DenseMap<BasicBlock *, int> &InitialStates, Function &F,
                        int ParentBaseState, BasicBlock *BB) {
  // This block rejoins normal control flow,
  // conservatively report this basic block as overdefined.
  if (isa<CatchReturnInst>(BB->getTerminator()))
    return OverdefinedState;

  int CommonState = OverdefinedState;
  for (BasicBlock *SuccBB : successors(BB)) {
    // We didn't manage to get a state for one of these successors,
    // conservatively report this basic block as overdefined.
    auto SuccStartState = InitialStates.find(SuccBB);
    if (SuccStartState == InitialStates.end())
      return OverdefinedState;

    // This is an EH Pad, conservatively report this basic block as overdefined.
    if (SuccBB->isEHPad())
      return OverdefinedState;

    int SuccState = SuccStartState->second;
    assert(SuccState != OverdefinedState &&
           "overdefined BBs shouldn't be in InitialStates");
    if (CommonState == OverdefinedState)
      CommonState = SuccState;

    // At least two successors have different InitialStates,
    // conservatively report this basic block as overdefined.
    if (CommonState != SuccState)
      return OverdefinedState;
  }

  return CommonState;
}
Example #8
/// DuplicateInstruction - Duplicate a TailBB instruction to PredBB and update
/// the source operands due to earlier PHI translation.
void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
                                     MachineBasicBlock *TailBB,
                                     MachineBasicBlock *PredBB,
                                     MachineFunction &MF,
                                     DenseMap<unsigned, unsigned> &LocalVRMap,
                                     const DenseSet<unsigned> &UsedByPhi) {
  MachineInstr *NewMI = TII->duplicate(MI, MF);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (MO.isDef()) {
      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
      unsigned NewReg = MRI->createVirtualRegister(RC);
      MO.setReg(NewReg);
      LocalVRMap.insert(std::make_pair(Reg, NewReg));
      if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg))
        AddSSAUpdateEntry(Reg, NewReg, PredBB);
    } else {
      DenseMap<unsigned, unsigned>::iterator VI = LocalVRMap.find(Reg);
      if (VI != LocalVRMap.end())
        MO.setReg(VI->second);
    }
  }
  PredBB->insert(PredBB->end(), NewMI);
}
Example #9
/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph will remain.
static void UpdateCallGraphAfterInlining(const Function *Caller,
                                         const Function *Callee,
                                         Function::iterator FirstNewBlock,
                                       DenseMap<const Value*, Value*> &ValueMap,
                                         CallGraph &CG) {
  // Update the call graph by deleting the edge from Callee to Caller
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];
  CallerNode->removeCallEdgeTo(CalleeNode);
  
  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  for (CallGraphNode::iterator I = CalleeNode->begin(),
       E = CalleeNode->end(); I != E; ++I) {
    const Instruction *OrigCall = I->first.getInstruction();
    
    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI != ValueMap.end() && VMI->second) {
      // If the call was inlined, but then constant folded, there is no edge to
      // add.  Check for this case.
      if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
        CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
    }
  }
}
Example #10
/// Remove any duplicates (as SDValues) from the derived pointer pairs.  This
/// is not required for correctness.  Its purpose is to reduce the size of the
/// StackMap section.  It has no effect on the number of spill slots required
/// or the actual lowering.
static void
removeDuplicateGCPtrs(SmallVectorImpl<const Value *> &Bases,
                      SmallVectorImpl<const Value *> &Ptrs,
                      SmallVectorImpl<const GCRelocateInst *> &Relocs,
                      SelectionDAGBuilder &Builder,
                      FunctionLoweringInfo::StatepointSpillMap &SSM) {
  DenseMap<SDValue, const Value *> Seen;

  SmallVector<const Value *, 64> NewBases, NewPtrs;
  SmallVector<const GCRelocateInst *, 64> NewRelocs;
  for (size_t i = 0, e = Ptrs.size(); i < e; i++) {
    SDValue SD = Builder.getValue(Ptrs[i]);
    auto SeenIt = Seen.find(SD);

    if (SeenIt == Seen.end()) {
      // Only add non-duplicates
      NewBases.push_back(Bases[i]);
      NewPtrs.push_back(Ptrs[i]);
      NewRelocs.push_back(Relocs[i]);
      Seen[SD] = Ptrs[i];
    } else {
      // Duplicate pointer found, note in SSM and move on:
      SSM.DuplicateMap[Ptrs[i]] = SeenIt->second;
    }
  }
  assert(Bases.size() >= NewBases.size());
  assert(Ptrs.size() >= NewPtrs.size());
  assert(Relocs.size() >= NewRelocs.size());
  Bases = NewBases;
  Ptrs = NewPtrs;
  Relocs = NewRelocs;
  assert(Ptrs.size() == Bases.size());
  assert(Ptrs.size() == Relocs.size());
}
Example #11
bool BitSetInfo::containsValue(
    const DataLayout &DL,
    const DenseMap<GlobalObject *, uint64_t> &GlobalLayout, Value *V,
    uint64_t COffset) const {
  if (auto GV = dyn_cast<GlobalObject>(V)) {
    auto I = GlobalLayout.find(GV);
    if (I == GlobalLayout.end())
      return false;
    return containsGlobalOffset(I->second + COffset);
  }

  if (auto GEP = dyn_cast<GEPOperator>(V)) {
    APInt APOffset(DL.getPointerSizeInBits(0), 0);
    bool Result = GEP->accumulateConstantOffset(DL, APOffset);
    if (!Result)
      return false;
    COffset += APOffset.getZExtValue();
    return containsValue(DL, GlobalLayout, GEP->getPointerOperand(),
                         COffset);
  }

  if (auto Op = dyn_cast<Operator>(V)) {
    if (Op->getOpcode() == Instruction::BitCast)
      return containsValue(DL, GlobalLayout, Op->getOperand(0), COffset);

    if (Op->getOpcode() == Instruction::Select)
      return containsValue(DL, GlobalLayout, Op->getOperand(1), COffset) &&
             containsValue(DL, GlobalLayout, Op->getOperand(2), COffset);
  }

  return false;
}
Example #12
/// RemapInstruction - Convert the instruction operands from referencing the
/// current values into those specified by ValueMap.
static inline void RemapInstruction(Instruction *I,
                                    DenseMap<const Value *, Value*> &ValueMap) {
  for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
    Value *Op = I->getOperand(op);
    DenseMap<const Value *, Value*>::iterator It = ValueMap.find(Op);
    if (It != ValueMap.end()) Op = It->second;
    I->setOperand(op, Op);
  }
}
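Nearly every snippet on this page uses the same DenseMap::find idiom that RemapInstruction shows in its smallest form: look the key up and fall back to the original value when it is absent. The following is a minimal, self-contained sketch of that idiom with made-up integer keys and values; it is not taken from any of the examples above.

#include "llvm/ADT/DenseMap.h"
#include <cassert>
#include <utility>

// Return the remapped value for Key, or Key itself when no mapping exists.
static int remapOrKeep(const llvm::DenseMap<int, int> &ValueMap, int Key) {
  llvm::DenseMap<int, int>::const_iterator It = ValueMap.find(Key);
  return It != ValueMap.end() ? It->second : Key;
}

int main() {
  llvm::DenseMap<int, int> ValueMap;
  ValueMap.insert(std::make_pair(1, 10)); // 1 is remapped to 10
  assert(remapOrKeep(ValueMap, 1) == 10); // mapped key is translated
  assert(remapOrKeep(ValueMap, 2) == 2);  // unmapped key is left unchanged
  return 0;
}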
Example #13
bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
  SmallSetVector<MachineInstr*, 8> MaybeDeadCopies;  // Candidates for deletion
  DenseMap<unsigned, MachineInstr*> AvailCopyMap;    // Def -> available copies map
  DenseMap<unsigned, MachineInstr*> CopyMap;         // Def -> copies map
  SourceMap SrcMap; // Src -> Def map

  DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");

  bool Changed = false;
  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ) {
    MachineInstr *MI = &*I;
    ++I;

    if (MI->isCopy()) {
      unsigned Def = MI->getOperand(0).getReg();
      unsigned Src = MI->getOperand(1).getReg();

      if (TargetRegisterInfo::isVirtualRegister(Def) ||
          TargetRegisterInfo::isVirtualRegister(Src))
        report_fatal_error("MachineCopyPropagation should be run after"
                           " register allocation!");

      DenseMap<unsigned, MachineInstr*>::iterator CI = AvailCopyMap.find(Src);
      if (CI != AvailCopyMap.end()) {
        MachineInstr *CopyMI = CI->second;
        if (!MRI->isReserved(Def) &&
            (!MRI->isReserved(Src) || NoInterveningSideEffect(CopyMI, MI)) &&
            isNopCopy(CopyMI, Def, Src, TRI)) {
          // The two copies cancel out and the source of the first copy
          // hasn't been overridden, eliminate the second one. e.g.
          //  %ECX<def> = COPY %EAX<kill>
          //  ... nothing clobbered EAX.
          //  %EAX<def> = COPY %ECX
          // =>
          //  %ECX<def> = COPY %EAX
          //
          // Also avoid eliminating a copy from reserved registers unless the
          // definition is proven not clobbered. e.g.
          // %RSP<def> = COPY %RAX
          // CALL
          // %RAX<def> = COPY %RSP

          DEBUG(dbgs() << "MCP: copy is a NOP, removing: "; MI->dump());

          // Clear any kills of Def between CopyMI and MI. This extends the
          // live range.
          for (MachineBasicBlock::iterator I = CopyMI, E = MI; I != E; ++I)
            I->clearRegisterKills(Def, TRI);

          removeCopy(MI);
          Changed = true;
          ++NumDeletes;
          continue;
        }
      }
Example #14
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*, 
                                 SmallPtrSet<KeyTy*, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy *Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy*, 4> >::iterator
  InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found; // Silence unused variable warning in release builds.
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}
Example #15
/// getMappedReg - Return the physical register the specified virtual register
/// might be mapped to.
static unsigned
getMappedReg(unsigned Reg, DenseMap<unsigned, unsigned> &RegMap) {
  while (TargetRegisterInfo::isVirtualRegister(Reg))  {
    DenseMap<unsigned, unsigned>::iterator SI = RegMap.find(Reg);
    if (SI == RegMap.end())
      return 0;
    Reg = SI->second;
  }
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return Reg;
  return 0;
}
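getMappedReg chains the same lookup: it keeps following virtual-to-virtual mappings until the chain either dead-ends or leaves the virtual range. Below is a standalone sketch of that chain-following loop over plain unsigned ids; the isVirtualId threshold is purely illustrative and stands in for TargetRegisterInfo::isVirtualRegister.

#include "llvm/ADT/DenseMap.h"
#include <cassert>
#include <utility>

// Illustrative stand-in for the "is virtual register" test: treat ids at or
// above 1000 as virtual for this sketch only.
static bool isVirtualId(unsigned Id) { return Id >= 1000; }

// Follow Reg through RegMap until the chain dead-ends or reaches a non-virtual
// id, mirroring the loop in getMappedReg. Returns 0 if the chain dead-ends.
static unsigned followMapping(unsigned Reg,
                              const llvm::DenseMap<unsigned, unsigned> &RegMap) {
  while (isVirtualId(Reg)) {
    llvm::DenseMap<unsigned, unsigned>::const_iterator It = RegMap.find(Reg);
    if (It == RegMap.end())
      return 0;
    Reg = It->second;
  }
  return Reg;
}

int main() {
  llvm::DenseMap<unsigned, unsigned> RegMap;
  RegMap.insert(std::make_pair(1001u, 1002u)); // virtual -> virtual
  RegMap.insert(std::make_pair(1002u, 42u));   // virtual -> "physical"
  assert(followMapping(1001, RegMap) == 42);   // chain: 1001 -> 1002 -> 42
  assert(followMapping(1003, RegMap) == 0);    // unmapped virtual id
  return 0;
}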
Example #16
// Assuming we arrived at the block NewBlock from the Prev instruction, store
// the PHIs' incoming values in the ResolvedValues map.
static void
scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
                          DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}
Example #17
Node *getChild(StringRef Name,
               const void *Entity,
               const UnifiedStatsReporter::TraceFormatter *TF) {
  Key K(Name, Entity, TF);
  auto I = Children.find(K);
  if (I != Children.end()) {
    return I->getSecond().get();
  } else {
    auto N = llvm::make_unique<Node>(this);
    auto P = N.get();
    Children.insert(std::make_pair(K, std::move(N)));
    return P;
  }
}
Example #18
CustomAttributesSet GetAttributes(const Decl* D)
{
  CustomAttributesSet attrs = {};
  if (D->hasAttr<AnnotateAttr>()) {
    Report(D, "Declaration has unhandled annotations.");
    attrs = CacheAttributes(D);
  } else {
    auto attributes = AttributesCache.find(D);
    if (attributes != AttributesCache.end()) {
      attrs = attributes->second;
    }
  }
  return attrs;
}
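For the cache-hit path in GetAttributes, DenseMap also offers lookup(), which returns a value-initialized mapped value when the key is missing. Here is a hedged sketch of that variant, assuming the cached type is default-constructible and cheap to copy, as the attrs = {} initialization above suggests; the DemoAttrs struct and getCached helper are illustrative names, not part of the original code.

#include "llvm/ADT/DenseMap.h"
#include <cassert>

// Illustrative stand-in for CustomAttributesSet: a small, copyable aggregate.
struct DemoAttrs {
  bool Annotated = false;
};

// lookup() returns a value-initialized DemoAttrs when Key is absent, so the
// explicit find()/end() comparison can be folded into a single call.
static DemoAttrs getCached(const llvm::DenseMap<int, DemoAttrs> &Cache, int Key) {
  return Cache.lookup(Key);
}

int main() {
  llvm::DenseMap<int, DemoAttrs> Cache;
  Cache[7].Annotated = true;              // operator[] inserts, then we set it
  assert(getCached(Cache, 7).Annotated);  // hit: cached value returned
  assert(!getCached(Cache, 8).Annotated); // miss: value-initialized default
  return 0;
}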
Example #19
void
MachineCopyPropagation::SourceNoLongerAvailable(unsigned Reg,
                              DenseMap<unsigned, unsigned> &SrcMap,
                              DenseMap<unsigned, MachineInstr*> &AvailCopyMap) {
  DenseMap<unsigned, unsigned>::iterator SI = SrcMap.find(Reg);
  if (SI != SrcMap.end()) {
    unsigned MappedDef = SI->second;
    // Source of copy is no longer available for propagation.
    if (AvailCopyMap.erase(MappedDef)) {
      for (const unsigned *SR = TRI->getSubRegisters(MappedDef); *SR; ++SR)
        AvailCopyMap.erase(*SR);
    }
  }
  for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
    SI = SrcMap.find(*AS);
    if (SI != SrcMap.end()) {
      unsigned MappedDef = SI->second;
      if (AvailCopyMap.erase(MappedDef)) {
        for (const unsigned *SR = TRI->getSubRegisters(MappedDef); *SR; ++SR)
          AvailCopyMap.erase(*SR);
      }
    }
  }
}
Example #20
/// getDepth - Computes the depth of the instructions in the vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map from virtual register to the index
/// of the defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of the last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {

  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");

  // For each instruction in the new sequence, compute the depth based on its
  // operands. Use the trace information when possible. For new operands that
  // are tracked in the InstrIdxForVirtReg map, the depth is looked up in InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    DEBUG(dbgs() << "NEW INSTR "; InstrPtr->dump(); dbgs() << "\n";);
    for (unsigned i = 0, e = InstrPtr->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = InstrPtr->getOperand(i);
      // Check for virtual register operand.
      if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is new virtual register not in trace
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        LatencyOp = TSchedModel.computeOperandLatency(
            DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
            InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(DefInstr).Depth;
          LatencyOp = TSchedModel.computeOperandLatency(
              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
Example #21
unsigned
DWARFVerifier::verifyDebugNamesCULists(const DWARFDebugNames &AccelTable) {
  // A map from CU offset to the (first) Name Index offset which claims to index
  // this CU.
  DenseMap<uint32_t, uint32_t> CUMap;
  const uint32_t NotIndexed = std::numeric_limits<uint32_t>::max();

  CUMap.reserve(DCtx.getNumCompileUnits());
  for (const auto &CU : DCtx.compile_units())
    CUMap[CU->getOffset()] = NotIndexed;

  unsigned NumErrors = 0;
  for (const DWARFDebugNames::NameIndex &NI : AccelTable) {
    if (NI.getCUCount() == 0) {
      error() << formatv("Name Index @ {0:x} does not index any CU\n",
                         NI.getUnitOffset());
      ++NumErrors;
      continue;
    }
    for (uint32_t CU = 0, End = NI.getCUCount(); CU < End; ++CU) {
      uint32_t Offset = NI.getCUOffset(CU);
      auto Iter = CUMap.find(Offset);

      if (Iter == CUMap.end()) {
        error() << formatv(
            "Name Index @ {0:x} references a non-existing CU @ {1:x}\n",
            NI.getUnitOffset(), Offset);
        ++NumErrors;
        continue;
      }

      if (Iter->second != NotIndexed) {
        error() << formatv("Name Index @ {0:x} references a CU @ {1:x}, but "
                          "this CU is already indexed by Name Index @ {2:x}\n",
                          NI.getUnitOffset(), Offset, Iter->second);
        continue;
      }
      Iter->second = NI.getUnitOffset();
    }
  }

  for (const auto &KV : CUMap) {
    if (KV.second == NotIndexed)
      warn() << formatv("CU @ {0:x} not covered by any Name Index\n", KV.first);
  }

  return NumErrors;
}
Example #22
bool FuncletLayout::runOnMachineFunction(MachineFunction &F) {
  DenseMap<const MachineBasicBlock *, int> FuncletMembership =
      getFuncletMembership(F);
  if (FuncletMembership.empty())
    return false;

  F.sort([&](MachineBasicBlock &X, MachineBasicBlock &Y) {
    auto FuncletX = FuncletMembership.find(&X);
    auto FuncletY = FuncletMembership.find(&Y);
    assert(FuncletX != FuncletMembership.end());
    assert(FuncletY != FuncletMembership.end());
    return FuncletX->second < FuncletY->second;
  });

  // Conservatively assume we changed something.
  return true;
}
Example #23
bool JumpInstrTables::runOnModule(Module &M) {
  JITI = &getAnalysis<JumpInstrTableInfo>();

  // Get the set of jumptable-annotated functions.
  DenseMap<Function *, Function *> Functions;
  for (Function &F : M) {
    if (F.hasFnAttribute(Attribute::JumpTable)) {
      assert(F.hasUnnamedAddr() &&
             "Attribute 'jumptable' requires 'unnamed_addr'");
      Functions[&F] = nullptr;
    }
  }

  // Create the jump-table functions.
  for (auto &KV : Functions) {
    Function *F = KV.first;
    KV.second = insertEntry(M, F);
  }

  // GlobalAlias is a special case, because the target of an alias statement
  // must be a defined function. So, instead of replacing a given function in
  // the alias, we replace all uses of aliases that target jumptable functions.
  // Note that there's no need to create these functions, since only aliases
  // that target known jumptable functions are replaced, and there's no way to
  // put the jumptable annotation on a global alias.
  DenseMap<GlobalAlias *, Function *> Aliases;
  for (GlobalAlias &GA : M.aliases()) {
    Constant *Aliasee = GA.getAliasee();
    if (Function *F = dyn_cast<Function>(Aliasee)) {
      auto it = Functions.find(F);
      if (it != Functions.end()) {
        Aliases[&GA] = it->second;
      }
    }
  }

  // Replace each address taken function with its jump-instruction table entry.
  for (auto &KV : Functions)
    replaceValueWithFunction(KV.first, KV.second);

  for (auto &KV : Aliases)
    replaceValueWithFunction(KV.first, KV.second);

  return !Functions.empty();
}
Example #24
void LineNumberAnnotatedWriter::emitFunctionAnnot(
      const Function *F, formatted_raw_ostream &Out)
{
    InstrLoc = nullptr;
    DISubprogram *FuncLoc = F->getSubprogram();
    if (!FuncLoc) {
        auto SP = Subprogram.find(F);
        if (SP != Subprogram.end())
            FuncLoc = SP->second;
    }
    if (!FuncLoc)
        return;
    std::vector<DILineInfo> DIvec(1);
    DILineInfo &DI = DIvec.back();
    DI.FunctionName = FuncLoc->getName();
    DI.FileName = FuncLoc->getFilename();
    DI.Line = FuncLoc->getLine();
    LinePrinter.emit_lineinfo(Out, DIvec);
}
Example #25
/*
 *  When phis are created, only the sigma and phi operands are inserted into them.
 *  Thus, we need to insert V, for which the sigmas and phis were created, as the
 *  incoming value on all incoming edges that do not yet have an operand associated
 *  with them.
 */
void vSSA::populatePhis(SmallVector<PHINode*, 25> &vssaphi_created, Value *V)
{
	// If any vSSA_PHI was created, iterate over the predecessors of the vSSA_PHIs to insert V as an operand from the branches where a sigma was not created.
	for (SmallVectorImpl<PHINode*>::iterator vit = vssaphi_created.begin(), vend = vssaphi_created.end(); vit != vend; ++vit) {
		PHINode *vssaphi = *vit;
		BasicBlock *BB_parent = vssaphi->getParent();
		
		DenseMap<BasicBlock*, unsigned> howManyTimesIsPred;
		
		// Count how many times each basic block is a predecessor of BB_parent.
		for (pred_iterator PI = pred_begin(BB_parent), PE = pred_end(BB_parent); PI != PE; ++PI) {
			BasicBlock *predBB = *PI;
			
			DenseMap<BasicBlock*, unsigned>::iterator mit = howManyTimesIsPred.find(predBB);
			
			if (mit == howManyTimesIsPred.end()) {
				howManyTimesIsPred.insert(std::make_pair(predBB, 1));
			}
			else {
				++mit->second;
			}
		}
		
		unsigned i, e;
		
		// If a predecessor already has incoming values in the vSSA_phi, we don't count them
		for (i = 0, e = vssaphi->getNumIncomingValues(); i < e; ++i) {
			--howManyTimesIsPred[vssaphi->getIncomingBlock(i)];
		}
		
		// Finally, add V as an incoming value from predBB as many times as necessary.
		for (DenseMap<BasicBlock*, unsigned>::iterator mit = howManyTimesIsPred.begin(), mend = howManyTimesIsPred.end(); mit != mend; ++mit) {
			unsigned count;
			BasicBlock *predBB = mit->first;
			
			for (count = mit->second; count > 0; --count) {
				vssaphi->addIncoming(V, predBB);
			}
		}
		
		howManyTimesIsPred.clear();
	}
}
Example #26
/// Calculate the additional register pressure that the registers used in MI
/// cause.
///
/// If 'ConsiderSeen' is true, updates 'RegSeen' and uses the information to
/// figure out which usages are live-ins.
/// FIXME: Figure out a way to consider 'RegSeen' from all code paths.
DenseMap<unsigned, int>
MachineLICM::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen,
                              bool ConsiderUnseenAsDef) {
  DenseMap<unsigned, int> Cost;
  if (MI->isImplicitDef())
    return Cost;
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    // FIXME: It seems bad to use RegSeen only for some of these calculations.
    bool isNew = ConsiderSeen ? RegSeen.insert(Reg).second : false;
    const TargetRegisterClass *RC = MRI->getRegClass(Reg);

    RegClassWeight W = TRI->getRegClassWeight(RC);
    int RCCost = 0;
    if (MO.isDef())
      RCCost = W.RegWeight;
    else {
      bool isKill = isOperandKill(MO, MRI);
      if (isNew && !isKill && ConsiderUnseenAsDef)
        // Haven't seen this, it must be a livein.
        RCCost = W.RegWeight;
      else if (!isNew && isKill)
        RCCost = -W.RegWeight;
    }
    if (RCCost == 0)
      continue;
    const int *PS = TRI->getRegClassPressureSets(RC);
    for (; *PS != -1; ++PS) {
      if (Cost.find(*PS) == Cost.end())
        Cost[*PS] = RCCost;
      else
        Cost[*PS] += RCCost;
    }
  }
  return Cost;
}
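The tail of calcRegisterCost guards the accumulation with Cost.find(*PS) before deciding between an assignment and +=. That guard is not strictly needed with DenseMap, because operator[] inserts a value-initialized mapped value (0 for int) for a missing key, so a single Cost[*PS] += RCCost covers both branches. A minimal sketch of that behavior, independent of the MachineLICM code above:

#include "llvm/ADT/DenseMap.h"
#include <cassert>

int main() {
  llvm::DenseMap<unsigned, int> Cost;
  // operator[] inserts a value-initialized int (0) when the key is missing,
  // so accumulation needs no explicit find()/end() check.
  Cost[3] += 5; // key 3 did not exist: 0 + 5
  Cost[3] += 2; // key 3 exists: 5 + 2
  assert(Cost[3] == 7);
  assert(Cost.size() == 1);
  return 0;
}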
Example #27
/// getVR - Return the virtual register corresponding to the specified result
/// of the specified node.
unsigned ScheduleDAGSDNodes::getVR(SDValue Op,
                                   DenseMap<SDValue, unsigned> &VRBaseMap) {
  if (Op.isMachineOpcode() &&
      Op.getMachineOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    // Add an IMPLICIT_DEF instruction before every use.
    unsigned VReg = getDstOfOnlyCopyToRegUse(Op.getNode(), Op.getResNo());
    // IMPLICIT_DEF can produce any type of result so its TargetInstrDesc
    // does not include operand register class info.
    if (!VReg) {
      const TargetRegisterClass *RC = TLI->getRegClassFor(Op.getValueType());
      VReg = MRI.createVirtualRegister(RC);
    }
    BuildMI(BB, Op.getDebugLoc(), TII->get(TargetInstrInfo::IMPLICIT_DEF),VReg);
    return VReg;
  }

  DenseMap<SDValue, unsigned>::iterator I = VRBaseMap.find(Op);
  assert(I != VRBaseMap.end() && "Node emitted out of order - late");
  return I->second;
}
Example #28
// bypassSlowDivision - This optimization identifies DIV instructions that can
// be profitably bypassed and carried out with a shorter, faster divide.
bool llvm::bypassSlowDivision(Function &F,
                              Function::iterator &I,
                              const DenseMap<unsigned int, unsigned int> &BypassWidths) {
  DivCacheTy DivCache;

  bool MadeChange = false;
  for (BasicBlock::iterator J = I->begin(); J != I->end(); J++) {

    // Get instruction details
    unsigned Opcode = J->getOpcode();
    bool UseDivOp = Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
    bool UseRemOp = Opcode == Instruction::SRem || Opcode == Instruction::URem;
    bool UseSignedOp = Opcode == Instruction::SDiv ||
                       Opcode == Instruction::SRem;

    // Only optimize div or rem ops
    if (!UseDivOp && !UseRemOp)
      continue;

    // Skip division on vector types, only optimize integer instructions
    if (!J->getType()->isIntegerTy())
      continue;

    // Get bitwidth of div/rem instruction
    IntegerType *T = cast<IntegerType>(J->getType());
    unsigned int bitwidth = T->getBitWidth();

    // Continue if bitwidth is not bypassed
    DenseMap<unsigned int, unsigned int>::const_iterator BI = BypassWidths.find(bitwidth);
    if (BI == BypassWidths.end())
      continue;

    // Get type for div/rem instruction with bypass bitwidth
    IntegerType *BT = IntegerType::get(J->getContext(), BI->second);

    MadeChange |= reuseOrInsertFastDiv(F, I, J, BT, UseDivOp,
                                       UseSignedOp, DivCache);
  }

  return MadeChange;
}
Example #29
/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                                      SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (ImmDefRegs.count(Reg) == 0)
      continue;
    DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
    assert(II != ImmDefMIs.end());
    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
      ++NumImmFold;
      return true;
    }
  }
  return false;
}
Example #30
// Calculate the intersection of all the FinalStates for a BasicBlock's
// predecessors.
static int getPredState(DenseMap<BasicBlock *, int> &FinalStates, Function &F,
                        int ParentBaseState, BasicBlock *BB) {
  // The entry block has no predecessors but we know that the prologue always
  // sets us up with a fixed state.
  if (&F.getEntryBlock() == BB)
    return ParentBaseState;

  // This is an EH Pad, conservatively report this basic block as overdefined.
  if (BB->isEHPad())
    return OverdefinedState;

  int CommonState = OverdefinedState;
  for (BasicBlock *PredBB : predecessors(BB)) {
    // We didn't manage to get a state for one of these predecessors,
    // conservatively report this basic block as overdefined.
    auto PredEndState = FinalStates.find(PredBB);
    if (PredEndState == FinalStates.end())
      return OverdefinedState;

    // This code is reachable via exceptional control flow,
    // conservatively report this basic block as overdefined.
    if (isa<CatchReturnInst>(PredBB->getTerminator()))
      return OverdefinedState;

    int PredState = PredEndState->second;
    assert(PredState != OverdefinedState &&
           "overdefined BBs shouldn't be in FinalStates");
    if (CommonState == OverdefinedState)
      CommonState = PredState;

    // At least two predecessors have different FinalStates,
    // conservatively report this basic block as overdefined.
    if (CommonState != PredState)
      return OverdefinedState;
  }

  return CommonState;
}
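Examples #7 and #30 apply the same meet operation over a small lattice with a single overdefined top element: start from overdefined, adopt the first neighbor's state, and give up as soon as a neighbor is unknown or two neighbors disagree. Below is a standalone sketch of that meet over plain integer states; the names and the OverdefinedState value are illustrative and not taken from the LLVM pass.

#include "llvm/ADT/DenseMap.h"
#include <cassert>
#include <utility>

static const int OverdefinedState = -1;

// Intersect the states of the given neighbor ids: return the common state when
// every known neighbor agrees, and OverdefinedState when any neighbor is
// missing from States or two neighbors disagree. This mirrors the loop
// structure of getSuccState and getPredState.
static int meetStates(const llvm::DenseMap<int, int> &States,
                      const int *Neighbors, unsigned Count) {
  int Common = OverdefinedState;
  for (unsigned i = 0; i != Count; ++i) {
    llvm::DenseMap<int, int>::const_iterator It = States.find(Neighbors[i]);
    if (It == States.end())
      return OverdefinedState; // unknown neighbor: be conservative
    if (Common == OverdefinedState)
      Common = It->second;     // adopt the first known state
    if (Common != It->second)
      return OverdefinedState; // disagreement: be conservative
  }
  return Common;
}

int main() {
  llvm::DenseMap<int, int> States;
  States.insert(std::make_pair(1, 7));
  States.insert(std::make_pair(2, 7));
  const int Agree[] = {1, 2};
  assert(meetStates(States, Agree, 2) == 7);                   // all agree on 7
  States.insert(std::make_pair(3, 9));
  const int Disagree[] = {1, 3};
  assert(meetStates(States, Disagree, 2) == OverdefinedState); // 7 vs 9
  return 0;
}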