Example #1
void Lint::visitEHBeginCatch(IntrinsicInst *II) {
  // The checks in this function make a potentially dubious assumption about
  // the CFG, namely that any block involved in a catch is only used for the
  // catch.  This will very likely be true of IR generated by a front end,
  // but it may cease to be true, for example, if the IR is run through a
  // pass which combines similar blocks.
  //
  // In general, if we encounter a block that isn't dominated by the catch
  // block while we are searching the catch block's successors for a call
  // to the end catch intrinsic, then it is possible that it will be legal for
  // a path through this block to never reach a call to llvm.eh.endcatch.
  // An analogous statement could be made about our search for a landing
  // pad among the catch block's predecessors.
  //
  // What is actually required is that no path is possible at runtime that
  // reaches a call to llvm.eh.begincatch without having previously visited
  // a landingpad instruction and that no path is possible at runtime that
  // calls llvm.eh.begincatch and does not subsequently call llvm.eh.endcatch
  // (mentally adjusting for the fact that in reality these calls will be
  // removed before code generation).
  //
  // Because this is a lint check, we take a pessimistic approach and warn if
  // the control flow is potentially incorrect.

  SmallSet<BasicBlock *, 4> VisitedBlocks;
  BasicBlock *CatchBB = II->getParent();

  // The begin catch must occur in a landing pad block or all paths
  // to it must have come from a landing pad.
  Assert(allPredsCameFromLandingPad(CatchBB, VisitedBlocks),
         "llvm.eh.begincatch may be reachable without passing a landingpad",
         II);

  // Reset the visited block list.
  VisitedBlocks.clear();

  IntrinsicInst *SecondBeginCatch = nullptr;

  // allSuccessorsReachEndCatch has to be called before the asserts below;
  // otherwise SecondBeginCatch is never set and the first assert can never
  // fire.
  bool EndCatchFound = allSuccessorsReachEndCatch(
      CatchBB, std::next(static_cast<BasicBlock::iterator>(II)),
      &SecondBeginCatch, VisitedBlocks);
  Assert(
      SecondBeginCatch == nullptr,
      "llvm.eh.begincatch may be called a second time before llvm.eh.endcatch",
      II, SecondBeginCatch);
  Assert(EndCatchFound,
         "Some paths from llvm.eh.begincatch may not reach llvm.eh.endcatch",
         II);
}
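
The comment block at the top of this example describes two CFG walks: one over the catch block's predecessors looking for a landing pad, and one over its successors looking for llvm.eh.endcatch. The helper allPredsCameFromLandingPad is referenced but not shown here, so the following is only a minimal sketch, under the assumption that the helper simply checks that every path into the block passes through a landing pad; it is not the actual LLVM implementation.

// Illustrative sketch only; the real allPredsCameFromLandingPad is not shown
// in this example.
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
using namespace llvm;

static bool predsCameFromLandingPadSketch(BasicBlock *BB,
                                          SmallSet<BasicBlock *, 4> &Visited) {
  // A landing pad block itself satisfies the requirement.
  if (BB->isLandingPad())
    return true;
  // A block that was already checked does not need to be checked again.
  if (Visited.count(BB))
    return true;
  Visited.insert(BB);
  // Reaching a block with no predecessors (the entry) means there is a path
  // to the begincatch that never passes through a landing pad.
  if (pred_begin(BB) == pred_end(BB))
    return false;
  for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI)
    if (!predsCameFromLandingPadSketch(*PI, Visited))
      return false;
  return true;
}
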
Example #2
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();

    bool First = true;
    MachineBasicBlock::iterator PMII;
    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      LocalMIs.insert(MI);

      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
          MI->hasUnmodeledSideEffects()) {
        ++MII;
        continue;
      }

      if (MI->isBitcast()) {
        if (optimizeBitcastInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      } else if (MI->isCompare()) {
        if (optimizeCmpInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      First = false;
      PMII = MII;
      ++MII;
    }
  }

  return Changed;
}
Example #3
bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
  bool Modified = false;

  SmallSet<unsigned, 4> Defs;
  SmallSet<unsigned, 4> Uses;
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineInstr *MI = &*MBBI;
    DebugLoc dl = MI->getDebugLoc();
    unsigned PredReg = 0;
    ARMCC::CondCodes CC = llvm::getITInstrPredicate(MI, PredReg);
    if (CC == ARMCC::AL) {
      ++MBBI;
      continue;
    }

    Defs.clear();
    Uses.clear();
    TrackDefUses(MI, Defs, Uses, TRI);

    // Insert an IT instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::t2IT))
      .addImm(CC);

    // Add implicit use of ITSTATE to IT block instructions.
    MI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*isDef*/,
                                             true/*isImp*/, false/*isKill*/));

    MachineInstr *LastITMI = MI;
    MachineBasicBlock::iterator InsertPos = MIB;
    ++MBBI;

    // Form IT block.
    ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
    unsigned Mask = 0, Pos = 3;
    // Branches, including tricky ones like LDM_RET, need to end an IT
    // block so check the instruction we just put in the block.
    for (; MBBI != E && Pos &&
           (!MI->isBranch() && !MI->isReturn()) ; ++MBBI) {
      if (MBBI->isDebugValue())
        continue;

      MachineInstr *NMI = &*MBBI;
      MI = NMI;

      unsigned NPredReg = 0;
      ARMCC::CondCodes NCC = llvm::getITInstrPredicate(NMI, NPredReg);
      if (NCC == CC || NCC == OCC) {
        Mask |= (NCC & 1) << Pos;
        // Add implicit use of ITSTATE.
        NMI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*isDef*/,
                                                  true/*isImp*/, false/*isKill*/));
        LastITMI = NMI;
      } else {
        if (NCC == ARMCC::AL &&
            MoveCopyOutOfITBlock(NMI, CC, OCC, Defs, Uses)) {
          --MBBI;
          MBB.remove(NMI);
          MBB.insert(InsertPos, NMI);
          ++NumMovedInsts;
          continue;
        }
        break;
      }
      TrackDefUses(NMI, Defs, Uses, TRI);
      --Pos;
    }

    // Finalize IT mask.
    Mask |= (1 << Pos);
    // Tag along (firstcond[0] << 4) with the mask.
    Mask |= (CC & 1) << 4;
    MIB.addImm(Mask);

    // Last instruction in IT block kills ITSTATE.
    LastITMI->findRegisterUseOperand(ARM::ITSTATE)->setIsKill();

    Modified = true;
    ++NumITs;
  }

  return Modified;
}
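
The mask passed to the t2IT instruction above packs the condition bits of the second and later instructions, a terminator bit at the current position, and firstcond[0] in bit 4. The following self-contained example replays that arithmetic for a hypothetical "ITTE EQ" block (first instruction EQ, second EQ, third NE); the only assumption is the low condition-code bits EQ = 0 and NE = 1.

#include <cassert>

int main() {
  unsigned CC = 0;           // EQ, the first condition (firstcond)
  unsigned OCC = 1;          // NE, the opposite condition
  unsigned Mask = 0, Pos = 3;

  // Second instruction is EQ ("then"): its low bit goes in at Pos = 3.
  Mask |= (CC & 1) << Pos;   // still 0
  --Pos;                     // Pos = 2
  // Third instruction is NE ("else"): its low bit goes in at Pos = 2.
  Mask |= (OCC & 1) << Pos;  // Mask = 0b0100
  --Pos;                     // Pos = 1

  // Finalize: terminator bit at the current position, then firstcond[0] << 4.
  Mask |= (1 << Pos);        // Mask = 0b0110
  Mask |= (CC & 1) << 4;     // firstcond[0] is 0, so the mask stays 0b00110
  assert(Mask == 0x6);
  return 0;
}
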
Example #4
bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &fn) {

  DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
               << "********** Function: "
               << fn.getFunction()->getName() << '\n');

  LiveIntervals *lis = &getAnalysis<LiveIntervals>();
  MachineLoopInfo *loopInfo = &getAnalysis<MachineLoopInfo>();
  const TargetInstrInfo *tii = fn.getTarget().getInstrInfo();
  MachineRegisterInfo *mri = &fn.getRegInfo();

  SmallSet<unsigned, 4> processed;
  for (MachineFunction::iterator mbbi = fn.begin(), mbbe = fn.end();
       mbbi != mbbe; ++mbbi) {
    MachineBasicBlock* mbb = mbbi;
    SlotIndex mbbEnd = lis->getMBBEndIdx(mbb);
    MachineLoop* loop = loopInfo->getLoopFor(mbb);
    unsigned loopDepth = loop ? loop->getLoopDepth() : 0;
    bool isExiting = loop ? loop->isLoopExiting(mbb) : false;

    for (MachineBasicBlock::const_iterator mii = mbb->begin(), mie = mbb->end();
         mii != mie; ++mii) {
      const MachineInstr *mi = mii;
      if (tii->isIdentityCopy(*mi) || mi->isImplicitDef() || mi->isDebugValue())
        continue;

      for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
        const MachineOperand &mopi = mi->getOperand(i);
        if (!mopi.isReg() || mopi.getReg() == 0)
          continue;
        unsigned reg = mopi.getReg();
        if (!TargetRegisterInfo::isVirtualRegister(mopi.getReg()))
          continue;
        // Multiple uses of reg by the same instruction should not contribute
        // to the spill weight more than once.
        if (!processed.insert(reg))
          continue;

        bool hasDef = mopi.isDef();
        bool hasUse = !hasDef;
        for (unsigned j = i+1; j != e; ++j) {
          const MachineOperand &mopj = mi->getOperand(j);
          if (!mopj.isReg() || mopj.getReg() != reg)
            continue;
          hasDef |= mopj.isDef();
          hasUse |= mopj.isUse();
          if (hasDef && hasUse)
            break;
        }

        LiveInterval &regInt = lis->getInterval(reg);
        float weight = lis->getSpillWeight(hasDef, hasUse, loopDepth);
        if (hasDef && isExiting) {
          // Looks like this is a loop count variable update.
          SlotIndex defIdx = lis->getInstructionIndex(mi).getDefIndex();
          const LiveRange *dlr =
            lis->getInterval(reg).getLiveRangeContaining(defIdx);
          if (dlr->end >= mbbEnd)
            weight *= 3.0F;
        }
        regInt.weight += weight;
      }
      processed.clear();
    }
  }

  for (LiveIntervals::iterator I = lis->begin(), E = lis->end(); I != E; ++I) {
    LiveInterval &li = *I->second;
    if (TargetRegisterInfo::isVirtualRegister(li.reg)) {
      // If the live interval length is essentially zero, i.e. in every live
      // range the use follows def immediately, it doesn't make sense to spill
      // it and hope it will be easier to allocate for this li.
      if (isZeroLengthInterval(&li)) {
        li.weight = HUGE_VALF;
        continue;
      }

      bool isLoad = false;
      SmallVector<LiveInterval*, 4> spillIs;
      if (lis->isReMaterializable(li, spillIs, isLoad)) {
        // If all of the definitions of the interval are re-materializable,
        // it is a preferred candidate for spilling. If none of the defs are
        // loads, then it's potentially very cheap to re-materialize.
        // FIXME: this gets much more complicated once we support non-trivial
        // re-materialization.
        if (isLoad)
          li.weight *= 0.9F;
        else
          li.weight *= 0.5F;
      }

      // Slightly prefer a live interval that has been assigned a preferred
      // register.
      std::pair<unsigned, unsigned> Hint = mri->getRegAllocationHint(li.reg);
      if (Hint.first || Hint.second)
        li.weight *= 1.01F;

      lis->normalizeSpillWeight(li);
    }
  }
  
  return false;
}
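
The per-operand weight above starts from lis->getSpillWeight (not part of this example) and is then scaled by the factors visible in the code: 3x for a def that stays live past the end of a loop-exiting block, 0.9x or 0.5x for re-materializable intervals, and 1.01x for a register-allocation hint. The following self-contained sketch replays only those adjustments on a made-up base weight.

#include <cstdio>

int main() {
  float weight = 2.0f;            // stand-in for lis->getSpillWeight(...)

  bool defLivesPastLoopExit = true;  // corresponds to dlr->end >= mbbEnd above
  if (defLivesPastLoopExit)
    weight *= 3.0f;               // likely a loop counter update: weigh it up

  float liWeight = weight;        // accumulated into regInt.weight per operand

  bool isReMaterializable = true, defsAreLoads = false;
  if (isReMaterializable)
    liWeight *= defsAreLoads ? 0.9f : 0.5f;  // cheap to re-materialize

  bool hasRegAllocHint = true;
  if (hasRegAllocHint)
    liWeight *= 1.01f;            // slightly prefer hinted intervals

  std::printf("final weight: %f\n", liWeight);  // 2 * 3 * 0.5 * 1.01 = 3.03
  return 0;
}
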
Example #5
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipOptnoneFunction(*MF.getFunction()))
    return false;

  DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
  DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');

  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : nullptr;

  bool Changed = false;

  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    SmallPtrSet<MachineInstr*, 8> LocalMIs;
    SmallSet<unsigned, 4> ImmDefRegs;
    DenseMap<unsigned, MachineInstr*> ImmDefMIs;
    SmallSet<unsigned, 16> FoldAsLoadDefCandidates;

    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      // We may be erasing MI below, increment MII now.
      ++MII;
      LocalMIs.insert(MI);

      // Skip debug values. They should not affect this peephole optimization.
      if (MI->isDebugValue())
          continue;

      // If there exists an instruction which belongs to the following
      // categories, we will discard the load candidates.
      if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() ||
          MI->hasUnmodeledSideEffects()) {
        FoldAsLoadDefCandidates.clear();
        continue;
      }
      if (MI->mayStore() || MI->isCall())
        FoldAsLoadDefCandidates.clear();

      if (((MI->isBitcast() || MI->isCopy()) && optimizeCopyOrBitcast(MI)) ||
          (MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
          (MI->isSelect() && optimizeSelect(MI))) {
        // MI is deleted.
        LocalMIs.erase(MI);
        Changed = true;
        continue;
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        // optimizeExtInstr might have created new instructions after MI
        // and before the already incremented MII. Adjust MII so that the
        // next iteration sees the new instructions.
        MII = MI;
        ++MII;
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      // Check whether MI is a load candidate for folding into a later
      // instruction. If MI is not a candidate, check whether we can fold an
      // earlier load into MI.
      if (!isLoadFoldable(MI, FoldAsLoadDefCandidates) &&
          !FoldAsLoadDefCandidates.empty()) {
        const MCInstrDesc &MIDesc = MI->getDesc();
        for (unsigned i = MIDesc.getNumDefs(); i != MIDesc.getNumOperands();
             ++i) {
          const MachineOperand &MOp = MI->getOperand(i);
          if (!MOp.isReg())
            continue;
          unsigned FoldAsLoadDefReg = MOp.getReg();
          if (FoldAsLoadDefCandidates.count(FoldAsLoadDefReg)) {
            // We need to fold load after optimizeCmpInstr, since
            // optimizeCmpInstr can enable folding by converting SUB to CMP.
            // Save FoldAsLoadDefReg because optimizeLoadInstr() resets it and
            // we need it for markUsesInDebugValueAsUndef().
            unsigned FoldedReg = FoldAsLoadDefReg;
            MachineInstr *DefMI = nullptr;
            MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                                          FoldAsLoadDefReg,
                                                          DefMI);
            if (FoldMI) {
              // Update LocalMIs since we replaced MI with FoldMI and deleted
              // DefMI.
              DEBUG(dbgs() << "Replacing: " << *MI);
              DEBUG(dbgs() << "     With: " << *FoldMI);
              LocalMIs.erase(MI);
              LocalMIs.erase(DefMI);
              LocalMIs.insert(FoldMI);
              MI->eraseFromParent();
              DefMI->eraseFromParent();
              MRI->markUsesInDebugValueAsUndef(FoldedReg);
              FoldAsLoadDefCandidates.erase(FoldedReg);
              ++NumLoadFold;
              // MI is replaced with FoldMI.
              Changed = true;
              break;
            }
          }
        }
      }
    }
  }

  return Changed;
}
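
The loop above only records "load candidates"; what qualifies is decided by isLoadFoldable, which is not part of this example. The sketch below shows the kind of test such a helper performs under that assumption: a load the target allows to be folded, whose single full virtual-register def has exactly one non-debug use.

// Illustrative sketch only; the real isLoadFoldable is not shown in this
// example.
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

static bool isLoadFoldableSketch(MachineInstr *MI, MachineRegisterInfo *MRI,
                                 SmallSet<unsigned, 16> &Candidates) {
  // Only loads the target says can be folded into another instruction.
  if (!MI->canFoldAsLoad() || !MI->mayLoad())
    return false;
  if (MI->getDesc().getNumDefs() != 1)
    return false;
  // A single full virtual-register def with exactly one non-debug use is the
  // easy case: the use can later be rewritten to read from memory directly.
  unsigned Reg = MI->getOperand(0).getReg();
  if (MI->getOperand(0).getSubReg() == 0 &&
      TargetRegisterInfo::isVirtualRegister(Reg) &&
      MRI->hasOneNonDBGUse(Reg)) {
    Candidates.insert(Reg);
    return true;
  }
  return false;
}
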
Example #6
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
    if (DisablePeephole)
        return false;

    TM  = &MF.getTarget();
    TII = TM->getInstrInfo();
    MRI = &MF.getRegInfo();
    DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

    bool Changed = false;

    SmallPtrSet<MachineInstr*, 8> LocalMIs;
    SmallSet<unsigned, 4> ImmDefRegs;
    DenseMap<unsigned, MachineInstr*> ImmDefMIs;
    unsigned FoldAsLoadDefReg;
    for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
        MachineBasicBlock *MBB = &*I;

        bool SeenMoveImm = false;
        LocalMIs.clear();
        ImmDefRegs.clear();
        ImmDefMIs.clear();
        FoldAsLoadDefReg = 0;

        for (MachineBasicBlock::iterator
                MII = I->begin(), MIE = I->end(); MII != MIE; ) {
            MachineInstr *MI = &*MII;
            // We may be erasing MI below, increment MII now.
            ++MII;
            LocalMIs.insert(MI);

            // If there exists an instruction which belongs to the following
            // categories, we will discard the load candidate.
            if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
                    MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
                    MI->hasUnmodeledSideEffects()) {
                FoldAsLoadDefReg = 0;
                continue;
            }
            if (MI->mayStore() || MI->isCall())
                FoldAsLoadDefReg = 0;

            if ((MI->isBitcast() && optimizeBitcastInstr(MI, MBB)) ||
                    (MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
                    (MI->isSelect() && optimizeSelect(MI))) {
                // MI is deleted.
                LocalMIs.erase(MI);
                Changed = true;
                continue;
            }

            if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
                SeenMoveImm = true;
            } else {
                Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
                // optimizeExtInstr might have created new instructions after MI
                // and before the already incremented MII. Adjust MII so that the
                // next iteration sees the new instructions.
                MII = MI;
                ++MII;
                if (SeenMoveImm)
                    Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
            }

            // Check whether MI is a load candidate for folding into a later
            // instruction. If MI is not a candidate, check whether we can fold an
            // earlier load into MI.
            if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
                // We need to fold load after optimizeCmpInstr, since optimizeCmpInstr
                // can enable folding by converting SUB to CMP.
                MachineInstr *DefMI = 0;
                MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                       FoldAsLoadDefReg, DefMI);
                if (FoldMI) {
                    // Update LocalMIs since we replaced MI with FoldMI and deleted DefMI.
                    LocalMIs.erase(MI);
                    LocalMIs.erase(DefMI);
                    LocalMIs.insert(FoldMI);
                    MI->eraseFromParent();
                    DefMI->eraseFromParent();
                    ++NumLoadFold;

                    // MI is replaced with FoldMI.
                    Changed = true;
                    continue;
                }
            }
        }
    }

    return Changed;
}
Example #7
bool AddressSanitizer::handleFunction(Module &M, Function &F) {
  if (BL->isIn(F)) return false;
  if (&F == AsanCtorFunction) return false;

  // If needed, insert __asan_init before checking for AddressSafety attr.
  maybeInsertAsanInitAtFunctionEntry(F);

  if (!F.hasFnAttr(Attribute::AddressSafety)) return false;

  if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
    return false;
  // We want to instrument every address only once per basic block
  // (unless there are calls between uses).
  SmallSet<Value*, 16> TempsToInstrument;
  SmallVector<Instruction*, 16> ToInstrument;
  SmallVector<Instruction*, 8> NoReturnCalls;
  bool IsWrite;

  // Fill the set of memory operations to instrument.
  for (Function::iterator FI = F.begin(), FE = F.end();
       FI != FE; ++FI) {
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      if (LooksLikeCodeInBug11395(BI)) return false;
      if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite)) {
        if (ClOpt && ClOptSameTemp) {
          if (!TempsToInstrument.insert(Addr))
            continue;  // We've seen this temp in the current BB.
        }
      } else if (isa<MemIntrinsic>(BI) && ClMemIntrin) {
        // ok, take it.
      } else {
        if (CallInst *CI = dyn_cast<CallInst>(BI)) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CI->doesNotReturn()) {
            NoReturnCalls.push_back(CI);
          }
        }
        continue;
      }
      ToInstrument.push_back(BI);
      NumInsnsPerBB++;
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
        break;
    }
  }

  AsanFunctionContext AFC(F);

  // Instrument.
  int NumInstrumented = 0;
  for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
    Instruction *Inst = ToInstrument[i];
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      if (isInterestingMemoryAccess(Inst, &IsWrite))
        instrumentMop(AFC, Inst);
      else
        instrumentMemIntrinsic(AFC, cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  DEBUG(dbgs() << F);

  bool ChangedStack = poisonStackInFunction(M, F);

  // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
  // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
  for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) {
    Instruction *CI = NoReturnCalls[i];
    IRBuilder<> IRB(CI);
    IRB.CreateCall(M.getOrInsertFunction(kAsanHandleNoReturnName,
                                         IRB.getVoidTy(), NULL));
  }

  return NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();
}
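
instrumentMop (not shown in this example) inserts a shadow-memory check in front of each collected access. The sketch below is a self-contained illustration of that check using the classic ASan mapping Shadow = (Addr >> 3) + Offset; the offset constant is illustrative only, not the value the pass uses on any particular target.

#include <cstdint>

static const uintptr_t kShadowOffsetExample = 0x20000000; // illustrative only
static const unsigned kShadowScale = 3;  // 8 application bytes -> 1 shadow byte

static bool accessIsPoisoned(uintptr_t Addr, unsigned AccessSize) {
  uint8_t ShadowValue = *reinterpret_cast<uint8_t *>(
      (Addr >> kShadowScale) + kShadowOffsetExample);
  if (ShadowValue == 0)
    return false;  // the whole 8-byte granule is addressable
  // Otherwise only the first ShadowValue bytes of the granule are addressable,
  // so compare the last accessed byte's offset within the granule against it.
  uintptr_t LastByte = (Addr & 7) + AccessSize - 1;
  return LastByte >= ShadowValue;
}
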
Example #8
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {

  DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n"
               << "********** Function: "
               << ((Value*)fn.getFunction())->getName() << '\n');

  bool Changed = false;

  TII = fn.getTarget().getInstrInfo();
  TRI = fn.getTarget().getRegisterInfo();
  MRI = &fn.getRegInfo();
  LV = &getAnalysis<LiveVariables>();

  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  SmallVector<MachineInstr*, 4> RUses;
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  SmallPtrSet<MachineInstr*, 8> ModInsts;

  MachineBasicBlock *Entry = fn.begin();
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->isImplicitDef()) {
        ImpDefMIs.push_back(MI);
        // Is this a sub-register read-modify-write?
        if (MI->getOperand(0).readsReg())
          continue;
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
          for (const unsigned *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
            ImpDefRegs.insert(*SS);
        }
        continue;
      }

      // Eliminate %reg1032:sub<def> = COPY undef.
      if (MI->isCopy() && MI->getOperand(0).readsReg()) {
        MachineOperand &MO = MI->getOperand(1);
        if (MO.isUndef() || ImpDefRegs.count(MO.getReg())) {
          if (MO.isKill()) {
            LiveVariables::VarInfo& vi = LV->getVarInfo(MO.getReg());
            vi.removeKill(MI);
          }
          unsigned Reg = MI->getOperand(0).getReg();
          MI->eraseFromParent();
          Changed = true;

          // A REG_SEQUENCE may have been expanded into partial definitions.
          // If this was the last one, mark Reg as implicitly defined.
          if (TargetRegisterInfo::isVirtualRegister(Reg) && MRI->def_empty(Reg))
            ImpDefRegs.insert(Reg);
          continue;
        }
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.readsReg())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, i, ImpDefRegs)) {
          bool isKill = MO.isKill();
          MI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill) {
            ImpDefRegs.erase(Reg);
            LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
            vi.removeKill(MI);
          }
          ChangedToImpDef = true;
          Changed = true;
          break;
        }

        Changed = true;
        MO.setIsUndef();
        // This is a partial register redef of an implicit def.
        // Make sure the whole register is defined by the instruction.
        if (MO.isDef()) {
          MI->addRegisterDefined(Reg);
          continue;
        }
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other reads of Reg are also marked <undef>.
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.getReg() == Reg && MOJ.readsReg())
              MOJ.setIsUndef();
          }
          ImpDefRegs.erase(Reg);
        }
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }

    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers, since they cannot be live out.
        MI->eraseFromParent();
        Changed = true;
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // uses.
      bool Skip = false;
      SmallVector<MachineInstr*, 4> DeadImpDefs;
      for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(Reg),
             DE = MRI->def_end(); DI != DE; ++DI) {
        MachineInstr *DeadImpDef = &*DI;
        if (!DeadImpDef->isImplicitDef()) {
          Skip = true;
          break;
        }
        DeadImpDefs.push_back(DeadImpDef);
      }
      if (Skip)
        continue;

      // The only implicit_defs we want to keep are those that are live
      // out of their block.
      for (unsigned j = 0, ee = DeadImpDefs.size(); j != ee; ++j)
        DeadImpDefs[j]->eraseFromParent();
      Changed = true;

      // Process each use instruction once.
      for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
             UE = MRI->use_end(); UI != UE; ++UI) {
        if (UI.getOperand().isUndef())
          continue;
        MachineInstr *RMI = &*UI;
        if (ModInsts.insert(RMI))
          RUses.push_back(RMI);
      }

      for (unsigned i = 0, e = RUses.size(); i != e; ++i) {
        MachineInstr *RMI = RUses[i];

        // Turn a copy use into an implicit_def.
        if (isUndefCopy(RMI, Reg, ImpDefRegs)) {
          RMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));

          bool isKill = false;
          SmallVector<unsigned, 4> Ops;
          for (unsigned j = 0, ee = RMI->getNumOperands(); j != ee; ++j) {
            MachineOperand &RRMO = RMI->getOperand(j);
            if (RRMO.isReg() && RRMO.getReg() == Reg) {
              Ops.push_back(j);
              if (RRMO.isKill())
                isKill = true;
            }
          }
          // Leave the other operands alone.
          for (unsigned j = 0, ee = Ops.size(); j != ee; ++j) {
            unsigned OpIdx = Ops[j];
            RMI->RemoveOperand(OpIdx-j);
          }

          // Update LiveVariables varinfo if the instruction is a kill.
          if (isKill) {
            LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
            vi.removeKill(RMI);
          }
          continue;
        }

        // Replace Reg with a new vreg that's marked implicit.
        const TargetRegisterClass* RC = MRI->getRegClass(Reg);
        unsigned NewVReg = MRI->createVirtualRegister(RC);
        bool isKill = true;
        for (unsigned j = 0, ee = RMI->getNumOperands(); j != ee; ++j) {
          MachineOperand &RRMO = RMI->getOperand(j);
          if (RRMO.isReg() && RRMO.getReg() == Reg) {
            RRMO.setReg(NewVReg);
            RRMO.setIsUndef();
            if (isKill) {
              // Only the first operand of NewVReg is marked kill.
              RRMO.setIsKill();
              isKill = false;
            }
          }
        }
      }
      RUses.clear();
      ModInsts.clear();
    }
    ImpDefRegs.clear();
    ImpDefMIs.clear();
  }

  return Changed;
}
Example #9
bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
  bool Modified = false;

  SmallSet<unsigned, 4> Defs;
  SmallSet<unsigned, 4> Uses;
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineInstr *MI = &*MBBI;
    DebugLoc dl = MI->getDebugLoc();
    unsigned PredReg = 0;
    ARMCC::CondCodes CC = getPredicate(MI, PredReg);
    if (CC == ARMCC::AL) {
      ++MBBI;
      continue;
    }

    Defs.clear();
    Uses.clear();
    TrackDefUses(MI, Defs, Uses);

    // Insert an IT instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::t2IT))
      .addImm(CC);
    MachineBasicBlock::iterator InsertPos = MIB;
    ++MBBI;

    // Finalize IT mask.
    ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
    unsigned Mask = 0, Pos = 3;
    // Branches, including tricky ones like LDM_RET, need to end an IT
    // block so check the instruction we just put in the block.
    for (; MBBI != E && Pos &&
           (!MI->getDesc().isBranch() && !MI->getDesc().isReturn()) ; ++MBBI) {
      if (MBBI->isDebugValue())
        continue;

      MachineInstr *NMI = &*MBBI;
      MI = NMI;

      unsigned NPredReg = 0;
      ARMCC::CondCodes NCC = getPredicate(NMI, NPredReg);
      if (NCC == CC || NCC == OCC)
        Mask |= (NCC & 1) << Pos;
      else {
        unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
        if (NCC == ARMCC::AL &&
            TII->isMoveInstr(*NMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
          assert(SrcSubIdx == 0 && DstSubIdx == 0 &&
                 "Sub-register indices still around?");
          // llvm models selects as two-address instructions. That means a copy
          // is inserted before a t2MOVccr, etc. If the copy is scheduled in
          // between selects, we would end up creating multiple IT blocks.
          if (!Uses.count(DstReg) && !Defs.count(SrcReg)) {
            --MBBI;
            MBB.remove(NMI);
            MBB.insert(InsertPos, NMI);
            ++NumMovedInsts;
            continue;
          }
        }
        break;
      }
      TrackDefUses(NMI, Defs, Uses);
      --Pos;
    }

    Mask |= (1 << Pos);
    // Tag along (firstcond[0] << 4) with the mask.
    Mask |= (CC & 1) << 4;
    MIB.addImm(Mask);
    Modified = true;
    ++NumITs;
  }

  return Modified;
}