Example #1
// Add newly allocated physical registers to the MBB live in sets.
void RegAllocBase::addMBBLiveIns(MachineFunction *MF) {
  NamedRegionTimer T("MBB Live Ins", TimerGroupName, TimePassesIsEnabled);
  SlotIndexes *Indexes = LIS->getSlotIndexes();
  if (MF->size() <= 1)
    return;

  LiveIntervalUnion::SegmentIter SI;
  for (unsigned PhysReg = 0; PhysReg < PhysReg2LiveUnion.numRegs(); ++PhysReg) {
    LiveIntervalUnion &LiveUnion = PhysReg2LiveUnion[PhysReg];
    if (LiveUnion.empty())
      continue;
    MachineFunction::iterator MBB = llvm::next(MF->begin());
    MachineFunction::iterator MFE = MF->end();
    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(MBB);
    SI.setMap(LiveUnion.getMap());
    SI.find(Start);
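    // Walk the union's segments and the function's blocks in lockstep: a
    // segment that starts at or before the current block's start index means
    // PhysReg is live-in to that block.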
    while (SI.valid()) {
      if (SI.start() <= Start) {
        if (!MBB->isLiveIn(PhysReg))
          MBB->addLiveIn(PhysReg);
      } else if (SI.start() > Stop)
        MBB = Indexes->getMBBFromIndex(SI.start().getPrevIndex());
      if (++MBB == MFE)
        break;
      tie(Start, Stop) = Indexes->getMBBRange(MBB);
      SI.advanceTo(Start);
    }
  }
}
Example #2
static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
                                 ShrinkToUsesWorkList &WorkList,
                                 const LiveRange &OldRange) {
  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;
  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
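    // Use the slot before Idx so that a block-boundary index (such as a
    // predecessor's end index pushed below) resolves to the block that ends
    // there rather than the block that starts there.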
    const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = Indexes.getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = LR.extendInBlock(BlockStart, Idx)) {
      assert(ExtVNI == VNI && "Unexpected existing value number");
      (void)ExtVNI;
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart ||
          !UsedPHIs.insert(VNI).second)
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (auto &Pred : MBB->predecessors()) {
        if (!LiveOut.insert(Pred).second)
          continue;
        SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (auto &Pred : MBB->predecessors()) {
      if (!LiveOut.insert(Pred).second)
        continue;
      SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
      assert(OldRange.getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }
}
Example #3
static void createDeadDef(SlotIndexes &Indexes, VNInfo::Allocator &Alloc,
                          LiveRange &LR, const MachineOperand &MO) {
  const MachineInstr &MI = *MO.getParent();
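  // Early-clobber defs are placed at the early-clobber slot, which comes
  // before the normal register slot, so they interfere with the uses of the
  // same instruction.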
  SlotIndex DefIdx =
      Indexes.getInstructionIndex(MI).getRegSlot(MO.isEarlyClobber());

  // Create the def in LR. This may find an existing def.
  LR.createDeadDef(DefIdx, Alloc);
}
Example #4
static void determineMissingVNIs(const SlotIndexes &Indexes, LiveInterval &LI) {
  SmallPtrSet<const MachineBasicBlock*, 5> Visited;

  LiveRange::iterator OutIt;
  VNInfo *PrevValNo = nullptr;
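  // OutIt trails I and compacts the segment list in place whenever adjacent
  // segments turn out to carry the same value number.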
  for (LiveRange::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
    LiveRange::Segment &S = *I;
    // Determine final VNI if necessary.
    if (S.valno == nullptr) {
      // This can only happen at the beginning of a basic block.
      assert(S.start.isBlock() && "valno should only be missing at block begin");

      Visited.clear();
      const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(S.start);
      for (const MachineBasicBlock *Pred : MBB->predecessors()) {
        VNInfo *VNI = searchForVNI(Indexes, LI, Pred, Visited);
        if (VNI != nullptr) {
          S.valno = VNI;
          break;
        }
      }
      assert(S.valno != nullptr && "could not determine valno");
    }
    // Merge with previous segment if it has the same VNI.
    if (PrevValNo == S.valno && OutIt->end == S.start) {
      OutIt->end = S.end;
    } else {
      // Didn't merge. Move OutIt to next segment.
      if (PrevValNo == nullptr)
        OutIt = LI.begin();
      else
        ++OutIt;

      if (OutIt != I)
        *OutIt = *I;
      PrevValNo = S.valno;
    }
  }
  // If we merged some segments, chop off the now-unused tail of the list.
  ++OutIt;
  LI.segments.erase(OutIt, LI.end());
}
Example #5
bool LiveRangeCalc::isJointlyDominated(const MachineBasicBlock *MBB,
                                       ArrayRef<SlotIndex> Defs,
                                       const SlotIndexes &Indexes) {
  const MachineFunction &MF = *MBB->getParent();
  BitVector DefBlocks(MF.getNumBlockIDs());
  for (SlotIndex I : Defs)
    DefBlocks.set(Indexes.getMBBFromIndex(I)->getNumber());

  SetVector<unsigned> PredQueue;
  PredQueue.insert(MBB->getNumber());
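  // Breadth-first walk over the predecessors. The SetVector deduplicates
  // blocks while PredQueue.size() grows as new blocks are discovered, so the
  // loop terminates once the reachable region has been explored.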
  for (unsigned i = 0; i != PredQueue.size(); ++i) {
    unsigned BN = PredQueue[i];
    if (DefBlocks[BN])
      return true;
    const MachineBasicBlock *B = MF.getBlockNumbered(BN);
    for (const MachineBasicBlock *P : B->predecessors())
      PredQueue.insert(P->getNumber());
  }
  return false;
}
Example #6
static void determineMissingVNIs(const SlotIndexes &Indexes, LiveInterval &LI) {
  SmallPtrSet<const MachineBasicBlock*, 5> Visited;
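  // Any segment still lacking a value number must be live-in to its block;
  // find a reaching value number by searching through the CFG predecessors.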
  for (LiveRange::Segment &S : LI.segments) {
    if (S.valno != nullptr)
      continue;
    // This can only happen at the beginning of a basic block.
    assert(S.start.isBlock() && "valno should only be missing at block begin");

    Visited.clear();
    const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(S.start);
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      VNInfo *VNI = searchForVNI(Indexes, LI, Pred, Visited);
      if (VNI != nullptr) {
        S.valno = VNI;
        break;
      }
    }
    assert(S.valno != nullptr && "could not determine valno");
  }
}
Example #7
bool LiveInterval::overlaps(const LiveInterval &Other,
                            const CoalescerPair &CP,
                            const SlotIndexes &Indexes) const {
  assert(!empty() && "empty interval");
  if (Other.empty())
    return false;

  // Use binary searches to find initial positions.
  const_iterator I = find(Other.beginIndex());
  const_iterator IE = end();
  if (I == IE)
    return false;
  const_iterator J = Other.find(I->start);
  const_iterator JE = Other.end();
  if (J == JE)
    return false;

  for (;;) {
    // J has just been advanced to satisfy:
    assert(J->end >= I->start);
    // Check for an overlap.
    if (J->start < I->end) {
      // I and J are overlapping. Find the later start.
      SlotIndex Def = std::max(I->start, J->start);
      // Allow the overlap if Def is a coalescable copy.
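      // A value starting at a block boundary is live-in rather than defined
      // by a copy here, so such an overlap is always real.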
      if (Def.isBlock() ||
          !CP.isCoalescable(Indexes.getInstructionFromIndex(Def)))
        return true;
    }
    // Advance the iterator that ends first to check for more overlaps.
    if (J->end > I->end) {
      std::swap(I, J);
      std::swap(IE, JE);
    }
    // Advance J until J->end >= I->start.
    do
      if (++J == JE)
        return false;
    while (J->end < I->start);
  }
}
Example #8
void LiveInterval::computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs,
                                         LaneBitmask LaneMask,
                                         const MachineRegisterInfo &MRI,
                                         const SlotIndexes &Indexes) const {
  assert(TargetRegisterInfo::isVirtualRegister(reg));
  LaneBitmask VRegMask = MRI.getMaxLaneMaskForVReg(reg);
  assert((VRegMask & LaneMask).any());
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
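  // An <undef> flag on a subregister def leaves the lanes outside the written
  // sub-register undefined at that point; record the def slot whenever those
  // lanes overlap the requested LaneMask.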
  for (const MachineOperand &MO : MRI.def_operands(reg)) {
    if (!MO.isUndef())
      continue;
    unsigned SubReg = MO.getSubReg();
    assert(SubReg != 0 && "Undef should only be set on subreg defs");
    LaneBitmask DefMask = TRI.getSubRegIndexLaneMask(SubReg);
    LaneBitmask UndefMask = VRegMask & ~DefMask;
    if ((UndefMask & LaneMask).any()) {
      const MachineInstr &MI = *MO.getParent();
      bool EarlyClobber = MO.isEarlyClobber();
      SlotIndex Pos = Indexes.getInstructionIndex(MI).getRegSlot(EarlyClobber);
      Undefs.push_back(Pos);
    }
  }
}
Example #9
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
  // Splitting the critical edge to a landing pad block is non-trivial. Don't do
  // it in this generic function.
  if (Succ->isLandingPad())
    return nullptr;

  MachineFunction *MF = getParent();
  DebugLoc dl;  // FIXME: this is nowhere

  // Performance might be harmed on HW that implements branching using exec mask
  // where both sides of the branches are always executed.
  if (MF->getTarget().requiresStructuredCFG())
    return nullptr;

  // We may need to update this block's terminator, but we can't do that if
  // AnalyzeBranch fails. If this block uses a jump table, we won't touch it.
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
    return nullptr;

  // Avoid bugpoint weirdness: a block may end with a conditional branch but
  // jump to the same MBB in either case. That gives us duplicate CFG edges,
  // which we can't handle. Since this never happens in properly optimized
  // code, just skip those edges.
  if (TBB && TBB == FBB) {
    DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
                 << getNumber() << '\n');
    return nullptr;
  }

  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
  MF->insert(std::next(MachineFunction::iterator(this)), NMBB);
  DEBUG(dbgs() << "Splitting critical edge:"
        " BB#" << getNumber()
        << " -- BB#" << NMBB->getNumber()
        << " -- BB#" << Succ->getNumber() << '\n');

  LiveIntervals *LIS = P->getAnalysisIfAvailable<LiveIntervals>();
  SlotIndexes *Indexes = P->getAnalysisIfAvailable<SlotIndexes>();
  if (LIS)
    LIS->insertMBBInMaps(NMBB);
  else if (Indexes)
    Indexes->insertMBBInMaps(NMBB);

  // On some targets like Mips, branches may kill virtual registers. Make sure
  // that LiveVariables is properly updated after updateTerminator replaces the
  // terminators.
  LiveVariables *LV = P->getAnalysisIfAvailable<LiveVariables>();

  // Collect a list of virtual registers killed by the terminators.
  SmallVector<unsigned, 4> KilledRegs;
  if (LV)
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I) {
      MachineInstr *MI = I;
      for (MachineInstr::mop_iterator OI = MI->operands_begin(),
           OE = MI->operands_end(); OI != OE; ++OI) {
        if (!OI->isReg() || OI->getReg() == 0 ||
            !OI->isUse() || !OI->isKill() || OI->isUndef())
          continue;
        unsigned Reg = OI->getReg();
        if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
            LV->getVarInfo(Reg).removeKill(MI)) {
          KilledRegs.push_back(Reg);
          DEBUG(dbgs() << "Removing terminator kill: " << *MI);
          OI->setIsKill(false);
        }
      }
    }

  SmallVector<unsigned, 4> UsedRegs;
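  // Remember every register referenced by the terminators so that
  // LiveIntervals can repair their ranges after updateTerminator() has
  // rewritten the branch (see repairIntervalsInRange below).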
  if (LIS) {
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I) {
      MachineInstr *MI = I;

      for (MachineInstr::mop_iterator OI = MI->operands_begin(),
           OE = MI->operands_end(); OI != OE; ++OI) {
        if (!OI->isReg() || OI->getReg() == 0)
          continue;

        unsigned Reg = OI->getReg();
        if (std::find(UsedRegs.begin(), UsedRegs.end(), Reg) == UsedRegs.end())
          UsedRegs.push_back(Reg);
      }
    }
  }

  ReplaceUsesOfBlockWith(Succ, NMBB);

  // If updateTerminator() removes instructions, we need to remove them from
  // SlotIndexes.
  SmallVector<MachineInstr*, 4> Terminators;
  if (Indexes) {
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I)
      Terminators.push_back(I);
  }

  updateTerminator();

  if (Indexes) {
    SmallVector<MachineInstr*, 4> NewTerminators;
    for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
         I != E; ++I)
      NewTerminators.push_back(I);

    for (SmallVectorImpl<MachineInstr*>::iterator I = Terminators.begin(),
        E = Terminators.end(); I != E; ++I) {
      if (std::find(NewTerminators.begin(), NewTerminators.end(), *I) ==
          NewTerminators.end())
       Indexes->removeMachineInstrFromMaps(*I);
    }
  }

  // Insert unconditional "jump Succ" instruction in NMBB if necessary.
  NMBB->addSuccessor(Succ);
  if (!NMBB->isLayoutSuccessor(Succ)) {
    Cond.clear();
    MF->getSubtarget().getInstrInfo()->InsertBranch(*NMBB, Succ, nullptr, Cond,
                                                    dl);

    if (Indexes) {
      for (instr_iterator I = NMBB->instr_begin(), E = NMBB->instr_end();
           I != E; ++I) {
        // Some instructions may have been moved to NMBB by updateTerminator(),
        // so we first remove any instruction that already has an index.
        if (Indexes->hasIndex(I))
          Indexes->removeMachineInstrFromMaps(I);
        Indexes->insertMachineInstrInMaps(I);
      }
    }
  }

  // Fix PHI nodes in Succ so they refer to NMBB instead of this
  for (MachineBasicBlock::instr_iterator
         i = Succ->instr_begin(),e = Succ->instr_end();
       i != e && i->isPHI(); ++i)
    for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
      if (i->getOperand(ni+1).getMBB() == this)
        i->getOperand(ni+1).setMBB(NMBB);

  // Inherit live-ins from the successor
  for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
         E = Succ->livein_end(); I != E; ++I)
    NMBB->addLiveIn(*I);

  // Update LiveVariables.
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  if (LV) {
    // Restore kills of virtual registers that were killed by the terminators.
    while (!KilledRegs.empty()) {
      unsigned Reg = KilledRegs.pop_back_val();
      for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
        if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
          continue;
        if (TargetRegisterInfo::isVirtualRegister(Reg))
          LV->getVarInfo(Reg).Kills.push_back(I);
        DEBUG(dbgs() << "Restored terminator kill: " << *I);
        break;
      }
    }
    // Update relevant live-through information.
    LV->addNewBlock(NMBB, this, Succ);
  }

  if (LIS) {
    // After splitting the edge and updating SlotIndexes, live intervals may be
    // in one of two situations, depending on whether this block was the last in
    // the function. If the original block was the last in the function, all live
    // intervals will end prior to the beginning of the new split block. If the
    // original block was not at the end of the function, all live intervals will
    // extend to the end of the new split block.

    bool isLastMBB =
      std::next(MachineFunction::iterator(NMBB)) == getParent()->end();

    SlotIndex StartIndex = Indexes->getMBBEndIdx(this);
    SlotIndex PrevIndex = StartIndex.getPrevSlot();
    SlotIndex EndIndex = Indexes->getMBBEndIdx(NMBB);
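    // StartIndex is the old end index of this block (now the start of NMBB's
    // range) and PrevIndex is the last slot inside this block: anything live
    // at PrevIndex was live-out across the edge being split.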

    // Find the registers used from NMBB in PHIs in Succ.
    SmallSet<unsigned, 8> PHISrcRegs;
    for (MachineBasicBlock::instr_iterator
         I = Succ->instr_begin(), E = Succ->instr_end();
         I != E && I->isPHI(); ++I) {
      for (unsigned ni = 1, ne = I->getNumOperands(); ni != ne; ni += 2) {
        if (I->getOperand(ni+1).getMBB() == NMBB) {
          MachineOperand &MO = I->getOperand(ni);
          unsigned Reg = MO.getReg();
          PHISrcRegs.insert(Reg);
          if (MO.isUndef())
            continue;

          LiveInterval &LI = LIS->getInterval(Reg);
          VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
          assert(VNI && "PHI sources should be live out of their predecessors.");
          LI.addSegment(LiveInterval::Segment(StartIndex, EndIndex, VNI));
        }
      }
    }

    MachineRegisterInfo *MRI = &getParent()->getRegInfo();
    for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
      unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
      if (PHISrcRegs.count(Reg) || !LIS->hasInterval(Reg))
        continue;

      LiveInterval &LI = LIS->getInterval(Reg);
      if (!LI.liveAt(PrevIndex))
        continue;

      bool isLiveOut = LI.liveAt(LIS->getMBBStartIdx(Succ));
      if (isLiveOut && isLastMBB) {
        VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
        assert(VNI && "LiveInterval should have VNInfo where it is live.");
        LI.addSegment(LiveInterval::Segment(StartIndex, EndIndex, VNI));
      } else if (!isLiveOut && !isLastMBB) {
        LI.removeSegment(StartIndex, EndIndex);
      }
    }

    // Update all intervals for registers whose uses may have been modified by
    // updateTerminator().
    LIS->repairIntervalsInRange(this, getFirstTerminator(), end(), UsedRegs);
  }

  if (MachineDominatorTree *MDT =
      P->getAnalysisIfAvailable<MachineDominatorTree>())
    MDT->recordSplitCriticalEdge(this, Succ, NMBB);

  if (MachineLoopInfo *MLI = P->getAnalysisIfAvailable<MachineLoopInfo>())
    if (MachineLoop *TIL = MLI->getLoopFor(this)) {
      // If either block was not in a loop, the new block is not in one
      // either, and thus the loop info doesn't need to be updated.
      if (MachineLoop *DestLoop = MLI->getLoopFor(Succ)) {
        if (TIL == DestLoop) {
          // Both are in the same loop; NMBB joins that loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (TIL->contains(DestLoop)) {
          // Edge from an outer loop to an inner loop.  Add to the outer loop.
          TIL->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else if (DestLoop->contains(TIL)) {
          // Edge from an inner loop to an outer loop.  Add to the outer loop.
          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
        } else {
          // Edge from two loops with no containment relation.  Because these
          // are natural loops, we know that the destination block must be the
          // header of its loop (adding a branch into a loop elsewhere would
          // create an irreducible loop).
          assert(DestLoop->getHeader() == Succ &&
                 "Should not create irreducible loops!");
          if (MachineLoop *P = DestLoop->getParentLoop())
            P->addBasicBlockToLoop(NMBB, MLI->getBase());
        }
      }
    }

  return NMBB;
}
Example #10
void LiveInterval::constructMainRangeFromSubranges(
    const SlotIndexes &Indexes, VNInfo::Allocator &VNIAllocator) {
  // The basic observations on which this algorithm is based:
  // - Each Def/ValNo in a subrange must have a corresponding def on the main
  //   range, but no further defs/valnos are necessary.
  // - If any of the subranges is live at a point, the main live range has to
  //   be live too; conversely, if no subrange is live, the main range must not
  //   be live either.
  // We do this by scanning through all the subranges simultaneously, creating
  // new segments in the main range as segment starts/ends come up in the
  // subranges.
  assert(hasSubRanges() && "expected subranges to be present");
  assert(segments.empty() && valnos.empty() && "expected empty main range");

  // Collect subrange, iterator pairs for the walk and determine first and last
  // SlotIndex involved.
  SmallVector<std::pair<const SubRange*, const_iterator>, 4> SRs;
  SlotIndex First;
  SlotIndex Last;
  for (const SubRange &SR : subranges()) {
    if (SR.empty())
      continue;
    SRs.push_back(std::make_pair(&SR, SR.begin()));
    if (!First.isValid() || SR.segments.front().start < First)
      First = SR.segments.front().start;
    if (!Last.isValid() || SR.segments.back().end > Last)
      Last = SR.segments.back().end;
  }

  // Walk over all subranges simultaneously.
  Segment CurrentSegment;
  bool ConstructingSegment = false;
  bool NeedVNIFixup = false;
  unsigned ActiveMask = 0;
  SlotIndex Pos = First;
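  // Event-driven sweep: Pos repeatedly advances to the next segment begin or
  // end across all subranges, while ActiveMask tracks which lanes are live at
  // the current position.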
  while (true) {
    SlotIndex NextPos = Last;
    enum {
      NOTHING,
      BEGIN_SEGMENT,
      END_SEGMENT,
    } Event = NOTHING;
    // Which subregister lanes are affected by the current event.
    unsigned EventMask = 0;
    // Whether a BEGIN_SEGMENT is also a valno definition point.
    bool IsDef = false;
    // Find the next begin or end of a subrange segment. Combine masks if we
    // have multiple begins/ends at the same position. Ends take precedence over
    // Begins.
    for (auto &SRP : SRs) {
      const SubRange &SR = *SRP.first;
      const_iterator &I = SRP.second;
      // Advance iterator of subrange to a segment involving Pos; the earlier
      // segments are already merged at this point.
      while (I != SR.end() &&
             (I->end < Pos ||
              (I->end == Pos && (ActiveMask & SR.LaneMask) == 0)))
        ++I;
      if (I == SR.end())
        continue;
      if ((ActiveMask & SR.LaneMask) == 0 &&
          Pos <= I->start && I->start <= NextPos) {
        // Merge multiple begins at the same position.
        if (I->start == NextPos && Event == BEGIN_SEGMENT) {
          EventMask |= SR.LaneMask;
          IsDef |= I->valno->def == I->start;
        } else if (I->start < NextPos || Event != END_SEGMENT) {
          Event = BEGIN_SEGMENT;
          NextPos = I->start;
          EventMask = SR.LaneMask;
          IsDef = I->valno->def == I->start;
        }
      }
      if ((ActiveMask & SR.LaneMask) != 0 &&
          Pos <= I->end && I->end <= NextPos) {
        // Merge multiple ends at the same position.
        if (I->end == NextPos && Event == END_SEGMENT)
          EventMask |= SR.LaneMask;
        else {
          Event = END_SEGMENT;
          NextPos = I->end;
          EventMask = SR.LaneMask;
        }
      }
    }

    // Advance scan position.
    Pos = NextPos;
    if (Event == BEGIN_SEGMENT) {
      if (ConstructingSegment && IsDef) {
        // Finish previous segment because we have to start a new one.
        CurrentSegment.end = Pos;
        append(CurrentSegment);
        ConstructingSegment = false;
      }

      // Start a new segment if necessary.
      if (!ConstructingSegment) {
        // Determine value number for the segment.
        VNInfo *VNI = nullptr; // Stays null if no predecessor provides a value.
        if (IsDef) {
          VNI = getNextValue(Pos, VNIAllocator);
        } else {
          // We have to reuse an existing value number. If we are lucky, we
          // have already passed one of the predecessor blocks and determined
          // its value number (with blocks in reverse postorder this would
          // always be true, but we have no such guarantee).
          assert(Pos.isBlock());
          const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Pos);
          // See if the main range already has a value live out of any
          // predecessor block that we can reuse.
          for (const MachineBasicBlock *Pred : MBB->predecessors()) {
            SlotIndex PredEnd = Indexes.getMBBEndIdx(Pred);
            VNI = getVNInfoBefore(PredEnd);
            if (VNI != nullptr)
              break;
          }
          // Def will come later: We have to do an extra fixup pass.
          if (VNI == nullptr)
            NeedVNIFixup = true;
        }

        // In rare cases we can produce adjacent segments with the same value
        // number (if they come from different subranges, but happen to have
        // the same defining instruction). VNIFixup will fix those cases.
        if (!empty() && segments.back().end == Pos &&
            segments.back().valno == VNI)
          NeedVNIFixup = true;
        CurrentSegment.start = Pos;
        CurrentSegment.valno = VNI;
        ConstructingSegment = true;
      }
      ActiveMask |= EventMask;
    } else if (Event == END_SEGMENT) {
      assert(ConstructingSegment);
      // Finish segment if no lane is active anymore.
      ActiveMask &= ~EventMask;
      if (ActiveMask == 0) {
        CurrentSegment.end = Pos;
        append(CurrentSegment);
        ConstructingSegment = false;
      }
    } else {
      // We reached the end of the last subrange and can stop.
      assert(Event == NOTHING);
      break;
    }
  }

  // We might not be able to assign new valnos for all segments if the basic
  // block containing the definition comes after a segment using the valno.
  // Do a fixup pass for this uncommon case.
  if (NeedVNIFixup)
    determineMissingVNIs(Indexes, *this);

  assert(ActiveMask == 0 && !ConstructingSegment && "all segments ended");
  verify();
}