void LiveRangeCalc::extend(LiveRange &LR, SlotIndex Kill, unsigned PhysReg) {
  assert(Kill.isValid() && "Invalid SlotIndex");
  assert(Indexes && "Missing SlotIndexes");
  assert(DomTree && "Missing dominator tree");

  MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill.getPrevSlot());
  assert(KillMBB && "No MBB at Kill");

  // Is there a def in the same MBB we can extend?
  if (LR.extendInBlock(Indexes->getMBBStartIdx(KillMBB), Kill))
    return;

  // Find the single reaching def, or determine if Kill is jointly dominated by
  // multiple values, in which case we may need to create even more phi-defs to
  // preserve VNInfo SSA form.  Perform a search for all predecessor blocks
  // where we know the dominating VNInfo.
  if (findReachingDefs(LR, *KillMBB, Kill, PhysReg))
    return;

  // When there were multiple different values, we may need new PHIs.
  calculateValues();
}
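
// A minimal standalone sketch of the in-block fast path that extend() tries
// first, using plain unsigned indices in place of SlotIndex. All names below
// are hypothetical illustration, not LLVM API: if the last segment starting
// before Kill is still live past the block start, stretching its end to Kill
// is enough and no search over the CFG is needed.
#include <algorithm>
#include <vector>

struct Seg { unsigned start, end; }; // half-open [start, end), sorted by start

bool extendInBlockSketch(std::vector<Seg> &Segs, unsigned BlockStart,
                         unsigned Kill) {
  // Find the first segment starting at or after Kill; its predecessor is the
  // only candidate that could be live just before Kill.
  auto It = std::upper_bound(
      Segs.begin(), Segs.end(), Kill,
      [](unsigned V, const Seg &S) { return V <= S.start; });
  if (It == Segs.begin())
    return false;               // no def before Kill at all
  --It;
  if (It->end <= BlockStart)
    return false;               // the value died before this block began
  It->end = std::max(It->end, Kill);
  return true;
}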
Example #2
void LiveRangeCalc::createDeadDefs(LiveRange &LR, unsigned Reg) {
  assert(MRI && Indexes && "call reset() first");

  // Visit all def operands. If the same instruction has multiple defs of Reg,
  // LR.createDeadDef() will deduplicate.
  for (MachineRegisterInfo::def_iterator
       I = MRI->def_begin(Reg), E = MRI->def_end(); I != E; ++I) {
    const MachineInstr *MI = &*I;
    // Find the corresponding slot index.
    SlotIndex Idx;
    if (MI->isPHI())
      // PHI defs begin at the basic block start index.
      Idx = Indexes->getMBBStartIdx(MI->getParent());
    else
      // Instructions are either normal 'r', or early clobber 'e'.
      Idx = Indexes->getInstructionIndex(MI)
        .getRegSlot(I.getOperand().isEarlyClobber());

    // Create the def in LR. This may find an existing def.
    LR.createDeadDef(Idx, *Alloc);
  }
}
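
// A small sketch of the deduplication the loop above relies on from
// LR.createDeadDef() (hypothetical names, plain indices): a dead def is
// inserted as a one-slot segment unless a segment already starts at that
// index, in which case the existing def is reused.
#include <algorithm>
#include <vector>

struct Seg { unsigned start, end; }; // sorted by start

bool createDeadDefSketch(std::vector<Seg> &Segs, unsigned Idx) {
  auto It = std::lower_bound(
      Segs.begin(), Segs.end(), Idx,
      [](const Seg &S, unsigned V) { return S.start < V; });
  if (It != Segs.end() && It->start == Idx)
    return false;                      // duplicate def from the same instr
  Segs.insert(It, Seg{Idx, Idx + 1});  // minimal dead segment
  return true;
}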
Example #3
void LiveRangeCalc::extend(LiveRange &LR, SlotIndex Use, unsigned PhysReg,
                           ArrayRef<SlotIndex> Undefs) {
  assert(Use.isValid() && "Invalid SlotIndex");
  assert(Indexes && "Missing SlotIndexes");
  assert(DomTree && "Missing dominator tree");

  MachineBasicBlock *UseMBB = Indexes->getMBBFromIndex(Use.getPrevSlot());
  assert(UseMBB && "No MBB at Use");

  // Is there a def in the same MBB we can extend?
  auto EP = LR.extendInBlock(Undefs, Indexes->getMBBStartIdx(UseMBB), Use);
  if (EP.first != nullptr || EP.second)
    return;

  // Find the single reaching def, or determine if Use is jointly dominated by
  // multiple values, in which case we may need to create even more phi-defs to
  // preserve VNInfo SSA form.  Perform a search for all predecessor blocks
  // where we know the dominating VNInfo.
  if (findReachingDefs(LR, *UseMBB, Use, PhysReg, Undefs))
    return;

  // When there were multiple different values, we may need new PHIs.
  calculateValues();
}
// overlaps - Return true if the intersection of the two live ranges is
// not empty.
//
// An example for overlaps():
//
// 0: A = ...
// 4: B = ...
// 8: C = A + B ;; last use of A
//
// The live ranges should look like:
//
// A = [3, 11)
// B = [7, x)
// C = [11, y)
//
// A->overlaps(C) should return false since we want to be able to join
// A and C.
//
bool LiveRange::overlapsFrom(const LiveRange& other,
                             const_iterator StartPos) const {
  assert(!empty() && "empty range");
  const_iterator i = begin();
  const_iterator ie = end();
  const_iterator j = StartPos;
  const_iterator je = other.end();

  assert((StartPos->start <= i->start || StartPos == other.begin()) &&
         StartPos != other.end() && "Bogus start position hint!");

  if (i->start < j->start) {
    i = std::upper_bound(i, ie, j->start);
    if (i != begin()) --i;
  } else if (j->start < i->start) {
    ++StartPos;
    if (StartPos != other.end() && StartPos->start <= i->start) {
      assert(StartPos < other.end() && i < end());
      j = std::upper_bound(j, je, i->start);
      if (j != other.begin()) --j;
    }
  } else {
    return true;
  }

  if (j == je) return false;

  while (i != ie) {
    if (i->start > j->start) {
      std::swap(i, j);
      std::swap(ie, je);
    }

    if (i->end > j->start)
      return true;
    ++i;
  }

  return false;
}
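
// The same scan, reduced to a standalone two-pointer walk over two sorted
// lists of half-open segments (hypothetical names; no iterator swapping or
// start-position hint): two segments intersect iff each starts before the
// other ends, and the segment that ends first can never overlap anything
// later in the other list.
#include <vector>

struct Seg { unsigned start, end; };

bool overlapsSketch(const std::vector<Seg> &A, const std::vector<Seg> &B) {
  auto i = A.begin(), j = B.begin();
  while (i != A.end() && j != B.end()) {
    if (i->start < j->end && j->start < i->end)
      return true;                    // intersection is non-empty
    if (i->end < j->end)
      ++i;                            // i ends first: advance it
    else
      ++j;
  }
  return false;
}

// With the ranges from the comment above: A = [3,11) and C = [11,y) share no
// point, so overlapsSketch({{3,11}}, {{11,15}}) returns false and A and C can
// be joined.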
Example #5
void PSMarkSweepDecorator::precompact() {
  // Reset our own compact top.
  set_compaction_top(space()->bottom());

  /* We allow some amount of garbage towards the bottom of the space, so
   * we don't start compacting before there is a significant gain to be made.
   * Occasionally, we want to ensure a full compaction, which is determined
   * by the MarkSweepAlwaysCompactCount parameter. This is a significant
   * performance improvement!
   */
  bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);

  size_t allowed_deadspace = 0;
  if (skip_dead) {
    const size_t ratio = allowed_dead_ratio();
    allowed_deadspace = space()->capacity_in_words() * ratio / 100;
  }

  // Fetch the current destination decorator
  PSMarkSweepDecorator* dest = destination_decorator();
  ObjectStartArray* start_array = dest->start_array();

  HeapWord* compact_top = dest->compaction_top();
  HeapWord* compact_end = dest->space()->end();

  HeapWord* q = space()->bottom();
  HeapWord* t = space()->top();

  HeapWord*  end_of_live = q;   /* One byte beyond the last byte of the last
                                   live object. */
  HeapWord*  first_dead = space()->end(); /* The first dead object. */
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the
                                   first header of preceding free area. */
  _first_dead = first_dead;

  const intx interval = PrefetchScanIntervalInBytes;

  while (q < t) {
    assert(oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
           oop(q)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (oop(q)->is_gc_marked()) {
      /* prefetch beyond q */
      Prefetch::write(q, interval);
      size_t size = oop(q)->size();

      size_t compaction_max_size = pointer_delta(compact_end, compact_top);

      // This should only happen if a space in the young gen overflows the
      // old gen. If that should happen, we null out the start_array, because
      // the young spaces are not covered by one.
      while (size > compaction_max_size) {
        // First record the last compact_top
        dest->set_compaction_top(compact_top);

        // Advance to the next compaction decorator
        advance_destination_decorator();
        dest = destination_decorator();

        // Update compaction info
        start_array = dest->start_array();
        compact_top = dest->compaction_top();
        compact_end = dest->space()->end();
        assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
        assert(compact_end > compact_top, "Must always be space remaining");
        compaction_max_size =
          pointer_delta(compact_end, compact_top);
      }

      // store the forwarding pointer into the mark word
      if (q != compact_top) {
        oop(q)->forward_to(oop(compact_top));
        assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
      } else {
        // if the object isn't moving we can just set the mark to the default
        // mark and handle it specially later on.
        oop(q)->init_mark();
        assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
      }

      // Update object start array
      if (start_array) {
        start_array->allocate_block(compact_top);
      }

      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
      compact_top += size;
      assert(compact_top <= dest->space()->end(),
        "Exceeding space in destination");

      q += size;
      end_of_live = q;
    } else {
      /* run over all the contiguous dead objects */
      HeapWord* end = q;
      do {
        /* prefetch beyond end */
        Prefetch::write(end, interval);
        end += oop(end)->size();
      } while (end < t && (!oop(end)->is_gc_marked()));

      /* see if we might want to pretend this object is alive so that
       * we don't have to compact quite as often.
       */
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (insert_deadspace(allowed_deadspace, q, sz)) {
          size_t compaction_max_size = pointer_delta(compact_end, compact_top);

          // This should only happen if a space in the young gen overflows the
          // old gen. If that should happen, we null out the start_array, because
          // the young spaces are not covered by one.
          while (sz > compaction_max_size) {
            // First record the last compact_top
            dest->set_compaction_top(compact_top);

            // Advance to the next compaction decorator
            advance_destination_decorator();
            dest = destination_decorator();

            // Update compaction info
            start_array = dest->start_array();
            compact_top = dest->compaction_top();
            compact_end = dest->space()->end();
            assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
            assert(compact_end > compact_top, "Must always be space remaining");
            compaction_max_size =
              pointer_delta(compact_end, compact_top);
          }

          // store the forwarding pointer into the mark word
          if (q != compact_top) {
            oop(q)->forward_to(oop(compact_top));
            assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
          } else {
            // if the object isn't moving we can just set the mark to the default
            // mark and handle it specially later on.
            oop(q)->init_mark();
            assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
          }

          // Update object start array
          if (start_array) {
            start_array->allocate_block(compact_top);
          }

          VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
          compact_top += sz;
          assert(compact_top <= dest->space()->end(),
            "Exceeding space in destination");

          q = end;
          end_of_live = end;
          continue;
        }
      }

      /* for the previous LiveRange, record the end of the live objects. */
      if (liveRange) {
        liveRange->set_end(q);
      }

      /* record the current LiveRange object.
       * liveRange->start() is overlaid on the mark word.
       */
      liveRange = (LiveRange*)q;
      liveRange->set_start(end);
      liveRange->set_end(end);

      /* see if this is the first dead region. */
      if (q < first_dead) {
        first_dead = q;
      }

      /* move on to the next object */
      q = end;
    }
  }

  assert(q == t, "just checking");
  if (liveRange != NULL) {
    liveRange->set_end(q);
  }
  _end_of_live = end_of_live;
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  _first_dead = first_dead;

  // Update compaction top
  dest->set_compaction_top(compact_top);
}
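
// A toy model of the forwarding computation performed above, stripped of
// dead-space insertion, multiple destination spaces, and mark words (all
// names hypothetical): walk the objects in address order and give each live
// object the current compact_top as its forwarding target.
#include <cstddef>
#include <vector>

struct Obj { size_t size; bool live; size_t forward; };

size_t precompactSketch(std::vector<Obj> &Heap) {
  size_t compact_top = 0;
  for (Obj &O : Heap) {
    if (O.live) {
      O.forward = compact_top;        // "store the forwarding pointer"
      compact_top += O.size;
    }
  }
  return compact_top;                 // size of the compacted prefix
}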
Example #6
void UserValue::computeIntervals(MachineRegisterInfo &MRI,
                                 const TargetRegisterInfo &TRI,
                                 LiveIntervals &LIS, LexicalScopes &LS) {
  SmallVector<std::pair<SlotIndex, unsigned>, 16> Defs;

  // Collect all defs to be extended (skipping undefs).
  for (LocMap::const_iterator I = locInts.begin(); I.valid(); ++I)
    if (I.value() != UndefLocNo)
      Defs.push_back(std::make_pair(I.start(), I.value()));

  // Extend all defs, and possibly add new ones along the way.
  for (unsigned i = 0; i != Defs.size(); ++i) {
    SlotIndex Idx = Defs[i].first;
    unsigned LocNo = Defs[i].second;
    const MachineOperand &Loc = locations[LocNo];

    if (!Loc.isReg()) {
      extendDef(Idx, LocNo, nullptr, nullptr, nullptr, LIS);
      continue;
    }

    // Register locations are constrained to where the register value is live.
    if (TargetRegisterInfo::isVirtualRegister(Loc.getReg())) {
      LiveInterval *LI = nullptr;
      const VNInfo *VNI = nullptr;
      if (LIS.hasInterval(Loc.getReg())) {
        LI = &LIS.getInterval(Loc.getReg());
        VNI = LI->getVNInfoAt(Idx);
      }
      SmallVector<SlotIndex, 16> Kills;
      extendDef(Idx, LocNo, LI, VNI, &Kills, LIS);
      if (LI)
        addDefsFromCopies(LI, LocNo, Kills, Defs, MRI, LIS);
      continue;
    }

    // For physregs, use the live range of the first regunit as a guide.
    unsigned Unit = *MCRegUnitIterator(Loc.getReg(), &TRI);
    LiveRange *LR = &LIS.getRegUnit(Unit);
    const VNInfo *VNI = LR->getVNInfoAt(Idx);
    // Don't track copies from physregs; it is too expensive.
    extendDef(Idx, LocNo, LR, VNI, nullptr, LIS);
  }

  // Erase all the undefs.
  for (LocMap::iterator I = locInts.begin(); I.valid();)
    if (I.value() == UndefLocNo)
      I.erase();
    else
      ++I;

  // The computed intervals may extend beyond the range of the debug
  // location's lexical scope. In this case, splitting of an interval
  // can result in an interval outside of the scope being created,
  // causing extra unnecessary DBG_VALUEs to be emitted. To prevent
  // this, trim the intervals to the lexical scope.

  LexicalScope *Scope = LS.findLexicalScope(dl);
  if (!Scope)
    return;

  SlotIndex PrevEnd;
  LocMap::iterator I = locInts.begin();

  // Iterate over the lexical scope ranges. Each time round the loop
  // we check the intervals for overlap with the end of the previous
  // range and the start of the next. The first range is handled as
  // a special case where there is no PrevEnd.
  for (const InsnRange &Range : Scope->getRanges()) {
    SlotIndex RStart = LIS.getInstructionIndex(*Range.first);
    SlotIndex REnd = LIS.getInstructionIndex(*Range.second);

    // At the start of each iteration I has been advanced so that
    // I.stop() >= PrevEnd. Check for overlap.
    if (PrevEnd && I.start() < PrevEnd) {
      SlotIndex IStop = I.stop();
      unsigned LocNo = I.value();

      // Stop overlaps previous end - trim the end of the interval to
      // the scope range.
      I.setStopUnchecked(PrevEnd);
      ++I;

      // If the interval also overlaps the start of the "next" (i.e.
      // current) range create a new interval for the remainder (which
      // may be further trimmed).
      if (RStart < IStop)
        I.insert(RStart, IStop, LocNo);
    }

    // Advance I so that I.stop() >= RStart, and check for overlap.
    I.advanceTo(RStart);
    if (!I.valid())
      return;

    if (I.start() < RStart) {
      // Interval start overlaps range - trim to the scope range.
      I.setStartUnchecked(RStart);
      // Remember that this interval was trimmed.
      trimmedDefs.insert(RStart);
    }

    // The end of a lexical scope range is the last instruction in the
    // range. To convert to an interval we need the index of the
    // instruction after it.
    REnd = REnd.getNextIndex();

    // Advance I to first interval outside current range.
    I.advanceTo(REnd);
    if (!I.valid())
      return;

    PrevEnd = REnd;
  }

  // Check for overlap with end of final range.
  if (PrevEnd && I.start() < PrevEnd)
    I.setStopUnchecked(PrevEnd);
}
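
// A simplified, standalone model of the trimming step above (plain unsigned
// indices, hypothetical names): clipping each location interval against every
// lexical-scope range both trims overlapping ends and splits an interval that
// spans a hole between two ranges. The real code does this in one coordinated
// pass over the IntervalMap instead of a nested loop.
#include <algorithm>
#include <vector>

struct Ival { unsigned start, stop; }; // half-open [start, stop)

std::vector<Ival> trimToScopeSketch(const std::vector<Ival> &Locs,
                                    const std::vector<Ival> &Scope) {
  std::vector<Ival> Out;
  for (const Ival &L : Locs)
    for (const Ival &R : Scope) {
      unsigned S = std::max(L.start, R.start);
      unsigned E = std::min(L.stop, R.stop);
      if (S < E)
        Out.push_back({S, E});        // keep only the in-scope piece
    }
  return Out;
}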
/// Merge all of the segments in RHS into this live range as the specified
/// value number.  The segments in RHS are allowed to overlap with segments in
/// the current range, but only if the overlapping segments have the
/// specified value number.
void LiveRange::MergeSegmentsInAsValue(const LiveRange &RHS,
                                       VNInfo *LHSValNo) {
  LiveRangeUpdater Updater(this);
  for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I)
    Updater.add(I->start, I->end, LHSValNo);
}
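
// A sketch of what LiveRangeUpdater does for this merge, folded into a plain
// sorted vector with a single implicit value number (hypothetical names):
// insert each RHS segment and coalesce it with any abutting or overlapping
// neighbours.
#include <algorithm>
#include <vector>

struct Seg { unsigned start, end; };

void addCoalescingSketch(std::vector<Seg> &LHS, Seg S) {
  auto It = std::lower_bound(
      LHS.begin(), LHS.end(), S,
      [](const Seg &A, const Seg &B) { return A.start < B.start; });
  if (It != LHS.begin() && std::prev(It)->end >= S.start) {
    --It;                              // fold into the touching predecessor
    It->end = std::max(It->end, S.end);
  } else {
    It = LHS.insert(It, S);
  }
  // Swallow any following segments now covered by or touching *It.
  auto Next = std::next(It);
  while (Next != LHS.end() && Next->start <= It->end) {
    It->end = std::max(It->end, Next->end);
    Next = LHS.erase(Next);
  }
}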
void LiveRange::join(LiveRange &Other,
                     const int *LHSValNoAssignments,
                     const int *RHSValNoAssignments,
                     SmallVectorImpl<VNInfo *> &NewVNInfo) {
  verify();

  // Determine if any of our values are mapped.  This is uncommon, so we want
  // to avoid the range scan if not.
  bool MustMapCurValNos = false;
  unsigned NumVals = getNumValNums();
  unsigned NumNewVals = NewVNInfo.size();
  for (unsigned i = 0; i != NumVals; ++i) {
    unsigned LHSValID = LHSValNoAssignments[i];
    if (i != LHSValID ||
        (NewVNInfo[LHSValID] && NewVNInfo[LHSValID] != getValNumInfo(i))) {
      MustMapCurValNos = true;
      break;
    }
  }

  // If we have to apply a mapping to our base range assignment, rewrite it now.
  if (MustMapCurValNos && !empty()) {
    // Map the first live range.

    iterator OutIt = begin();
    OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
    for (iterator I = std::next(OutIt), E = end(); I != E; ++I) {
      VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
      assert(nextValNo != 0 && "Huh?");

      // If this live range has the same value # as its immediate predecessor,
      // and if they are neighbors, remove one Segment.  This happens when we
      // have [0,4:0)[4,7:1) and map 0/1 onto the same value #.
      if (OutIt->valno == nextValNo && OutIt->end == I->start) {
        OutIt->end = I->end;
      } else {
        // Didn't merge. Move OutIt to the next segment,
        ++OutIt;
        OutIt->valno = nextValNo;
        if (OutIt != I) {
          OutIt->start = I->start;
          OutIt->end = I->end;
        }
      }
    }
    // If we merge some segments, chop off the end.
    ++OutIt;
    segments.erase(OutIt, end());
  }

  // Rewrite Other values before changing the VNInfo ids.
  // This can leave Other in an invalid state because we're not coalescing
  // touching segments that now have identical values. That's OK since Other is
  // not supposed to be valid after calling join().
  for (iterator I = Other.begin(), E = Other.end(); I != E; ++I)
    I->valno = NewVNInfo[RHSValNoAssignments[I->valno->id]];

  // Update val# info. Renumber them and make sure they all belong to this
  // LiveRange now. Also remove dead val#'s.
  unsigned NumValNos = 0;
  for (unsigned i = 0; i < NumNewVals; ++i) {
    VNInfo *VNI = NewVNInfo[i];
    if (VNI) {
      if (NumValNos >= NumVals)
        valnos.push_back(VNI);
      else
        valnos[NumValNos] = VNI;
      VNI->id = NumValNos++;  // Renumber val#.
    }
  }
  if (NumNewVals < NumVals)
    valnos.resize(NumNewVals);  // shrinkify

  // Okay, now insert the RHS live segments into the LHS.
  LiveRangeUpdater Updater(this);
  for (iterator I = Other.begin(), E = Other.end(); I != E; ++I)
    Updater.add(*I);
}
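
// The remap-and-coalesce pass above, reduced to a flat vector (hypothetical
// names): rewrite each segment's value id through an assignment table and
// merge a segment into its predecessor when the ids match and the segments
// abut, exactly the [0,4:0)[4,7:1) -> [0,7:0) case from the comment.
#include <vector>

struct VSeg { unsigned start, end, valno; };

void remapAndCoalesceSketch(std::vector<VSeg> &Segs,
                            const std::vector<unsigned> &Assign) {
  if (Segs.empty())
    return;
  size_t Out = 0;
  Segs[0].valno = Assign[Segs[0].valno];
  for (size_t I = 1; I != Segs.size(); ++I) {
    unsigned NewVal = Assign[Segs[I].valno];
    if (Segs[Out].valno == NewVal && Segs[Out].end == Segs[I].start) {
      Segs[Out].end = Segs[I].end;     // same value, neighbours: merge
    } else {
      ++Out;
      Segs[Out] = VSeg{Segs[I].start, Segs[I].end, NewVal};
    }
  }
  Segs.resize(Out + 1);                // chop off the merged tail
}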
Example #9
bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB,
                                     SlotIndex Use, unsigned PhysReg,
                                     ArrayRef<SlotIndex> Undefs) {
  unsigned UseMBBNum = UseMBB.getNumber();

  // Block numbers where LR should be live-in.
  SmallVector<unsigned, 16> WorkList(1, UseMBBNum);

  // Remember if we have seen more than one value.
  bool UniqueVNI = true;
  VNInfo *TheVNI = nullptr;

  bool FoundUndef = false;

  // Using Seen as a visited set, perform a BFS for all reaching defs.
  for (unsigned i = 0; i != WorkList.size(); ++i) {
    MachineBasicBlock *MBB = MF->getBlockNumbered(WorkList[i]);

#ifndef NDEBUG
    if (MBB->pred_empty()) {
      MBB->getParent()->verify();
      errs() << "Use of " << printReg(PhysReg)
             << " does not have a corresponding definition on every path:\n";
      const MachineInstr *MI = Indexes->getInstructionFromIndex(Use);
      if (MI != nullptr)
        errs() << Use << " " << *MI;
      report_fatal_error("Use not jointly dominated by defs.");
    }

    if (TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
        !MBB->isLiveIn(PhysReg)) {
      MBB->getParent()->verify();
      const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
      errs() << "The register " << printReg(PhysReg, TRI)
             << " needs to be live in to " << printMBBReference(*MBB)
             << ", but is missing from the live-in list.\n";
      report_fatal_error("Invalid global physical register");
    }
#endif
    FoundUndef |= MBB->pred_empty();

    for (MachineBasicBlock *Pred : MBB->predecessors()) {
       // Is this a known live-out block?
       if (Seen.test(Pred->getNumber())) {
         if (VNInfo *VNI = Map[Pred].first) {
           if (TheVNI && TheVNI != VNI)
             UniqueVNI = false;
           TheVNI = VNI;
         }
         continue;
       }

       SlotIndex Start, End;
       std::tie(Start, End) = Indexes->getMBBRange(Pred);

       // First time we see Pred.  Try to determine the live-out value, but set
       // it as null if Pred is live-through with an unknown value.
       auto EP = LR.extendInBlock(Undefs, Start, End);
       VNInfo *VNI = EP.first;
       FoundUndef |= EP.second;
       setLiveOutValue(Pred, EP.second ? &UndefVNI : VNI);
       if (VNI) {
         if (TheVNI && TheVNI != VNI)
           UniqueVNI = false;
         TheVNI = VNI;
       }
       if (VNI || EP.second)
         continue;

       // No, we need a live-in value for Pred as well
       if (Pred != &UseMBB)
         WorkList.push_back(Pred->getNumber());
       else
         // Loopback to UseMBB, so value is really live through.
         Use = SlotIndex();
    }
  }

  LiveIn.clear();
  FoundUndef |= (TheVNI == nullptr || TheVNI == &UndefVNI);
  if (!Undefs.empty() && FoundUndef)
    UniqueVNI = false;

  // Both updateSSA() and LiveRangeUpdater benefit from ordered blocks, but
  // neither requires it. Skip the sorting overhead for small updates.
  if (WorkList.size() > 4)
    array_pod_sort(WorkList.begin(), WorkList.end());

  // If a unique reaching def was found, blit in the live ranges immediately.
  if (UniqueVNI) {
    assert(TheVNI != nullptr && TheVNI != &UndefVNI);
    LiveRangeUpdater Updater(&LR);
    for (unsigned BN : WorkList) {
      SlotIndex Start, End;
      std::tie(Start, End) = Indexes->getMBBRange(BN);
      // Trim the live range in UseMBB.
      if (BN == UseMBBNum && Use.isValid())
        End = Use;
      else
        Map[MF->getBlockNumbered(BN)] = LiveOutPair(TheVNI, nullptr);
      Updater.add(Start, End, TheVNI);
    }
    return true;
  }

  // Prepare the defined/undefined bit vectors.
  EntryInfoMap::iterator Entry;
  bool DidInsert;
  std::tie(Entry, DidInsert) = EntryInfos.insert(
      std::make_pair(&LR, std::make_pair(BitVector(), BitVector())));
  if (DidInsert) {
    // Initialize newly inserted entries.
    unsigned N = MF->getNumBlockIDs();
    Entry->second.first.resize(N);
    Entry->second.second.resize(N);
  }
  BitVector &DefOnEntry = Entry->second.first;
  BitVector &UndefOnEntry = Entry->second.second;

  // Multiple values were found, so transfer the work list to the LiveIn array
  // where UpdateSSA will use it as a work list.
  LiveIn.reserve(WorkList.size());
  for (unsigned BN : WorkList) {
    MachineBasicBlock *MBB = MF->getBlockNumbered(BN);
    if (!Undefs.empty() &&
        !isDefOnEntry(LR, Undefs, *MBB, DefOnEntry, UndefOnEntry))
      continue;
    addLiveInBlock(LR, DomTree->getNode(MBB));
    if (MBB == &UseMBB)
      LiveIn.back().Kill = Use;
  }

  return false;
}
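
// A toy model of the backward search above on an abstract CFG (hypothetical
// names; no undefs, lane masks, or SlotIndexes): walk predecessors from the
// use block until every path reaches a block with a known live-out value, and
// report that value only if it is unique. A result of -1 corresponds to the
// multi-value case where updateSSA() would have to insert phi-defs.
#include <vector>

struct Block { std::vector<int> preds; int liveOutVal; }; // -1 = unknown

int findReachingDefSketch(const std::vector<Block> &CFG, int UseBB) {
  std::vector<int> Work{UseBB};
  std::vector<bool> Visited(CFG.size(), false);
  Visited[UseBB] = true;
  int TheVal = -1;
  bool Unique = true;
  for (size_t i = 0; i != Work.size(); ++i)
    for (int P : CFG[Work[i]].preds) {
      if (Visited[P])
        continue;
      Visited[P] = true;
      int V = CFG[P].liveOutVal;
      if (V >= 0) {                   // known live-out: search stops here
        if (TheVal >= 0 && TheVal != V)
          Unique = false;
        TheVal = V;
      } else {
        Work.push_back(P);            // P needs a live-in value as well
      }
    }
  return Unique ? TheVal : -1;
}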
Example #10
bool LiveRangeCalc::isDefOnEntry(LiveRange &LR, ArrayRef<SlotIndex> Undefs,
                                 MachineBasicBlock &MBB, BitVector &DefOnEntry,
                                 BitVector &UndefOnEntry) {
  unsigned BN = MBB.getNumber();
  if (DefOnEntry[BN])
    return true;
  if (UndefOnEntry[BN])
    return false;

  auto MarkDefined = [BN, &DefOnEntry](MachineBasicBlock &B) -> bool {
    for (MachineBasicBlock *S : B.successors())
      DefOnEntry[S->getNumber()] = true;
    DefOnEntry[BN] = true;
    return true;
  };

  SetVector<unsigned> WorkList;
  // Checking if the entry of MBB is reached by some def: add all predecessors
  // that are potentially defined-on-exit to the work list.
  for (MachineBasicBlock *P : MBB.predecessors())
    WorkList.insert(P->getNumber());

  for (unsigned i = 0; i != WorkList.size(); ++i) {
    // Determine if the exit from the block is reached by some def.
    unsigned N = WorkList[i];
    MachineBasicBlock &B = *MF->getBlockNumbered(N);
    if (Seen[N]) {
      const LiveOutPair &LOB = Map[&B];
      if (LOB.first != nullptr && LOB.first != &UndefVNI)
        return MarkDefined(B);
    }
    SlotIndex Begin, End;
    std::tie(Begin, End) = Indexes->getMBBRange(&B);
    // Treat End as not belonging to B.
    // If LR has a segment S that starts at the next block, i.e. [End, ...),
    // std::upper_bound will return the segment following S. Instead,
    // S should be treated as the first segment that does not overlap B.
    LiveRange::iterator UB = std::upper_bound(LR.begin(), LR.end(),
                                              End.getPrevSlot());
    if (UB != LR.begin()) {
      LiveRange::Segment &Seg = *std::prev(UB);
      if (Seg.end > Begin) {
        // There is a segment that overlaps B. If the range is not explicitly
        // undefined between the end of the segment and the end of the block,
        // treat the block as defined on exit. If it is, go to the next block
        // on the work list.
        if (LR.isUndefIn(Undefs, Seg.end, End))
          continue;
        return MarkDefined(B);
      }
    }

    // No segment overlaps with this block. If this block is not defined on
    // entry, or it undefines the range, do not process its predecessors.
    if (UndefOnEntry[N] || LR.isUndefIn(Undefs, Begin, End)) {
      UndefOnEntry[N] = true;
      continue;
    }
    if (DefOnEntry[N])
      return MarkDefined(B);

    // Still don't know: add all predecessors to the work list.
    for (MachineBasicBlock *P : B.predecessors())
      WorkList.insert(P->getNumber());
  }

  UndefOnEntry[BN] = true;
  return false;
}
Example #11
bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB,
                                     SlotIndex Use, unsigned PhysReg) {
  unsigned UseMBBNum = UseMBB.getNumber();

  // Block numbers where LR should be live-in.
  SmallVector<unsigned, 16> WorkList(1, UseMBBNum);

  // Remember if we have seen more than one value.
  bool UniqueVNI = true;
  VNInfo *TheVNI = nullptr;

  // Using Seen as a visited set, perform a BFS for all reaching defs.
  for (unsigned i = 0; i != WorkList.size(); ++i) {
    MachineBasicBlock *MBB = MF->getBlockNumbered(WorkList[i]);

#ifndef NDEBUG
    if (MBB->pred_empty()) {
      MBB->getParent()->verify();
      errs() << "Use of " << PrintReg(PhysReg)
             << " does not have a corresponding definition on every path:\n";
      const MachineInstr *MI = Indexes->getInstructionFromIndex(Use);
      if (MI != nullptr)
        errs() << Use << " " << *MI;
      llvm_unreachable("Use not jointly dominated by defs.");
    }

    if (TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
        !MBB->isLiveIn(PhysReg)) {
      MBB->getParent()->verify();
      errs() << "The register " << PrintReg(PhysReg)
             << " needs to be live in to BB#" << MBB->getNumber()
             << ", but is missing from the live-in list.\n";
      llvm_unreachable("Invalid global physical register");
    }
#endif

    for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
       MachineBasicBlock *Pred = *PI;

       // Is this a known live-out block?
       if (Seen.test(Pred->getNumber())) {
         if (VNInfo *VNI = Map[Pred].first) {
           if (TheVNI && TheVNI != VNI)
             UniqueVNI = false;
           TheVNI = VNI;
         }
         continue;
       }

       SlotIndex Start, End;
       std::tie(Start, End) = Indexes->getMBBRange(Pred);

       // First time we see Pred.  Try to determine the live-out value, but set
       // it as null if Pred is live-through with an unknown value.
       VNInfo *VNI = LR.extendInBlock(Start, End);
       setLiveOutValue(Pred, VNI);
       if (VNI) {
         if (TheVNI && TheVNI != VNI)
           UniqueVNI = false;
         TheVNI = VNI;
         continue;
       }

       // No, we need a live-in value for Pred as well
       if (Pred != &UseMBB)
         WorkList.push_back(Pred->getNumber());
       else
         // Loopback to UseMBB, so value is really live through.
         Use = SlotIndex();
    }
  }

  LiveIn.clear();

  // Both updateSSA() and LiveRangeUpdater benefit from ordered blocks, but
  // neither requires it. Skip the sorting overhead for small updates.
  if (WorkList.size() > 4)
    array_pod_sort(WorkList.begin(), WorkList.end());

  // If a unique reaching def was found, blit in the live ranges immediately.
  if (UniqueVNI) {
    LiveRangeUpdater Updater(&LR);
    for (SmallVectorImpl<unsigned>::const_iterator I = WorkList.begin(),
         E = WorkList.end(); I != E; ++I) {
       SlotIndex Start, End;
       std::tie(Start, End) = Indexes->getMBBRange(*I);
       // Trim the live range in UseMBB.
       if (*I == UseMBBNum && Use.isValid())
         End = Use;
       else
         Map[MF->getBlockNumbered(*I)] = LiveOutPair(TheVNI, nullptr);
       Updater.add(Start, End, TheVNI);
    }
    return true;
  }

  // Multiple values were found, so transfer the work list to the LiveIn array
  // where UpdateSSA will use it as a work list.
  LiveIn.reserve(WorkList.size());
  for (SmallVectorImpl<unsigned>::const_iterator
       I = WorkList.begin(), E = WorkList.end(); I != E; ++I) {
    MachineBasicBlock *MBB = MF->getBlockNumbered(*I);
    addLiveInBlock(LR, DomTree->getNode(MBB));
    if (MBB == &UseMBB)
      LiveIn.back().Kill = Use;
  }

  return false;
}
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_instr_iterator
       I = MRI->reg_instr_begin(li->reg), E = MRI->reg_instr_end();
       I != E; ) {
    MachineInstr *UseMI = &*(I++);
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveQueryResult LRQ = li->Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
            << "Warning: Instr claims to read non-existent value in "
            << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLR.addSegment(LiveRange::Segment(VNI->def, VNI->def.getDeadSlot(), VNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLR.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        if (!LiveOut.insert(*PI))
          continue;
        SlotIndex Stop = getMBBEndIdx(*PI);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      if (!LiveOut.insert(*PI))
        continue;
      SlotIndex Stop = getMBBEndIdx(*PI);
      assert(li->getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveRange::iterator LRI = NewLR.FindSegmentContaining(VNI->def);
    assert(LRI != NewLR.end() && "Missing segment for PHI");
    if (LRI->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      NewLR.removeSegment(LRI->start, LRI->end);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed segments back.
  li->segments.swap(NewLR.segments);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
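
// A reduced model of the rebuild strategy above (hypothetical names, plain
// indices, one block, no PHIs): seed the new range with a one-slot dead
// segment per def, then stretch the nearest preceding def's segment to reach
// each remaining use. Defs left with their one-slot segment are the dead defs
// reported back to the caller.
#include <algorithm>
#include <vector>

struct Seg { unsigned start, end; };

std::vector<Seg> shrinkToUsesSketch(const std::vector<unsigned> &Defs,
                                    const std::vector<unsigned> &Uses) {
  std::vector<Seg> LR;
  for (unsigned D : Defs)
    LR.push_back({D, D + 1});         // minimal live segment per def
  for (unsigned U : Uses) {
    Seg *Best = nullptr;
    for (Seg &S : LR)                 // nearest def strictly before the use
      if (S.start < U && (!Best || S.start > Best->start))
        Best = &S;
    if (Best)
      Best->end = std::max(Best->end, U);
  }
  return LR;
}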
Example #13
void InterferenceCache::Entry::update(unsigned MBBNum) {
  SlotIndex Start, Stop;
  tie(Start, Stop) = Indexes->getMBBRange(MBBNum);

  // Use advanceTo only when possible.
  if (PrevPos != Start) {
    if (!PrevPos.isValid() || Start < PrevPos) {
      for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
        RegUnitInfo &RUI = RegUnits[i];
        RUI.VirtI.find(Start);
        RUI.FixedI = RUI.Fixed->find(Start);
      }
    } else {
      for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
        RegUnitInfo &RUI = RegUnits[i];
        RUI.VirtI.advanceTo(Start);
        if (RUI.FixedI != RUI.Fixed->end())
          RUI.FixedI = RUI.Fixed->advanceTo(RUI.FixedI, Start);
      }
    }
    PrevPos = Start;
  }

  MachineFunction::const_iterator MFI = MF->getBlockNumbered(MBBNum);
  BlockInterference *BI = &Blocks[MBBNum];
  ArrayRef<SlotIndex> RegMaskSlots;
  ArrayRef<const uint32_t*> RegMaskBits;
  for (;;) {
    BI->Tag = Tag;
    BI->First = BI->Last = SlotIndex();

    // Check for first interference from virtregs.
    for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
      LiveIntervalUnion::SegmentIter &I = RegUnits[i].VirtI;
      if (!I.valid())
        continue;
      SlotIndex StartI = I.start();
      if (StartI >= Stop)
        continue;
      if (!BI->First.isValid() || StartI < BI->First)
        BI->First = StartI;
    }

    // Same thing for fixed interference.
    for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
      LiveInterval::const_iterator I = RegUnits[i].FixedI;
      LiveInterval::const_iterator E = RegUnits[i].Fixed->end();
      if (I == E)
        continue;
      SlotIndex StartI = I->start;
      if (StartI >= Stop)
        continue;
      if (!BI->First.isValid() || StartI < BI->First)
        BI->First = StartI;
    }

    // Also check for register mask interference.
    RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
    RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
    SlotIndex Limit = BI->First.isValid() ? BI->First : Stop;
    for (unsigned i = 0, e = RegMaskSlots.size();
         i != e && RegMaskSlots[i] < Limit; ++i)
      if (MachineOperand::clobbersPhysReg(RegMaskBits[i], PhysReg)) {
        // Register mask i clobbers PhysReg before the LIU interference.
        BI->First = RegMaskSlots[i];
        break;
      }

    PrevPos = Stop;
    if (BI->First.isValid())
      break;

    // No interference in this block? Go ahead and precompute the next block.
    if (++MFI == MF->end())
      return;
    MBBNum = MFI->getNumber();
    BI = &Blocks[MBBNum];
    if (BI->Tag == Tag)
      return;
    tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
  }

  // Check for last interference in block.
  for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
    LiveIntervalUnion::SegmentIter &I = RegUnits[i].VirtI;
    if (!I.valid() || I.start() >= Stop)
      continue;
    I.advanceTo(Stop);
    bool Backup = !I.valid() || I.start() >= Stop;
    if (Backup)
      --I;
    SlotIndex StopI = I.stop();
    if (!BI->Last.isValid() || StopI > BI->Last)
      BI->Last = StopI;
    if (Backup)
      ++I;
  }

  // Fixed interference.
  for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) {
    LiveInterval::iterator &I = RegUnits[i].FixedI;
    LiveRange *LR = RegUnits[i].Fixed;
    if (I == LR->end() || I->start >= Stop)
      continue;
    I = LR->advanceTo(I, Stop);
    bool Backup = I == LR->end() || I->start >= Stop;
    if (Backup)
      --I;
    SlotIndex StopI = I->end;
    if (!BI->Last.isValid() || StopI > BI->Last)
      BI->Last = StopI;
    if (Backup)
      ++I;
  }

  // Also check for register mask interference.
  SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
  for (unsigned i = RegMaskSlots.size();
       i && RegMaskSlots[i-1].getDeadSlot() > Limit; --i)
    if (MachineOperand::clobbersPhysReg(RegMaskBits[i-1], PhysReg)) {
      // Register mask i-1 clobbers PhysReg after the LIU interference.
      // Model the regmask clobber as a dead def.
      BI->Last = RegMaskSlots[i-1].getDeadSlot();
      break;
    }
}
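
// A reduced sketch of the per-block summary being cached above (hypothetical
// names; no iterator reuse, regmask clobbers, or next-block precomputation):
// compute the first and last points inside [Start, Stop) touched by any of
// several sorted segment lists, which is essentially what a BlockInterference
// record stores.
#include <vector>

struct Seg { unsigned start, end; };
struct BlockSummary { unsigned First = 0, Last = 0; bool Valid = false; };

BlockSummary updateSketch(const std::vector<std::vector<Seg>> &Units,
                          unsigned Start, unsigned Stop) {
  BlockSummary BI;
  for (const auto &U : Units)
    for (const Seg &S : U) {
      if (S.end <= Start || S.start >= Stop)
        continue;                     // segment misses this block entirely
      if (!BI.Valid || S.start < BI.First)
        BI.First = S.start;
      if (!BI.Valid || S.end > BI.Last)
        BI.Last = S.end;
      BI.Valid = true;
    }
  return BI;
}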
Example #14
void HexagonExpandCondsets::updateDeadsInRange(unsigned Reg, LaneBitmask LM,
      LiveRange &Range) {
  assert(TargetRegisterInfo::isVirtualRegister(Reg));
  if (Range.empty())
    return;

  // Return two booleans: { def-modifies-reg, def-covers-reg }.
  auto IsRegDef = [this,Reg,LM] (MachineOperand &Op) -> std::pair<bool,bool> {
    if (!Op.isReg() || !Op.isDef())
      return { false, false };
    unsigned DR = Op.getReg(), DSR = Op.getSubReg();
    if (!TargetRegisterInfo::isVirtualRegister(DR) || DR != Reg)
      return { false, false };
    LaneBitmask SLM = getLaneMask(DR, DSR);
    LaneBitmask A = SLM & LM;
    return { A.any(), A == SLM };
  };

  // The splitting step will create pairs of predicated definitions without
  // any implicit uses (since implicit uses would interfere with predication).
  // This can cause the reaching defs to become dead after live range
  // recomputation, even though they are not really dead.
  // We need to identify predicated defs that need implicit uses, and
  // dead defs that are not really dead, and correct both problems.

  auto Dominate = [this] (SetVector<MachineBasicBlock*> &Defs,
                          MachineBasicBlock *Dest) -> bool {
    for (MachineBasicBlock *D : Defs)
      if (D != Dest && MDT->dominates(D, Dest))
        return true;

    MachineBasicBlock *Entry = &Dest->getParent()->front();
    SetVector<MachineBasicBlock*> Work(Dest->pred_begin(), Dest->pred_end());
    for (unsigned i = 0; i < Work.size(); ++i) {
      MachineBasicBlock *B = Work[i];
      if (Defs.count(B))
        continue;
      if (B == Entry)
        return false;
      for (auto *P : B->predecessors())
        Work.insert(P);
    }
    return true;
  };

  // First, try to extend live range within individual basic blocks. This
  // will leave us only with dead defs that do not reach any predicated
  // defs in the same block.
  SetVector<MachineBasicBlock*> Defs;
  SmallVector<SlotIndex,4> PredDefs;
  for (auto &Seg : Range) {
    if (!Seg.start.isRegister())
      continue;
    MachineInstr *DefI = LIS->getInstructionFromIndex(Seg.start);
    Defs.insert(DefI->getParent());
    if (HII->isPredicated(*DefI))
      PredDefs.push_back(Seg.start);
  }

  SmallVector<SlotIndex,8> Undefs;
  LiveInterval &LI = LIS->getInterval(Reg);
  LI.computeSubRangeUndefs(Undefs, LM, *MRI, *LIS->getSlotIndexes());

  for (auto &SI : PredDefs) {
    MachineBasicBlock *BB = LIS->getMBBFromIndex(SI);
    auto P = Range.extendInBlock(Undefs, LIS->getMBBStartIdx(BB), SI);
    if (P.first != nullptr || P.second)
      SI = SlotIndex();
  }

  // Calculate reachability for those predicated defs that were not handled
  // by the in-block extension.
  SmallVector<SlotIndex,4> ExtTo;
  for (auto &SI : PredDefs) {
    if (!SI.isValid())
      continue;
    MachineBasicBlock *BB = LIS->getMBBFromIndex(SI);
    if (BB->pred_empty())
      continue;
    // If the defs from this range reach SI via all predecessors, it is live.
    // It can happen that SI is reached by the defs through some paths, but
    // not all. In the IR coming into this optimization, SI would not be
    // considered live, since the defs would then not jointly dominate SI.
    // That means that SI is an overwriting def, and no implicit use is
    // needed at this point. Do not add SI to the extension points, since
    // extendToIndices will abort if there is no joint dominance.
    // If the abort was avoided by adding extra undefs added to Undefs,
    // extendToIndices could actually indicate that SI is live, contrary
    // to the original IR.
    if (Dominate(Defs, BB))
      ExtTo.push_back(SI);
  }

  if (!ExtTo.empty())
    LIS->extendToIndices(Range, ExtTo, Undefs);

  // Remove <dead> flags from all defs that are not dead after live range
  // extension, and collect all def operands. They will be used to generate
  // the necessary implicit uses.
  // At the same time, add <dead> flag to all defs that are actually dead.
  // This can happen, for example, when a mux with identical inputs is
  // replaced with a COPY: the use of the predicate register disappears and
  // the def can become dead.
  std::set<RegisterRef> DefRegs;
  for (auto &Seg : Range) {
    if (!Seg.start.isRegister())
      continue;
    MachineInstr *DefI = LIS->getInstructionFromIndex(Seg.start);
    for (auto &Op : DefI->operands()) {
      auto P = IsRegDef(Op);
      if (P.second && Seg.end.isDead()) {
        Op.setIsDead(true);
      } else if (P.first) {
        DefRegs.insert(Op);
        Op.setIsDead(false);
      }
    }
  }

  // Now, add implicit uses to each predicated def that is reached
  // by other defs.
  for (auto &Seg : Range) {
    if (!Seg.start.isRegister() || !Range.liveAt(Seg.start.getPrevSlot()))
      continue;
    MachineInstr *DefI = LIS->getInstructionFromIndex(Seg.start);
    if (!HII->isPredicated(*DefI))
      continue;
    // Construct the set of all necessary implicit uses, based on the def
    // operands in the instruction.
    std::set<RegisterRef> ImpUses;
    for (auto &Op : DefI->operands())
      if (Op.isReg() && Op.isDef() && DefRegs.count(Op))
        ImpUses.insert(Op);
    if (ImpUses.empty())
      continue;
    MachineFunction &MF = *DefI->getParent()->getParent();
    for (RegisterRef R : ImpUses)
      MachineInstrBuilder(MF, DefI).addReg(R.Reg, RegState::Implicit, R.Sub);
  }
}
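
// The Dominate lambda above, restated as a standalone check on an abstract
// CFG (hypothetical names; the MDT fast path for a single dominating def is
// omitted): the def blocks jointly dominate Dest iff no backward walk from
// Dest can reach the entry block without passing through a def block.
#include <vector>

struct BB { std::vector<int> preds; };

bool jointlyDominateSketch(const std::vector<BB> &CFG,
                           const std::vector<bool> &IsDef, int Dest,
                           int Entry) {
  std::vector<int> Work(CFG[Dest].preds);
  std::vector<bool> Seen(CFG.size(), false);
  for (size_t i = 0; i != Work.size(); ++i) {
    int B = Work[i];
    if (Seen[B] || IsDef[B])
      continue;                      // the walk stops at def blocks
    Seen[B] = true;
    if (B == Entry)
      return false;                  // a def-free path to entry exists
    for (int P : CFG[B].preds)
      Work.push_back(P);
  }
  return true;
}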